/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
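
/*
 * For illustration only (not part of the driver): a minimal userspace sketch
 * of opening an OA stream via DRM_IOCTL_I915_PERF_OPEN, using the uapi
 * declared in include/uapi/drm/i915_drm.h. The metrics_set_id would be
 * discovered via sysfs, error handling is elided, and drm_fd/drmIoctl() are
 * assumed to come from an already initialized libdrm context.
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */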
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say, we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped, OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
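 *
 *   As a worked example using the figures above (and inferring the counter
 *   count from the A45_B8_C8 format name): 61 counters (45 A + 8 B + 8 C)
 *   pack into a 256 byte report, i.e. roughly four bytes per counter, while
 *   PERF_FORMAT_GROUP + PERF_FORMAT_ID would need 16 bytes per counter, or
 *   roughly 1KB for the same set - about a 4x inflation of the memory
 *   bandwidth needed at a given sampling frequency.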
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering EPOLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
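 *
 * As a concrete (illustrative) timeline using the constants defined below:
 * the hrtimer callback runs every %POLL_PERIOD (5ms) and samples the hardware
 * tail, recording it as the aging tail along with a timestamp. On a later
 * callback, once that tail is more than %OA_TAIL_MARGIN_NSEC (100us) old, it
 * is promoted to the aged tail that read()s may consume up to, and a newer
 * hardware tail can start aging in its place.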
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK		0x3f
#define OAREPORT_REASON_SHIFT		19
#define OAREPORT_REASON_TIMER		(1<<0)
#define OAREPORT_REASON_CTX_SWITCH	(1<<3)
#define OAREPORT_REASON_CLK_RATIO	(1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25Mhz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
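
/*
 * For reference (illustrative arithmetic, not used by the code): the OA unit
 * triggers a periodic report when the timestamp bit selected by the sampling
 * exponent toggles, giving a period of 2^(exponent + 1) timestamp ticks.
 * Taking Haswell's 12.5MHz timestamp frequency (half of which is the 6.25MHz
 * hard limit noted above):
 *
 *   exponent 0  -> 2^1  / 12.5MHz = 160ns  (the fastest rate noted above)
 *   exponent 31 -> 2^32 / 12.5MHz = ~343s  (OA_EXPONENT_MAX)
 *   exponent 63 -> 2^64 / 12.5MHz = ~47 thousand years
 */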
static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;
};

static void free_oa_config(struct drm_i915_private *dev_priv,
			   struct i915_oa_config *oa_config)
{
	/*
	 * The regs lists may hold an ERR_PTR left over from a failed
	 * allocation of a partially created config; only kfree() valid
	 * pointers (kfree(NULL) is a no-op).
	 */
	if (!IS_ERR(oa_config->flex_regs))
		kfree(oa_config->flex_regs);
	if (!IS_ERR(oa_config->b_counter_regs))
		kfree(oa_config->b_counter_regs);
	if (!IS_ERR(oa_config->mux_regs))
		kfree(oa_config->mux_regs);
	kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
			  struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;

	free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
			 int metrics_set,
			 struct i915_oa_config **out_config)
{
	int ret;

	if (metrics_set == 1) {
		*out_config = &dev_priv->perf.oa.test_config;
		atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		return ret;

	*out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (!*out_config)
		ret = -EINVAL;
	else
		atomic_inc(&(*out_config)->ref_count);

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return ret;
}

static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}
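
/*
 * A worked example of the OA_TAKEN() modular arithmetic used below
 * (illustrative only): with OA_BUFFER_SIZE = 16M the mask is 0xffffff, so
 * given a tail of 0x40 that has wrapped past the end of the buffer and a
 * head of 0xffffc0 that hasn't yet:
 *
 *	OA_TAKEN(0x40, 0xffffc0) == (0x40 - 0xffffc0) & 0xffffff == 0x80
 *
 * i.e. 128 bytes (two 64 byte reports) are available to read.
 */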
/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset) if there is currently
	 * a read() in progress.
	 */
	head = dev_priv->perf.oa.oa_buffer.head;

	aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
	aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

	hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
			INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
				  hw_tail);
		}
	}

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
		false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
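
/*
 * For illustration only (not part of the driver): userspace consumes a
 * stream as a sequence of variable sized records, each led by a
 * struct drm_i915_perf_record_header, e.g.:
 *
 *	uint8_t buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (size_t offset = 0; offset < (size_t)len;) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report((const uint8_t *)(header + 1));
 *
 *		offset += header->size;
 *	}
 *
 * where process_oa_report() is a hypothetical stand-in for whatever format
 * specific parsing userspace does; the loop itself follows directly from the
 * record layout described above.
 */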
/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  OAREPORT_REASON_MASK);
		if (reason == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		/*
		 * XXX: Just keep the lower 21 bits for now since I'm not
		 * entirely sure if the HW touches any of the higher bits in
		 * this field
		 */
		ctx_id = report32[2] & 0x1fffff;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!dev_priv->perf.oa.exclusive_stream->ctx ||
		    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
		    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
		     dev_priv->perf.oa.specific_ctx_id) ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (dev_priv->perf.oa.exclusive_stream->ctx &&
			    dev_priv->perf.oa.specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock,
				       flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus = I915_READ(GEN8_OASTATUS);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = I915_READ(GEN8_OASTATUS);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		I915_WRITE(GEN8_OASTATUS,
			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN7_OASTATUS2,
			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
			    OA_MEM_SELECT_GGTT));
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock,
				       flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus1 = I915_READ(GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		oastatus1 = I915_READ(GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		dev_priv->perf.oa.gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}
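
/*
 * For illustration only (not part of the driver): a userspace consumer that
 * opened the stream without I915_PERF_FLAG_FD_NONBLOCK can either rely on
 * read() blocking via i915_oa_wait_unlocked() below, or wait explicitly:
 *
 *	struct pollfd fd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&fd, 1, -1) > 0 && (fd.revents & POLLIN))
 *		len = read(stream_fd, buf, sizeof(buf));
 *
 * Either way wakeups are driven by the hrtimer callback noted above, so new
 * data becomes visible at a granularity of roughly POLL_PERIOD.
 */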
/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!dev_priv->perf.oa.periodic)
		return -EIO;

	return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
					oa_buffer_check_unlocked(dev_priv));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
	} else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];
		struct intel_ring *ring;
		int ret;

		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
		if (ret)
			return ret;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 *
		 * NB: implied RCS engine...
		 */
		ring = engine->context_pin(engine, stream->ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (IS_ERR(ring))
			return PTR_ERR(ring);


		/*
		 * Explicitly track the ID (instead of calling
		 * i915_ggtt_offset() on the fly) considering the difference
		 * with gen8+ and execlists
		 */
		dev_priv->perf.oa.specific_ctx_id =
			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
	}

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
	} else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];

		mutex_lock(&dev_priv->drm.struct_mutex);

		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
		engine->context_unpin(engine, stream->ctx);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}
}

static void
free_oa_buffer(struct drm_i915_private *i915)
{
	mutex_lock(&i915->drm.struct_mutex);

	i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
	i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
	i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);

	i915->perf.oa.oa_buffer.vma = NULL;
	i915->perf.oa.oa_buffer.vaddr = NULL;

	mutex_unlock(&i915->drm.struct_mutex);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	dev_priv->perf.oa.exclusive_stream = NULL;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);

	free_oa_buffer(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	put_oa_config(dev_priv, stream->oa_config);

	if (dev_priv->perf.oa.spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 dev_priv->perf.oa.spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN7_OABUFFER, gtt_offset);

	I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	dev_priv->perf.oa.gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/* Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}

static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	I915_WRITE(GEN8_OASTATUS, 0);
	I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	I915_WRITE(GEN8_OABUFFER, gtt_offset |
		   OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
	I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/*
	 * Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}

static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	int ret;

	if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		return ret;

	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

	bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		DRM_ERROR("Failed to allocate OA buffer\n");
		ret = PTR_ERR(bo);
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	/* PreHSW required 512K alignment, HSW requires 16M */
	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}
	dev_priv->perf.oa.oa_buffer.vma = vma;

	dev_priv->perf.oa.oa_buffer.vaddr =
		i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
		ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
		goto err_unpin;
	}

	dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);

	DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
			 dev_priv->perf.oa.oa_buffer.vaddr);

	goto unlock;

err_unpin:
	__i915_vma_unpin(vma);

err_unref:
	i915_gem_object_put(bo);

	dev_priv->perf.oa.oa_buffer.vaddr = NULL;
	dev_priv->perf.oa.oa_buffer.vma = NULL;

unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}

static void config_oa_regs(struct drm_i915_private *dev_priv,
			   const struct i915_oa_reg *regs,
			   u32 n_regs)
{
	u32 i;

	for (i = 0; i < n_regs; i++) {
		const struct i915_oa_reg *reg = regs + i;

		I915_WRITE(reg->addr, reg->value);
	}
}

static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
				 const struct i915_oa_config *oa_config)
{
	/*
	 * PRM:
	 *
	 * OA unit is using “crclk” for its functionality. When trunk
	 * level clock gating takes place, OA clock would be gated,
	 * unable to count the events from non-render clock domain.
	 * Render clock gating must be disabled when OA is enabled to
	 * count the events from non-render domain. Unit level clock
	 * gating for RCS should also be disabled.
	 */
	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
				    ~GEN7_DOP_CLOCK_GATE_ENABLE));
	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
				  GEN6_CSUNIT_CLOCK_GATE_DISABLE));

	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);

	/* It apparently takes a fairly long time for a new MUX
	 * configuration to be applied after these register writes.
	 * This delay duration was derived empirically based on the
	 * render_basic config but hopefully it covers the maximum
	 * configuration latency.
	 *
	 * As a fallback, the checks in _append_oa_reports() to skip
	 * invalid OA reports do also seem to work to discard reports
	 * generated before this config has completed - albeit not
	 * silently.
1539 * 1540 * Unfortunately this is essentially a magic number, since we 1541 * don't currently know of a reliable mechanism for predicting 1542 * how long the MUX config will take to apply and besides 1543 * seeing invalid reports we don't know of a reliable way to 1544 * explicitly check that the MUX config has landed. 1545 * 1546 * It's even possible we've miss characterized the underlying 1547 * problem - it just seems like the simplest explanation why 1548 * a delay at this location would mitigate any invalid reports. 1549 */ 1550 usleep_range(15000, 20000); 1551 1552 config_oa_regs(dev_priv, oa_config->b_counter_regs, 1553 oa_config->b_counter_regs_len); 1554 1555 return 0; 1556 } 1557 1558 static void hsw_disable_metric_set(struct drm_i915_private *dev_priv) 1559 { 1560 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) & 1561 ~GEN6_CSUNIT_CLOCK_GATE_DISABLE)); 1562 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) | 1563 GEN7_DOP_CLOCK_GATE_ENABLE)); 1564 1565 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & 1566 ~GT_NOA_ENABLE)); 1567 } 1568 1569 /* 1570 * NB: It must always remain pointer safe to run this even if the OA unit 1571 * has been disabled. 1572 * 1573 * It's fine to put out-of-date values into these per-context registers 1574 * in the case that the OA unit has been disabled. 1575 */ 1576 static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, 1577 u32 *reg_state, 1578 const struct i915_oa_config *oa_config) 1579 { 1580 struct drm_i915_private *dev_priv = ctx->i915; 1581 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; 1582 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; 1583 /* The MMIO offsets for Flex EU registers aren't contiguous */ 1584 u32 flex_mmio[] = { 1585 i915_mmio_reg_offset(EU_PERF_CNTL0), 1586 i915_mmio_reg_offset(EU_PERF_CNTL1), 1587 i915_mmio_reg_offset(EU_PERF_CNTL2), 1588 i915_mmio_reg_offset(EU_PERF_CNTL3), 1589 i915_mmio_reg_offset(EU_PERF_CNTL4), 1590 i915_mmio_reg_offset(EU_PERF_CNTL5), 1591 i915_mmio_reg_offset(EU_PERF_CNTL6), 1592 }; 1593 int i; 1594 1595 reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL); 1596 reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent << 1597 GEN8_OA_TIMER_PERIOD_SHIFT) | 1598 (dev_priv->perf.oa.periodic ? 1599 GEN8_OA_TIMER_ENABLE : 0) | 1600 GEN8_OA_COUNTER_RESUME; 1601 1602 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) { 1603 u32 state_offset = ctx_flexeu0 + i * 2; 1604 u32 mmio = flex_mmio[i]; 1605 1606 /* 1607 * This arbitrary default will select the 'EU FPU0 Pipeline 1608 * Active' event. In the future it's anticipated that there 1609 * will be an explicit 'No Event' we can select, but not yet... 1610 */ 1611 u32 value = 0; 1612 1613 if (oa_config) { 1614 u32 j; 1615 1616 for (j = 0; j < oa_config->flex_regs_len; j++) { 1617 if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) { 1618 value = oa_config->flex_regs[j].value; 1619 break; 1620 } 1621 } 1622 } 1623 1624 reg_state[state_offset] = mmio; 1625 reg_state[state_offset+1] = value; 1626 } 1627 } 1628 1629 /* 1630 * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This 1631 * is only used by the kernel context. 
1632 */ 1633 static int gen8_emit_oa_config(struct drm_i915_gem_request *req, 1634 const struct i915_oa_config *oa_config) 1635 { 1636 struct drm_i915_private *dev_priv = req->i915; 1637 /* The MMIO offsets for Flex EU registers aren't contiguous */ 1638 u32 flex_mmio[] = { 1639 i915_mmio_reg_offset(EU_PERF_CNTL0), 1640 i915_mmio_reg_offset(EU_PERF_CNTL1), 1641 i915_mmio_reg_offset(EU_PERF_CNTL2), 1642 i915_mmio_reg_offset(EU_PERF_CNTL3), 1643 i915_mmio_reg_offset(EU_PERF_CNTL4), 1644 i915_mmio_reg_offset(EU_PERF_CNTL5), 1645 i915_mmio_reg_offset(EU_PERF_CNTL6), 1646 }; 1647 u32 *cs; 1648 int i; 1649 1650 cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4); 1651 if (IS_ERR(cs)) 1652 return PTR_ERR(cs); 1653 1654 *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1); 1655 1656 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); 1657 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 1658 (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | 1659 GEN8_OA_COUNTER_RESUME; 1660 1661 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) { 1662 u32 mmio = flex_mmio[i]; 1663 1664 /* 1665 * This arbitrary default will select the 'EU FPU0 Pipeline 1666 * Active' event. In the future it's anticipated that there 1667 * will be an explicit 'No Event' we can select, but not 1668 * yet... 1669 */ 1670 u32 value = 0; 1671 1672 if (oa_config) { 1673 u32 j; 1674 1675 for (j = 0; j < oa_config->flex_regs_len; j++) { 1676 if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) { 1677 value = oa_config->flex_regs[j].value; 1678 break; 1679 } 1680 } 1681 } 1682 1683 *cs++ = mmio; 1684 *cs++ = value; 1685 } 1686 1687 *cs++ = MI_NOOP; 1688 intel_ring_advance(req, cs); 1689 1690 return 0; 1691 } 1692 1693 static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv, 1694 const struct i915_oa_config *oa_config) 1695 { 1696 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1697 struct i915_gem_timeline *timeline; 1698 struct drm_i915_gem_request *req; 1699 int ret; 1700 1701 lockdep_assert_held(&dev_priv->drm.struct_mutex); 1702 1703 i915_gem_retire_requests(dev_priv); 1704 1705 req = i915_gem_request_alloc(engine, dev_priv->kernel_context); 1706 if (IS_ERR(req)) 1707 return PTR_ERR(req); 1708 1709 ret = gen8_emit_oa_config(req, oa_config); 1710 if (ret) { 1711 i915_add_request(req); 1712 return ret; 1713 } 1714 1715 /* Queue this switch after all other activity */ 1716 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { 1717 struct drm_i915_gem_request *prev; 1718 struct intel_timeline *tl; 1719 1720 tl = &timeline->engine[engine->id]; 1721 prev = i915_gem_active_raw(&tl->last_request, 1722 &dev_priv->drm.struct_mutex); 1723 if (prev) 1724 i915_sw_fence_await_sw_fence_gfp(&req->submit, 1725 &prev->submit, 1726 GFP_KERNEL); 1727 } 1728 1729 i915_add_request(req); 1730 1731 return 0; 1732 } 1733 1734 /* 1735 * Manages updating the per-context aspects of the OA stream 1736 * configuration across all contexts. 1737 * 1738 * The awkward consideration here is that OACTXCONTROL controls the 1739 * exponent for periodic sampling which is primarily used for system 1740 * wide profiling where we'd like a consistent sampling period even in 1741 * the face of context switches. 1742 * 1743 * Our approach of updating the register state context (as opposed to 1744 * say using a workaround batch buffer) ensures that the hardware 1745 * won't automatically reload an out-of-date timer exponent even 1746 * transiently before a WA BB could be parsed. 
1747 * 1748 * This function needs to: 1749 * - Ensure the currently running context's per-context OA state is 1750 * updated 1751 * - Ensure that all existing contexts will have the correct per-context 1752 * OA state if they are scheduled for use. 1753 * - Ensure any new contexts will be initialized with the correct 1754 * per-context OA state. 1755 * 1756 * Note: it's only the RCS/Render context that has any OA state. 1757 */ 1758 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, 1759 const struct i915_oa_config *oa_config, 1760 bool interruptible) 1761 { 1762 struct i915_gem_context *ctx; 1763 int ret; 1764 unsigned int wait_flags = I915_WAIT_LOCKED; 1765 1766 if (interruptible) { 1767 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 1768 if (ret) 1769 return ret; 1770 1771 wait_flags |= I915_WAIT_INTERRUPTIBLE; 1772 } else { 1773 mutex_lock(&dev_priv->drm.struct_mutex); 1774 } 1775 1776 /* Switch away from any user context. */ 1777 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); 1778 if (ret) 1779 goto out; 1780 1781 /* 1782 * The OA register config is setup through the context image. This image 1783 * might be written to by the GPU on context switch (in particular on 1784 * lite-restore). This means we can't safely update a context's image, 1785 * if this context is scheduled/submitted to run on the GPU. 1786 * 1787 * We could emit the OA register config through the batch buffer but 1788 * this might leave small interval of time where the OA unit is 1789 * configured at an invalid sampling period. 1790 * 1791 * So far the best way to work around this issue seems to be draining 1792 * the GPU from any submitted work. 1793 */ 1794 ret = i915_gem_wait_for_idle(dev_priv, wait_flags); 1795 if (ret) 1796 goto out; 1797 1798 /* Update all contexts now that we've stalled the submission. */ 1799 list_for_each_entry(ctx, &dev_priv->contexts.list, link) { 1800 struct intel_context *ce = &ctx->engine[RCS]; 1801 u32 *regs; 1802 1803 /* OA settings will be set upon first use */ 1804 if (!ce->state) 1805 continue; 1806 1807 regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); 1808 if (IS_ERR(regs)) { 1809 ret = PTR_ERR(regs); 1810 goto out; 1811 } 1812 1813 ce->state->obj->mm.dirty = true; 1814 regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); 1815 1816 gen8_update_reg_state_unlocked(ctx, regs, oa_config); 1817 1818 i915_gem_object_unpin_map(ce->state->obj); 1819 } 1820 1821 out: 1822 mutex_unlock(&dev_priv->drm.struct_mutex); 1823 1824 return ret; 1825 } 1826 1827 static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, 1828 const struct i915_oa_config *oa_config) 1829 { 1830 int ret; 1831 1832 /* 1833 * We disable slice/unslice clock ratio change reports on SKL since 1834 * they are too noisy. The HW generates a lot of redundant reports 1835 * where the ratio hasn't really changed causing a lot of redundant 1836 * work to processes and increasing the chances we'll hit buffer 1837 * overruns. 1838 * 1839 * Although we don't currently use the 'disable overrun' OABUFFER 1840 * feature it's worth noting that clock ratio reports have to be 1841 * disabled before considering to use that feature since the HW doesn't 1842 * correctly block these reports. 1843 * 1844 * Currently none of the high-level metrics we have depend on knowing 1845 * this ratio to normalize. 1846 * 1847 * Note: This register is not power context saved and restored, but 1848 * that's OK considering that we disable RC6 while the OA unit is 1849 * enabled. 
1850 * 1851 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to 1852 * be read back from automatically triggered reports, as part of the 1853 * RPT_ID field. 1854 */ 1855 if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) { 1856 I915_WRITE(GEN8_OA_DEBUG, 1857 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 1858 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); 1859 } 1860 1861 /* 1862 * Update all contexts prior writing the mux configurations as we need 1863 * to make sure all slices/subslices are ON before writing to NOA 1864 * registers. 1865 */ 1866 ret = gen8_configure_all_contexts(dev_priv, oa_config, true); 1867 if (ret) 1868 return ret; 1869 1870 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); 1871 1872 config_oa_regs(dev_priv, oa_config->b_counter_regs, 1873 oa_config->b_counter_regs_len); 1874 1875 return 0; 1876 } 1877 1878 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) 1879 { 1880 /* Reset all contexts' slices/subslices configurations. */ 1881 gen8_configure_all_contexts(dev_priv, NULL, false); 1882 1883 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & 1884 ~GT_NOA_ENABLE)); 1885 1886 } 1887 1888 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) 1889 { 1890 /* Reset all contexts' slices/subslices configurations. */ 1891 gen8_configure_all_contexts(dev_priv, NULL, false); 1892 1893 /* Make sure we disable noa to save power. */ 1894 I915_WRITE(RPM_CONFIG1, 1895 I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE); 1896 } 1897 1898 static void gen7_oa_enable(struct drm_i915_private *dev_priv) 1899 { 1900 /* 1901 * Reset buf pointers so we don't forward reports from before now. 1902 * 1903 * Think carefully if considering trying to avoid this, since it 1904 * also ensures status flags and the buffer itself are cleared 1905 * in error paths, and we have checks for invalid reports based 1906 * on the assumption that certain fields are written to zeroed 1907 * memory which this helps maintains. 1908 */ 1909 gen7_init_oa_buffer(dev_priv); 1910 1911 if (dev_priv->perf.oa.exclusive_stream->enabled) { 1912 struct i915_gem_context *ctx = 1913 dev_priv->perf.oa.exclusive_stream->ctx; 1914 u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; 1915 1916 bool periodic = dev_priv->perf.oa.periodic; 1917 u32 period_exponent = dev_priv->perf.oa.period_exponent; 1918 u32 report_format = dev_priv->perf.oa.oa_buffer.format; 1919 1920 I915_WRITE(GEN7_OACONTROL, 1921 (ctx_id & GEN7_OACONTROL_CTX_MASK) | 1922 (period_exponent << 1923 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | 1924 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | 1925 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | 1926 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | 1927 GEN7_OACONTROL_ENABLE); 1928 } else 1929 I915_WRITE(GEN7_OACONTROL, 0); 1930 } 1931 1932 static void gen8_oa_enable(struct drm_i915_private *dev_priv) 1933 { 1934 u32 report_format = dev_priv->perf.oa.oa_buffer.format; 1935 1936 /* 1937 * Reset buf pointers so we don't forward reports from before now. 1938 * 1939 * Think carefully if considering trying to avoid this, since it 1940 * also ensures status flags and the buffer itself are cleared 1941 * in error paths, and we have checks for invalid reports based 1942 * on the assumption that certain fields are written to zeroed 1943 * memory which this helps maintains. 
1944 */ 1945 gen8_init_oa_buffer(dev_priv); 1946 1947 /* 1948 * Note: we don't rely on the hardware to perform single context 1949 * filtering and instead filter on the cpu based on the context-id 1950 * field of reports 1951 */ 1952 I915_WRITE(GEN8_OACONTROL, (report_format << 1953 GEN8_OA_REPORT_FORMAT_SHIFT) | 1954 GEN8_OA_COUNTER_ENABLE); 1955 } 1956 1957 /** 1958 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream 1959 * @stream: An i915 perf stream opened for OA metrics 1960 * 1961 * [Re]enables hardware periodic sampling according to the period configured 1962 * when opening the stream. This also starts a hrtimer that will periodically 1963 * check for data in the circular OA buffer for notifying userspace (e.g. 1964 * during a read() or poll()). 1965 */ 1966 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 1967 { 1968 struct drm_i915_private *dev_priv = stream->dev_priv; 1969 1970 dev_priv->perf.oa.ops.oa_enable(dev_priv); 1971 1972 if (dev_priv->perf.oa.periodic) 1973 hrtimer_start(&dev_priv->perf.oa.poll_check_timer, 1974 ns_to_ktime(POLL_PERIOD), 1975 HRTIMER_MODE_REL_PINNED); 1976 } 1977 1978 static void gen7_oa_disable(struct drm_i915_private *dev_priv) 1979 { 1980 I915_WRITE(GEN7_OACONTROL, 0); 1981 } 1982 1983 static void gen8_oa_disable(struct drm_i915_private *dev_priv) 1984 { 1985 I915_WRITE(GEN8_OACONTROL, 0); 1986 } 1987 1988 /** 1989 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 1990 * @stream: An i915 perf stream opened for OA metrics 1991 * 1992 * Stops the OA unit from periodically writing counter reports into the 1993 * circular OA buffer. This also stops the hrtimer that periodically checks for 1994 * data in the circular OA buffer, for notifying userspace. 1995 */ 1996 static void i915_oa_stream_disable(struct i915_perf_stream *stream) 1997 { 1998 struct drm_i915_private *dev_priv = stream->dev_priv; 1999 2000 dev_priv->perf.oa.ops.oa_disable(dev_priv); 2001 2002 if (dev_priv->perf.oa.periodic) 2003 hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); 2004 } 2005 2006 static const struct i915_perf_stream_ops i915_oa_stream_ops = { 2007 .destroy = i915_oa_stream_destroy, 2008 .enable = i915_oa_stream_enable, 2009 .disable = i915_oa_stream_disable, 2010 .wait_unlocked = i915_oa_wait_unlocked, 2011 .poll_wait = i915_oa_poll_wait, 2012 .read = i915_oa_read, 2013 }; 2014 2015 /** 2016 * i915_oa_stream_init - validate combined props for OA stream and init 2017 * @stream: An i915 perf stream 2018 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 2019 * @props: The property state that configures stream (individually validated) 2020 * 2021 * While read_properties_unlocked() validates properties in isolation it 2022 * doesn't ensure that the combination necessarily makes sense. 2023 * 2024 * At this point it has been determined that userspace wants a stream of 2025 * OA metrics, but still we need to further validate the combined 2026 * properties are OK. 2027 * 2028 * If the configuration makes sense then we can allocate memory for 2029 * a circular OA buffer and apply the requested metric set configuration. 2030 * 2031 * Returns: zero on success or a negative error code. 
2032 */ 2033 static int i915_oa_stream_init(struct i915_perf_stream *stream, 2034 struct drm_i915_perf_open_param *param, 2035 struct perf_open_properties *props) 2036 { 2037 struct drm_i915_private *dev_priv = stream->dev_priv; 2038 int format_size; 2039 int ret; 2040 2041 /* If the sysfs metrics/ directory wasn't registered for some 2042 * reason then don't let userspace try their luck with config 2043 * IDs 2044 */ 2045 if (!dev_priv->perf.metrics_kobj) { 2046 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 2047 return -EINVAL; 2048 } 2049 2050 if (!(props->sample_flags & SAMPLE_OA_REPORT)) { 2051 DRM_DEBUG("Only OA report sampling supported\n"); 2052 return -EINVAL; 2053 } 2054 2055 if (!dev_priv->perf.oa.ops.init_oa_buffer) { 2056 DRM_DEBUG("OA unit not supported\n"); 2057 return -ENODEV; 2058 } 2059 2060 /* To avoid the complexity of having to accurately filter 2061 * counter reports and marshal to the appropriate client 2062 * we currently only allow exclusive access 2063 */ 2064 if (dev_priv->perf.oa.exclusive_stream) { 2065 DRM_DEBUG("OA unit already in use\n"); 2066 return -EBUSY; 2067 } 2068 2069 if (!props->oa_format) { 2070 DRM_DEBUG("OA report format not specified\n"); 2071 return -EINVAL; 2072 } 2073 2074 /* We set up some ratelimit state to potentially throttle any _NOTES 2075 * about spurious, invalid OA reports which we don't forward to 2076 * userspace. 2077 * 2078 * The initialization is associated with opening the stream (not driver 2079 * init) considering we print a _NOTE about any throttling when closing 2080 * the stream instead of waiting until driver _fini which no one would 2081 * ever see. 2082 * 2083 * Using the same limiting factors as printk_ratelimit() 2084 */ 2085 ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs, 2086 5 * HZ, 10); 2087 /* Since we use a DRM_NOTE for spurious reports it would be 2088 * inconsistent to let __ratelimit() automatically print a warning for 2089 * throttling. 2090 */ 2091 ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs, 2092 RATELIMIT_MSG_ON_RELEASE); 2093 2094 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 2095 2096 format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size; 2097 2098 stream->sample_flags |= SAMPLE_OA_REPORT; 2099 stream->sample_size += format_size; 2100 2101 dev_priv->perf.oa.oa_buffer.format_size = format_size; 2102 if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0)) 2103 return -EINVAL; 2104 2105 dev_priv->perf.oa.oa_buffer.format = 2106 dev_priv->perf.oa.oa_formats[props->oa_format].format; 2107 2108 dev_priv->perf.oa.periodic = props->oa_periodic; 2109 if (dev_priv->perf.oa.periodic) 2110 dev_priv->perf.oa.period_exponent = props->oa_period_exponent; 2111 2112 if (stream->ctx) { 2113 ret = oa_get_render_ctx_id(stream); 2114 if (ret) 2115 return ret; 2116 } 2117 2118 ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config); 2119 if (ret) 2120 goto err_config; 2121 2122 /* PRM - observability performance counters: 2123 * 2124 * OACONTROL, performance counter enable, note: 2125 * 2126 * "When this bit is set, in order to have coherent counts, 2127 * RC6 power state and trunk clock gating must be disabled. 2128 * This can be achieved by programming MMIO registers as 2129 * 0xA094=0 and 0xA090[31]=1" 2130 * 2131 * In our case we are expecting that taking pm + FORCEWAKE 2132 * references will effectively disable RC6. 
2133 */ 2134 intel_runtime_pm_get(dev_priv); 2135 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2136 2137 ret = alloc_oa_buffer(dev_priv); 2138 if (ret) 2139 goto err_oa_buf_alloc; 2140 2141 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, 2142 stream->oa_config); 2143 if (ret) 2144 goto err_enable; 2145 2146 stream->ops = &i915_oa_stream_ops; 2147 2148 /* Lock device for exclusive_stream access late because 2149 * enable_metric_set() might lock as well on gen8+. 2150 */ 2151 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 2152 if (ret) 2153 goto err_lock; 2154 2155 dev_priv->perf.oa.exclusive_stream = stream; 2156 2157 mutex_unlock(&dev_priv->drm.struct_mutex); 2158 2159 return 0; 2160 2161 err_lock: 2162 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 2163 2164 err_enable: 2165 free_oa_buffer(dev_priv); 2166 2167 err_oa_buf_alloc: 2168 put_oa_config(dev_priv, stream->oa_config); 2169 2170 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2171 intel_runtime_pm_put(dev_priv); 2172 2173 err_config: 2174 if (stream->ctx) 2175 oa_put_render_ctx_id(stream); 2176 2177 return ret; 2178 } 2179 2180 void i915_oa_init_reg_state(struct intel_engine_cs *engine, 2181 struct i915_gem_context *ctx, 2182 u32 *reg_state) 2183 { 2184 struct i915_perf_stream *stream; 2185 2186 if (engine->id != RCS) 2187 return; 2188 2189 stream = engine->i915->perf.oa.exclusive_stream; 2190 if (stream) 2191 gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config); 2192 } 2193 2194 /** 2195 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation 2196 * @stream: An i915 perf stream 2197 * @file: An i915 perf stream file 2198 * @buf: destination buffer given by userspace 2199 * @count: the number of bytes userspace wants to read 2200 * @ppos: (inout) file seek position (unused) 2201 * 2202 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to 2203 * ensure that if we've successfully copied any data then reporting that takes 2204 * precedence over any internal error status, so the data isn't lost. 2205 * 2206 * For example ret will be -ENOSPC whenever there is more buffered data than 2207 * can be copied to userspace, but that's only interesting if we weren't able 2208 * to copy some data because it implies the userspace buffer is too small to 2209 * receive a single record (and we never split records). 2210 * 2211 * Another case with ret == -EFAULT is more of a grey area since it would seem 2212 * like bad form for userspace to ask us to overrun its buffer, but the user 2213 * knows best: 2214 * 2215 * http://yarchive.net/comp/linux/partial_reads_writes.html 2216 * 2217 * Returns: The number of bytes copied or a negative error code on failure. 2218 */ 2219 static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, 2220 struct file *file, 2221 char __user *buf, 2222 size_t count, 2223 loff_t *ppos) 2224 { 2225 /* Note we keep the offset (aka bytes read) separate from any 2226 * error status so that the final check for whether we return 2227 * the bytes read with a higher precedence than any error (see 2228 * comment below) doesn't need to be handled/duplicated in 2229 * stream->ops->read() implementations. 
2230 */ 2231 size_t offset = 0; 2232 int ret = stream->ops->read(stream, buf, count, &offset); 2233 2234 return offset ?: (ret ?: -EAGAIN); 2235 } 2236 2237 /** 2238 * i915_perf_read - handles read() FOP for i915 perf stream FDs 2239 * @file: An i915 perf stream file 2240 * @buf: destination buffer given by userspace 2241 * @count: the number of bytes userspace wants to read 2242 * @ppos: (inout) file seek position (unused) 2243 * 2244 * The entry point for handling a read() on a stream file descriptor from 2245 * userspace. Most of the work is left to the i915_perf_read_locked() and 2246 * &i915_perf_stream_ops->read but to save having stream implementations (of 2247 * which we might have multiple later) we handle blocking read here. 2248 * 2249 * We can also consistently treat trying to read from a disabled stream 2250 * as an IO error so implementations can assume the stream is enabled 2251 * while reading. 2252 * 2253 * Returns: The number of bytes copied or a negative error code on failure. 2254 */ 2255 static ssize_t i915_perf_read(struct file *file, 2256 char __user *buf, 2257 size_t count, 2258 loff_t *ppos) 2259 { 2260 struct i915_perf_stream *stream = file->private_data; 2261 struct drm_i915_private *dev_priv = stream->dev_priv; 2262 ssize_t ret; 2263 2264 /* To ensure it's handled consistently we simply treat all reads of a 2265 * disabled stream as an error. In particular it might otherwise lead 2266 * to a deadlock for blocking file descriptors... 2267 */ 2268 if (!stream->enabled) 2269 return -EIO; 2270 2271 if (!(file->f_flags & O_NONBLOCK)) { 2272 /* There's the small chance of false positives from 2273 * stream->ops->wait_unlocked. 2274 * 2275 * E.g. with single context filtering since we only wait until 2276 * oabuffer has >= 1 report we don't immediately know whether 2277 * any reports really belong to the current context 2278 */ 2279 do { 2280 ret = stream->ops->wait_unlocked(stream); 2281 if (ret) 2282 return ret; 2283 2284 mutex_lock(&dev_priv->perf.lock); 2285 ret = i915_perf_read_locked(stream, file, 2286 buf, count, ppos); 2287 mutex_unlock(&dev_priv->perf.lock); 2288 } while (ret == -EAGAIN); 2289 } else { 2290 mutex_lock(&dev_priv->perf.lock); 2291 ret = i915_perf_read_locked(stream, file, buf, count, ppos); 2292 mutex_unlock(&dev_priv->perf.lock); 2293 } 2294 2295 /* We allow the poll checking to sometimes report false positive EPOLLIN 2296 * events where we might actually report EAGAIN on read() if there's 2297 * not really any data available. In this situation though we don't 2298 * want to enter a busy loop between poll() reporting a EPOLLIN event 2299 * and read() returning -EAGAIN. Clearing the oa.pollin state here 2300 * effectively ensures we back off until the next hrtimer callback 2301 * before reporting another EPOLLIN event. 2302 */ 2303 if (ret >= 0 || ret == -EAGAIN) { 2304 /* Maybe make ->pollin per-stream state if we support multiple 2305 * concurrent streams in the future. 
2306 */ 2307 dev_priv->perf.oa.pollin = false; 2308 } 2309 2310 return ret; 2311 } 2312 2313 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 2314 { 2315 struct drm_i915_private *dev_priv = 2316 container_of(hrtimer, typeof(*dev_priv), 2317 perf.oa.poll_check_timer); 2318 2319 if (oa_buffer_check_unlocked(dev_priv)) { 2320 dev_priv->perf.oa.pollin = true; 2321 wake_up(&dev_priv->perf.oa.poll_wq); 2322 } 2323 2324 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); 2325 2326 return HRTIMER_RESTART; 2327 } 2328 2329 /** 2330 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 2331 * @dev_priv: i915 device instance 2332 * @stream: An i915 perf stream 2333 * @file: An i915 perf stream file 2334 * @wait: poll() state table 2335 * 2336 * For handling userspace polling on an i915 perf stream, this calls through to 2337 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 2338 * will be woken for new stream data. 2339 * 2340 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2341 * with any non-file-operation driver hooks. 2342 * 2343 * Returns: any poll events that are ready without sleeping 2344 */ 2345 static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv, 2346 struct i915_perf_stream *stream, 2347 struct file *file, 2348 poll_table *wait) 2349 { 2350 __poll_t events = 0; 2351 2352 stream->ops->poll_wait(stream, file, wait); 2353 2354 /* Note: we don't explicitly check whether there's something to read 2355 * here since this path may be very hot depending on what else 2356 * userspace is polling, or on the timeout in use. We rely solely on 2357 * the hrtimer/oa_poll_check_timer_cb to notify us when there are 2358 * samples to read. 2359 */ 2360 if (dev_priv->perf.oa.pollin) 2361 events |= EPOLLIN; 2362 2363 return events; 2364 } 2365 2366 /** 2367 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream 2368 * @file: An i915 perf stream file 2369 * @wait: poll() state table 2370 * 2371 * For handling userspace polling on an i915 perf stream, this ensures 2372 * poll_wait() gets called with a wait queue that will be woken for new stream 2373 * data. 2374 * 2375 * Note: Implementation deferred to i915_perf_poll_locked() 2376 * 2377 * Returns: any poll events that are ready without sleeping 2378 */ 2379 static __poll_t i915_perf_poll(struct file *file, poll_table *wait) 2380 { 2381 struct i915_perf_stream *stream = file->private_data; 2382 struct drm_i915_private *dev_priv = stream->dev_priv; 2383 __poll_t ret; 2384 2385 mutex_lock(&dev_priv->perf.lock); 2386 ret = i915_perf_poll_locked(dev_priv, stream, file, wait); 2387 mutex_unlock(&dev_priv->perf.lock); 2388 2389 return ret; 2390 } 2391 2392 /** 2393 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl 2394 * @stream: A disabled i915 perf stream 2395 * 2396 * [Re]enables the associated capture of data for this stream. 2397 * 2398 * If a stream was previously enabled then there's currently no intention 2399 * to provide userspace any guarantee about the preservation of previously 2400 * buffered data. 
2401 */ 2402 static void i915_perf_enable_locked(struct i915_perf_stream *stream) 2403 { 2404 if (stream->enabled) 2405 return; 2406 2407 /* Allow stream->ops->enable() to refer to this */ 2408 stream->enabled = true; 2409 2410 if (stream->ops->enable) 2411 stream->ops->enable(stream); 2412 } 2413 2414 /** 2415 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl 2416 * @stream: An enabled i915 perf stream 2417 * 2418 * Disables the associated capture of data for this stream. 2419 * 2420 * The intention is that disabling an re-enabling a stream will ideally be 2421 * cheaper than destroying and re-opening a stream with the same configuration, 2422 * though there are no formal guarantees about what state or buffered data 2423 * must be retained between disabling and re-enabling a stream. 2424 * 2425 * Note: while a stream is disabled it's considered an error for userspace 2426 * to attempt to read from the stream (-EIO). 2427 */ 2428 static void i915_perf_disable_locked(struct i915_perf_stream *stream) 2429 { 2430 if (!stream->enabled) 2431 return; 2432 2433 /* Allow stream->ops->disable() to refer to this */ 2434 stream->enabled = false; 2435 2436 if (stream->ops->disable) 2437 stream->ops->disable(stream); 2438 } 2439 2440 /** 2441 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 2442 * @stream: An i915 perf stream 2443 * @cmd: the ioctl request 2444 * @arg: the ioctl data 2445 * 2446 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2447 * with any non-file-operation driver hooks. 2448 * 2449 * Returns: zero on success or a negative error code. Returns -EINVAL for 2450 * an unknown ioctl request. 2451 */ 2452 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, 2453 unsigned int cmd, 2454 unsigned long arg) 2455 { 2456 switch (cmd) { 2457 case I915_PERF_IOCTL_ENABLE: 2458 i915_perf_enable_locked(stream); 2459 return 0; 2460 case I915_PERF_IOCTL_DISABLE: 2461 i915_perf_disable_locked(stream); 2462 return 0; 2463 } 2464 2465 return -EINVAL; 2466 } 2467 2468 /** 2469 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 2470 * @file: An i915 perf stream file 2471 * @cmd: the ioctl request 2472 * @arg: the ioctl data 2473 * 2474 * Implementation deferred to i915_perf_ioctl_locked(). 2475 * 2476 * Returns: zero on success or a negative error code. Returns -EINVAL for 2477 * an unknown ioctl request. 2478 */ 2479 static long i915_perf_ioctl(struct file *file, 2480 unsigned int cmd, 2481 unsigned long arg) 2482 { 2483 struct i915_perf_stream *stream = file->private_data; 2484 struct drm_i915_private *dev_priv = stream->dev_priv; 2485 long ret; 2486 2487 mutex_lock(&dev_priv->perf.lock); 2488 ret = i915_perf_ioctl_locked(stream, cmd, arg); 2489 mutex_unlock(&dev_priv->perf.lock); 2490 2491 return ret; 2492 } 2493 2494 /** 2495 * i915_perf_destroy_locked - destroy an i915 perf stream 2496 * @stream: An i915 perf stream 2497 * 2498 * Frees all resources associated with the given i915 perf @stream, disabling 2499 * any associated data capture in the process. 2500 * 2501 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2502 * with any non-file-operation driver hooks. 
2503 */ 2504 static void i915_perf_destroy_locked(struct i915_perf_stream *stream) 2505 { 2506 if (stream->enabled) 2507 i915_perf_disable_locked(stream); 2508 2509 if (stream->ops->destroy) 2510 stream->ops->destroy(stream); 2511 2512 list_del(&stream->link); 2513 2514 if (stream->ctx) 2515 i915_gem_context_put(stream->ctx); 2516 2517 kfree(stream); 2518 } 2519 2520 /** 2521 * i915_perf_release - handles userspace close() of a stream file 2522 * @inode: anonymous inode associated with file 2523 * @file: An i915 perf stream file 2524 * 2525 * Cleans up any resources associated with an open i915 perf stream file. 2526 * 2527 * NB: close() can't really fail from the userspace point of view. 2528 * 2529 * Returns: zero on success or a negative error code. 2530 */ 2531 static int i915_perf_release(struct inode *inode, struct file *file) 2532 { 2533 struct i915_perf_stream *stream = file->private_data; 2534 struct drm_i915_private *dev_priv = stream->dev_priv; 2535 2536 mutex_lock(&dev_priv->perf.lock); 2537 i915_perf_destroy_locked(stream); 2538 mutex_unlock(&dev_priv->perf.lock); 2539 2540 return 0; 2541 } 2542 2543 2544 static const struct file_operations fops = { 2545 .owner = THIS_MODULE, 2546 .llseek = no_llseek, 2547 .release = i915_perf_release, 2548 .poll = i915_perf_poll, 2549 .read = i915_perf_read, 2550 .unlocked_ioctl = i915_perf_ioctl, 2551 /* Our ioctl have no arguments, so it's safe to use the same function 2552 * to handle 32bits compatibility. 2553 */ 2554 .compat_ioctl = i915_perf_ioctl, 2555 }; 2556 2557 2558 /** 2559 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD 2560 * @dev_priv: i915 device instance 2561 * @param: The open parameters passed to 'DRM_I915_PERF_OPEN` 2562 * @props: individually validated u64 property value pairs 2563 * @file: drm file 2564 * 2565 * See i915_perf_ioctl_open() for interface details. 2566 * 2567 * Implements further stream config validation and stream initialization on 2568 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex 2569 * taken to serialize with any non-file-operation driver hooks. 2570 * 2571 * Note: at this point the @props have only been validated in isolation and 2572 * it's still necessary to validate that the combination of properties makes 2573 * sense. 2574 * 2575 * In the case where userspace is interested in OA unit metrics then further 2576 * config validation and stream initialization details will be handled by 2577 * i915_oa_stream_init(). The code here should only validate config state that 2578 * will be relevant to all stream types / backends. 2579 * 2580 * Returns: zero on success or a negative error code. 
2581 */ 2582 static int 2583 i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, 2584 struct drm_i915_perf_open_param *param, 2585 struct perf_open_properties *props, 2586 struct drm_file *file) 2587 { 2588 struct i915_gem_context *specific_ctx = NULL; 2589 struct i915_perf_stream *stream = NULL; 2590 unsigned long f_flags = 0; 2591 bool privileged_op = true; 2592 int stream_fd; 2593 int ret; 2594 2595 if (props->single_context) { 2596 u32 ctx_handle = props->ctx_handle; 2597 struct drm_i915_file_private *file_priv = file->driver_priv; 2598 2599 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle); 2600 if (!specific_ctx) { 2601 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n", 2602 ctx_handle); 2603 ret = -ENOENT; 2604 goto err; 2605 } 2606 } 2607 2608 /* 2609 * On Haswell the OA unit supports clock gating off for a specific 2610 * context and in this mode there's no visibility of metrics for the 2611 * rest of the system, which we consider acceptable for a 2612 * non-privileged client. 2613 * 2614 * For Gen8+ the OA unit no longer supports clock gating off for a 2615 * specific context and the kernel can't securely stop the counters 2616 * from updating as system-wide / global values. Even though we can 2617 * filter reports based on the included context ID we can't block 2618 * clients from seeing the raw / global counter values via 2619 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to 2620 * enable the OA unit by default. 2621 */ 2622 if (IS_HASWELL(dev_priv) && specific_ctx) 2623 privileged_op = false; 2624 2625 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option 2626 * we check a dev.i915.perf_stream_paranoid sysctl option 2627 * to determine if it's ok to access system wide OA counters 2628 * without CAP_SYS_ADMIN privileges. 2629 */ 2630 if (privileged_op && 2631 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { 2632 DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n"); 2633 ret = -EACCES; 2634 goto err_ctx; 2635 } 2636 2637 stream = kzalloc(sizeof(*stream), GFP_KERNEL); 2638 if (!stream) { 2639 ret = -ENOMEM; 2640 goto err_ctx; 2641 } 2642 2643 stream->dev_priv = dev_priv; 2644 stream->ctx = specific_ctx; 2645 2646 ret = i915_oa_stream_init(stream, param, props); 2647 if (ret) 2648 goto err_alloc; 2649 2650 /* we avoid simply assigning stream->sample_flags = props->sample_flags 2651 * to have _stream_init check the combination of sample flags more 2652 * thoroughly, but still this is the expected result at this point. 
2653 */ 2654 if (WARN_ON(stream->sample_flags != props->sample_flags)) { 2655 ret = -ENODEV; 2656 goto err_flags; 2657 } 2658 2659 list_add(&stream->link, &dev_priv->perf.streams); 2660 2661 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC) 2662 f_flags |= O_CLOEXEC; 2663 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK) 2664 f_flags |= O_NONBLOCK; 2665 2666 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); 2667 if (stream_fd < 0) { 2668 ret = stream_fd; 2669 goto err_open; 2670 } 2671 2672 if (!(param->flags & I915_PERF_FLAG_DISABLED)) 2673 i915_perf_enable_locked(stream); 2674 2675 return stream_fd; 2676 2677 err_open: 2678 list_del(&stream->link); 2679 err_flags: 2680 if (stream->ops->destroy) 2681 stream->ops->destroy(stream); 2682 err_alloc: 2683 kfree(stream); 2684 err_ctx: 2685 if (specific_ctx) 2686 i915_gem_context_put(specific_ctx); 2687 err: 2688 return ret; 2689 } 2690 2691 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) 2692 { 2693 return div64_u64(1000000000ULL * (2ULL << exponent), 2694 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz); 2695 } 2696 2697 /** 2698 * read_properties_unlocked - validate + copy userspace stream open properties 2699 * @dev_priv: i915 device instance 2700 * @uprops: The array of u64 key value pairs given by userspace 2701 * @n_props: The number of key value pairs expected in @uprops 2702 * @props: The stream configuration built up while validating properties 2703 * 2704 * Note this function only validates properties in isolation it doesn't 2705 * validate that the combination of properties makes sense or that all 2706 * properties necessary for a particular kind of stream have been set. 2707 * 2708 * Note that there currently aren't any ordering requirements for properties so 2709 * we shouldn't validate or assume anything about ordering here. This doesn't 2710 * rule out defining new properties with ordering requirements in the future. 2711 */ 2712 static int read_properties_unlocked(struct drm_i915_private *dev_priv, 2713 u64 __user *uprops, 2714 u32 n_props, 2715 struct perf_open_properties *props) 2716 { 2717 u64 __user *uprop = uprops; 2718 u32 i; 2719 2720 memset(props, 0, sizeof(struct perf_open_properties)); 2721 2722 if (!n_props) { 2723 DRM_DEBUG("No i915 perf properties given\n"); 2724 return -EINVAL; 2725 } 2726 2727 /* Considering that ID = 0 is reserved and assuming that we don't 2728 * (currently) expect any configurations to ever specify duplicate 2729 * values for a particular property ID then the last _PROP_MAX value is 2730 * one greater than the maximum number of properties we expect to get 2731 * from userspace. 
2732 */ 2733 if (n_props >= DRM_I915_PERF_PROP_MAX) { 2734 DRM_DEBUG("More i915 perf properties specified than exist\n"); 2735 return -EINVAL; 2736 } 2737 2738 for (i = 0; i < n_props; i++) { 2739 u64 oa_period, oa_freq_hz; 2740 u64 id, value; 2741 int ret; 2742 2743 ret = get_user(id, uprop); 2744 if (ret) 2745 return ret; 2746 2747 ret = get_user(value, uprop + 1); 2748 if (ret) 2749 return ret; 2750 2751 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { 2752 DRM_DEBUG("Unknown i915 perf property ID\n"); 2753 return -EINVAL; 2754 } 2755 2756 switch ((enum drm_i915_perf_property_id)id) { 2757 case DRM_I915_PERF_PROP_CTX_HANDLE: 2758 props->single_context = 1; 2759 props->ctx_handle = value; 2760 break; 2761 case DRM_I915_PERF_PROP_SAMPLE_OA: 2762 props->sample_flags |= SAMPLE_OA_REPORT; 2763 break; 2764 case DRM_I915_PERF_PROP_OA_METRICS_SET: 2765 if (value == 0) { 2766 DRM_DEBUG("Unknown OA metric set ID\n"); 2767 return -EINVAL; 2768 } 2769 props->metrics_set = value; 2770 break; 2771 case DRM_I915_PERF_PROP_OA_FORMAT: 2772 if (value == 0 || value >= I915_OA_FORMAT_MAX) { 2773 DRM_DEBUG("Out-of-range OA report format %llu\n", 2774 value); 2775 return -EINVAL; 2776 } 2777 if (!dev_priv->perf.oa.oa_formats[value].size) { 2778 DRM_DEBUG("Unsupported OA report format %llu\n", 2779 value); 2780 return -EINVAL; 2781 } 2782 props->oa_format = value; 2783 break; 2784 case DRM_I915_PERF_PROP_OA_EXPONENT: 2785 if (value > OA_EXPONENT_MAX) { 2786 DRM_DEBUG("OA timer exponent too high (> %u)\n", 2787 OA_EXPONENT_MAX); 2788 return -EINVAL; 2789 } 2790 2791 /* Theoretically we can program the OA unit to sample 2792 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns 2793 * for BXT. We don't allow such high sampling 2794 * frequencies by default unless root. 2795 */ 2796 2797 BUILD_BUG_ON(sizeof(oa_period) != 8); 2798 oa_period = oa_exponent_to_ns(dev_priv, value); 2799 2800 /* This check is primarily to ensure that oa_period <= 2801 * UINT32_MAX (before passing to do_div which only 2802 * accepts a u32 denominator), but we can also skip 2803 * checking anything < 1Hz which implicitly can't be 2804 * limited via an integer oa_max_sample_rate. 2805 */ 2806 if (oa_period <= NSEC_PER_SEC) { 2807 u64 tmp = NSEC_PER_SEC; 2808 do_div(tmp, oa_period); 2809 oa_freq_hz = tmp; 2810 } else 2811 oa_freq_hz = 0; 2812 2813 if (oa_freq_hz > i915_oa_max_sample_rate && 2814 !capable(CAP_SYS_ADMIN)) { 2815 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", 2816 i915_oa_max_sample_rate); 2817 return -EACCES; 2818 } 2819 2820 props->oa_periodic = true; 2821 props->oa_period_exponent = value; 2822 break; 2823 case DRM_I915_PERF_PROP_MAX: 2824 MISSING_CASE(id); 2825 return -EINVAL; 2826 } 2827 2828 uprop += 2; 2829 } 2830 2831 return 0; 2832 } 2833 2834 /** 2835 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD 2836 * @dev: drm device 2837 * @data: ioctl data copied from userspace (unvalidated) 2838 * @file: drm file 2839 * 2840 * Validates the stream open parameters given by userspace including flags 2841 * and an array of u64 key, value pair properties. 2842 * 2843 * Very little is assumed up front about the nature of the stream being 2844 * opened (for instance we don't assume it's for periodic OA unit metrics). An 2845 * i915-perf stream is expected to be a suitable interface for other forms of 2846 * buffered data written by the GPU besides periodic OA metrics. 
2847 * 2848 * Note we copy the properties from userspace outside of the i915 perf 2849 * mutex to avoid an awkward lockdep with mmap_sem. 2850 * 2851 * Most of the implementation details are handled by 2852 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock 2853 * mutex for serializing with any non-file-operation driver hooks. 2854 * 2855 * Return: A newly opened i915 Perf stream file descriptor or negative 2856 * error code on failure. 2857 */ 2858 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 2859 struct drm_file *file) 2860 { 2861 struct drm_i915_private *dev_priv = dev->dev_private; 2862 struct drm_i915_perf_open_param *param = data; 2863 struct perf_open_properties props; 2864 u32 known_open_flags; 2865 int ret; 2866 2867 if (!dev_priv->perf.initialized) { 2868 DRM_DEBUG("i915 perf interface not available for this system\n"); 2869 return -ENOTSUPP; 2870 } 2871 2872 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | 2873 I915_PERF_FLAG_FD_NONBLOCK | 2874 I915_PERF_FLAG_DISABLED; 2875 if (param->flags & ~known_open_flags) { 2876 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n"); 2877 return -EINVAL; 2878 } 2879 2880 ret = read_properties_unlocked(dev_priv, 2881 u64_to_user_ptr(param->properties_ptr), 2882 param->num_properties, 2883 &props); 2884 if (ret) 2885 return ret; 2886 2887 mutex_lock(&dev_priv->perf.lock); 2888 ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file); 2889 mutex_unlock(&dev_priv->perf.lock); 2890 2891 return ret; 2892 } 2893 2894 /** 2895 * i915_perf_register - exposes i915-perf to userspace 2896 * @dev_priv: i915 device instance 2897 * 2898 * In particular OA metric sets are advertised under a sysfs metrics/ 2899 * directory allowing userspace to enumerate valid IDs that can be 2900 * used to open an i915-perf stream. 2901 */ 2902 void i915_perf_register(struct drm_i915_private *dev_priv) 2903 { 2904 int ret; 2905 2906 if (!dev_priv->perf.initialized) 2907 return; 2908 2909 /* To be sure we're synchronized with an attempted 2910 * i915_perf_open_ioctl(); considering that we register after 2911 * being exposed to userspace. 
2912 */ 2913 mutex_lock(&dev_priv->perf.lock); 2914 2915 dev_priv->perf.metrics_kobj = 2916 kobject_create_and_add("metrics", 2917 &dev_priv->drm.primary->kdev->kobj); 2918 if (!dev_priv->perf.metrics_kobj) 2919 goto exit; 2920 2921 sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr); 2922 2923 if (IS_HASWELL(dev_priv)) { 2924 i915_perf_load_test_config_hsw(dev_priv); 2925 } else if (IS_BROADWELL(dev_priv)) { 2926 i915_perf_load_test_config_bdw(dev_priv); 2927 } else if (IS_CHERRYVIEW(dev_priv)) { 2928 i915_perf_load_test_config_chv(dev_priv); 2929 } else if (IS_SKYLAKE(dev_priv)) { 2930 if (IS_SKL_GT2(dev_priv)) 2931 i915_perf_load_test_config_sklgt2(dev_priv); 2932 else if (IS_SKL_GT3(dev_priv)) 2933 i915_perf_load_test_config_sklgt3(dev_priv); 2934 else if (IS_SKL_GT4(dev_priv)) 2935 i915_perf_load_test_config_sklgt4(dev_priv); 2936 } else if (IS_BROXTON(dev_priv)) { 2937 i915_perf_load_test_config_bxt(dev_priv); 2938 } else if (IS_KABYLAKE(dev_priv)) { 2939 if (IS_KBL_GT2(dev_priv)) 2940 i915_perf_load_test_config_kblgt2(dev_priv); 2941 else if (IS_KBL_GT3(dev_priv)) 2942 i915_perf_load_test_config_kblgt3(dev_priv); 2943 } else if (IS_GEMINILAKE(dev_priv)) { 2944 i915_perf_load_test_config_glk(dev_priv); 2945 } else if (IS_COFFEELAKE(dev_priv)) { 2946 if (IS_CFL_GT2(dev_priv)) 2947 i915_perf_load_test_config_cflgt2(dev_priv); 2948 if (IS_CFL_GT3(dev_priv)) 2949 i915_perf_load_test_config_cflgt3(dev_priv); 2950 } else if (IS_CANNONLAKE(dev_priv)) { 2951 i915_perf_load_test_config_cnl(dev_priv); 2952 } 2953 2954 if (dev_priv->perf.oa.test_config.id == 0) 2955 goto sysfs_error; 2956 2957 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, 2958 &dev_priv->perf.oa.test_config.sysfs_metric); 2959 if (ret) 2960 goto sysfs_error; 2961 2962 atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1); 2963 2964 goto exit; 2965 2966 sysfs_error: 2967 kobject_put(dev_priv->perf.metrics_kobj); 2968 dev_priv->perf.metrics_kobj = NULL; 2969 2970 exit: 2971 mutex_unlock(&dev_priv->perf.lock); 2972 } 2973 2974 /** 2975 * i915_perf_unregister - hide i915-perf from userspace 2976 * @dev_priv: i915 device instance 2977 * 2978 * i915-perf state cleanup is split up into an 'unregister' and 2979 * 'deinit' phase where the interface is first hidden from 2980 * userspace by i915_perf_unregister() before cleaning up 2981 * remaining state in i915_perf_fini(). 
2982 */ 2983 void i915_perf_unregister(struct drm_i915_private *dev_priv) 2984 { 2985 if (!dev_priv->perf.metrics_kobj) 2986 return; 2987 2988 sysfs_remove_group(dev_priv->perf.metrics_kobj, 2989 &dev_priv->perf.oa.test_config.sysfs_metric); 2990 2991 kobject_put(dev_priv->perf.metrics_kobj); 2992 dev_priv->perf.metrics_kobj = NULL; 2993 } 2994 2995 static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr) 2996 { 2997 static const i915_reg_t flex_eu_regs[] = { 2998 EU_PERF_CNTL0, 2999 EU_PERF_CNTL1, 3000 EU_PERF_CNTL2, 3001 EU_PERF_CNTL3, 3002 EU_PERF_CNTL4, 3003 EU_PERF_CNTL5, 3004 EU_PERF_CNTL6, 3005 }; 3006 int i; 3007 3008 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) { 3009 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr) 3010 return true; 3011 } 3012 return false; 3013 } 3014 3015 static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr) 3016 { 3017 return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) && 3018 addr <= i915_mmio_reg_offset(OASTARTTRIG8)) || 3019 (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) && 3020 addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) || 3021 (addr >= i915_mmio_reg_offset(OACEC0_0) && 3022 addr <= i915_mmio_reg_offset(OACEC7_1)); 3023 } 3024 3025 static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3026 { 3027 return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) || 3028 (addr >= i915_mmio_reg_offset(MICRO_BP0_0) && 3029 addr <= i915_mmio_reg_offset(NOA_WRITE)) || 3030 (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) && 3031 addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) || 3032 (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) && 3033 addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI)); 3034 } 3035 3036 static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3037 { 3038 return gen7_is_valid_mux_addr(dev_priv, addr) || 3039 addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) || 3040 (addr >= i915_mmio_reg_offset(RPM_CONFIG0) && 3041 addr <= i915_mmio_reg_offset(NOA_CONFIG(8))); 3042 } 3043 3044 static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3045 { 3046 return gen8_is_valid_mux_addr(dev_priv, addr) || 3047 (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) && 3048 addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI)); 3049 } 3050 3051 static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3052 { 3053 return gen7_is_valid_mux_addr(dev_priv, addr) || 3054 (addr >= 0x25100 && addr <= 0x2FF90) || 3055 (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) && 3056 addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) || 3057 addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0); 3058 } 3059 3060 static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3061 { 3062 return gen7_is_valid_mux_addr(dev_priv, addr) || 3063 (addr >= 0x182300 && addr <= 0x1823A4); 3064 } 3065 3066 static uint32_t mask_reg_value(u32 reg, u32 val) 3067 { 3068 /* HALF_SLICE_CHICKEN2 is programmed with a the 3069 * WaDisableSTUnitPowerOptimization workaround. Make sure the value 3070 * programmed by userspace doesn't change this. 3071 */ 3072 if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg) 3073 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE); 3074 3075 /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function 3076 * indicated by its name and a bunch of selection fields used by OA 3077 * configs. 
3078 */ 3079 if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg) 3080 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE); 3081 3082 return val; 3083 } 3084 3085 static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv, 3086 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr), 3087 u32 __user *regs, 3088 u32 n_regs) 3089 { 3090 struct i915_oa_reg *oa_regs; 3091 int err; 3092 u32 i; 3093 3094 if (!n_regs) 3095 return NULL; 3096 3097 if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2)) 3098 return ERR_PTR(-EFAULT); 3099 3100 /* No is_valid function means we're not allowing any register to be programmed. */ 3101 GEM_BUG_ON(!is_valid); 3102 if (!is_valid) 3103 return ERR_PTR(-EINVAL); 3104 3105 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL); 3106 if (!oa_regs) 3107 return ERR_PTR(-ENOMEM); 3108 3109 for (i = 0; i < n_regs; i++) { 3110 u32 addr, value; 3111 3112 err = get_user(addr, regs); 3113 if (err) 3114 goto addr_err; 3115 3116 if (!is_valid(dev_priv, addr)) { 3117 DRM_DEBUG("Invalid oa_reg address: %X\n", addr); 3118 err = -EINVAL; 3119 goto addr_err; 3120 } 3121 3122 err = get_user(value, regs + 1); 3123 if (err) 3124 goto addr_err; 3125 3126 oa_regs[i].addr = _MMIO(addr); 3127 oa_regs[i].value = mask_reg_value(addr, value); 3128 3129 regs += 2; 3130 } 3131 3132 return oa_regs; 3133 3134 addr_err: 3135 kfree(oa_regs); 3136 return ERR_PTR(err); 3137 } 3138 3139 static ssize_t show_dynamic_id(struct device *dev, 3140 struct device_attribute *attr, 3141 char *buf) 3142 { 3143 struct i915_oa_config *oa_config = 3144 container_of(attr, typeof(*oa_config), sysfs_metric_id); 3145 3146 return sprintf(buf, "%d\n", oa_config->id); 3147 } 3148 3149 static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv, 3150 struct i915_oa_config *oa_config) 3151 { 3152 sysfs_attr_init(&oa_config->sysfs_metric_id.attr); 3153 oa_config->sysfs_metric_id.attr.name = "id"; 3154 oa_config->sysfs_metric_id.attr.mode = S_IRUGO; 3155 oa_config->sysfs_metric_id.show = show_dynamic_id; 3156 oa_config->sysfs_metric_id.store = NULL; 3157 3158 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr; 3159 oa_config->attrs[1] = NULL; 3160 3161 oa_config->sysfs_metric.name = oa_config->uuid; 3162 oa_config->sysfs_metric.attrs = oa_config->attrs; 3163 3164 return sysfs_create_group(dev_priv->perf.metrics_kobj, 3165 &oa_config->sysfs_metric); 3166 } 3167 3168 /** 3169 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config 3170 * @dev: drm device 3171 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from 3172 * userspace (unvalidated) 3173 * @file: drm file 3174 * 3175 * Validates the submitted OA register to be saved into a new OA config that 3176 * can then be used for programming the OA unit and its NOA network. 3177 * 3178 * Returns: A new allocated config number to be used with the perf open ioctl 3179 * or a negative error code on failure. 
3180 */ 3181 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, 3182 struct drm_file *file) 3183 { 3184 struct drm_i915_private *dev_priv = dev->dev_private; 3185 struct drm_i915_perf_oa_config *args = data; 3186 struct i915_oa_config *oa_config, *tmp; 3187 int err, id; 3188 3189 if (!dev_priv->perf.initialized) { 3190 DRM_DEBUG("i915 perf interface not available for this system\n"); 3191 return -ENOTSUPP; 3192 } 3193 3194 if (!dev_priv->perf.metrics_kobj) { 3195 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 3196 return -EINVAL; 3197 } 3198 3199 if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { 3200 DRM_DEBUG("Insufficient privileges to add i915 OA config\n"); 3201 return -EACCES; 3202 } 3203 3204 if ((!args->mux_regs_ptr || !args->n_mux_regs) && 3205 (!args->boolean_regs_ptr || !args->n_boolean_regs) && 3206 (!args->flex_regs_ptr || !args->n_flex_regs)) { 3207 DRM_DEBUG("No OA registers given\n"); 3208 return -EINVAL; 3209 } 3210 3211 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL); 3212 if (!oa_config) { 3213 DRM_DEBUG("Failed to allocate memory for the OA config\n"); 3214 return -ENOMEM; 3215 } 3216 3217 atomic_set(&oa_config->ref_count, 1); 3218 3219 if (!uuid_is_valid(args->uuid)) { 3220 DRM_DEBUG("Invalid uuid format for OA config\n"); 3221 err = -EINVAL; 3222 goto reg_err; 3223 } 3224 3225 /* Last character in oa_config->uuid will be 0 because oa_config is 3226 * kzalloc. 3227 */ 3228 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid)); 3229 3230 oa_config->mux_regs_len = args->n_mux_regs; 3231 oa_config->mux_regs = 3232 alloc_oa_regs(dev_priv, 3233 dev_priv->perf.oa.ops.is_valid_mux_reg, 3234 u64_to_user_ptr(args->mux_regs_ptr), 3235 args->n_mux_regs); 3236 3237 if (IS_ERR(oa_config->mux_regs)) { 3238 DRM_DEBUG("Failed to create OA config for mux_regs\n"); 3239 err = PTR_ERR(oa_config->mux_regs); 3240 goto reg_err; 3241 } 3242 3243 oa_config->b_counter_regs_len = args->n_boolean_regs; 3244 oa_config->b_counter_regs = 3245 alloc_oa_regs(dev_priv, 3246 dev_priv->perf.oa.ops.is_valid_b_counter_reg, 3247 u64_to_user_ptr(args->boolean_regs_ptr), 3248 args->n_boolean_regs); 3249 3250 if (IS_ERR(oa_config->b_counter_regs)) { 3251 DRM_DEBUG("Failed to create OA config for b_counter_regs\n"); 3252 err = PTR_ERR(oa_config->b_counter_regs); 3253 goto reg_err; 3254 } 3255 3256 if (INTEL_GEN(dev_priv) < 8) { 3257 if (args->n_flex_regs != 0) { 3258 err = -EINVAL; 3259 goto reg_err; 3260 } 3261 } else { 3262 oa_config->flex_regs_len = args->n_flex_regs; 3263 oa_config->flex_regs = 3264 alloc_oa_regs(dev_priv, 3265 dev_priv->perf.oa.ops.is_valid_flex_reg, 3266 u64_to_user_ptr(args->flex_regs_ptr), 3267 args->n_flex_regs); 3268 3269 if (IS_ERR(oa_config->flex_regs)) { 3270 DRM_DEBUG("Failed to create OA config for flex_regs\n"); 3271 err = PTR_ERR(oa_config->flex_regs); 3272 goto reg_err; 3273 } 3274 } 3275 3276 err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); 3277 if (err) 3278 goto reg_err; 3279 3280 /* We shouldn't have too many configs, so this iteration shouldn't be 3281 * too costly. 
/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in
 * sysfs and their content will be freed when the stream using the config
 * is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		goto lock_err;

	oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
	if (!oa_config) {
		DRM_DEBUG("Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto config_err;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &oa_config->sysfs_metric);

	idr_remove(&dev_priv->perf.metrics_idr, *arg);
	put_oa_config(dev_priv, oa_config);

config_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
lock_err:
	return ret;
}
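
/*
 * Illustrative userspace sketch (not part of the driver): removal takes the
 * u64 config id returned by the add ioctl, reusing the hypothetical drm_fd
 * and config_id from the sketch above.
 *
 *	uint64_t id = config_id;
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id) < 0)
 *		perror("remove OA config");
 */
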
static struct ctl_table oa_table[] = {
	{
		.procname = "perf_stream_paranoid",
		.data = &i915_perf_stream_paranoid,
		.maxlen = sizeof(i915_perf_stream_paranoid),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
	{
		.procname = "oa_max_sample_rate",
		.data = &i915_oa_max_sample_rate,
		.maxlen = sizeof(i915_oa_max_sample_rate),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &oa_sample_rate_hard_limit,
	},
	{}
};

static struct ctl_table i915_root[] = {
	{
		.procname = "i915",
		.maxlen = 0,
		.mode = 0555,
		.child = oa_table,
	},
	{}
};

static struct ctl_table dev_root[] = {
	{
		.procname = "dev",
		.maxlen = 0,
		.mode = 0555,
		.child = i915_root,
	},
	{}
};
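
/*
 * Illustrative userspace sketch (not part of the driver): the tables above
 * hang two knobs under /proc/sys/dev/i915/. Userspace might check the
 * paranoid setting before trying to open a system-wide stream:
 *
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	char buf[8] = { 0 };
 *	int paranoid = 1;	// assume restricted if unreadable
 *	int fd = open("/proc/sys/dev/i915/perf_stream_paranoid", O_RDONLY);
 *
 *	if (fd >= 0) {
 *		if (read(fd, buf, sizeof(buf) - 1) > 0)
 *			paranoid = atoi(buf);
 *		close(fd);
 *	}
 *
 * oa_max_sample_rate lives alongside it and is clamped to
 * oa_sample_rate_hard_limit, which i915_perf_init() below derives from the
 * command stream timestamp frequency.
 */
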
/**
 * i915_perf_init - initialize i915-perf state on module load
 * @dev_priv: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase with the i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv)) {
		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
			gen7_is_valid_b_counter_addr;
		dev_priv->perf.oa.ops.is_valid_mux_reg =
			hsw_is_valid_mux_addr;
		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
		dev_priv->perf.oa.ops.read = gen7_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read =
			gen7_oa_hw_tail_read;

		dev_priv->perf.oa.oa_formats = hsw_oa_formats;
	} else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		/* Note: although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver did so, before upstreaming), it didn't seem
		 * worth the complexity to maintain now that BDW+ enables
		 * execlist mode by default.
		 */
		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;

		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
		dev_priv->perf.oa.ops.read = gen8_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

		if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(dev_priv)) {
				dev_priv->perf.oa.ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;

			if (IS_GEN8(dev_priv)) {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1 << 25);
			} else {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1 << 16);
			}
		} else if (IS_GEN10(dev_priv)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen10_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;

			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

			dev_priv->perf.oa.gen8_valid_ctx_bit = (1 << 16);
		}
	}

	if (dev_priv->perf.oa.ops.enable_metric_set) {
		hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
		init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

		INIT_LIST_HEAD(&dev_priv->perf.streams);
		mutex_init(&dev_priv->perf.lock);
		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);

		oa_sample_rate_hard_limit = 1000 *
			(INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

		mutex_init(&dev_priv->perf.metrics_lock);
		idr_init(&dev_priv->perf.metrics_idr);

		dev_priv->perf.initialized = true;
	}
}

static int destroy_config(int id, void *p, void *data)
{
	struct drm_i915_private *dev_priv = data;
	struct i915_oa_config *oa_config = p;

	put_oa_config(dev_priv, oa_config);

	return 0;
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @dev_priv: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.initialized)
		return;

	idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
	idr_destroy(&dev_priv->perf.metrics_idr);

	unregister_sysctl_table(dev_priv->perf.sysctl_header);

	memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));

	dev_priv->perf.initialized = false;
}
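
/*
 * Illustrative sketch (not the literal call sites, which live in the i915
 * load/unload paths): the init/register split mentioned in the
 * i915_perf_init() kernel-doc pairs up with
 * i915_perf_unregister()/i915_perf_fini() on the way down, roughly:
 *
 *	i915_perf_init(dev_priv);	// driver load: set up state only
 *	i915_perf_register(dev_priv);	// advertise metrics via sysfs
 *	...
 *	i915_perf_unregister(dev_priv);	// stop exposing state to userspace
 *	i915_perf_fini(dev_priv);	// free configs, drop sysctl table
 */
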