1 /* 2 * Copyright © 2015-2016 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Robert Bragg <robert@sixbynine.org> 25 */ 26 27 28 /** 29 * DOC: i915 Perf Overview 30 * 31 * Gen graphics supports a large number of performance counters that can help 32 * driver and application developers understand and optimize their use of the 33 * GPU. 34 * 35 * This i915 perf interface enables userspace to configure and open a file 36 * descriptor representing a stream of GPU metrics which can then be read() as 37 * a stream of sample records. 38 * 39 * The interface is particularly suited to exposing buffered metrics that are 40 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU. 41 * 42 * Streams representing a single context are accessible to applications with a 43 * corresponding drm file descriptor, such that OpenGL can use the interface 44 * without special privileges. Access to system-wide metrics requires root 45 * privileges by default, unless changed via the dev.i915.perf_event_paranoid 46 * sysctl option. 47 * 48 */ 49 50 /** 51 * DOC: i915 Perf History and Comparison with Core Perf 52 * 53 * The interface was initially inspired by the core Perf infrastructure but 54 * some notable differences are: 55 * 56 * i915 perf file descriptors represent a "stream" instead of an "event"; where 57 * a perf event primarily corresponds to a single 64bit value, while a stream 58 * might sample sets of tightly-coupled counters, depending on the 59 * configuration. For example the Gen OA unit isn't designed to support 60 * orthogonal configurations of individual counters; it's configured for a set 61 * of related counters. Samples for an i915 perf stream capturing OA metrics 62 * will include a set of counter values packed in a compact HW specific format. 63 * The OA unit supports a number of different packing formats which can be 64 * selected by the user opening the stream. Perf has support for grouping 65 * events, but each event in the group is configured, validated and 66 * authenticated individually with separate system calls. 67 * 68 * i915 perf stream configurations are provided as an array of u64 (key,value) 69 * pairs, instead of a fixed struct with multiple miscellaneous config members, 70 * interleaved with event-type specific members. 71 * 72 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer. 73 * The supported metrics are being written to memory by the GPU unsynchronized 74 * with the CPU, using HW specific packing formats for counter sets. Sometimes 75 * the constraints on HW configuration require reports to be filtered before it 76 * would be acceptable to expose them to unprivileged applications - to hide 77 * the metrics of other processes/contexts. For these use cases a read() based 78 * interface is a good fit, and provides an opportunity to filter data as it 79 * gets copied from the GPU mapped buffers to userspace buffers. 80 * 81 * 82 * Issues hit with first prototype based on Core Perf 83 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 * 85 * The first prototype of this driver was based on the core perf 86 * infrastructure, and while we did make that mostly work, with some changes to 87 * perf, we found we were breaking or working around too many assumptions baked 88 * into perf's currently cpu centric design. 89 * 90 * In the end we didn't see a clear benefit to making perf's implementation and 91 * interface more complex by changing design assumptions while we knew we still 92 * wouldn't be able to use any existing perf based userspace tools. 93 * 94 * Also considering the Gen specific nature of the Observability hardware and 95 * how userspace will sometimes need to combine i915 perf OA metrics with 96 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're 97 * expecting the interface to be used by a platform specific userspace such as 98 * OpenGL or tools. This is to say; we aren't inherently missing out on having 99 * a standard vendor/architecture agnostic interface by not using perf. 100 * 101 * 102 * For posterity, in case we might re-visit trying to adapt core perf to be 103 * better suited to exposing i915 metrics these were the main pain points we 104 * hit: 105 * 106 * - The perf based OA PMU driver broke some significant design assumptions: 107 * 108 * Existing perf pmus are used for profiling work on a cpu and we were 109 * introducing the idea of _IS_DEVICE pmus with different security 110 * implications, the need to fake cpu-related data (such as user/kernel 111 * registers) to fit with perf's current design, and adding _DEVICE records 112 * as a way to forward device-specific status records. 113 * 114 * The OA unit writes reports of counters into a circular buffer, without 115 * involvement from the CPU, making our PMU driver the first of a kind. 116 * 117 * Given the way we were periodically forward data from the GPU-mapped, OA 118 * buffer to perf's buffer, those bursts of sample writes looked to perf like 119 * we were sampling too fast and so we had to subvert its throttling checks. 120 * 121 * Perf supports groups of counters and allows those to be read via 122 * transactions internally but transactions currently seem designed to be 123 * explicitly initiated from the cpu (say in response to a userspace read()) 124 * and while we could pull a report out of the OA buffer we can't 125 * trigger a report from the cpu on demand. 126 * 127 * Related to being report based; the OA counters are configured in HW as a 128 * set while perf generally expects counter configurations to be orthogonal. 129 * Although counters can be associated with a group leader as they are 130 * opened, there's no clear precedent for being able to provide group-wide 131 * configuration attributes (for example we want to let userspace choose the 132 * OA unit report format used to capture all counters in a set, or specify a 133 * GPU context to filter metrics on). We avoided using perf's grouping 134 * feature and forwarded OA reports to userspace via perf's 'raw' sample 135 * field. This suited our userspace well considering how coupled the counters 136 * are when dealing with normalizing. It would be inconvenient to split 137 * counters up into separate events, only to require userspace to recombine 138 * them. For Mesa it's also convenient to be forwarded raw, periodic reports 139 * for combining with the side-band raw reports it captures using 140 * MI_REPORT_PERF_COUNT commands. 141 * 142 * - As a side note on perf's grouping feature; there was also some concern 143 * that using PERF_FORMAT_GROUP as a way to pack together counter values 144 * would quite drastically inflate our sample sizes, which would likely 145 * lower the effective sampling resolutions we could use when the available 146 * memory bandwidth is limited. 147 * 148 * With the OA unit's report formats, counters are packed together as 32 149 * or 40bit values, with the largest report size being 256 bytes. 150 * 151 * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a 152 * documented ordering to the values, implying PERF_FORMAT_ID must also be 153 * used to add a 64bit ID before each value; giving 16 bytes per counter. 154 * 155 * Related to counter orthogonality; we can't time share the OA unit, while 156 * event scheduling is a central design idea within perf for allowing 157 * userspace to open + enable more events than can be configured in HW at any 158 * one time. The OA unit is not designed to allow re-configuration while in 159 * use. We can't reconfigure the OA unit without losing internal OA unit 160 * state which we can't access explicitly to save and restore. Reconfiguring 161 * the OA unit is also relatively slow, involving ~100 register writes. From 162 * userspace Mesa also depends on a stable OA configuration when emitting 163 * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be 164 * disabled while there are outstanding MI_RPC commands lest we hang the 165 * command streamer. 166 * 167 * The contents of sample records aren't extensible by device drivers (i.e. 168 * the sample_type bits). As an example; Sourab Gupta had been looking to 169 * attach GPU timestamps to our OA samples. We were shoehorning OA reports 170 * into sample records by using the 'raw' field, but it's tricky to pack more 171 * than one thing into this field because events/core.c currently only lets a 172 * pmu give a single raw data pointer plus len which will be copied into the 173 * ring buffer. To include more than the OA report we'd have to copy the 174 * report into an intermediate larger buffer. I'd been considering allowing a 175 * vector of data+len values to be specified for copying the raw data, but 176 * it felt like a kludge to being using the raw field for this purpose. 177 * 178 * - It felt like our perf based PMU was making some technical compromises 179 * just for the sake of using perf: 180 * 181 * perf_event_open() requires events to either relate to a pid or a specific 182 * cpu core, while our device pmu related to neither. Events opened with a 183 * pid will be automatically enabled/disabled according to the scheduling of 184 * that process - so not appropriate for us. When an event is related to a 185 * cpu id, perf ensures pmu methods will be invoked via an inter process 186 * interrupt on that core. To avoid invasive changes our userspace opened OA 187 * perf events for a specific cpu. This was workable but it meant the 188 * majority of the OA driver ran in atomic context, including all OA report 189 * forwarding, which wasn't really necessary in our case and seems to make 190 * our locking requirements somewhat complex as we handled the interaction 191 * with the rest of the i915 driver. 192 */ 193 194 #include <linux/anon_inodes.h> 195 #include <linux/sizes.h> 196 #include <linux/uuid.h> 197 198 #include "gem/i915_gem_context.h" 199 #include "gem/i915_gem_pm.h" 200 #include "gt/intel_lrc_reg.h" 201 202 #include "i915_drv.h" 203 #include "i915_oa_hsw.h" 204 #include "i915_oa_bdw.h" 205 #include "i915_oa_chv.h" 206 #include "i915_oa_sklgt2.h" 207 #include "i915_oa_sklgt3.h" 208 #include "i915_oa_sklgt4.h" 209 #include "i915_oa_bxt.h" 210 #include "i915_oa_kblgt2.h" 211 #include "i915_oa_kblgt3.h" 212 #include "i915_oa_glk.h" 213 #include "i915_oa_cflgt2.h" 214 #include "i915_oa_cflgt3.h" 215 #include "i915_oa_cnl.h" 216 #include "i915_oa_icl.h" 217 218 /* HW requires this to be a power of two, between 128k and 16M, though driver 219 * is currently generally designed assuming the largest 16M size is used such 220 * that the overflow cases are unlikely in normal operation. 221 */ 222 #define OA_BUFFER_SIZE SZ_16M 223 224 #define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1)) 225 226 /** 227 * DOC: OA Tail Pointer Race 228 * 229 * There's a HW race condition between OA unit tail pointer register updates and 230 * writes to memory whereby the tail pointer can sometimes get ahead of what's 231 * been written out to the OA buffer so far (in terms of what's visible to the 232 * CPU). 233 * 234 * Although this can be observed explicitly while copying reports to userspace 235 * by checking for a zeroed report-id field in tail reports, we want to account 236 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant 237 * read() attempts. 238 * 239 * In effect we define a tail pointer for reading that lags the real tail 240 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough 241 * time for the corresponding reports to become visible to the CPU. 242 * 243 * To manage this we actually track two tail pointers: 244 * 1) An 'aging' tail with an associated timestamp that is tracked until we 245 * can trust the corresponding data is visible to the CPU; at which point 246 * it is considered 'aged'. 247 * 2) An 'aged' tail that can be used for read()ing. 248 * 249 * The two separate pointers let us decouple read()s from tail pointer aging. 250 * 251 * The tail pointers are checked and updated at a limited rate within a hrtimer 252 * callback (the same callback that is used for delivering EPOLLIN events) 253 * 254 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which 255 * indicates that an updated tail pointer is needed. 256 * 257 * Most of the implementation details for this workaround are in 258 * oa_buffer_check_unlocked() and _append_oa_reports() 259 * 260 * Note for posterity: previously the driver used to define an effective tail 261 * pointer that lagged the real pointer by a 'tail margin' measured in bytes 262 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency. 263 * This was flawed considering that the OA unit may also automatically generate 264 * non-periodic reports (such as on context switch) or the OA unit may be 265 * enabled without any periodic sampling. 266 */ 267 #define OA_TAIL_MARGIN_NSEC 100000ULL 268 #define INVALID_TAIL_PTR 0xffffffff 269 270 /* frequency for checking whether the OA unit has written new reports to the 271 * circular OA buffer... 272 */ 273 #define POLL_FREQUENCY 200 274 #define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY) 275 276 /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ 277 static u32 i915_perf_stream_paranoid = true; 278 279 /* The maximum exponent the hardware accepts is 63 (essentially it selects one 280 * of the 64bit timestamp bits to trigger reports from) but there's currently 281 * no known use case for sampling as infrequently as once per 47 thousand years. 282 * 283 * Since the timestamps included in OA reports are only 32bits it seems 284 * reasonable to limit the OA exponent where it's still possible to account for 285 * overflow in OA report timestamps. 286 */ 287 #define OA_EXPONENT_MAX 31 288 289 #define INVALID_CTX_ID 0xffffffff 290 291 /* On Gen8+ automatically triggered OA reports include a 'reason' field... */ 292 #define OAREPORT_REASON_MASK 0x3f 293 #define OAREPORT_REASON_SHIFT 19 294 #define OAREPORT_REASON_TIMER (1<<0) 295 #define OAREPORT_REASON_CTX_SWITCH (1<<3) 296 #define OAREPORT_REASON_CLK_RATIO (1<<5) 297 298 299 /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate 300 * 301 * The highest sampling frequency we can theoretically program the OA unit 302 * with is always half the timestamp frequency: E.g. 6.25Mhz for Haswell. 303 * 304 * Initialized just before we register the sysctl parameter. 305 */ 306 static int oa_sample_rate_hard_limit; 307 308 /* Theoretically we can program the OA unit to sample every 160ns but don't 309 * allow that by default unless root... 310 * 311 * The default threshold of 100000Hz is based on perf's similar 312 * kernel.perf_event_max_sample_rate sysctl parameter. 313 */ 314 static u32 i915_oa_max_sample_rate = 100000; 315 316 /* XXX: beware if future OA HW adds new report formats that the current 317 * code assumes all reports have a power-of-two size and ~(size - 1) can 318 * be used as a mask to align the OA tail pointer. 319 */ 320 static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { 321 [I915_OA_FORMAT_A13] = { 0, 64 }, 322 [I915_OA_FORMAT_A29] = { 1, 128 }, 323 [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 }, 324 /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */ 325 [I915_OA_FORMAT_B4_C8] = { 4, 64 }, 326 [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 }, 327 [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 }, 328 [I915_OA_FORMAT_C4_B8] = { 7, 64 }, 329 }; 330 331 static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = { 332 [I915_OA_FORMAT_A12] = { 0, 64 }, 333 [I915_OA_FORMAT_A12_B8_C8] = { 2, 128 }, 334 [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, 335 [I915_OA_FORMAT_C4_B8] = { 7, 64 }, 336 }; 337 338 #define SAMPLE_OA_REPORT (1<<0) 339 340 /** 341 * struct perf_open_properties - for validated properties given to open a stream 342 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags 343 * @single_context: Whether a single or all gpu contexts should be monitored 344 * @ctx_handle: A gem ctx handle for use with @single_context 345 * @metrics_set: An ID for an OA unit metric set advertised via sysfs 346 * @oa_format: An OA unit HW report format 347 * @oa_periodic: Whether to enable periodic OA unit sampling 348 * @oa_period_exponent: The OA unit sampling period is derived from this 349 * 350 * As read_properties_unlocked() enumerates and validates the properties given 351 * to open a stream of metrics the configuration is built up in the structure 352 * which starts out zero initialized. 353 */ 354 struct perf_open_properties { 355 u32 sample_flags; 356 357 u64 single_context:1; 358 u64 ctx_handle; 359 360 /* OA sampling state */ 361 int metrics_set; 362 int oa_format; 363 bool oa_periodic; 364 int oa_period_exponent; 365 }; 366 367 static void free_oa_config(struct drm_i915_private *dev_priv, 368 struct i915_oa_config *oa_config) 369 { 370 if (!PTR_ERR(oa_config->flex_regs)) 371 kfree(oa_config->flex_regs); 372 if (!PTR_ERR(oa_config->b_counter_regs)) 373 kfree(oa_config->b_counter_regs); 374 if (!PTR_ERR(oa_config->mux_regs)) 375 kfree(oa_config->mux_regs); 376 kfree(oa_config); 377 } 378 379 static void put_oa_config(struct drm_i915_private *dev_priv, 380 struct i915_oa_config *oa_config) 381 { 382 if (!atomic_dec_and_test(&oa_config->ref_count)) 383 return; 384 385 free_oa_config(dev_priv, oa_config); 386 } 387 388 static int get_oa_config(struct drm_i915_private *dev_priv, 389 int metrics_set, 390 struct i915_oa_config **out_config) 391 { 392 int ret; 393 394 if (metrics_set == 1) { 395 *out_config = &dev_priv->perf.oa.test_config; 396 atomic_inc(&dev_priv->perf.oa.test_config.ref_count); 397 return 0; 398 } 399 400 ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); 401 if (ret) 402 return ret; 403 404 *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set); 405 if (!*out_config) 406 ret = -EINVAL; 407 else 408 atomic_inc(&(*out_config)->ref_count); 409 410 mutex_unlock(&dev_priv->perf.metrics_lock); 411 412 return ret; 413 } 414 415 static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv) 416 { 417 return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK; 418 } 419 420 static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv) 421 { 422 u32 oastatus1 = I915_READ(GEN7_OASTATUS1); 423 424 return oastatus1 & GEN7_OASTATUS1_TAIL_MASK; 425 } 426 427 /** 428 * oa_buffer_check_unlocked - check for data and update tail ptr state 429 * @dev_priv: i915 device instance 430 * 431 * This is either called via fops (for blocking reads in user ctx) or the poll 432 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check 433 * if there is data available for userspace to read. 434 * 435 * This function is central to providing a workaround for the OA unit tail 436 * pointer having a race with respect to what data is visible to the CPU. 437 * It is responsible for reading tail pointers from the hardware and giving 438 * the pointers time to 'age' before they are made available for reading. 439 * (See description of OA_TAIL_MARGIN_NSEC above for further details.) 440 * 441 * Besides returning true when there is data available to read() this function 442 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp 443 * and .aged_tail_idx state used for reading. 444 * 445 * Note: It's safe to read OA config state here unlocked, assuming that this is 446 * only called while the stream is enabled, while the global OA configuration 447 * can't be modified. 448 * 449 * Returns: %true if the OA buffer contains data, else %false 450 */ 451 static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv) 452 { 453 int report_size = dev_priv->perf.oa.oa_buffer.format_size; 454 unsigned long flags; 455 unsigned int aged_idx; 456 u32 head, hw_tail, aged_tail, aging_tail; 457 u64 now; 458 459 /* We have to consider the (unlikely) possibility that read() errors 460 * could result in an OA buffer reset which might reset the head, 461 * tails[] and aged_tail state. 462 */ 463 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 464 465 /* NB: The head we observe here might effectively be a little out of 466 * date (between head and tails[aged_idx].offset if there is currently 467 * a read() in progress. 468 */ 469 head = dev_priv->perf.oa.oa_buffer.head; 470 471 aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx; 472 aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset; 473 aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset; 474 475 hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv); 476 477 /* The tail pointer increases in 64 byte increments, 478 * not in report_size steps... 479 */ 480 hw_tail &= ~(report_size - 1); 481 482 now = ktime_get_mono_fast_ns(); 483 484 /* Update the aged tail 485 * 486 * Flip the tail pointer available for read()s once the aging tail is 487 * old enough to trust that the corresponding data will be visible to 488 * the CPU... 489 * 490 * Do this before updating the aging pointer in case we may be able to 491 * immediately start aging a new pointer too (if new data has become 492 * available) without needing to wait for a later hrtimer callback. 493 */ 494 if (aging_tail != INVALID_TAIL_PTR && 495 ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) > 496 OA_TAIL_MARGIN_NSEC)) { 497 498 aged_idx ^= 1; 499 dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx; 500 501 aged_tail = aging_tail; 502 503 /* Mark that we need a new pointer to start aging... */ 504 dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR; 505 aging_tail = INVALID_TAIL_PTR; 506 } 507 508 /* Update the aging tail 509 * 510 * We throttle aging tail updates until we have a new tail that 511 * represents >= one report more data than is already available for 512 * reading. This ensures there will be enough data for a successful 513 * read once this new pointer has aged and ensures we will give the new 514 * pointer time to age. 515 */ 516 if (aging_tail == INVALID_TAIL_PTR && 517 (aged_tail == INVALID_TAIL_PTR || 518 OA_TAKEN(hw_tail, aged_tail) >= report_size)) { 519 struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma; 520 u32 gtt_offset = i915_ggtt_offset(vma); 521 522 /* Be paranoid and do a bounds check on the pointer read back 523 * from hardware, just in case some spurious hardware condition 524 * could put the tail out of bounds... 525 */ 526 if (hw_tail >= gtt_offset && 527 hw_tail < (gtt_offset + OA_BUFFER_SIZE)) { 528 dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = 529 aging_tail = hw_tail; 530 dev_priv->perf.oa.oa_buffer.aging_timestamp = now; 531 } else { 532 DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n", 533 hw_tail); 534 } 535 } 536 537 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 538 539 return aged_tail == INVALID_TAIL_PTR ? 540 false : OA_TAKEN(aged_tail, head) >= report_size; 541 } 542 543 /** 544 * append_oa_status - Appends a status record to a userspace read() buffer. 545 * @stream: An i915-perf stream opened for OA metrics 546 * @buf: destination buffer given by userspace 547 * @count: the number of bytes userspace wants to read 548 * @offset: (inout): the current position for writing into @buf 549 * @type: The kind of status to report to userspace 550 * 551 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`) 552 * into the userspace read() buffer. 553 * 554 * The @buf @offset will only be updated on success. 555 * 556 * Returns: 0 on success, negative error code on failure. 557 */ 558 static int append_oa_status(struct i915_perf_stream *stream, 559 char __user *buf, 560 size_t count, 561 size_t *offset, 562 enum drm_i915_perf_record_type type) 563 { 564 struct drm_i915_perf_record_header header = { type, 0, sizeof(header) }; 565 566 if ((count - *offset) < header.size) 567 return -ENOSPC; 568 569 if (copy_to_user(buf + *offset, &header, sizeof(header))) 570 return -EFAULT; 571 572 (*offset) += header.size; 573 574 return 0; 575 } 576 577 /** 578 * append_oa_sample - Copies single OA report into userspace read() buffer. 579 * @stream: An i915-perf stream opened for OA metrics 580 * @buf: destination buffer given by userspace 581 * @count: the number of bytes userspace wants to read 582 * @offset: (inout): the current position for writing into @buf 583 * @report: A single OA report to (optionally) include as part of the sample 584 * 585 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*` 586 * properties when opening a stream, tracked as `stream->sample_flags`. This 587 * function copies the requested components of a single sample to the given 588 * read() @buf. 589 * 590 * The @buf @offset will only be updated on success. 591 * 592 * Returns: 0 on success, negative error code on failure. 593 */ 594 static int append_oa_sample(struct i915_perf_stream *stream, 595 char __user *buf, 596 size_t count, 597 size_t *offset, 598 const u8 *report) 599 { 600 struct drm_i915_private *dev_priv = stream->dev_priv; 601 int report_size = dev_priv->perf.oa.oa_buffer.format_size; 602 struct drm_i915_perf_record_header header; 603 u32 sample_flags = stream->sample_flags; 604 605 header.type = DRM_I915_PERF_RECORD_SAMPLE; 606 header.pad = 0; 607 header.size = stream->sample_size; 608 609 if ((count - *offset) < header.size) 610 return -ENOSPC; 611 612 buf += *offset; 613 if (copy_to_user(buf, &header, sizeof(header))) 614 return -EFAULT; 615 buf += sizeof(header); 616 617 if (sample_flags & SAMPLE_OA_REPORT) { 618 if (copy_to_user(buf, report, report_size)) 619 return -EFAULT; 620 } 621 622 (*offset) += header.size; 623 624 return 0; 625 } 626 627 /** 628 * Copies all buffered OA reports into userspace read() buffer. 629 * @stream: An i915-perf stream opened for OA metrics 630 * @buf: destination buffer given by userspace 631 * @count: the number of bytes userspace wants to read 632 * @offset: (inout): the current position for writing into @buf 633 * 634 * Notably any error condition resulting in a short read (-%ENOSPC or 635 * -%EFAULT) will be returned even though one or more records may 636 * have been successfully copied. In this case it's up to the caller 637 * to decide if the error should be squashed before returning to 638 * userspace. 639 * 640 * Note: reports are consumed from the head, and appended to the 641 * tail, so the tail chases the head?... If you think that's mad 642 * and back-to-front you're not alone, but this follows the 643 * Gen PRM naming convention. 644 * 645 * Returns: 0 on success, negative error code on failure. 646 */ 647 static int gen8_append_oa_reports(struct i915_perf_stream *stream, 648 char __user *buf, 649 size_t count, 650 size_t *offset) 651 { 652 struct drm_i915_private *dev_priv = stream->dev_priv; 653 int report_size = dev_priv->perf.oa.oa_buffer.format_size; 654 u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr; 655 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); 656 u32 mask = (OA_BUFFER_SIZE - 1); 657 size_t start_offset = *offset; 658 unsigned long flags; 659 unsigned int aged_tail_idx; 660 u32 head, tail; 661 u32 taken; 662 int ret = 0; 663 664 if (WARN_ON(!stream->enabled)) 665 return -EIO; 666 667 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 668 669 head = dev_priv->perf.oa.oa_buffer.head; 670 aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx; 671 tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset; 672 673 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 674 675 /* 676 * An invalid tail pointer here means we're still waiting for the poll 677 * hrtimer callback to give us a pointer 678 */ 679 if (tail == INVALID_TAIL_PTR) 680 return -EAGAIN; 681 682 /* 683 * NB: oa_buffer.head/tail include the gtt_offset which we don't want 684 * while indexing relative to oa_buf_base. 685 */ 686 head -= gtt_offset; 687 tail -= gtt_offset; 688 689 /* 690 * An out of bounds or misaligned head or tail pointer implies a driver 691 * bug since we validate + align the tail pointers we read from the 692 * hardware and we are in full control of the head pointer which should 693 * only be incremented by multiples of the report size (notably also 694 * all a power of two). 695 */ 696 if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size || 697 tail > OA_BUFFER_SIZE || tail % report_size, 698 "Inconsistent OA buffer pointers: head = %u, tail = %u\n", 699 head, tail)) 700 return -EIO; 701 702 703 for (/* none */; 704 (taken = OA_TAKEN(tail, head)); 705 head = (head + report_size) & mask) { 706 u8 *report = oa_buf_base + head; 707 u32 *report32 = (void *)report; 708 u32 ctx_id; 709 u32 reason; 710 711 /* 712 * All the report sizes factor neatly into the buffer 713 * size so we never expect to see a report split 714 * between the beginning and end of the buffer. 715 * 716 * Given the initial alignment check a misalignment 717 * here would imply a driver bug that would result 718 * in an overrun. 719 */ 720 if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { 721 DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); 722 break; 723 } 724 725 /* 726 * The reason field includes flags identifying what 727 * triggered this specific report (mostly timer 728 * triggered or e.g. due to a context switch). 729 * 730 * This field is never expected to be zero so we can 731 * check that the report isn't invalid before copying 732 * it to userspace... 733 */ 734 reason = ((report32[0] >> OAREPORT_REASON_SHIFT) & 735 OAREPORT_REASON_MASK); 736 if (reason == 0) { 737 if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs)) 738 DRM_NOTE("Skipping spurious, invalid OA report\n"); 739 continue; 740 } 741 742 ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask; 743 744 /* 745 * Squash whatever is in the CTX_ID field if it's marked as 746 * invalid to be sure we avoid false-positive, single-context 747 * filtering below... 748 * 749 * Note: that we don't clear the valid_ctx_bit so userspace can 750 * understand that the ID has been squashed by the kernel. 751 */ 752 if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit)) 753 ctx_id = report32[2] = INVALID_CTX_ID; 754 755 /* 756 * NB: For Gen 8 the OA unit no longer supports clock gating 757 * off for a specific context and the kernel can't securely 758 * stop the counters from updating as system-wide / global 759 * values. 760 * 761 * Automatic reports now include a context ID so reports can be 762 * filtered on the cpu but it's not worth trying to 763 * automatically subtract/hide counter progress for other 764 * contexts while filtering since we can't stop userspace 765 * issuing MI_REPORT_PERF_COUNT commands which would still 766 * provide a side-band view of the real values. 767 * 768 * To allow userspace (such as Mesa/GL_INTEL_performance_query) 769 * to normalize counters for a single filtered context then it 770 * needs be forwarded bookend context-switch reports so that it 771 * can track switches in between MI_REPORT_PERF_COUNT commands 772 * and can itself subtract/ignore the progress of counters 773 * associated with other contexts. Note that the hardware 774 * automatically triggers reports when switching to a new 775 * context which are tagged with the ID of the newly active 776 * context. To avoid the complexity (and likely fragility) of 777 * reading ahead while parsing reports to try and minimize 778 * forwarding redundant context switch reports (i.e. between 779 * other, unrelated contexts) we simply elect to forward them 780 * all. 781 * 782 * We don't rely solely on the reason field to identify context 783 * switches since it's not-uncommon for periodic samples to 784 * identify a switch before any 'context switch' report. 785 */ 786 if (!dev_priv->perf.oa.exclusive_stream->ctx || 787 dev_priv->perf.oa.specific_ctx_id == ctx_id || 788 (dev_priv->perf.oa.oa_buffer.last_ctx_id == 789 dev_priv->perf.oa.specific_ctx_id) || 790 reason & OAREPORT_REASON_CTX_SWITCH) { 791 792 /* 793 * While filtering for a single context we avoid 794 * leaking the IDs of other contexts. 795 */ 796 if (dev_priv->perf.oa.exclusive_stream->ctx && 797 dev_priv->perf.oa.specific_ctx_id != ctx_id) { 798 report32[2] = INVALID_CTX_ID; 799 } 800 801 ret = append_oa_sample(stream, buf, count, offset, 802 report); 803 if (ret) 804 break; 805 806 dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id; 807 } 808 809 /* 810 * The above reason field sanity check is based on 811 * the assumption that the OA buffer is initially 812 * zeroed and we reset the field after copying so the 813 * check is still meaningful once old reports start 814 * being overwritten. 815 */ 816 report32[0] = 0; 817 } 818 819 if (start_offset != *offset) { 820 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 821 822 /* 823 * We removed the gtt_offset for the copy loop above, indexing 824 * relative to oa_buf_base so put back here... 825 */ 826 head += gtt_offset; 827 828 I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK); 829 dev_priv->perf.oa.oa_buffer.head = head; 830 831 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 832 } 833 834 return ret; 835 } 836 837 /** 838 * gen8_oa_read - copy status records then buffered OA reports 839 * @stream: An i915-perf stream opened for OA metrics 840 * @buf: destination buffer given by userspace 841 * @count: the number of bytes userspace wants to read 842 * @offset: (inout): the current position for writing into @buf 843 * 844 * Checks OA unit status registers and if necessary appends corresponding 845 * status records for userspace (such as for a buffer full condition) and then 846 * initiate appending any buffered OA reports. 847 * 848 * Updates @offset according to the number of bytes successfully copied into 849 * the userspace buffer. 850 * 851 * NB: some data may be successfully copied to the userspace buffer 852 * even if an error is returned, and this is reflected in the 853 * updated @offset. 854 * 855 * Returns: zero on success or a negative error code 856 */ 857 static int gen8_oa_read(struct i915_perf_stream *stream, 858 char __user *buf, 859 size_t count, 860 size_t *offset) 861 { 862 struct drm_i915_private *dev_priv = stream->dev_priv; 863 u32 oastatus; 864 int ret; 865 866 if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr)) 867 return -EIO; 868 869 oastatus = I915_READ(GEN8_OASTATUS); 870 871 /* 872 * We treat OABUFFER_OVERFLOW as a significant error: 873 * 874 * Although theoretically we could handle this more gracefully 875 * sometimes, some Gens don't correctly suppress certain 876 * automatically triggered reports in this condition and so we 877 * have to assume that old reports are now being trampled 878 * over. 879 * 880 * Considering how we don't currently give userspace control 881 * over the OA buffer size and always configure a large 16MB 882 * buffer, then a buffer overflow does anyway likely indicate 883 * that something has gone quite badly wrong. 884 */ 885 if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) { 886 ret = append_oa_status(stream, buf, count, offset, 887 DRM_I915_PERF_RECORD_OA_BUFFER_LOST); 888 if (ret) 889 return ret; 890 891 DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", 892 dev_priv->perf.oa.period_exponent); 893 894 dev_priv->perf.oa.ops.oa_disable(stream); 895 dev_priv->perf.oa.ops.oa_enable(stream); 896 897 /* 898 * Note: .oa_enable() is expected to re-init the oabuffer and 899 * reset GEN8_OASTATUS for us 900 */ 901 oastatus = I915_READ(GEN8_OASTATUS); 902 } 903 904 if (oastatus & GEN8_OASTATUS_REPORT_LOST) { 905 ret = append_oa_status(stream, buf, count, offset, 906 DRM_I915_PERF_RECORD_OA_REPORT_LOST); 907 if (ret) 908 return ret; 909 I915_WRITE(GEN8_OASTATUS, 910 oastatus & ~GEN8_OASTATUS_REPORT_LOST); 911 } 912 913 return gen8_append_oa_reports(stream, buf, count, offset); 914 } 915 916 /** 917 * Copies all buffered OA reports into userspace read() buffer. 918 * @stream: An i915-perf stream opened for OA metrics 919 * @buf: destination buffer given by userspace 920 * @count: the number of bytes userspace wants to read 921 * @offset: (inout): the current position for writing into @buf 922 * 923 * Notably any error condition resulting in a short read (-%ENOSPC or 924 * -%EFAULT) will be returned even though one or more records may 925 * have been successfully copied. In this case it's up to the caller 926 * to decide if the error should be squashed before returning to 927 * userspace. 928 * 929 * Note: reports are consumed from the head, and appended to the 930 * tail, so the tail chases the head?... If you think that's mad 931 * and back-to-front you're not alone, but this follows the 932 * Gen PRM naming convention. 933 * 934 * Returns: 0 on success, negative error code on failure. 935 */ 936 static int gen7_append_oa_reports(struct i915_perf_stream *stream, 937 char __user *buf, 938 size_t count, 939 size_t *offset) 940 { 941 struct drm_i915_private *dev_priv = stream->dev_priv; 942 int report_size = dev_priv->perf.oa.oa_buffer.format_size; 943 u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr; 944 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); 945 u32 mask = (OA_BUFFER_SIZE - 1); 946 size_t start_offset = *offset; 947 unsigned long flags; 948 unsigned int aged_tail_idx; 949 u32 head, tail; 950 u32 taken; 951 int ret = 0; 952 953 if (WARN_ON(!stream->enabled)) 954 return -EIO; 955 956 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 957 958 head = dev_priv->perf.oa.oa_buffer.head; 959 aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx; 960 tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset; 961 962 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 963 964 /* An invalid tail pointer here means we're still waiting for the poll 965 * hrtimer callback to give us a pointer 966 */ 967 if (tail == INVALID_TAIL_PTR) 968 return -EAGAIN; 969 970 /* NB: oa_buffer.head/tail include the gtt_offset which we don't want 971 * while indexing relative to oa_buf_base. 972 */ 973 head -= gtt_offset; 974 tail -= gtt_offset; 975 976 /* An out of bounds or misaligned head or tail pointer implies a driver 977 * bug since we validate + align the tail pointers we read from the 978 * hardware and we are in full control of the head pointer which should 979 * only be incremented by multiples of the report size (notably also 980 * all a power of two). 981 */ 982 if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size || 983 tail > OA_BUFFER_SIZE || tail % report_size, 984 "Inconsistent OA buffer pointers: head = %u, tail = %u\n", 985 head, tail)) 986 return -EIO; 987 988 989 for (/* none */; 990 (taken = OA_TAKEN(tail, head)); 991 head = (head + report_size) & mask) { 992 u8 *report = oa_buf_base + head; 993 u32 *report32 = (void *)report; 994 995 /* All the report sizes factor neatly into the buffer 996 * size so we never expect to see a report split 997 * between the beginning and end of the buffer. 998 * 999 * Given the initial alignment check a misalignment 1000 * here would imply a driver bug that would result 1001 * in an overrun. 1002 */ 1003 if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { 1004 DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); 1005 break; 1006 } 1007 1008 /* The report-ID field for periodic samples includes 1009 * some undocumented flags related to what triggered 1010 * the report and is never expected to be zero so we 1011 * can check that the report isn't invalid before 1012 * copying it to userspace... 1013 */ 1014 if (report32[0] == 0) { 1015 if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs)) 1016 DRM_NOTE("Skipping spurious, invalid OA report\n"); 1017 continue; 1018 } 1019 1020 ret = append_oa_sample(stream, buf, count, offset, report); 1021 if (ret) 1022 break; 1023 1024 /* The above report-id field sanity check is based on 1025 * the assumption that the OA buffer is initially 1026 * zeroed and we reset the field after copying so the 1027 * check is still meaningful once old reports start 1028 * being overwritten. 1029 */ 1030 report32[0] = 0; 1031 } 1032 1033 if (start_offset != *offset) { 1034 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1035 1036 /* We removed the gtt_offset for the copy loop above, indexing 1037 * relative to oa_buf_base so put back here... 1038 */ 1039 head += gtt_offset; 1040 1041 I915_WRITE(GEN7_OASTATUS2, 1042 ((head & GEN7_OASTATUS2_HEAD_MASK) | 1043 GEN7_OASTATUS2_MEM_SELECT_GGTT)); 1044 dev_priv->perf.oa.oa_buffer.head = head; 1045 1046 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1047 } 1048 1049 return ret; 1050 } 1051 1052 /** 1053 * gen7_oa_read - copy status records then buffered OA reports 1054 * @stream: An i915-perf stream opened for OA metrics 1055 * @buf: destination buffer given by userspace 1056 * @count: the number of bytes userspace wants to read 1057 * @offset: (inout): the current position for writing into @buf 1058 * 1059 * Checks Gen 7 specific OA unit status registers and if necessary appends 1060 * corresponding status records for userspace (such as for a buffer full 1061 * condition) and then initiate appending any buffered OA reports. 1062 * 1063 * Updates @offset according to the number of bytes successfully copied into 1064 * the userspace buffer. 1065 * 1066 * Returns: zero on success or a negative error code 1067 */ 1068 static int gen7_oa_read(struct i915_perf_stream *stream, 1069 char __user *buf, 1070 size_t count, 1071 size_t *offset) 1072 { 1073 struct drm_i915_private *dev_priv = stream->dev_priv; 1074 u32 oastatus1; 1075 int ret; 1076 1077 if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr)) 1078 return -EIO; 1079 1080 oastatus1 = I915_READ(GEN7_OASTATUS1); 1081 1082 /* XXX: On Haswell we don't have a safe way to clear oastatus1 1083 * bits while the OA unit is enabled (while the tail pointer 1084 * may be updated asynchronously) so we ignore status bits 1085 * that have already been reported to userspace. 1086 */ 1087 oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1; 1088 1089 /* We treat OABUFFER_OVERFLOW as a significant error: 1090 * 1091 * - The status can be interpreted to mean that the buffer is 1092 * currently full (with a higher precedence than OA_TAKEN() 1093 * which will start to report a near-empty buffer after an 1094 * overflow) but it's awkward that we can't clear the status 1095 * on Haswell, so without a reset we won't be able to catch 1096 * the state again. 1097 * 1098 * - Since it also implies the HW has started overwriting old 1099 * reports it may also affect our sanity checks for invalid 1100 * reports when copying to userspace that assume new reports 1101 * are being written to cleared memory. 1102 * 1103 * - In the future we may want to introduce a flight recorder 1104 * mode where the driver will automatically maintain a safe 1105 * guard band between head/tail, avoiding this overflow 1106 * condition, but we avoid the added driver complexity for 1107 * now. 1108 */ 1109 if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) { 1110 ret = append_oa_status(stream, buf, count, offset, 1111 DRM_I915_PERF_RECORD_OA_BUFFER_LOST); 1112 if (ret) 1113 return ret; 1114 1115 DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", 1116 dev_priv->perf.oa.period_exponent); 1117 1118 dev_priv->perf.oa.ops.oa_disable(stream); 1119 dev_priv->perf.oa.ops.oa_enable(stream); 1120 1121 oastatus1 = I915_READ(GEN7_OASTATUS1); 1122 } 1123 1124 if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) { 1125 ret = append_oa_status(stream, buf, count, offset, 1126 DRM_I915_PERF_RECORD_OA_REPORT_LOST); 1127 if (ret) 1128 return ret; 1129 dev_priv->perf.oa.gen7_latched_oastatus1 |= 1130 GEN7_OASTATUS1_REPORT_LOST; 1131 } 1132 1133 return gen7_append_oa_reports(stream, buf, count, offset); 1134 } 1135 1136 /** 1137 * i915_oa_wait_unlocked - handles blocking IO until OA data available 1138 * @stream: An i915-perf stream opened for OA metrics 1139 * 1140 * Called when userspace tries to read() from a blocking stream FD opened 1141 * for OA metrics. It waits until the hrtimer callback finds a non-empty 1142 * OA buffer and wakes us. 1143 * 1144 * Note: it's acceptable to have this return with some false positives 1145 * since any subsequent read handling will return -EAGAIN if there isn't 1146 * really data ready for userspace yet. 1147 * 1148 * Returns: zero on success or a negative error code 1149 */ 1150 static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) 1151 { 1152 struct drm_i915_private *dev_priv = stream->dev_priv; 1153 1154 /* We would wait indefinitely if periodic sampling is not enabled */ 1155 if (!dev_priv->perf.oa.periodic) 1156 return -EIO; 1157 1158 return wait_event_interruptible(dev_priv->perf.oa.poll_wq, 1159 oa_buffer_check_unlocked(dev_priv)); 1160 } 1161 1162 /** 1163 * i915_oa_poll_wait - call poll_wait() for an OA stream poll() 1164 * @stream: An i915-perf stream opened for OA metrics 1165 * @file: An i915 perf stream file 1166 * @wait: poll() state table 1167 * 1168 * For handling userspace polling on an i915 perf stream opened for OA metrics, 1169 * this starts a poll_wait with the wait queue that our hrtimer callback wakes 1170 * when it sees data ready to read in the circular OA buffer. 1171 */ 1172 static void i915_oa_poll_wait(struct i915_perf_stream *stream, 1173 struct file *file, 1174 poll_table *wait) 1175 { 1176 struct drm_i915_private *dev_priv = stream->dev_priv; 1177 1178 poll_wait(file, &dev_priv->perf.oa.poll_wq, wait); 1179 } 1180 1181 /** 1182 * i915_oa_read - just calls through to &i915_oa_ops->read 1183 * @stream: An i915-perf stream opened for OA metrics 1184 * @buf: destination buffer given by userspace 1185 * @count: the number of bytes userspace wants to read 1186 * @offset: (inout): the current position for writing into @buf 1187 * 1188 * Updates @offset according to the number of bytes successfully copied into 1189 * the userspace buffer. 1190 * 1191 * Returns: zero on success or a negative error code 1192 */ 1193 static int i915_oa_read(struct i915_perf_stream *stream, 1194 char __user *buf, 1195 size_t count, 1196 size_t *offset) 1197 { 1198 struct drm_i915_private *dev_priv = stream->dev_priv; 1199 1200 return dev_priv->perf.oa.ops.read(stream, buf, count, offset); 1201 } 1202 1203 static struct intel_context *oa_pin_context(struct drm_i915_private *i915, 1204 struct i915_gem_context *ctx) 1205 { 1206 struct i915_gem_engines_iter it; 1207 struct intel_context *ce; 1208 int err; 1209 1210 err = i915_mutex_lock_interruptible(&i915->drm); 1211 if (err) 1212 return ERR_PTR(err); 1213 1214 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 1215 if (ce->engine->class != RENDER_CLASS) 1216 continue; 1217 1218 /* 1219 * As the ID is the gtt offset of the context's vma we 1220 * pin the vma to ensure the ID remains fixed. 1221 */ 1222 err = intel_context_pin(ce); 1223 if (err == 0) { 1224 i915->perf.oa.pinned_ctx = ce; 1225 break; 1226 } 1227 } 1228 i915_gem_context_unlock_engines(ctx); 1229 1230 mutex_unlock(&i915->drm.struct_mutex); 1231 if (err) 1232 return ERR_PTR(err); 1233 1234 return i915->perf.oa.pinned_ctx; 1235 } 1236 1237 /** 1238 * oa_get_render_ctx_id - determine and hold ctx hw id 1239 * @stream: An i915-perf stream opened for OA metrics 1240 * 1241 * Determine the render context hw id, and ensure it remains fixed for the 1242 * lifetime of the stream. This ensures that we don't have to worry about 1243 * updating the context ID in OACONTROL on the fly. 1244 * 1245 * Returns: zero on success or a negative error code 1246 */ 1247 static int oa_get_render_ctx_id(struct i915_perf_stream *stream) 1248 { 1249 struct drm_i915_private *i915 = stream->dev_priv; 1250 struct intel_context *ce; 1251 1252 ce = oa_pin_context(i915, stream->ctx); 1253 if (IS_ERR(ce)) 1254 return PTR_ERR(ce); 1255 1256 switch (INTEL_GEN(i915)) { 1257 case 7: { 1258 /* 1259 * On Haswell we don't do any post processing of the reports 1260 * and don't need to use the mask. 1261 */ 1262 i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state); 1263 i915->perf.oa.specific_ctx_id_mask = 0; 1264 break; 1265 } 1266 1267 case 8: 1268 case 9: 1269 case 10: 1270 if (USES_GUC_SUBMISSION(i915)) { 1271 /* 1272 * When using GuC, the context descriptor we write in 1273 * i915 is read by GuC and rewritten before it's 1274 * actually written into the hardware. The LRCA is 1275 * what is put into the context id field of the 1276 * context descriptor by GuC. Because it's aligned to 1277 * a page, the lower 12bits are always at 0 and 1278 * dropped by GuC. They won't be part of the context 1279 * ID in the OA reports, so squash those lower bits. 1280 */ 1281 i915->perf.oa.specific_ctx_id = 1282 lower_32_bits(ce->lrc_desc) >> 12; 1283 1284 /* 1285 * GuC uses the top bit to signal proxy submission, so 1286 * ignore that bit. 1287 */ 1288 i915->perf.oa.specific_ctx_id_mask = 1289 (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1; 1290 } else { 1291 i915->perf.oa.specific_ctx_id_mask = 1292 (1U << GEN8_CTX_ID_WIDTH) - 1; 1293 i915->perf.oa.specific_ctx_id = 1294 upper_32_bits(ce->lrc_desc); 1295 i915->perf.oa.specific_ctx_id &= 1296 i915->perf.oa.specific_ctx_id_mask; 1297 } 1298 break; 1299 1300 case 11: { 1301 i915->perf.oa.specific_ctx_id_mask = 1302 ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) | 1303 ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) | 1304 ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32); 1305 i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc); 1306 i915->perf.oa.specific_ctx_id &= 1307 i915->perf.oa.specific_ctx_id_mask; 1308 break; 1309 } 1310 1311 default: 1312 MISSING_CASE(INTEL_GEN(i915)); 1313 } 1314 1315 DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n", 1316 i915->perf.oa.specific_ctx_id, 1317 i915->perf.oa.specific_ctx_id_mask); 1318 1319 return 0; 1320 } 1321 1322 /** 1323 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold 1324 * @stream: An i915-perf stream opened for OA metrics 1325 * 1326 * In case anything needed doing to ensure the context HW ID would remain valid 1327 * for the lifetime of the stream, then that can be undone here. 1328 */ 1329 static void oa_put_render_ctx_id(struct i915_perf_stream *stream) 1330 { 1331 struct drm_i915_private *dev_priv = stream->dev_priv; 1332 struct intel_context *ce; 1333 1334 dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; 1335 dev_priv->perf.oa.specific_ctx_id_mask = 0; 1336 1337 ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx); 1338 if (ce) { 1339 mutex_lock(&dev_priv->drm.struct_mutex); 1340 intel_context_unpin(ce); 1341 mutex_unlock(&dev_priv->drm.struct_mutex); 1342 } 1343 } 1344 1345 static void 1346 free_oa_buffer(struct drm_i915_private *i915) 1347 { 1348 mutex_lock(&i915->drm.struct_mutex); 1349 1350 i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma, 1351 I915_VMA_RELEASE_MAP); 1352 1353 mutex_unlock(&i915->drm.struct_mutex); 1354 1355 i915->perf.oa.oa_buffer.vaddr = NULL; 1356 } 1357 1358 static void i915_oa_stream_destroy(struct i915_perf_stream *stream) 1359 { 1360 struct drm_i915_private *dev_priv = stream->dev_priv; 1361 1362 BUG_ON(stream != dev_priv->perf.oa.exclusive_stream); 1363 1364 /* 1365 * Unset exclusive_stream first, it will be checked while disabling 1366 * the metric set on gen8+. 1367 */ 1368 mutex_lock(&dev_priv->drm.struct_mutex); 1369 dev_priv->perf.oa.exclusive_stream = NULL; 1370 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 1371 mutex_unlock(&dev_priv->drm.struct_mutex); 1372 1373 free_oa_buffer(dev_priv); 1374 1375 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 1376 intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); 1377 1378 if (stream->ctx) 1379 oa_put_render_ctx_id(stream); 1380 1381 put_oa_config(dev_priv, stream->oa_config); 1382 1383 if (dev_priv->perf.oa.spurious_report_rs.missed) { 1384 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n", 1385 dev_priv->perf.oa.spurious_report_rs.missed); 1386 } 1387 } 1388 1389 static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv) 1390 { 1391 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); 1392 unsigned long flags; 1393 1394 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1395 1396 /* Pre-DevBDW: OABUFFER must be set with counters off, 1397 * before OASTATUS1, but after OASTATUS2 1398 */ 1399 I915_WRITE(GEN7_OASTATUS2, 1400 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */ 1401 dev_priv->perf.oa.oa_buffer.head = gtt_offset; 1402 1403 I915_WRITE(GEN7_OABUFFER, gtt_offset); 1404 1405 I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */ 1406 1407 /* Mark that we need updated tail pointers to read from... */ 1408 dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR; 1409 dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR; 1410 1411 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1412 1413 /* On Haswell we have to track which OASTATUS1 flags we've 1414 * already seen since they can't be cleared while periodic 1415 * sampling is enabled. 1416 */ 1417 dev_priv->perf.oa.gen7_latched_oastatus1 = 0; 1418 1419 /* NB: although the OA buffer will initially be allocated 1420 * zeroed via shmfs (and so this memset is redundant when 1421 * first allocating), we may re-init the OA buffer, either 1422 * when re-enabling a stream or in error/reset paths. 1423 * 1424 * The reason we clear the buffer for each re-init is for the 1425 * sanity check in gen7_append_oa_reports() that looks at the 1426 * report-id field to make sure it's non-zero which relies on 1427 * the assumption that new reports are being written to zeroed 1428 * memory... 1429 */ 1430 memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1431 1432 /* Maybe make ->pollin per-stream state if we support multiple 1433 * concurrent streams in the future. 1434 */ 1435 dev_priv->perf.oa.pollin = false; 1436 } 1437 1438 static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv) 1439 { 1440 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); 1441 unsigned long flags; 1442 1443 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1444 1445 I915_WRITE(GEN8_OASTATUS, 0); 1446 I915_WRITE(GEN8_OAHEADPTR, gtt_offset); 1447 dev_priv->perf.oa.oa_buffer.head = gtt_offset; 1448 1449 I915_WRITE(GEN8_OABUFFER_UDW, 0); 1450 1451 /* 1452 * PRM says: 1453 * 1454 * "This MMIO must be set before the OATAILPTR 1455 * register and after the OAHEADPTR register. This is 1456 * to enable proper functionality of the overflow 1457 * bit." 1458 */ 1459 I915_WRITE(GEN8_OABUFFER, gtt_offset | 1460 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); 1461 I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); 1462 1463 /* Mark that we need updated tail pointers to read from... */ 1464 dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR; 1465 dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR; 1466 1467 /* 1468 * Reset state used to recognise context switches, affecting which 1469 * reports we will forward to userspace while filtering for a single 1470 * context. 1471 */ 1472 dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID; 1473 1474 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1475 1476 /* 1477 * NB: although the OA buffer will initially be allocated 1478 * zeroed via shmfs (and so this memset is redundant when 1479 * first allocating), we may re-init the OA buffer, either 1480 * when re-enabling a stream or in error/reset paths. 1481 * 1482 * The reason we clear the buffer for each re-init is for the 1483 * sanity check in gen8_append_oa_reports() that looks at the 1484 * reason field to make sure it's non-zero which relies on 1485 * the assumption that new reports are being written to zeroed 1486 * memory... 1487 */ 1488 memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1489 1490 /* 1491 * Maybe make ->pollin per-stream state if we support multiple 1492 * concurrent streams in the future. 1493 */ 1494 dev_priv->perf.oa.pollin = false; 1495 } 1496 1497 static int alloc_oa_buffer(struct drm_i915_private *dev_priv) 1498 { 1499 struct drm_i915_gem_object *bo; 1500 struct i915_vma *vma; 1501 int ret; 1502 1503 if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma)) 1504 return -ENODEV; 1505 1506 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 1507 if (ret) 1508 return ret; 1509 1510 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); 1511 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); 1512 1513 bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE); 1514 if (IS_ERR(bo)) { 1515 DRM_ERROR("Failed to allocate OA buffer\n"); 1516 ret = PTR_ERR(bo); 1517 goto unlock; 1518 } 1519 1520 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); 1521 1522 /* PreHSW required 512K alignment, HSW requires 16M */ 1523 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); 1524 if (IS_ERR(vma)) { 1525 ret = PTR_ERR(vma); 1526 goto err_unref; 1527 } 1528 dev_priv->perf.oa.oa_buffer.vma = vma; 1529 1530 dev_priv->perf.oa.oa_buffer.vaddr = 1531 i915_gem_object_pin_map(bo, I915_MAP_WB); 1532 if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) { 1533 ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr); 1534 goto err_unpin; 1535 } 1536 1537 DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n", 1538 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma), 1539 dev_priv->perf.oa.oa_buffer.vaddr); 1540 1541 goto unlock; 1542 1543 err_unpin: 1544 __i915_vma_unpin(vma); 1545 1546 err_unref: 1547 i915_gem_object_put(bo); 1548 1549 dev_priv->perf.oa.oa_buffer.vaddr = NULL; 1550 dev_priv->perf.oa.oa_buffer.vma = NULL; 1551 1552 unlock: 1553 mutex_unlock(&dev_priv->drm.struct_mutex); 1554 return ret; 1555 } 1556 1557 static void config_oa_regs(struct drm_i915_private *dev_priv, 1558 const struct i915_oa_reg *regs, 1559 u32 n_regs) 1560 { 1561 u32 i; 1562 1563 for (i = 0; i < n_regs; i++) { 1564 const struct i915_oa_reg *reg = regs + i; 1565 1566 I915_WRITE(reg->addr, reg->value); 1567 } 1568 } 1569 1570 static void delay_after_mux(void) 1571 { 1572 /* 1573 * It apparently takes a fairly long time for a new MUX 1574 * configuration to be be applied after these register writes. 1575 * This delay duration was derived empirically based on the 1576 * render_basic config but hopefully it covers the maximum 1577 * configuration latency. 1578 * 1579 * As a fallback, the checks in _append_oa_reports() to skip 1580 * invalid OA reports do also seem to work to discard reports 1581 * generated before this config has completed - albeit not 1582 * silently. 1583 * 1584 * Unfortunately this is essentially a magic number, since we 1585 * don't currently know of a reliable mechanism for predicting 1586 * how long the MUX config will take to apply and besides 1587 * seeing invalid reports we don't know of a reliable way to 1588 * explicitly check that the MUX config has landed. 1589 * 1590 * It's even possible we've miss characterized the underlying 1591 * problem - it just seems like the simplest explanation why 1592 * a delay at this location would mitigate any invalid reports. 1593 */ 1594 usleep_range(15000, 20000); 1595 } 1596 1597 static int hsw_enable_metric_set(struct i915_perf_stream *stream) 1598 { 1599 struct drm_i915_private *dev_priv = stream->dev_priv; 1600 const struct i915_oa_config *oa_config = stream->oa_config; 1601 1602 /* 1603 * PRM: 1604 * 1605 * OA unit is using “crclk” for its functionality. When trunk 1606 * level clock gating takes place, OA clock would be gated, 1607 * unable to count the events from non-render clock domain. 1608 * Render clock gating must be disabled when OA is enabled to 1609 * count the events from non-render domain. Unit level clock 1610 * gating for RCS should also be disabled. 1611 */ 1612 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1613 ~GEN7_DOP_CLOCK_GATE_ENABLE)); 1614 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | 1615 GEN6_CSUNIT_CLOCK_GATE_DISABLE)); 1616 1617 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); 1618 delay_after_mux(); 1619 1620 config_oa_regs(dev_priv, oa_config->b_counter_regs, 1621 oa_config->b_counter_regs_len); 1622 1623 return 0; 1624 } 1625 1626 static void hsw_disable_metric_set(struct drm_i915_private *dev_priv) 1627 { 1628 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) & 1629 ~GEN6_CSUNIT_CLOCK_GATE_DISABLE)); 1630 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) | 1631 GEN7_DOP_CLOCK_GATE_ENABLE)); 1632 1633 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & 1634 ~GT_NOA_ENABLE)); 1635 } 1636 1637 /* 1638 * NB: It must always remain pointer safe to run this even if the OA unit 1639 * has been disabled. 1640 * 1641 * It's fine to put out-of-date values into these per-context registers 1642 * in the case that the OA unit has been disabled. 1643 */ 1644 static void 1645 gen8_update_reg_state_unlocked(struct intel_context *ce, 1646 u32 *reg_state, 1647 const struct i915_oa_config *oa_config) 1648 { 1649 struct drm_i915_private *i915 = ce->gem_context->i915; 1650 u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset; 1651 u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset; 1652 /* The MMIO offsets for Flex EU registers aren't contiguous */ 1653 i915_reg_t flex_regs[] = { 1654 EU_PERF_CNTL0, 1655 EU_PERF_CNTL1, 1656 EU_PERF_CNTL2, 1657 EU_PERF_CNTL3, 1658 EU_PERF_CNTL4, 1659 EU_PERF_CNTL5, 1660 EU_PERF_CNTL6, 1661 }; 1662 int i; 1663 1664 CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL, 1665 (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 1666 (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | 1667 GEN8_OA_COUNTER_RESUME); 1668 1669 for (i = 0; i < ARRAY_SIZE(flex_regs); i++) { 1670 u32 state_offset = ctx_flexeu0 + i * 2; 1671 u32 mmio = i915_mmio_reg_offset(flex_regs[i]); 1672 1673 /* 1674 * This arbitrary default will select the 'EU FPU0 Pipeline 1675 * Active' event. In the future it's anticipated that there 1676 * will be an explicit 'No Event' we can select, but not yet... 1677 */ 1678 u32 value = 0; 1679 1680 if (oa_config) { 1681 u32 j; 1682 1683 for (j = 0; j < oa_config->flex_regs_len; j++) { 1684 if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) { 1685 value = oa_config->flex_regs[j].value; 1686 break; 1687 } 1688 } 1689 } 1690 1691 CTX_REG(reg_state, state_offset, flex_regs[i], value); 1692 } 1693 1694 CTX_REG(reg_state, 1695 CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 1696 intel_sseu_make_rpcs(i915, &ce->sseu)); 1697 } 1698 1699 /* 1700 * Manages updating the per-context aspects of the OA stream 1701 * configuration across all contexts. 1702 * 1703 * The awkward consideration here is that OACTXCONTROL controls the 1704 * exponent for periodic sampling which is primarily used for system 1705 * wide profiling where we'd like a consistent sampling period even in 1706 * the face of context switches. 1707 * 1708 * Our approach of updating the register state context (as opposed to 1709 * say using a workaround batch buffer) ensures that the hardware 1710 * won't automatically reload an out-of-date timer exponent even 1711 * transiently before a WA BB could be parsed. 1712 * 1713 * This function needs to: 1714 * - Ensure the currently running context's per-context OA state is 1715 * updated 1716 * - Ensure that all existing contexts will have the correct per-context 1717 * OA state if they are scheduled for use. 1718 * - Ensure any new contexts will be initialized with the correct 1719 * per-context OA state. 1720 * 1721 * Note: it's only the RCS/Render context that has any OA state. 1722 */ 1723 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, 1724 const struct i915_oa_config *oa_config) 1725 { 1726 unsigned int map_type = i915_coherent_map_type(dev_priv); 1727 struct i915_gem_context *ctx; 1728 struct i915_request *rq; 1729 int ret; 1730 1731 lockdep_assert_held(&dev_priv->drm.struct_mutex); 1732 1733 /* 1734 * The OA register config is setup through the context image. This image 1735 * might be written to by the GPU on context switch (in particular on 1736 * lite-restore). This means we can't safely update a context's image, 1737 * if this context is scheduled/submitted to run on the GPU. 1738 * 1739 * We could emit the OA register config through the batch buffer but 1740 * this might leave small interval of time where the OA unit is 1741 * configured at an invalid sampling period. 1742 * 1743 * So far the best way to work around this issue seems to be draining 1744 * the GPU from any submitted work. 1745 */ 1746 ret = i915_gem_wait_for_idle(dev_priv, 1747 I915_WAIT_LOCKED, 1748 MAX_SCHEDULE_TIMEOUT); 1749 if (ret) 1750 return ret; 1751 1752 /* Update all contexts now that we've stalled the submission. */ 1753 list_for_each_entry(ctx, &dev_priv->contexts.list, link) { 1754 struct i915_gem_engines_iter it; 1755 struct intel_context *ce; 1756 1757 for_each_gem_engine(ce, 1758 i915_gem_context_lock_engines(ctx), 1759 it) { 1760 u32 *regs; 1761 1762 if (ce->engine->class != RENDER_CLASS) 1763 continue; 1764 1765 /* OA settings will be set upon first use */ 1766 if (!ce->state) 1767 continue; 1768 1769 regs = i915_gem_object_pin_map(ce->state->obj, 1770 map_type); 1771 if (IS_ERR(regs)) { 1772 i915_gem_context_unlock_engines(ctx); 1773 return PTR_ERR(regs); 1774 } 1775 1776 ce->state->obj->mm.dirty = true; 1777 regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); 1778 1779 gen8_update_reg_state_unlocked(ce, regs, oa_config); 1780 1781 i915_gem_object_unpin_map(ce->state->obj); 1782 } 1783 i915_gem_context_unlock_engines(ctx); 1784 } 1785 1786 /* 1787 * Apply the configuration by doing one context restore of the edited 1788 * context image. 1789 */ 1790 rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context); 1791 if (IS_ERR(rq)) 1792 return PTR_ERR(rq); 1793 1794 i915_request_add(rq); 1795 1796 return 0; 1797 } 1798 1799 static int gen8_enable_metric_set(struct i915_perf_stream *stream) 1800 { 1801 struct drm_i915_private *dev_priv = stream->dev_priv; 1802 const struct i915_oa_config *oa_config = stream->oa_config; 1803 int ret; 1804 1805 /* 1806 * We disable slice/unslice clock ratio change reports on SKL since 1807 * they are too noisy. The HW generates a lot of redundant reports 1808 * where the ratio hasn't really changed causing a lot of redundant 1809 * work to processes and increasing the chances we'll hit buffer 1810 * overruns. 1811 * 1812 * Although we don't currently use the 'disable overrun' OABUFFER 1813 * feature it's worth noting that clock ratio reports have to be 1814 * disabled before considering to use that feature since the HW doesn't 1815 * correctly block these reports. 1816 * 1817 * Currently none of the high-level metrics we have depend on knowing 1818 * this ratio to normalize. 1819 * 1820 * Note: This register is not power context saved and restored, but 1821 * that's OK considering that we disable RC6 while the OA unit is 1822 * enabled. 1823 * 1824 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to 1825 * be read back from automatically triggered reports, as part of the 1826 * RPT_ID field. 1827 */ 1828 if (IS_GEN_RANGE(dev_priv, 9, 11)) { 1829 I915_WRITE(GEN8_OA_DEBUG, 1830 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 1831 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); 1832 } 1833 1834 /* 1835 * Update all contexts prior writing the mux configurations as we need 1836 * to make sure all slices/subslices are ON before writing to NOA 1837 * registers. 1838 */ 1839 ret = gen8_configure_all_contexts(dev_priv, oa_config); 1840 if (ret) 1841 return ret; 1842 1843 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); 1844 delay_after_mux(); 1845 1846 config_oa_regs(dev_priv, oa_config->b_counter_regs, 1847 oa_config->b_counter_regs_len); 1848 1849 return 0; 1850 } 1851 1852 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) 1853 { 1854 /* Reset all contexts' slices/subslices configurations. */ 1855 gen8_configure_all_contexts(dev_priv, NULL); 1856 1857 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & 1858 ~GT_NOA_ENABLE)); 1859 } 1860 1861 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) 1862 { 1863 /* Reset all contexts' slices/subslices configurations. */ 1864 gen8_configure_all_contexts(dev_priv, NULL); 1865 1866 /* Make sure we disable noa to save power. */ 1867 I915_WRITE(RPM_CONFIG1, 1868 I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE); 1869 } 1870 1871 static void gen7_oa_enable(struct i915_perf_stream *stream) 1872 { 1873 struct drm_i915_private *dev_priv = stream->dev_priv; 1874 struct i915_gem_context *ctx = stream->ctx; 1875 u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; 1876 bool periodic = dev_priv->perf.oa.periodic; 1877 u32 period_exponent = dev_priv->perf.oa.period_exponent; 1878 u32 report_format = dev_priv->perf.oa.oa_buffer.format; 1879 1880 /* 1881 * Reset buf pointers so we don't forward reports from before now. 1882 * 1883 * Think carefully if considering trying to avoid this, since it 1884 * also ensures status flags and the buffer itself are cleared 1885 * in error paths, and we have checks for invalid reports based 1886 * on the assumption that certain fields are written to zeroed 1887 * memory which this helps maintains. 1888 */ 1889 gen7_init_oa_buffer(dev_priv); 1890 1891 I915_WRITE(GEN7_OACONTROL, 1892 (ctx_id & GEN7_OACONTROL_CTX_MASK) | 1893 (period_exponent << 1894 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | 1895 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | 1896 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | 1897 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | 1898 GEN7_OACONTROL_ENABLE); 1899 } 1900 1901 static void gen8_oa_enable(struct i915_perf_stream *stream) 1902 { 1903 struct drm_i915_private *dev_priv = stream->dev_priv; 1904 u32 report_format = dev_priv->perf.oa.oa_buffer.format; 1905 1906 /* 1907 * Reset buf pointers so we don't forward reports from before now. 1908 * 1909 * Think carefully if considering trying to avoid this, since it 1910 * also ensures status flags and the buffer itself are cleared 1911 * in error paths, and we have checks for invalid reports based 1912 * on the assumption that certain fields are written to zeroed 1913 * memory which this helps maintains. 1914 */ 1915 gen8_init_oa_buffer(dev_priv); 1916 1917 /* 1918 * Note: we don't rely on the hardware to perform single context 1919 * filtering and instead filter on the cpu based on the context-id 1920 * field of reports 1921 */ 1922 I915_WRITE(GEN8_OACONTROL, (report_format << 1923 GEN8_OA_REPORT_FORMAT_SHIFT) | 1924 GEN8_OA_COUNTER_ENABLE); 1925 } 1926 1927 /** 1928 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream 1929 * @stream: An i915 perf stream opened for OA metrics 1930 * 1931 * [Re]enables hardware periodic sampling according to the period configured 1932 * when opening the stream. This also starts a hrtimer that will periodically 1933 * check for data in the circular OA buffer for notifying userspace (e.g. 1934 * during a read() or poll()). 1935 */ 1936 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 1937 { 1938 struct drm_i915_private *dev_priv = stream->dev_priv; 1939 1940 dev_priv->perf.oa.ops.oa_enable(stream); 1941 1942 if (dev_priv->perf.oa.periodic) 1943 hrtimer_start(&dev_priv->perf.oa.poll_check_timer, 1944 ns_to_ktime(POLL_PERIOD), 1945 HRTIMER_MODE_REL_PINNED); 1946 } 1947 1948 static void gen7_oa_disable(struct i915_perf_stream *stream) 1949 { 1950 struct intel_uncore *uncore = &stream->dev_priv->uncore; 1951 1952 intel_uncore_write(uncore, GEN7_OACONTROL, 0); 1953 if (intel_wait_for_register(uncore, 1954 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 1955 50)) 1956 DRM_ERROR("wait for OA to be disabled timed out\n"); 1957 } 1958 1959 static void gen8_oa_disable(struct i915_perf_stream *stream) 1960 { 1961 struct intel_uncore *uncore = &stream->dev_priv->uncore; 1962 1963 intel_uncore_write(uncore, GEN8_OACONTROL, 0); 1964 if (intel_wait_for_register(uncore, 1965 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 1966 50)) 1967 DRM_ERROR("wait for OA to be disabled timed out\n"); 1968 } 1969 1970 /** 1971 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 1972 * @stream: An i915 perf stream opened for OA metrics 1973 * 1974 * Stops the OA unit from periodically writing counter reports into the 1975 * circular OA buffer. This also stops the hrtimer that periodically checks for 1976 * data in the circular OA buffer, for notifying userspace. 1977 */ 1978 static void i915_oa_stream_disable(struct i915_perf_stream *stream) 1979 { 1980 struct drm_i915_private *dev_priv = stream->dev_priv; 1981 1982 dev_priv->perf.oa.ops.oa_disable(stream); 1983 1984 if (dev_priv->perf.oa.periodic) 1985 hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); 1986 } 1987 1988 static const struct i915_perf_stream_ops i915_oa_stream_ops = { 1989 .destroy = i915_oa_stream_destroy, 1990 .enable = i915_oa_stream_enable, 1991 .disable = i915_oa_stream_disable, 1992 .wait_unlocked = i915_oa_wait_unlocked, 1993 .poll_wait = i915_oa_poll_wait, 1994 .read = i915_oa_read, 1995 }; 1996 1997 /** 1998 * i915_oa_stream_init - validate combined props for OA stream and init 1999 * @stream: An i915 perf stream 2000 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 2001 * @props: The property state that configures stream (individually validated) 2002 * 2003 * While read_properties_unlocked() validates properties in isolation it 2004 * doesn't ensure that the combination necessarily makes sense. 2005 * 2006 * At this point it has been determined that userspace wants a stream of 2007 * OA metrics, but still we need to further validate the combined 2008 * properties are OK. 2009 * 2010 * If the configuration makes sense then we can allocate memory for 2011 * a circular OA buffer and apply the requested metric set configuration. 2012 * 2013 * Returns: zero on success or a negative error code. 2014 */ 2015 static int i915_oa_stream_init(struct i915_perf_stream *stream, 2016 struct drm_i915_perf_open_param *param, 2017 struct perf_open_properties *props) 2018 { 2019 struct drm_i915_private *dev_priv = stream->dev_priv; 2020 int format_size; 2021 int ret; 2022 2023 /* If the sysfs metrics/ directory wasn't registered for some 2024 * reason then don't let userspace try their luck with config 2025 * IDs 2026 */ 2027 if (!dev_priv->perf.metrics_kobj) { 2028 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 2029 return -EINVAL; 2030 } 2031 2032 if (!(props->sample_flags & SAMPLE_OA_REPORT)) { 2033 DRM_DEBUG("Only OA report sampling supported\n"); 2034 return -EINVAL; 2035 } 2036 2037 if (!dev_priv->perf.oa.ops.enable_metric_set) { 2038 DRM_DEBUG("OA unit not supported\n"); 2039 return -ENODEV; 2040 } 2041 2042 /* To avoid the complexity of having to accurately filter 2043 * counter reports and marshal to the appropriate client 2044 * we currently only allow exclusive access 2045 */ 2046 if (dev_priv->perf.oa.exclusive_stream) { 2047 DRM_DEBUG("OA unit already in use\n"); 2048 return -EBUSY; 2049 } 2050 2051 if (!props->oa_format) { 2052 DRM_DEBUG("OA report format not specified\n"); 2053 return -EINVAL; 2054 } 2055 2056 /* We set up some ratelimit state to potentially throttle any _NOTES 2057 * about spurious, invalid OA reports which we don't forward to 2058 * userspace. 2059 * 2060 * The initialization is associated with opening the stream (not driver 2061 * init) considering we print a _NOTE about any throttling when closing 2062 * the stream instead of waiting until driver _fini which no one would 2063 * ever see. 2064 * 2065 * Using the same limiting factors as printk_ratelimit() 2066 */ 2067 ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs, 2068 5 * HZ, 10); 2069 /* Since we use a DRM_NOTE for spurious reports it would be 2070 * inconsistent to let __ratelimit() automatically print a warning for 2071 * throttling. 2072 */ 2073 ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs, 2074 RATELIMIT_MSG_ON_RELEASE); 2075 2076 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 2077 2078 format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size; 2079 2080 stream->sample_flags |= SAMPLE_OA_REPORT; 2081 stream->sample_size += format_size; 2082 2083 dev_priv->perf.oa.oa_buffer.format_size = format_size; 2084 if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0)) 2085 return -EINVAL; 2086 2087 dev_priv->perf.oa.oa_buffer.format = 2088 dev_priv->perf.oa.oa_formats[props->oa_format].format; 2089 2090 dev_priv->perf.oa.periodic = props->oa_periodic; 2091 if (dev_priv->perf.oa.periodic) 2092 dev_priv->perf.oa.period_exponent = props->oa_period_exponent; 2093 2094 if (stream->ctx) { 2095 ret = oa_get_render_ctx_id(stream); 2096 if (ret) { 2097 DRM_DEBUG("Invalid context id to filter with\n"); 2098 return ret; 2099 } 2100 } 2101 2102 ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config); 2103 if (ret) { 2104 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set); 2105 goto err_config; 2106 } 2107 2108 /* PRM - observability performance counters: 2109 * 2110 * OACONTROL, performance counter enable, note: 2111 * 2112 * "When this bit is set, in order to have coherent counts, 2113 * RC6 power state and trunk clock gating must be disabled. 2114 * This can be achieved by programming MMIO registers as 2115 * 0xA094=0 and 0xA090[31]=1" 2116 * 2117 * In our case we are expecting that taking pm + FORCEWAKE 2118 * references will effectively disable RC6. 2119 */ 2120 stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2121 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 2122 2123 ret = alloc_oa_buffer(dev_priv); 2124 if (ret) 2125 goto err_oa_buf_alloc; 2126 2127 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 2128 if (ret) 2129 goto err_lock; 2130 2131 stream->ops = &i915_oa_stream_ops; 2132 dev_priv->perf.oa.exclusive_stream = stream; 2133 2134 ret = dev_priv->perf.oa.ops.enable_metric_set(stream); 2135 if (ret) { 2136 DRM_DEBUG("Unable to enable metric set\n"); 2137 goto err_enable; 2138 } 2139 2140 mutex_unlock(&dev_priv->drm.struct_mutex); 2141 2142 return 0; 2143 2144 err_enable: 2145 dev_priv->perf.oa.exclusive_stream = NULL; 2146 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 2147 mutex_unlock(&dev_priv->drm.struct_mutex); 2148 2149 err_lock: 2150 free_oa_buffer(dev_priv); 2151 2152 err_oa_buf_alloc: 2153 put_oa_config(dev_priv, stream->oa_config); 2154 2155 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 2156 intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); 2157 2158 err_config: 2159 if (stream->ctx) 2160 oa_put_render_ctx_id(stream); 2161 2162 return ret; 2163 } 2164 2165 void i915_oa_init_reg_state(struct intel_engine_cs *engine, 2166 struct intel_context *ce, 2167 u32 *regs) 2168 { 2169 struct i915_perf_stream *stream; 2170 2171 if (engine->class != RENDER_CLASS) 2172 return; 2173 2174 stream = engine->i915->perf.oa.exclusive_stream; 2175 if (stream) 2176 gen8_update_reg_state_unlocked(ce, regs, stream->oa_config); 2177 } 2178 2179 /** 2180 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation 2181 * @stream: An i915 perf stream 2182 * @file: An i915 perf stream file 2183 * @buf: destination buffer given by userspace 2184 * @count: the number of bytes userspace wants to read 2185 * @ppos: (inout) file seek position (unused) 2186 * 2187 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to 2188 * ensure that if we've successfully copied any data then reporting that takes 2189 * precedence over any internal error status, so the data isn't lost. 2190 * 2191 * For example ret will be -ENOSPC whenever there is more buffered data than 2192 * can be copied to userspace, but that's only interesting if we weren't able 2193 * to copy some data because it implies the userspace buffer is too small to 2194 * receive a single record (and we never split records). 2195 * 2196 * Another case with ret == -EFAULT is more of a grey area since it would seem 2197 * like bad form for userspace to ask us to overrun its buffer, but the user 2198 * knows best: 2199 * 2200 * http://yarchive.net/comp/linux/partial_reads_writes.html 2201 * 2202 * Returns: The number of bytes copied or a negative error code on failure. 2203 */ 2204 static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, 2205 struct file *file, 2206 char __user *buf, 2207 size_t count, 2208 loff_t *ppos) 2209 { 2210 /* Note we keep the offset (aka bytes read) separate from any 2211 * error status so that the final check for whether we return 2212 * the bytes read with a higher precedence than any error (see 2213 * comment below) doesn't need to be handled/duplicated in 2214 * stream->ops->read() implementations. 2215 */ 2216 size_t offset = 0; 2217 int ret = stream->ops->read(stream, buf, count, &offset); 2218 2219 return offset ?: (ret ?: -EAGAIN); 2220 } 2221 2222 /** 2223 * i915_perf_read - handles read() FOP for i915 perf stream FDs 2224 * @file: An i915 perf stream file 2225 * @buf: destination buffer given by userspace 2226 * @count: the number of bytes userspace wants to read 2227 * @ppos: (inout) file seek position (unused) 2228 * 2229 * The entry point for handling a read() on a stream file descriptor from 2230 * userspace. Most of the work is left to the i915_perf_read_locked() and 2231 * &i915_perf_stream_ops->read but to save having stream implementations (of 2232 * which we might have multiple later) we handle blocking read here. 2233 * 2234 * We can also consistently treat trying to read from a disabled stream 2235 * as an IO error so implementations can assume the stream is enabled 2236 * while reading. 2237 * 2238 * Returns: The number of bytes copied or a negative error code on failure. 2239 */ 2240 static ssize_t i915_perf_read(struct file *file, 2241 char __user *buf, 2242 size_t count, 2243 loff_t *ppos) 2244 { 2245 struct i915_perf_stream *stream = file->private_data; 2246 struct drm_i915_private *dev_priv = stream->dev_priv; 2247 ssize_t ret; 2248 2249 /* To ensure it's handled consistently we simply treat all reads of a 2250 * disabled stream as an error. In particular it might otherwise lead 2251 * to a deadlock for blocking file descriptors... 2252 */ 2253 if (!stream->enabled) 2254 return -EIO; 2255 2256 if (!(file->f_flags & O_NONBLOCK)) { 2257 /* There's the small chance of false positives from 2258 * stream->ops->wait_unlocked. 2259 * 2260 * E.g. with single context filtering since we only wait until 2261 * oabuffer has >= 1 report we don't immediately know whether 2262 * any reports really belong to the current context 2263 */ 2264 do { 2265 ret = stream->ops->wait_unlocked(stream); 2266 if (ret) 2267 return ret; 2268 2269 mutex_lock(&dev_priv->perf.lock); 2270 ret = i915_perf_read_locked(stream, file, 2271 buf, count, ppos); 2272 mutex_unlock(&dev_priv->perf.lock); 2273 } while (ret == -EAGAIN); 2274 } else { 2275 mutex_lock(&dev_priv->perf.lock); 2276 ret = i915_perf_read_locked(stream, file, buf, count, ppos); 2277 mutex_unlock(&dev_priv->perf.lock); 2278 } 2279 2280 /* We allow the poll checking to sometimes report false positive EPOLLIN 2281 * events where we might actually report EAGAIN on read() if there's 2282 * not really any data available. In this situation though we don't 2283 * want to enter a busy loop between poll() reporting a EPOLLIN event 2284 * and read() returning -EAGAIN. Clearing the oa.pollin state here 2285 * effectively ensures we back off until the next hrtimer callback 2286 * before reporting another EPOLLIN event. 2287 */ 2288 if (ret >= 0 || ret == -EAGAIN) { 2289 /* Maybe make ->pollin per-stream state if we support multiple 2290 * concurrent streams in the future. 2291 */ 2292 dev_priv->perf.oa.pollin = false; 2293 } 2294 2295 return ret; 2296 } 2297 2298 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 2299 { 2300 struct drm_i915_private *dev_priv = 2301 container_of(hrtimer, typeof(*dev_priv), 2302 perf.oa.poll_check_timer); 2303 2304 if (oa_buffer_check_unlocked(dev_priv)) { 2305 dev_priv->perf.oa.pollin = true; 2306 wake_up(&dev_priv->perf.oa.poll_wq); 2307 } 2308 2309 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); 2310 2311 return HRTIMER_RESTART; 2312 } 2313 2314 /** 2315 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 2316 * @dev_priv: i915 device instance 2317 * @stream: An i915 perf stream 2318 * @file: An i915 perf stream file 2319 * @wait: poll() state table 2320 * 2321 * For handling userspace polling on an i915 perf stream, this calls through to 2322 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 2323 * will be woken for new stream data. 2324 * 2325 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2326 * with any non-file-operation driver hooks. 2327 * 2328 * Returns: any poll events that are ready without sleeping 2329 */ 2330 static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv, 2331 struct i915_perf_stream *stream, 2332 struct file *file, 2333 poll_table *wait) 2334 { 2335 __poll_t events = 0; 2336 2337 stream->ops->poll_wait(stream, file, wait); 2338 2339 /* Note: we don't explicitly check whether there's something to read 2340 * here since this path may be very hot depending on what else 2341 * userspace is polling, or on the timeout in use. We rely solely on 2342 * the hrtimer/oa_poll_check_timer_cb to notify us when there are 2343 * samples to read. 2344 */ 2345 if (dev_priv->perf.oa.pollin) 2346 events |= EPOLLIN; 2347 2348 return events; 2349 } 2350 2351 /** 2352 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream 2353 * @file: An i915 perf stream file 2354 * @wait: poll() state table 2355 * 2356 * For handling userspace polling on an i915 perf stream, this ensures 2357 * poll_wait() gets called with a wait queue that will be woken for new stream 2358 * data. 2359 * 2360 * Note: Implementation deferred to i915_perf_poll_locked() 2361 * 2362 * Returns: any poll events that are ready without sleeping 2363 */ 2364 static __poll_t i915_perf_poll(struct file *file, poll_table *wait) 2365 { 2366 struct i915_perf_stream *stream = file->private_data; 2367 struct drm_i915_private *dev_priv = stream->dev_priv; 2368 __poll_t ret; 2369 2370 mutex_lock(&dev_priv->perf.lock); 2371 ret = i915_perf_poll_locked(dev_priv, stream, file, wait); 2372 mutex_unlock(&dev_priv->perf.lock); 2373 2374 return ret; 2375 } 2376 2377 /** 2378 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl 2379 * @stream: A disabled i915 perf stream 2380 * 2381 * [Re]enables the associated capture of data for this stream. 2382 * 2383 * If a stream was previously enabled then there's currently no intention 2384 * to provide userspace any guarantee about the preservation of previously 2385 * buffered data. 2386 */ 2387 static void i915_perf_enable_locked(struct i915_perf_stream *stream) 2388 { 2389 if (stream->enabled) 2390 return; 2391 2392 /* Allow stream->ops->enable() to refer to this */ 2393 stream->enabled = true; 2394 2395 if (stream->ops->enable) 2396 stream->ops->enable(stream); 2397 } 2398 2399 /** 2400 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl 2401 * @stream: An enabled i915 perf stream 2402 * 2403 * Disables the associated capture of data for this stream. 2404 * 2405 * The intention is that disabling an re-enabling a stream will ideally be 2406 * cheaper than destroying and re-opening a stream with the same configuration, 2407 * though there are no formal guarantees about what state or buffered data 2408 * must be retained between disabling and re-enabling a stream. 2409 * 2410 * Note: while a stream is disabled it's considered an error for userspace 2411 * to attempt to read from the stream (-EIO). 2412 */ 2413 static void i915_perf_disable_locked(struct i915_perf_stream *stream) 2414 { 2415 if (!stream->enabled) 2416 return; 2417 2418 /* Allow stream->ops->disable() to refer to this */ 2419 stream->enabled = false; 2420 2421 if (stream->ops->disable) 2422 stream->ops->disable(stream); 2423 } 2424 2425 /** 2426 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 2427 * @stream: An i915 perf stream 2428 * @cmd: the ioctl request 2429 * @arg: the ioctl data 2430 * 2431 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2432 * with any non-file-operation driver hooks. 2433 * 2434 * Returns: zero on success or a negative error code. Returns -EINVAL for 2435 * an unknown ioctl request. 2436 */ 2437 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, 2438 unsigned int cmd, 2439 unsigned long arg) 2440 { 2441 switch (cmd) { 2442 case I915_PERF_IOCTL_ENABLE: 2443 i915_perf_enable_locked(stream); 2444 return 0; 2445 case I915_PERF_IOCTL_DISABLE: 2446 i915_perf_disable_locked(stream); 2447 return 0; 2448 } 2449 2450 return -EINVAL; 2451 } 2452 2453 /** 2454 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 2455 * @file: An i915 perf stream file 2456 * @cmd: the ioctl request 2457 * @arg: the ioctl data 2458 * 2459 * Implementation deferred to i915_perf_ioctl_locked(). 2460 * 2461 * Returns: zero on success or a negative error code. Returns -EINVAL for 2462 * an unknown ioctl request. 2463 */ 2464 static long i915_perf_ioctl(struct file *file, 2465 unsigned int cmd, 2466 unsigned long arg) 2467 { 2468 struct i915_perf_stream *stream = file->private_data; 2469 struct drm_i915_private *dev_priv = stream->dev_priv; 2470 long ret; 2471 2472 mutex_lock(&dev_priv->perf.lock); 2473 ret = i915_perf_ioctl_locked(stream, cmd, arg); 2474 mutex_unlock(&dev_priv->perf.lock); 2475 2476 return ret; 2477 } 2478 2479 /** 2480 * i915_perf_destroy_locked - destroy an i915 perf stream 2481 * @stream: An i915 perf stream 2482 * 2483 * Frees all resources associated with the given i915 perf @stream, disabling 2484 * any associated data capture in the process. 2485 * 2486 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2487 * with any non-file-operation driver hooks. 2488 */ 2489 static void i915_perf_destroy_locked(struct i915_perf_stream *stream) 2490 { 2491 if (stream->enabled) 2492 i915_perf_disable_locked(stream); 2493 2494 if (stream->ops->destroy) 2495 stream->ops->destroy(stream); 2496 2497 list_del(&stream->link); 2498 2499 if (stream->ctx) 2500 i915_gem_context_put(stream->ctx); 2501 2502 kfree(stream); 2503 } 2504 2505 /** 2506 * i915_perf_release - handles userspace close() of a stream file 2507 * @inode: anonymous inode associated with file 2508 * @file: An i915 perf stream file 2509 * 2510 * Cleans up any resources associated with an open i915 perf stream file. 2511 * 2512 * NB: close() can't really fail from the userspace point of view. 2513 * 2514 * Returns: zero on success or a negative error code. 2515 */ 2516 static int i915_perf_release(struct inode *inode, struct file *file) 2517 { 2518 struct i915_perf_stream *stream = file->private_data; 2519 struct drm_i915_private *dev_priv = stream->dev_priv; 2520 2521 mutex_lock(&dev_priv->perf.lock); 2522 i915_perf_destroy_locked(stream); 2523 mutex_unlock(&dev_priv->perf.lock); 2524 2525 /* Release the reference the perf stream kept on the driver. */ 2526 drm_dev_put(&dev_priv->drm); 2527 2528 return 0; 2529 } 2530 2531 2532 static const struct file_operations fops = { 2533 .owner = THIS_MODULE, 2534 .llseek = no_llseek, 2535 .release = i915_perf_release, 2536 .poll = i915_perf_poll, 2537 .read = i915_perf_read, 2538 .unlocked_ioctl = i915_perf_ioctl, 2539 /* Our ioctl have no arguments, so it's safe to use the same function 2540 * to handle 32bits compatibility. 2541 */ 2542 .compat_ioctl = i915_perf_ioctl, 2543 }; 2544 2545 2546 /** 2547 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD 2548 * @dev_priv: i915 device instance 2549 * @param: The open parameters passed to 'DRM_I915_PERF_OPEN` 2550 * @props: individually validated u64 property value pairs 2551 * @file: drm file 2552 * 2553 * See i915_perf_ioctl_open() for interface details. 2554 * 2555 * Implements further stream config validation and stream initialization on 2556 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex 2557 * taken to serialize with any non-file-operation driver hooks. 2558 * 2559 * Note: at this point the @props have only been validated in isolation and 2560 * it's still necessary to validate that the combination of properties makes 2561 * sense. 2562 * 2563 * In the case where userspace is interested in OA unit metrics then further 2564 * config validation and stream initialization details will be handled by 2565 * i915_oa_stream_init(). The code here should only validate config state that 2566 * will be relevant to all stream types / backends. 2567 * 2568 * Returns: zero on success or a negative error code. 2569 */ 2570 static int 2571 i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, 2572 struct drm_i915_perf_open_param *param, 2573 struct perf_open_properties *props, 2574 struct drm_file *file) 2575 { 2576 struct i915_gem_context *specific_ctx = NULL; 2577 struct i915_perf_stream *stream = NULL; 2578 unsigned long f_flags = 0; 2579 bool privileged_op = true; 2580 int stream_fd; 2581 int ret; 2582 2583 if (props->single_context) { 2584 u32 ctx_handle = props->ctx_handle; 2585 struct drm_i915_file_private *file_priv = file->driver_priv; 2586 2587 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle); 2588 if (!specific_ctx) { 2589 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n", 2590 ctx_handle); 2591 ret = -ENOENT; 2592 goto err; 2593 } 2594 } 2595 2596 /* 2597 * On Haswell the OA unit supports clock gating off for a specific 2598 * context and in this mode there's no visibility of metrics for the 2599 * rest of the system, which we consider acceptable for a 2600 * non-privileged client. 2601 * 2602 * For Gen8+ the OA unit no longer supports clock gating off for a 2603 * specific context and the kernel can't securely stop the counters 2604 * from updating as system-wide / global values. Even though we can 2605 * filter reports based on the included context ID we can't block 2606 * clients from seeing the raw / global counter values via 2607 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to 2608 * enable the OA unit by default. 2609 */ 2610 if (IS_HASWELL(dev_priv) && specific_ctx) 2611 privileged_op = false; 2612 2613 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option 2614 * we check a dev.i915.perf_stream_paranoid sysctl option 2615 * to determine if it's ok to access system wide OA counters 2616 * without CAP_SYS_ADMIN privileges. 2617 */ 2618 if (privileged_op && 2619 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { 2620 DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n"); 2621 ret = -EACCES; 2622 goto err_ctx; 2623 } 2624 2625 stream = kzalloc(sizeof(*stream), GFP_KERNEL); 2626 if (!stream) { 2627 ret = -ENOMEM; 2628 goto err_ctx; 2629 } 2630 2631 stream->dev_priv = dev_priv; 2632 stream->ctx = specific_ctx; 2633 2634 ret = i915_oa_stream_init(stream, param, props); 2635 if (ret) 2636 goto err_alloc; 2637 2638 /* we avoid simply assigning stream->sample_flags = props->sample_flags 2639 * to have _stream_init check the combination of sample flags more 2640 * thoroughly, but still this is the expected result at this point. 2641 */ 2642 if (WARN_ON(stream->sample_flags != props->sample_flags)) { 2643 ret = -ENODEV; 2644 goto err_flags; 2645 } 2646 2647 list_add(&stream->link, &dev_priv->perf.streams); 2648 2649 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC) 2650 f_flags |= O_CLOEXEC; 2651 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK) 2652 f_flags |= O_NONBLOCK; 2653 2654 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); 2655 if (stream_fd < 0) { 2656 ret = stream_fd; 2657 goto err_open; 2658 } 2659 2660 if (!(param->flags & I915_PERF_FLAG_DISABLED)) 2661 i915_perf_enable_locked(stream); 2662 2663 /* Take a reference on the driver that will be kept with stream_fd 2664 * until its release. 2665 */ 2666 drm_dev_get(&dev_priv->drm); 2667 2668 return stream_fd; 2669 2670 err_open: 2671 list_del(&stream->link); 2672 err_flags: 2673 if (stream->ops->destroy) 2674 stream->ops->destroy(stream); 2675 err_alloc: 2676 kfree(stream); 2677 err_ctx: 2678 if (specific_ctx) 2679 i915_gem_context_put(specific_ctx); 2680 err: 2681 return ret; 2682 } 2683 2684 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) 2685 { 2686 return div64_u64(1000000000ULL * (2ULL << exponent), 2687 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz); 2688 } 2689 2690 /** 2691 * read_properties_unlocked - validate + copy userspace stream open properties 2692 * @dev_priv: i915 device instance 2693 * @uprops: The array of u64 key value pairs given by userspace 2694 * @n_props: The number of key value pairs expected in @uprops 2695 * @props: The stream configuration built up while validating properties 2696 * 2697 * Note this function only validates properties in isolation it doesn't 2698 * validate that the combination of properties makes sense or that all 2699 * properties necessary for a particular kind of stream have been set. 2700 * 2701 * Note that there currently aren't any ordering requirements for properties so 2702 * we shouldn't validate or assume anything about ordering here. This doesn't 2703 * rule out defining new properties with ordering requirements in the future. 2704 */ 2705 static int read_properties_unlocked(struct drm_i915_private *dev_priv, 2706 u64 __user *uprops, 2707 u32 n_props, 2708 struct perf_open_properties *props) 2709 { 2710 u64 __user *uprop = uprops; 2711 u32 i; 2712 2713 memset(props, 0, sizeof(struct perf_open_properties)); 2714 2715 if (!n_props) { 2716 DRM_DEBUG("No i915 perf properties given\n"); 2717 return -EINVAL; 2718 } 2719 2720 /* Considering that ID = 0 is reserved and assuming that we don't 2721 * (currently) expect any configurations to ever specify duplicate 2722 * values for a particular property ID then the last _PROP_MAX value is 2723 * one greater than the maximum number of properties we expect to get 2724 * from userspace. 2725 */ 2726 if (n_props >= DRM_I915_PERF_PROP_MAX) { 2727 DRM_DEBUG("More i915 perf properties specified than exist\n"); 2728 return -EINVAL; 2729 } 2730 2731 for (i = 0; i < n_props; i++) { 2732 u64 oa_period, oa_freq_hz; 2733 u64 id, value; 2734 int ret; 2735 2736 ret = get_user(id, uprop); 2737 if (ret) 2738 return ret; 2739 2740 ret = get_user(value, uprop + 1); 2741 if (ret) 2742 return ret; 2743 2744 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { 2745 DRM_DEBUG("Unknown i915 perf property ID\n"); 2746 return -EINVAL; 2747 } 2748 2749 switch ((enum drm_i915_perf_property_id)id) { 2750 case DRM_I915_PERF_PROP_CTX_HANDLE: 2751 props->single_context = 1; 2752 props->ctx_handle = value; 2753 break; 2754 case DRM_I915_PERF_PROP_SAMPLE_OA: 2755 if (value) 2756 props->sample_flags |= SAMPLE_OA_REPORT; 2757 break; 2758 case DRM_I915_PERF_PROP_OA_METRICS_SET: 2759 if (value == 0) { 2760 DRM_DEBUG("Unknown OA metric set ID\n"); 2761 return -EINVAL; 2762 } 2763 props->metrics_set = value; 2764 break; 2765 case DRM_I915_PERF_PROP_OA_FORMAT: 2766 if (value == 0 || value >= I915_OA_FORMAT_MAX) { 2767 DRM_DEBUG("Out-of-range OA report format %llu\n", 2768 value); 2769 return -EINVAL; 2770 } 2771 if (!dev_priv->perf.oa.oa_formats[value].size) { 2772 DRM_DEBUG("Unsupported OA report format %llu\n", 2773 value); 2774 return -EINVAL; 2775 } 2776 props->oa_format = value; 2777 break; 2778 case DRM_I915_PERF_PROP_OA_EXPONENT: 2779 if (value > OA_EXPONENT_MAX) { 2780 DRM_DEBUG("OA timer exponent too high (> %u)\n", 2781 OA_EXPONENT_MAX); 2782 return -EINVAL; 2783 } 2784 2785 /* Theoretically we can program the OA unit to sample 2786 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns 2787 * for BXT. We don't allow such high sampling 2788 * frequencies by default unless root. 2789 */ 2790 2791 BUILD_BUG_ON(sizeof(oa_period) != 8); 2792 oa_period = oa_exponent_to_ns(dev_priv, value); 2793 2794 /* This check is primarily to ensure that oa_period <= 2795 * UINT32_MAX (before passing to do_div which only 2796 * accepts a u32 denominator), but we can also skip 2797 * checking anything < 1Hz which implicitly can't be 2798 * limited via an integer oa_max_sample_rate. 2799 */ 2800 if (oa_period <= NSEC_PER_SEC) { 2801 u64 tmp = NSEC_PER_SEC; 2802 do_div(tmp, oa_period); 2803 oa_freq_hz = tmp; 2804 } else 2805 oa_freq_hz = 0; 2806 2807 if (oa_freq_hz > i915_oa_max_sample_rate && 2808 !capable(CAP_SYS_ADMIN)) { 2809 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", 2810 i915_oa_max_sample_rate); 2811 return -EACCES; 2812 } 2813 2814 props->oa_periodic = true; 2815 props->oa_period_exponent = value; 2816 break; 2817 case DRM_I915_PERF_PROP_MAX: 2818 MISSING_CASE(id); 2819 return -EINVAL; 2820 } 2821 2822 uprop += 2; 2823 } 2824 2825 return 0; 2826 } 2827 2828 /** 2829 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD 2830 * @dev: drm device 2831 * @data: ioctl data copied from userspace (unvalidated) 2832 * @file: drm file 2833 * 2834 * Validates the stream open parameters given by userspace including flags 2835 * and an array of u64 key, value pair properties. 2836 * 2837 * Very little is assumed up front about the nature of the stream being 2838 * opened (for instance we don't assume it's for periodic OA unit metrics). An 2839 * i915-perf stream is expected to be a suitable interface for other forms of 2840 * buffered data written by the GPU besides periodic OA metrics. 2841 * 2842 * Note we copy the properties from userspace outside of the i915 perf 2843 * mutex to avoid an awkward lockdep with mmap_sem. 2844 * 2845 * Most of the implementation details are handled by 2846 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock 2847 * mutex for serializing with any non-file-operation driver hooks. 2848 * 2849 * Return: A newly opened i915 Perf stream file descriptor or negative 2850 * error code on failure. 2851 */ 2852 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 2853 struct drm_file *file) 2854 { 2855 struct drm_i915_private *dev_priv = dev->dev_private; 2856 struct drm_i915_perf_open_param *param = data; 2857 struct perf_open_properties props; 2858 u32 known_open_flags; 2859 int ret; 2860 2861 if (!dev_priv->perf.initialized) { 2862 DRM_DEBUG("i915 perf interface not available for this system\n"); 2863 return -ENOTSUPP; 2864 } 2865 2866 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | 2867 I915_PERF_FLAG_FD_NONBLOCK | 2868 I915_PERF_FLAG_DISABLED; 2869 if (param->flags & ~known_open_flags) { 2870 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n"); 2871 return -EINVAL; 2872 } 2873 2874 ret = read_properties_unlocked(dev_priv, 2875 u64_to_user_ptr(param->properties_ptr), 2876 param->num_properties, 2877 &props); 2878 if (ret) 2879 return ret; 2880 2881 mutex_lock(&dev_priv->perf.lock); 2882 ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file); 2883 mutex_unlock(&dev_priv->perf.lock); 2884 2885 return ret; 2886 } 2887 2888 /** 2889 * i915_perf_register - exposes i915-perf to userspace 2890 * @dev_priv: i915 device instance 2891 * 2892 * In particular OA metric sets are advertised under a sysfs metrics/ 2893 * directory allowing userspace to enumerate valid IDs that can be 2894 * used to open an i915-perf stream. 2895 */ 2896 void i915_perf_register(struct drm_i915_private *dev_priv) 2897 { 2898 int ret; 2899 2900 if (!dev_priv->perf.initialized) 2901 return; 2902 2903 /* To be sure we're synchronized with an attempted 2904 * i915_perf_open_ioctl(); considering that we register after 2905 * being exposed to userspace. 2906 */ 2907 mutex_lock(&dev_priv->perf.lock); 2908 2909 dev_priv->perf.metrics_kobj = 2910 kobject_create_and_add("metrics", 2911 &dev_priv->drm.primary->kdev->kobj); 2912 if (!dev_priv->perf.metrics_kobj) 2913 goto exit; 2914 2915 sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr); 2916 2917 if (INTEL_GEN(dev_priv) >= 11) { 2918 i915_perf_load_test_config_icl(dev_priv); 2919 } else if (IS_CANNONLAKE(dev_priv)) { 2920 i915_perf_load_test_config_cnl(dev_priv); 2921 } else if (IS_COFFEELAKE(dev_priv)) { 2922 if (IS_CFL_GT2(dev_priv)) 2923 i915_perf_load_test_config_cflgt2(dev_priv); 2924 if (IS_CFL_GT3(dev_priv)) 2925 i915_perf_load_test_config_cflgt3(dev_priv); 2926 } else if (IS_GEMINILAKE(dev_priv)) { 2927 i915_perf_load_test_config_glk(dev_priv); 2928 } else if (IS_KABYLAKE(dev_priv)) { 2929 if (IS_KBL_GT2(dev_priv)) 2930 i915_perf_load_test_config_kblgt2(dev_priv); 2931 else if (IS_KBL_GT3(dev_priv)) 2932 i915_perf_load_test_config_kblgt3(dev_priv); 2933 } else if (IS_BROXTON(dev_priv)) { 2934 i915_perf_load_test_config_bxt(dev_priv); 2935 } else if (IS_SKYLAKE(dev_priv)) { 2936 if (IS_SKL_GT2(dev_priv)) 2937 i915_perf_load_test_config_sklgt2(dev_priv); 2938 else if (IS_SKL_GT3(dev_priv)) 2939 i915_perf_load_test_config_sklgt3(dev_priv); 2940 else if (IS_SKL_GT4(dev_priv)) 2941 i915_perf_load_test_config_sklgt4(dev_priv); 2942 } else if (IS_CHERRYVIEW(dev_priv)) { 2943 i915_perf_load_test_config_chv(dev_priv); 2944 } else if (IS_BROADWELL(dev_priv)) { 2945 i915_perf_load_test_config_bdw(dev_priv); 2946 } else if (IS_HASWELL(dev_priv)) { 2947 i915_perf_load_test_config_hsw(dev_priv); 2948 } 2949 2950 if (dev_priv->perf.oa.test_config.id == 0) 2951 goto sysfs_error; 2952 2953 ret = sysfs_create_group(dev_priv->perf.metrics_kobj, 2954 &dev_priv->perf.oa.test_config.sysfs_metric); 2955 if (ret) 2956 goto sysfs_error; 2957 2958 atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1); 2959 2960 goto exit; 2961 2962 sysfs_error: 2963 kobject_put(dev_priv->perf.metrics_kobj); 2964 dev_priv->perf.metrics_kobj = NULL; 2965 2966 exit: 2967 mutex_unlock(&dev_priv->perf.lock); 2968 } 2969 2970 /** 2971 * i915_perf_unregister - hide i915-perf from userspace 2972 * @dev_priv: i915 device instance 2973 * 2974 * i915-perf state cleanup is split up into an 'unregister' and 2975 * 'deinit' phase where the interface is first hidden from 2976 * userspace by i915_perf_unregister() before cleaning up 2977 * remaining state in i915_perf_fini(). 2978 */ 2979 void i915_perf_unregister(struct drm_i915_private *dev_priv) 2980 { 2981 if (!dev_priv->perf.metrics_kobj) 2982 return; 2983 2984 sysfs_remove_group(dev_priv->perf.metrics_kobj, 2985 &dev_priv->perf.oa.test_config.sysfs_metric); 2986 2987 kobject_put(dev_priv->perf.metrics_kobj); 2988 dev_priv->perf.metrics_kobj = NULL; 2989 } 2990 2991 static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr) 2992 { 2993 static const i915_reg_t flex_eu_regs[] = { 2994 EU_PERF_CNTL0, 2995 EU_PERF_CNTL1, 2996 EU_PERF_CNTL2, 2997 EU_PERF_CNTL3, 2998 EU_PERF_CNTL4, 2999 EU_PERF_CNTL5, 3000 EU_PERF_CNTL6, 3001 }; 3002 int i; 3003 3004 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) { 3005 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr) 3006 return true; 3007 } 3008 return false; 3009 } 3010 3011 static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr) 3012 { 3013 return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) && 3014 addr <= i915_mmio_reg_offset(OASTARTTRIG8)) || 3015 (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) && 3016 addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) || 3017 (addr >= i915_mmio_reg_offset(OACEC0_0) && 3018 addr <= i915_mmio_reg_offset(OACEC7_1)); 3019 } 3020 3021 static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3022 { 3023 return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) || 3024 (addr >= i915_mmio_reg_offset(MICRO_BP0_0) && 3025 addr <= i915_mmio_reg_offset(NOA_WRITE)) || 3026 (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) && 3027 addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) || 3028 (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) && 3029 addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI)); 3030 } 3031 3032 static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3033 { 3034 return gen7_is_valid_mux_addr(dev_priv, addr) || 3035 addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) || 3036 (addr >= i915_mmio_reg_offset(RPM_CONFIG0) && 3037 addr <= i915_mmio_reg_offset(NOA_CONFIG(8))); 3038 } 3039 3040 static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3041 { 3042 return gen8_is_valid_mux_addr(dev_priv, addr) || 3043 addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) || 3044 (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) && 3045 addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI)); 3046 } 3047 3048 static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3049 { 3050 return gen7_is_valid_mux_addr(dev_priv, addr) || 3051 (addr >= 0x25100 && addr <= 0x2FF90) || 3052 (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) && 3053 addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) || 3054 addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0); 3055 } 3056 3057 static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) 3058 { 3059 return gen7_is_valid_mux_addr(dev_priv, addr) || 3060 (addr >= 0x182300 && addr <= 0x1823A4); 3061 } 3062 3063 static u32 mask_reg_value(u32 reg, u32 val) 3064 { 3065 /* HALF_SLICE_CHICKEN2 is programmed with a the 3066 * WaDisableSTUnitPowerOptimization workaround. Make sure the value 3067 * programmed by userspace doesn't change this. 3068 */ 3069 if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg) 3070 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE); 3071 3072 /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function 3073 * indicated by its name and a bunch of selection fields used by OA 3074 * configs. 3075 */ 3076 if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg) 3077 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE); 3078 3079 return val; 3080 } 3081 3082 static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv, 3083 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr), 3084 u32 __user *regs, 3085 u32 n_regs) 3086 { 3087 struct i915_oa_reg *oa_regs; 3088 int err; 3089 u32 i; 3090 3091 if (!n_regs) 3092 return NULL; 3093 3094 if (!access_ok(regs, n_regs * sizeof(u32) * 2)) 3095 return ERR_PTR(-EFAULT); 3096 3097 /* No is_valid function means we're not allowing any register to be programmed. */ 3098 GEM_BUG_ON(!is_valid); 3099 if (!is_valid) 3100 return ERR_PTR(-EINVAL); 3101 3102 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL); 3103 if (!oa_regs) 3104 return ERR_PTR(-ENOMEM); 3105 3106 for (i = 0; i < n_regs; i++) { 3107 u32 addr, value; 3108 3109 err = get_user(addr, regs); 3110 if (err) 3111 goto addr_err; 3112 3113 if (!is_valid(dev_priv, addr)) { 3114 DRM_DEBUG("Invalid oa_reg address: %X\n", addr); 3115 err = -EINVAL; 3116 goto addr_err; 3117 } 3118 3119 err = get_user(value, regs + 1); 3120 if (err) 3121 goto addr_err; 3122 3123 oa_regs[i].addr = _MMIO(addr); 3124 oa_regs[i].value = mask_reg_value(addr, value); 3125 3126 regs += 2; 3127 } 3128 3129 return oa_regs; 3130 3131 addr_err: 3132 kfree(oa_regs); 3133 return ERR_PTR(err); 3134 } 3135 3136 static ssize_t show_dynamic_id(struct device *dev, 3137 struct device_attribute *attr, 3138 char *buf) 3139 { 3140 struct i915_oa_config *oa_config = 3141 container_of(attr, typeof(*oa_config), sysfs_metric_id); 3142 3143 return sprintf(buf, "%d\n", oa_config->id); 3144 } 3145 3146 static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv, 3147 struct i915_oa_config *oa_config) 3148 { 3149 sysfs_attr_init(&oa_config->sysfs_metric_id.attr); 3150 oa_config->sysfs_metric_id.attr.name = "id"; 3151 oa_config->sysfs_metric_id.attr.mode = S_IRUGO; 3152 oa_config->sysfs_metric_id.show = show_dynamic_id; 3153 oa_config->sysfs_metric_id.store = NULL; 3154 3155 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr; 3156 oa_config->attrs[1] = NULL; 3157 3158 oa_config->sysfs_metric.name = oa_config->uuid; 3159 oa_config->sysfs_metric.attrs = oa_config->attrs; 3160 3161 return sysfs_create_group(dev_priv->perf.metrics_kobj, 3162 &oa_config->sysfs_metric); 3163 } 3164 3165 /** 3166 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config 3167 * @dev: drm device 3168 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from 3169 * userspace (unvalidated) 3170 * @file: drm file 3171 * 3172 * Validates the submitted OA register to be saved into a new OA config that 3173 * can then be used for programming the OA unit and its NOA network. 3174 * 3175 * Returns: A new allocated config number to be used with the perf open ioctl 3176 * or a negative error code on failure. 3177 */ 3178 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, 3179 struct drm_file *file) 3180 { 3181 struct drm_i915_private *dev_priv = dev->dev_private; 3182 struct drm_i915_perf_oa_config *args = data; 3183 struct i915_oa_config *oa_config, *tmp; 3184 int err, id; 3185 3186 if (!dev_priv->perf.initialized) { 3187 DRM_DEBUG("i915 perf interface not available for this system\n"); 3188 return -ENOTSUPP; 3189 } 3190 3191 if (!dev_priv->perf.metrics_kobj) { 3192 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 3193 return -EINVAL; 3194 } 3195 3196 if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { 3197 DRM_DEBUG("Insufficient privileges to add i915 OA config\n"); 3198 return -EACCES; 3199 } 3200 3201 if ((!args->mux_regs_ptr || !args->n_mux_regs) && 3202 (!args->boolean_regs_ptr || !args->n_boolean_regs) && 3203 (!args->flex_regs_ptr || !args->n_flex_regs)) { 3204 DRM_DEBUG("No OA registers given\n"); 3205 return -EINVAL; 3206 } 3207 3208 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL); 3209 if (!oa_config) { 3210 DRM_DEBUG("Failed to allocate memory for the OA config\n"); 3211 return -ENOMEM; 3212 } 3213 3214 atomic_set(&oa_config->ref_count, 1); 3215 3216 if (!uuid_is_valid(args->uuid)) { 3217 DRM_DEBUG("Invalid uuid format for OA config\n"); 3218 err = -EINVAL; 3219 goto reg_err; 3220 } 3221 3222 /* Last character in oa_config->uuid will be 0 because oa_config is 3223 * kzalloc. 3224 */ 3225 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid)); 3226 3227 oa_config->mux_regs_len = args->n_mux_regs; 3228 oa_config->mux_regs = 3229 alloc_oa_regs(dev_priv, 3230 dev_priv->perf.oa.ops.is_valid_mux_reg, 3231 u64_to_user_ptr(args->mux_regs_ptr), 3232 args->n_mux_regs); 3233 3234 if (IS_ERR(oa_config->mux_regs)) { 3235 DRM_DEBUG("Failed to create OA config for mux_regs\n"); 3236 err = PTR_ERR(oa_config->mux_regs); 3237 goto reg_err; 3238 } 3239 3240 oa_config->b_counter_regs_len = args->n_boolean_regs; 3241 oa_config->b_counter_regs = 3242 alloc_oa_regs(dev_priv, 3243 dev_priv->perf.oa.ops.is_valid_b_counter_reg, 3244 u64_to_user_ptr(args->boolean_regs_ptr), 3245 args->n_boolean_regs); 3246 3247 if (IS_ERR(oa_config->b_counter_regs)) { 3248 DRM_DEBUG("Failed to create OA config for b_counter_regs\n"); 3249 err = PTR_ERR(oa_config->b_counter_regs); 3250 goto reg_err; 3251 } 3252 3253 if (INTEL_GEN(dev_priv) < 8) { 3254 if (args->n_flex_regs != 0) { 3255 err = -EINVAL; 3256 goto reg_err; 3257 } 3258 } else { 3259 oa_config->flex_regs_len = args->n_flex_regs; 3260 oa_config->flex_regs = 3261 alloc_oa_regs(dev_priv, 3262 dev_priv->perf.oa.ops.is_valid_flex_reg, 3263 u64_to_user_ptr(args->flex_regs_ptr), 3264 args->n_flex_regs); 3265 3266 if (IS_ERR(oa_config->flex_regs)) { 3267 DRM_DEBUG("Failed to create OA config for flex_regs\n"); 3268 err = PTR_ERR(oa_config->flex_regs); 3269 goto reg_err; 3270 } 3271 } 3272 3273 err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); 3274 if (err) 3275 goto reg_err; 3276 3277 /* We shouldn't have too many configs, so this iteration shouldn't be 3278 * too costly. 3279 */ 3280 idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) { 3281 if (!strcmp(tmp->uuid, oa_config->uuid)) { 3282 DRM_DEBUG("OA config already exists with this uuid\n"); 3283 err = -EADDRINUSE; 3284 goto sysfs_err; 3285 } 3286 } 3287 3288 err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config); 3289 if (err) { 3290 DRM_DEBUG("Failed to create sysfs entry for OA config\n"); 3291 goto sysfs_err; 3292 } 3293 3294 /* Config id 0 is invalid, id 1 for kernel stored test config. */ 3295 oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr, 3296 oa_config, 2, 3297 0, GFP_KERNEL); 3298 if (oa_config->id < 0) { 3299 DRM_DEBUG("Failed to create sysfs entry for OA config\n"); 3300 err = oa_config->id; 3301 goto sysfs_err; 3302 } 3303 3304 mutex_unlock(&dev_priv->perf.metrics_lock); 3305 3306 DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id); 3307 3308 return oa_config->id; 3309 3310 sysfs_err: 3311 mutex_unlock(&dev_priv->perf.metrics_lock); 3312 reg_err: 3313 put_oa_config(dev_priv, oa_config); 3314 DRM_DEBUG("Failed to add new OA config\n"); 3315 return err; 3316 } 3317 3318 /** 3319 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config 3320 * @dev: drm device 3321 * @data: ioctl data (pointer to u64 integer) copied from userspace 3322 * @file: drm file 3323 * 3324 * Configs can be removed while being used, the will stop appearing in sysfs 3325 * and their content will be freed when the stream using the config is closed. 3326 * 3327 * Returns: 0 on success or a negative error code on failure. 3328 */ 3329 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, 3330 struct drm_file *file) 3331 { 3332 struct drm_i915_private *dev_priv = dev->dev_private; 3333 u64 *arg = data; 3334 struct i915_oa_config *oa_config; 3335 int ret; 3336 3337 if (!dev_priv->perf.initialized) { 3338 DRM_DEBUG("i915 perf interface not available for this system\n"); 3339 return -ENOTSUPP; 3340 } 3341 3342 if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { 3343 DRM_DEBUG("Insufficient privileges to remove i915 OA config\n"); 3344 return -EACCES; 3345 } 3346 3347 ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); 3348 if (ret) 3349 goto lock_err; 3350 3351 oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg); 3352 if (!oa_config) { 3353 DRM_DEBUG("Failed to remove unknown OA config\n"); 3354 ret = -ENOENT; 3355 goto config_err; 3356 } 3357 3358 GEM_BUG_ON(*arg != oa_config->id); 3359 3360 sysfs_remove_group(dev_priv->perf.metrics_kobj, 3361 &oa_config->sysfs_metric); 3362 3363 idr_remove(&dev_priv->perf.metrics_idr, *arg); 3364 3365 DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id); 3366 3367 put_oa_config(dev_priv, oa_config); 3368 3369 config_err: 3370 mutex_unlock(&dev_priv->perf.metrics_lock); 3371 lock_err: 3372 return ret; 3373 } 3374 3375 static struct ctl_table oa_table[] = { 3376 { 3377 .procname = "perf_stream_paranoid", 3378 .data = &i915_perf_stream_paranoid, 3379 .maxlen = sizeof(i915_perf_stream_paranoid), 3380 .mode = 0644, 3381 .proc_handler = proc_dointvec_minmax, 3382 .extra1 = SYSCTL_ZERO, 3383 .extra2 = SYSCTL_ONE, 3384 }, 3385 { 3386 .procname = "oa_max_sample_rate", 3387 .data = &i915_oa_max_sample_rate, 3388 .maxlen = sizeof(i915_oa_max_sample_rate), 3389 .mode = 0644, 3390 .proc_handler = proc_dointvec_minmax, 3391 .extra1 = SYSCTL_ZERO, 3392 .extra2 = &oa_sample_rate_hard_limit, 3393 }, 3394 {} 3395 }; 3396 3397 static struct ctl_table i915_root[] = { 3398 { 3399 .procname = "i915", 3400 .maxlen = 0, 3401 .mode = 0555, 3402 .child = oa_table, 3403 }, 3404 {} 3405 }; 3406 3407 static struct ctl_table dev_root[] = { 3408 { 3409 .procname = "dev", 3410 .maxlen = 0, 3411 .mode = 0555, 3412 .child = i915_root, 3413 }, 3414 {} 3415 }; 3416 3417 /** 3418 * i915_perf_init - initialize i915-perf state on module load 3419 * @dev_priv: i915 device instance 3420 * 3421 * Initializes i915-perf state without exposing anything to userspace. 3422 * 3423 * Note: i915-perf initialization is split into an 'init' and 'register' 3424 * phase with the i915_perf_register() exposing state to userspace. 3425 */ 3426 void i915_perf_init(struct drm_i915_private *dev_priv) 3427 { 3428 if (IS_HASWELL(dev_priv)) { 3429 dev_priv->perf.oa.ops.is_valid_b_counter_reg = 3430 gen7_is_valid_b_counter_addr; 3431 dev_priv->perf.oa.ops.is_valid_mux_reg = 3432 hsw_is_valid_mux_addr; 3433 dev_priv->perf.oa.ops.is_valid_flex_reg = NULL; 3434 dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set; 3435 dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set; 3436 dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable; 3437 dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable; 3438 dev_priv->perf.oa.ops.read = gen7_oa_read; 3439 dev_priv->perf.oa.ops.oa_hw_tail_read = 3440 gen7_oa_hw_tail_read; 3441 3442 dev_priv->perf.oa.oa_formats = hsw_oa_formats; 3443 } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { 3444 /* Note: that although we could theoretically also support the 3445 * legacy ringbuffer mode on BDW (and earlier iterations of 3446 * this driver, before upstreaming did this) it didn't seem 3447 * worth the complexity to maintain now that BDW+ enable 3448 * execlist mode by default. 3449 */ 3450 dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats; 3451 3452 dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable; 3453 dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable; 3454 dev_priv->perf.oa.ops.read = gen8_oa_read; 3455 dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read; 3456 3457 if (IS_GEN_RANGE(dev_priv, 8, 9)) { 3458 dev_priv->perf.oa.ops.is_valid_b_counter_reg = 3459 gen7_is_valid_b_counter_addr; 3460 dev_priv->perf.oa.ops.is_valid_mux_reg = 3461 gen8_is_valid_mux_addr; 3462 dev_priv->perf.oa.ops.is_valid_flex_reg = 3463 gen8_is_valid_flex_addr; 3464 3465 if (IS_CHERRYVIEW(dev_priv)) { 3466 dev_priv->perf.oa.ops.is_valid_mux_reg = 3467 chv_is_valid_mux_addr; 3468 } 3469 3470 dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; 3471 dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set; 3472 3473 if (IS_GEN(dev_priv, 8)) { 3474 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120; 3475 dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce; 3476 3477 dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25); 3478 } else { 3479 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; 3480 dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; 3481 3482 dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); 3483 } 3484 } else if (IS_GEN_RANGE(dev_priv, 10, 11)) { 3485 dev_priv->perf.oa.ops.is_valid_b_counter_reg = 3486 gen7_is_valid_b_counter_addr; 3487 dev_priv->perf.oa.ops.is_valid_mux_reg = 3488 gen10_is_valid_mux_addr; 3489 dev_priv->perf.oa.ops.is_valid_flex_reg = 3490 gen8_is_valid_flex_addr; 3491 3492 dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; 3493 dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set; 3494 3495 if (IS_GEN(dev_priv, 10)) { 3496 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; 3497 dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; 3498 } else { 3499 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124; 3500 dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e; 3501 } 3502 dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); 3503 } 3504 } 3505 3506 if (dev_priv->perf.oa.ops.enable_metric_set) { 3507 hrtimer_init(&dev_priv->perf.oa.poll_check_timer, 3508 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3509 dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb; 3510 init_waitqueue_head(&dev_priv->perf.oa.poll_wq); 3511 3512 INIT_LIST_HEAD(&dev_priv->perf.streams); 3513 mutex_init(&dev_priv->perf.lock); 3514 spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock); 3515 3516 oa_sample_rate_hard_limit = 1000 * 3517 (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2); 3518 dev_priv->perf.sysctl_header = register_sysctl_table(dev_root); 3519 3520 mutex_init(&dev_priv->perf.metrics_lock); 3521 idr_init(&dev_priv->perf.metrics_idr); 3522 3523 dev_priv->perf.initialized = true; 3524 } 3525 } 3526 3527 static int destroy_config(int id, void *p, void *data) 3528 { 3529 struct drm_i915_private *dev_priv = data; 3530 struct i915_oa_config *oa_config = p; 3531 3532 put_oa_config(dev_priv, oa_config); 3533 3534 return 0; 3535 } 3536 3537 /** 3538 * i915_perf_fini - Counter part to i915_perf_init() 3539 * @dev_priv: i915 device instance 3540 */ 3541 void i915_perf_fini(struct drm_i915_private *dev_priv) 3542 { 3543 if (!dev_priv->perf.initialized) 3544 return; 3545 3546 idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv); 3547 idr_destroy(&dev_priv->perf.metrics_idr); 3548 3549 unregister_sysctl_table(dev_priv->perf.sysctl_header); 3550 3551 memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops)); 3552 3553 dev_priv->perf.initialized = false; 3554 } 3555