1 /* 2 * Copyright © 2015-2016 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Robert Bragg <robert@sixbynine.org> 25 */ 26 27 28 /** 29 * DOC: i915 Perf Overview 30 * 31 * Gen graphics supports a large number of performance counters that can help 32 * driver and application developers understand and optimize their use of the 33 * GPU. 34 * 35 * This i915 perf interface enables userspace to configure and open a file 36 * descriptor representing a stream of GPU metrics which can then be read() as 37 * a stream of sample records. 38 * 39 * The interface is particularly suited to exposing buffered metrics that are 40 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU. 41 * 42 * Streams representing a single context are accessible to applications with a 43 * corresponding drm file descriptor, such that OpenGL can use the interface 44 * without special privileges. Access to system-wide metrics requires root 45 * privileges by default, unless changed via the dev.i915.perf_event_paranoid 46 * sysctl option. 47 * 48 */ 49 50 /** 51 * DOC: i915 Perf History and Comparison with Core Perf 52 * 53 * The interface was initially inspired by the core Perf infrastructure but 54 * some notable differences are: 55 * 56 * i915 perf file descriptors represent a "stream" instead of an "event"; where 57 * a perf event primarily corresponds to a single 64bit value, while a stream 58 * might sample sets of tightly-coupled counters, depending on the 59 * configuration. For example the Gen OA unit isn't designed to support 60 * orthogonal configurations of individual counters; it's configured for a set 61 * of related counters. Samples for an i915 perf stream capturing OA metrics 62 * will include a set of counter values packed in a compact HW specific format. 63 * The OA unit supports a number of different packing formats which can be 64 * selected by the user opening the stream. Perf has support for grouping 65 * events, but each event in the group is configured, validated and 66 * authenticated individually with separate system calls. 67 * 68 * i915 perf stream configurations are provided as an array of u64 (key,value) 69 * pairs, instead of a fixed struct with multiple miscellaneous config members, 70 * interleaved with event-type specific members. 71 * 72 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer. 73 * The supported metrics are being written to memory by the GPU unsynchronized 74 * with the CPU, using HW specific packing formats for counter sets. Sometimes 75 * the constraints on HW configuration require reports to be filtered before it 76 * would be acceptable to expose them to unprivileged applications - to hide 77 * the metrics of other processes/contexts. For these use cases a read() based 78 * interface is a good fit, and provides an opportunity to filter data as it 79 * gets copied from the GPU mapped buffers to userspace buffers. 80 * 81 * 82 * Issues hit with first prototype based on Core Perf 83 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 * 85 * The first prototype of this driver was based on the core perf 86 * infrastructure, and while we did make that mostly work, with some changes to 87 * perf, we found we were breaking or working around too many assumptions baked 88 * into perf's currently cpu centric design. 89 * 90 * In the end we didn't see a clear benefit to making perf's implementation and 91 * interface more complex by changing design assumptions while we knew we still 92 * wouldn't be able to use any existing perf based userspace tools. 93 * 94 * Also considering the Gen specific nature of the Observability hardware and 95 * how userspace will sometimes need to combine i915 perf OA metrics with 96 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're 97 * expecting the interface to be used by a platform specific userspace such as 98 * OpenGL or tools. This is to say; we aren't inherently missing out on having 99 * a standard vendor/architecture agnostic interface by not using perf. 100 * 101 * 102 * For posterity, in case we might re-visit trying to adapt core perf to be 103 * better suited to exposing i915 metrics these were the main pain points we 104 * hit: 105 * 106 * - The perf based OA PMU driver broke some significant design assumptions: 107 * 108 * Existing perf pmus are used for profiling work on a cpu and we were 109 * introducing the idea of _IS_DEVICE pmus with different security 110 * implications, the need to fake cpu-related data (such as user/kernel 111 * registers) to fit with perf's current design, and adding _DEVICE records 112 * as a way to forward device-specific status records. 113 * 114 * The OA unit writes reports of counters into a circular buffer, without 115 * involvement from the CPU, making our PMU driver the first of a kind. 116 * 117 * Given the way we were periodically forward data from the GPU-mapped, OA 118 * buffer to perf's buffer, those bursts of sample writes looked to perf like 119 * we were sampling too fast and so we had to subvert its throttling checks. 120 * 121 * Perf supports groups of counters and allows those to be read via 122 * transactions internally but transactions currently seem designed to be 123 * explicitly initiated from the cpu (say in response to a userspace read()) 124 * and while we could pull a report out of the OA buffer we can't 125 * trigger a report from the cpu on demand. 126 * 127 * Related to being report based; the OA counters are configured in HW as a 128 * set while perf generally expects counter configurations to be orthogonal. 129 * Although counters can be associated with a group leader as they are 130 * opened, there's no clear precedent for being able to provide group-wide 131 * configuration attributes (for example we want to let userspace choose the 132 * OA unit report format used to capture all counters in a set, or specify a 133 * GPU context to filter metrics on). We avoided using perf's grouping 134 * feature and forwarded OA reports to userspace via perf's 'raw' sample 135 * field. This suited our userspace well considering how coupled the counters 136 * are when dealing with normalizing. It would be inconvenient to split 137 * counters up into separate events, only to require userspace to recombine 138 * them. For Mesa it's also convenient to be forwarded raw, periodic reports 139 * for combining with the side-band raw reports it captures using 140 * MI_REPORT_PERF_COUNT commands. 141 * 142 * - As a side note on perf's grouping feature; there was also some concern 143 * that using PERF_FORMAT_GROUP as a way to pack together counter values 144 * would quite drastically inflate our sample sizes, which would likely 145 * lower the effective sampling resolutions we could use when the available 146 * memory bandwidth is limited. 147 * 148 * With the OA unit's report formats, counters are packed together as 32 149 * or 40bit values, with the largest report size being 256 bytes. 150 * 151 * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a 152 * documented ordering to the values, implying PERF_FORMAT_ID must also be 153 * used to add a 64bit ID before each value; giving 16 bytes per counter. 154 * 155 * Related to counter orthogonality; we can't time share the OA unit, while 156 * event scheduling is a central design idea within perf for allowing 157 * userspace to open + enable more events than can be configured in HW at any 158 * one time. The OA unit is not designed to allow re-configuration while in 159 * use. We can't reconfigure the OA unit without losing internal OA unit 160 * state which we can't access explicitly to save and restore. Reconfiguring 161 * the OA unit is also relatively slow, involving ~100 register writes. From 162 * userspace Mesa also depends on a stable OA configuration when emitting 163 * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be 164 * disabled while there are outstanding MI_RPC commands lest we hang the 165 * command streamer. 166 * 167 * The contents of sample records aren't extensible by device drivers (i.e. 168 * the sample_type bits). As an example; Sourab Gupta had been looking to 169 * attach GPU timestamps to our OA samples. We were shoehorning OA reports 170 * into sample records by using the 'raw' field, but it's tricky to pack more 171 * than one thing into this field because events/core.c currently only lets a 172 * pmu give a single raw data pointer plus len which will be copied into the 173 * ring buffer. To include more than the OA report we'd have to copy the 174 * report into an intermediate larger buffer. I'd been considering allowing a 175 * vector of data+len values to be specified for copying the raw data, but 176 * it felt like a kludge to being using the raw field for this purpose. 177 * 178 * - It felt like our perf based PMU was making some technical compromises 179 * just for the sake of using perf: 180 * 181 * perf_event_open() requires events to either relate to a pid or a specific 182 * cpu core, while our device pmu related to neither. Events opened with a 183 * pid will be automatically enabled/disabled according to the scheduling of 184 * that process - so not appropriate for us. When an event is related to a 185 * cpu id, perf ensures pmu methods will be invoked via an inter process 186 * interrupt on that core. To avoid invasive changes our userspace opened OA 187 * perf events for a specific cpu. This was workable but it meant the 188 * majority of the OA driver ran in atomic context, including all OA report 189 * forwarding, which wasn't really necessary in our case and seems to make 190 * our locking requirements somewhat complex as we handled the interaction 191 * with the rest of the i915 driver. 192 */ 193 194 #include <linux/anon_inodes.h> 195 #include <linux/sizes.h> 196 197 #include "i915_drv.h" 198 #include "i915_oa_hsw.h" 199 #include "i915_oa_bdw.h" 200 #include "i915_oa_chv.h" 201 #include "i915_oa_sklgt2.h" 202 #include "i915_oa_sklgt3.h" 203 #include "i915_oa_sklgt4.h" 204 #include "i915_oa_bxt.h" 205 #include "i915_oa_kblgt2.h" 206 #include "i915_oa_kblgt3.h" 207 #include "i915_oa_glk.h" 208 209 /* HW requires this to be a power of two, between 128k and 16M, though driver 210 * is currently generally designed assuming the largest 16M size is used such 211 * that the overflow cases are unlikely in normal operation. 212 */ 213 #define OA_BUFFER_SIZE SZ_16M 214 215 #define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1)) 216 217 /** 218 * DOC: OA Tail Pointer Race 219 * 220 * There's a HW race condition between OA unit tail pointer register updates and 221 * writes to memory whereby the tail pointer can sometimes get ahead of what's 222 * been written out to the OA buffer so far (in terms of what's visible to the 223 * CPU). 224 * 225 * Although this can be observed explicitly while copying reports to userspace 226 * by checking for a zeroed report-id field in tail reports, we want to account 227 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant 228 * read() attempts. 229 * 230 * In effect we define a tail pointer for reading that lags the real tail 231 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough 232 * time for the corresponding reports to become visible to the CPU. 233 * 234 * To manage this we actually track two tail pointers: 235 * 1) An 'aging' tail with an associated timestamp that is tracked until we 236 * can trust the corresponding data is visible to the CPU; at which point 237 * it is considered 'aged'. 238 * 2) An 'aged' tail that can be used for read()ing. 239 * 240 * The two separate pointers let us decouple read()s from tail pointer aging. 241 * 242 * The tail pointers are checked and updated at a limited rate within a hrtimer 243 * callback (the same callback that is used for delivering POLLIN events) 244 * 245 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which 246 * indicates that an updated tail pointer is needed. 247 * 248 * Most of the implementation details for this workaround are in 249 * oa_buffer_check_unlocked() and _append_oa_reports() 250 * 251 * Note for posterity: previously the driver used to define an effective tail 252 * pointer that lagged the real pointer by a 'tail margin' measured in bytes 253 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency. 254 * This was flawed considering that the OA unit may also automatically generate 255 * non-periodic reports (such as on context switch) or the OA unit may be 256 * enabled without any periodic sampling. 257 */ 258 #define OA_TAIL_MARGIN_NSEC 100000ULL 259 #define INVALID_TAIL_PTR 0xffffffff 260 261 /* frequency for checking whether the OA unit has written new reports to the 262 * circular OA buffer... 263 */ 264 #define POLL_FREQUENCY 200 265 #define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY) 266 267 /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ 268 static int zero; 269 static int one = 1; 270 static u32 i915_perf_stream_paranoid = true; 271 272 /* The maximum exponent the hardware accepts is 63 (essentially it selects one 273 * of the 64bit timestamp bits to trigger reports from) but there's currently 274 * no known use case for sampling as infrequently as once per 47 thousand years. 275 * 276 * Since the timestamps included in OA reports are only 32bits it seems 277 * reasonable to limit the OA exponent where it's still possible to account for 278 * overflow in OA report timestamps. 279 */ 280 #define OA_EXPONENT_MAX 31 281 282 #define INVALID_CTX_ID 0xffffffff 283 284 /* On Gen8+ automatically triggered OA reports include a 'reason' field... */ 285 #define OAREPORT_REASON_MASK 0x3f 286 #define OAREPORT_REASON_SHIFT 19 287 #define OAREPORT_REASON_TIMER (1<<0) 288 #define OAREPORT_REASON_CTX_SWITCH (1<<3) 289 #define OAREPORT_REASON_CLK_RATIO (1<<5) 290 291 292 /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate 293 * 294 * The highest sampling frequency we can theoretically program the OA unit 295 * with is always half the timestamp frequency: E.g. 6.25Mhz for Haswell. 296 * 297 * Initialized just before we register the sysctl parameter. 298 */ 299 static int oa_sample_rate_hard_limit; 300 301 /* Theoretically we can program the OA unit to sample every 160ns but don't 302 * allow that by default unless root... 303 * 304 * The default threshold of 100000Hz is based on perf's similar 305 * kernel.perf_event_max_sample_rate sysctl parameter. 306 */ 307 static u32 i915_oa_max_sample_rate = 100000; 308 309 /* XXX: beware if future OA HW adds new report formats that the current 310 * code assumes all reports have a power-of-two size and ~(size - 1) can 311 * be used as a mask to align the OA tail pointer. 312 */ 313 static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { 314 [I915_OA_FORMAT_A13] = { 0, 64 }, 315 [I915_OA_FORMAT_A29] = { 1, 128 }, 316 [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 }, 317 /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */ 318 [I915_OA_FORMAT_B4_C8] = { 4, 64 }, 319 [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 }, 320 [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 }, 321 [I915_OA_FORMAT_C4_B8] = { 7, 64 }, 322 }; 323 324 static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = { 325 [I915_OA_FORMAT_A12] = { 0, 64 }, 326 [I915_OA_FORMAT_A12_B8_C8] = { 2, 128 }, 327 [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, 328 [I915_OA_FORMAT_C4_B8] = { 7, 64 }, 329 }; 330 331 #define SAMPLE_OA_REPORT (1<<0) 332 333 /** 334 * struct perf_open_properties - for validated properties given to open a stream 335 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags 336 * @single_context: Whether a single or all gpu contexts should be monitored 337 * @ctx_handle: A gem ctx handle for use with @single_context 338 * @metrics_set: An ID for an OA unit metric set advertised via sysfs 339 * @oa_format: An OA unit HW report format 340 * @oa_periodic: Whether to enable periodic OA unit sampling 341 * @oa_period_exponent: The OA unit sampling period is derived from this 342 * 343 * As read_properties_unlocked() enumerates and validates the properties given 344 * to open a stream of metrics the configuration is built up in the structure 345 * which starts out zero initialized. 346 */ 347 struct perf_open_properties { 348 u32 sample_flags; 349 350 u64 single_context:1; 351 u64 ctx_handle; 352 353 /* OA sampling state */ 354 int metrics_set; 355 int oa_format; 356 bool oa_periodic; 357 int oa_period_exponent; 358 }; 359 360 static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv) 361 { 362 return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK; 363 } 364 365 static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv) 366 { 367 u32 oastatus1 = I915_READ(GEN7_OASTATUS1); 368 369 return oastatus1 & GEN7_OASTATUS1_TAIL_MASK; 370 } 371 372 /** 373 * oa_buffer_check_unlocked - check for data and update tail ptr state 374 * @dev_priv: i915 device instance 375 * 376 * This is either called via fops (for blocking reads in user ctx) or the poll 377 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check 378 * if there is data available for userspace to read. 379 * 380 * This function is central to providing a workaround for the OA unit tail 381 * pointer having a race with respect to what data is visible to the CPU. 382 * It is responsible for reading tail pointers from the hardware and giving 383 * the pointers time to 'age' before they are made available for reading. 384 * (See description of OA_TAIL_MARGIN_NSEC above for further details.) 385 * 386 * Besides returning true when there is data available to read() this function 387 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp 388 * and .aged_tail_idx state used for reading. 389 * 390 * Note: It's safe to read OA config state here unlocked, assuming that this is 391 * only called while the stream is enabled, while the global OA configuration 392 * can't be modified. 393 * 394 * Returns: %true if the OA buffer contains data, else %false 395 */ 396 static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv) 397 { 398 int report_size = dev_priv->perf.oa.oa_buffer.format_size; 399 unsigned long flags; 400 unsigned int aged_idx; 401 u32 head, hw_tail, aged_tail, aging_tail; 402 u64 now; 403 404 /* We have to consider the (unlikely) possibility that read() errors 405 * could result in an OA buffer reset which might reset the head, 406 * tails[] and aged_tail state. 407 */ 408 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 409 410 /* NB: The head we observe here might effectively be a little out of 411 * date (between head and tails[aged_idx].offset if there is currently 412 * a read() in progress. 413 */ 414 head = dev_priv->perf.oa.oa_buffer.head; 415 416 aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx; 417 aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset; 418 aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset; 419 420 hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv); 421 422 /* The tail pointer increases in 64 byte increments, 423 * not in report_size steps... 424 */ 425 hw_tail &= ~(report_size - 1); 426 427 now = ktime_get_mono_fast_ns(); 428 429 /* Update the aged tail 430 * 431 * Flip the tail pointer available for read()s once the aging tail is 432 * old enough to trust that the corresponding data will be visible to 433 * the CPU... 434 * 435 * Do this before updating the aging pointer in case we may be able to 436 * immediately start aging a new pointer too (if new data has become 437 * available) without needing to wait for a later hrtimer callback. 438 */ 439 if (aging_tail != INVALID_TAIL_PTR && 440 ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) > 441 OA_TAIL_MARGIN_NSEC)) { 442 443 aged_idx ^= 1; 444 dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx; 445 446 aged_tail = aging_tail; 447 448 /* Mark that we need a new pointer to start aging... */ 449 dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR; 450 aging_tail = INVALID_TAIL_PTR; 451 } 452 453 /* Update the aging tail 454 * 455 * We throttle aging tail updates until we have a new tail that 456 * represents >= one report more data than is already available for 457 * reading. This ensures there will be enough data for a successful 458 * read once this new pointer has aged and ensures we will give the new 459 * pointer time to age. 460 */ 461 if (aging_tail == INVALID_TAIL_PTR && 462 (aged_tail == INVALID_TAIL_PTR || 463 OA_TAKEN(hw_tail, aged_tail) >= report_size)) { 464 struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma; 465 u32 gtt_offset = i915_ggtt_offset(vma); 466 467 /* Be paranoid and do a bounds check on the pointer read back 468 * from hardware, just in case some spurious hardware condition 469 * could put the tail out of bounds... 470 */ 471 if (hw_tail >= gtt_offset && 472 hw_tail < (gtt_offset + OA_BUFFER_SIZE)) { 473 dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = 474 aging_tail = hw_tail; 475 dev_priv->perf.oa.oa_buffer.aging_timestamp = now; 476 } else { 477 DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n", 478 hw_tail); 479 } 480 } 481 482 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 483 484 return aged_tail == INVALID_TAIL_PTR ? 485 false : OA_TAKEN(aged_tail, head) >= report_size; 486 } 487 488 /** 489 * append_oa_status - Appends a status record to a userspace read() buffer. 490 * @stream: An i915-perf stream opened for OA metrics 491 * @buf: destination buffer given by userspace 492 * @count: the number of bytes userspace wants to read 493 * @offset: (inout): the current position for writing into @buf 494 * @type: The kind of status to report to userspace 495 * 496 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`) 497 * into the userspace read() buffer. 498 * 499 * The @buf @offset will only be updated on success. 500 * 501 * Returns: 0 on success, negative error code on failure. 502 */ 503 static int append_oa_status(struct i915_perf_stream *stream, 504 char __user *buf, 505 size_t count, 506 size_t *offset, 507 enum drm_i915_perf_record_type type) 508 { 509 struct drm_i915_perf_record_header header = { type, 0, sizeof(header) }; 510 511 if ((count - *offset) < header.size) 512 return -ENOSPC; 513 514 if (copy_to_user(buf + *offset, &header, sizeof(header))) 515 return -EFAULT; 516 517 (*offset) += header.size; 518 519 return 0; 520 } 521 522 /** 523 * append_oa_sample - Copies single OA report into userspace read() buffer. 524 * @stream: An i915-perf stream opened for OA metrics 525 * @buf: destination buffer given by userspace 526 * @count: the number of bytes userspace wants to read 527 * @offset: (inout): the current position for writing into @buf 528 * @report: A single OA report to (optionally) include as part of the sample 529 * 530 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*` 531 * properties when opening a stream, tracked as `stream->sample_flags`. This 532 * function copies the requested components of a single sample to the given 533 * read() @buf. 534 * 535 * The @buf @offset will only be updated on success. 536 * 537 * Returns: 0 on success, negative error code on failure. 538 */ 539 static int append_oa_sample(struct i915_perf_stream *stream, 540 char __user *buf, 541 size_t count, 542 size_t *offset, 543 const u8 *report) 544 { 545 struct drm_i915_private *dev_priv = stream->dev_priv; 546 int report_size = dev_priv->perf.oa.oa_buffer.format_size; 547 struct drm_i915_perf_record_header header; 548 u32 sample_flags = stream->sample_flags; 549 550 header.type = DRM_I915_PERF_RECORD_SAMPLE; 551 header.pad = 0; 552 header.size = stream->sample_size; 553 554 if ((count - *offset) < header.size) 555 return -ENOSPC; 556 557 buf += *offset; 558 if (copy_to_user(buf, &header, sizeof(header))) 559 return -EFAULT; 560 buf += sizeof(header); 561 562 if (sample_flags & SAMPLE_OA_REPORT) { 563 if (copy_to_user(buf, report, report_size)) 564 return -EFAULT; 565 } 566 567 (*offset) += header.size; 568 569 return 0; 570 } 571 572 /** 573 * Copies all buffered OA reports into userspace read() buffer. 574 * @stream: An i915-perf stream opened for OA metrics 575 * @buf: destination buffer given by userspace 576 * @count: the number of bytes userspace wants to read 577 * @offset: (inout): the current position for writing into @buf 578 * 579 * Notably any error condition resulting in a short read (-%ENOSPC or 580 * -%EFAULT) will be returned even though one or more records may 581 * have been successfully copied. In this case it's up to the caller 582 * to decide if the error should be squashed before returning to 583 * userspace. 584 * 585 * Note: reports are consumed from the head, and appended to the 586 * tail, so the tail chases the head?... If you think that's mad 587 * and back-to-front you're not alone, but this follows the 588 * Gen PRM naming convention. 589 * 590 * Returns: 0 on success, negative error code on failure. 591 */ 592 static int gen8_append_oa_reports(struct i915_perf_stream *stream, 593 char __user *buf, 594 size_t count, 595 size_t *offset) 596 { 597 struct drm_i915_private *dev_priv = stream->dev_priv; 598 int report_size = dev_priv->perf.oa.oa_buffer.format_size; 599 u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr; 600 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); 601 u32 mask = (OA_BUFFER_SIZE - 1); 602 size_t start_offset = *offset; 603 unsigned long flags; 604 unsigned int aged_tail_idx; 605 u32 head, tail; 606 u32 taken; 607 int ret = 0; 608 609 if (WARN_ON(!stream->enabled)) 610 return -EIO; 611 612 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 613 614 head = dev_priv->perf.oa.oa_buffer.head; 615 aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx; 616 tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset; 617 618 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 619 620 /* 621 * An invalid tail pointer here means we're still waiting for the poll 622 * hrtimer callback to give us a pointer 623 */ 624 if (tail == INVALID_TAIL_PTR) 625 return -EAGAIN; 626 627 /* 628 * NB: oa_buffer.head/tail include the gtt_offset which we don't want 629 * while indexing relative to oa_buf_base. 630 */ 631 head -= gtt_offset; 632 tail -= gtt_offset; 633 634 /* 635 * An out of bounds or misaligned head or tail pointer implies a driver 636 * bug since we validate + align the tail pointers we read from the 637 * hardware and we are in full control of the head pointer which should 638 * only be incremented by multiples of the report size (notably also 639 * all a power of two). 640 */ 641 if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size || 642 tail > OA_BUFFER_SIZE || tail % report_size, 643 "Inconsistent OA buffer pointers: head = %u, tail = %u\n", 644 head, tail)) 645 return -EIO; 646 647 648 for (/* none */; 649 (taken = OA_TAKEN(tail, head)); 650 head = (head + report_size) & mask) { 651 u8 *report = oa_buf_base + head; 652 u32 *report32 = (void *)report; 653 u32 ctx_id; 654 u32 reason; 655 656 /* 657 * All the report sizes factor neatly into the buffer 658 * size so we never expect to see a report split 659 * between the beginning and end of the buffer. 660 * 661 * Given the initial alignment check a misalignment 662 * here would imply a driver bug that would result 663 * in an overrun. 664 */ 665 if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { 666 DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); 667 break; 668 } 669 670 /* 671 * The reason field includes flags identifying what 672 * triggered this specific report (mostly timer 673 * triggered or e.g. due to a context switch). 674 * 675 * This field is never expected to be zero so we can 676 * check that the report isn't invalid before copying 677 * it to userspace... 678 */ 679 reason = ((report32[0] >> OAREPORT_REASON_SHIFT) & 680 OAREPORT_REASON_MASK); 681 if (reason == 0) { 682 if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs)) 683 DRM_NOTE("Skipping spurious, invalid OA report\n"); 684 continue; 685 } 686 687 /* 688 * XXX: Just keep the lower 21 bits for now since I'm not 689 * entirely sure if the HW touches any of the higher bits in 690 * this field 691 */ 692 ctx_id = report32[2] & 0x1fffff; 693 694 /* 695 * Squash whatever is in the CTX_ID field if it's marked as 696 * invalid to be sure we avoid false-positive, single-context 697 * filtering below... 698 * 699 * Note: that we don't clear the valid_ctx_bit so userspace can 700 * understand that the ID has been squashed by the kernel. 701 */ 702 if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit)) 703 ctx_id = report32[2] = INVALID_CTX_ID; 704 705 /* 706 * NB: For Gen 8 the OA unit no longer supports clock gating 707 * off for a specific context and the kernel can't securely 708 * stop the counters from updating as system-wide / global 709 * values. 710 * 711 * Automatic reports now include a context ID so reports can be 712 * filtered on the cpu but it's not worth trying to 713 * automatically subtract/hide counter progress for other 714 * contexts while filtering since we can't stop userspace 715 * issuing MI_REPORT_PERF_COUNT commands which would still 716 * provide a side-band view of the real values. 717 * 718 * To allow userspace (such as Mesa/GL_INTEL_performance_query) 719 * to normalize counters for a single filtered context then it 720 * needs be forwarded bookend context-switch reports so that it 721 * can track switches in between MI_REPORT_PERF_COUNT commands 722 * and can itself subtract/ignore the progress of counters 723 * associated with other contexts. Note that the hardware 724 * automatically triggers reports when switching to a new 725 * context which are tagged with the ID of the newly active 726 * context. To avoid the complexity (and likely fragility) of 727 * reading ahead while parsing reports to try and minimize 728 * forwarding redundant context switch reports (i.e. between 729 * other, unrelated contexts) we simply elect to forward them 730 * all. 731 * 732 * We don't rely solely on the reason field to identify context 733 * switches since it's not-uncommon for periodic samples to 734 * identify a switch before any 'context switch' report. 735 */ 736 if (!dev_priv->perf.oa.exclusive_stream->ctx || 737 dev_priv->perf.oa.specific_ctx_id == ctx_id || 738 (dev_priv->perf.oa.oa_buffer.last_ctx_id == 739 dev_priv->perf.oa.specific_ctx_id) || 740 reason & OAREPORT_REASON_CTX_SWITCH) { 741 742 /* 743 * While filtering for a single context we avoid 744 * leaking the IDs of other contexts. 745 */ 746 if (dev_priv->perf.oa.exclusive_stream->ctx && 747 dev_priv->perf.oa.specific_ctx_id != ctx_id) { 748 report32[2] = INVALID_CTX_ID; 749 } 750 751 ret = append_oa_sample(stream, buf, count, offset, 752 report); 753 if (ret) 754 break; 755 756 dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id; 757 } 758 759 /* 760 * The above reason field sanity check is based on 761 * the assumption that the OA buffer is initially 762 * zeroed and we reset the field after copying so the 763 * check is still meaningful once old reports start 764 * being overwritten. 765 */ 766 report32[0] = 0; 767 } 768 769 if (start_offset != *offset) { 770 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 771 772 /* 773 * We removed the gtt_offset for the copy loop above, indexing 774 * relative to oa_buf_base so put back here... 775 */ 776 head += gtt_offset; 777 778 I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK); 779 dev_priv->perf.oa.oa_buffer.head = head; 780 781 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 782 } 783 784 return ret; 785 } 786 787 /** 788 * gen8_oa_read - copy status records then buffered OA reports 789 * @stream: An i915-perf stream opened for OA metrics 790 * @buf: destination buffer given by userspace 791 * @count: the number of bytes userspace wants to read 792 * @offset: (inout): the current position for writing into @buf 793 * 794 * Checks OA unit status registers and if necessary appends corresponding 795 * status records for userspace (such as for a buffer full condition) and then 796 * initiate appending any buffered OA reports. 797 * 798 * Updates @offset according to the number of bytes successfully copied into 799 * the userspace buffer. 800 * 801 * NB: some data may be successfully copied to the userspace buffer 802 * even if an error is returned, and this is reflected in the 803 * updated @offset. 804 * 805 * Returns: zero on success or a negative error code 806 */ 807 static int gen8_oa_read(struct i915_perf_stream *stream, 808 char __user *buf, 809 size_t count, 810 size_t *offset) 811 { 812 struct drm_i915_private *dev_priv = stream->dev_priv; 813 u32 oastatus; 814 int ret; 815 816 if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr)) 817 return -EIO; 818 819 oastatus = I915_READ(GEN8_OASTATUS); 820 821 /* 822 * We treat OABUFFER_OVERFLOW as a significant error: 823 * 824 * Although theoretically we could handle this more gracefully 825 * sometimes, some Gens don't correctly suppress certain 826 * automatically triggered reports in this condition and so we 827 * have to assume that old reports are now being trampled 828 * over. 829 * 830 * Considering how we don't currently give userspace control 831 * over the OA buffer size and always configure a large 16MB 832 * buffer, then a buffer overflow does anyway likely indicate 833 * that something has gone quite badly wrong. 834 */ 835 if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) { 836 ret = append_oa_status(stream, buf, count, offset, 837 DRM_I915_PERF_RECORD_OA_BUFFER_LOST); 838 if (ret) 839 return ret; 840 841 DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", 842 dev_priv->perf.oa.period_exponent); 843 844 dev_priv->perf.oa.ops.oa_disable(dev_priv); 845 dev_priv->perf.oa.ops.oa_enable(dev_priv); 846 847 /* 848 * Note: .oa_enable() is expected to re-init the oabuffer and 849 * reset GEN8_OASTATUS for us 850 */ 851 oastatus = I915_READ(GEN8_OASTATUS); 852 } 853 854 if (oastatus & GEN8_OASTATUS_REPORT_LOST) { 855 ret = append_oa_status(stream, buf, count, offset, 856 DRM_I915_PERF_RECORD_OA_REPORT_LOST); 857 if (ret) 858 return ret; 859 I915_WRITE(GEN8_OASTATUS, 860 oastatus & ~GEN8_OASTATUS_REPORT_LOST); 861 } 862 863 return gen8_append_oa_reports(stream, buf, count, offset); 864 } 865 866 /** 867 * Copies all buffered OA reports into userspace read() buffer. 868 * @stream: An i915-perf stream opened for OA metrics 869 * @buf: destination buffer given by userspace 870 * @count: the number of bytes userspace wants to read 871 * @offset: (inout): the current position for writing into @buf 872 * 873 * Notably any error condition resulting in a short read (-%ENOSPC or 874 * -%EFAULT) will be returned even though one or more records may 875 * have been successfully copied. In this case it's up to the caller 876 * to decide if the error should be squashed before returning to 877 * userspace. 878 * 879 * Note: reports are consumed from the head, and appended to the 880 * tail, so the tail chases the head?... If you think that's mad 881 * and back-to-front you're not alone, but this follows the 882 * Gen PRM naming convention. 883 * 884 * Returns: 0 on success, negative error code on failure. 885 */ 886 static int gen7_append_oa_reports(struct i915_perf_stream *stream, 887 char __user *buf, 888 size_t count, 889 size_t *offset) 890 { 891 struct drm_i915_private *dev_priv = stream->dev_priv; 892 int report_size = dev_priv->perf.oa.oa_buffer.format_size; 893 u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr; 894 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); 895 u32 mask = (OA_BUFFER_SIZE - 1); 896 size_t start_offset = *offset; 897 unsigned long flags; 898 unsigned int aged_tail_idx; 899 u32 head, tail; 900 u32 taken; 901 int ret = 0; 902 903 if (WARN_ON(!stream->enabled)) 904 return -EIO; 905 906 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 907 908 head = dev_priv->perf.oa.oa_buffer.head; 909 aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx; 910 tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset; 911 912 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 913 914 /* An invalid tail pointer here means we're still waiting for the poll 915 * hrtimer callback to give us a pointer 916 */ 917 if (tail == INVALID_TAIL_PTR) 918 return -EAGAIN; 919 920 /* NB: oa_buffer.head/tail include the gtt_offset which we don't want 921 * while indexing relative to oa_buf_base. 922 */ 923 head -= gtt_offset; 924 tail -= gtt_offset; 925 926 /* An out of bounds or misaligned head or tail pointer implies a driver 927 * bug since we validate + align the tail pointers we read from the 928 * hardware and we are in full control of the head pointer which should 929 * only be incremented by multiples of the report size (notably also 930 * all a power of two). 931 */ 932 if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size || 933 tail > OA_BUFFER_SIZE || tail % report_size, 934 "Inconsistent OA buffer pointers: head = %u, tail = %u\n", 935 head, tail)) 936 return -EIO; 937 938 939 for (/* none */; 940 (taken = OA_TAKEN(tail, head)); 941 head = (head + report_size) & mask) { 942 u8 *report = oa_buf_base + head; 943 u32 *report32 = (void *)report; 944 945 /* All the report sizes factor neatly into the buffer 946 * size so we never expect to see a report split 947 * between the beginning and end of the buffer. 948 * 949 * Given the initial alignment check a misalignment 950 * here would imply a driver bug that would result 951 * in an overrun. 952 */ 953 if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { 954 DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); 955 break; 956 } 957 958 /* The report-ID field for periodic samples includes 959 * some undocumented flags related to what triggered 960 * the report and is never expected to be zero so we 961 * can check that the report isn't invalid before 962 * copying it to userspace... 963 */ 964 if (report32[0] == 0) { 965 if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs)) 966 DRM_NOTE("Skipping spurious, invalid OA report\n"); 967 continue; 968 } 969 970 ret = append_oa_sample(stream, buf, count, offset, report); 971 if (ret) 972 break; 973 974 /* The above report-id field sanity check is based on 975 * the assumption that the OA buffer is initially 976 * zeroed and we reset the field after copying so the 977 * check is still meaningful once old reports start 978 * being overwritten. 979 */ 980 report32[0] = 0; 981 } 982 983 if (start_offset != *offset) { 984 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 985 986 /* We removed the gtt_offset for the copy loop above, indexing 987 * relative to oa_buf_base so put back here... 988 */ 989 head += gtt_offset; 990 991 I915_WRITE(GEN7_OASTATUS2, 992 ((head & GEN7_OASTATUS2_HEAD_MASK) | 993 OA_MEM_SELECT_GGTT)); 994 dev_priv->perf.oa.oa_buffer.head = head; 995 996 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 997 } 998 999 return ret; 1000 } 1001 1002 /** 1003 * gen7_oa_read - copy status records then buffered OA reports 1004 * @stream: An i915-perf stream opened for OA metrics 1005 * @buf: destination buffer given by userspace 1006 * @count: the number of bytes userspace wants to read 1007 * @offset: (inout): the current position for writing into @buf 1008 * 1009 * Checks Gen 7 specific OA unit status registers and if necessary appends 1010 * corresponding status records for userspace (such as for a buffer full 1011 * condition) and then initiate appending any buffered OA reports. 1012 * 1013 * Updates @offset according to the number of bytes successfully copied into 1014 * the userspace buffer. 1015 * 1016 * Returns: zero on success or a negative error code 1017 */ 1018 static int gen7_oa_read(struct i915_perf_stream *stream, 1019 char __user *buf, 1020 size_t count, 1021 size_t *offset) 1022 { 1023 struct drm_i915_private *dev_priv = stream->dev_priv; 1024 u32 oastatus1; 1025 int ret; 1026 1027 if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr)) 1028 return -EIO; 1029 1030 oastatus1 = I915_READ(GEN7_OASTATUS1); 1031 1032 /* XXX: On Haswell we don't have a safe way to clear oastatus1 1033 * bits while the OA unit is enabled (while the tail pointer 1034 * may be updated asynchronously) so we ignore status bits 1035 * that have already been reported to userspace. 1036 */ 1037 oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1; 1038 1039 /* We treat OABUFFER_OVERFLOW as a significant error: 1040 * 1041 * - The status can be interpreted to mean that the buffer is 1042 * currently full (with a higher precedence than OA_TAKEN() 1043 * which will start to report a near-empty buffer after an 1044 * overflow) but it's awkward that we can't clear the status 1045 * on Haswell, so without a reset we won't be able to catch 1046 * the state again. 1047 * 1048 * - Since it also implies the HW has started overwriting old 1049 * reports it may also affect our sanity checks for invalid 1050 * reports when copying to userspace that assume new reports 1051 * are being written to cleared memory. 1052 * 1053 * - In the future we may want to introduce a flight recorder 1054 * mode where the driver will automatically maintain a safe 1055 * guard band between head/tail, avoiding this overflow 1056 * condition, but we avoid the added driver complexity for 1057 * now. 1058 */ 1059 if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) { 1060 ret = append_oa_status(stream, buf, count, offset, 1061 DRM_I915_PERF_RECORD_OA_BUFFER_LOST); 1062 if (ret) 1063 return ret; 1064 1065 DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", 1066 dev_priv->perf.oa.period_exponent); 1067 1068 dev_priv->perf.oa.ops.oa_disable(dev_priv); 1069 dev_priv->perf.oa.ops.oa_enable(dev_priv); 1070 1071 oastatus1 = I915_READ(GEN7_OASTATUS1); 1072 } 1073 1074 if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) { 1075 ret = append_oa_status(stream, buf, count, offset, 1076 DRM_I915_PERF_RECORD_OA_REPORT_LOST); 1077 if (ret) 1078 return ret; 1079 dev_priv->perf.oa.gen7_latched_oastatus1 |= 1080 GEN7_OASTATUS1_REPORT_LOST; 1081 } 1082 1083 return gen7_append_oa_reports(stream, buf, count, offset); 1084 } 1085 1086 /** 1087 * i915_oa_wait_unlocked - handles blocking IO until OA data available 1088 * @stream: An i915-perf stream opened for OA metrics 1089 * 1090 * Called when userspace tries to read() from a blocking stream FD opened 1091 * for OA metrics. It waits until the hrtimer callback finds a non-empty 1092 * OA buffer and wakes us. 1093 * 1094 * Note: it's acceptable to have this return with some false positives 1095 * since any subsequent read handling will return -EAGAIN if there isn't 1096 * really data ready for userspace yet. 1097 * 1098 * Returns: zero on success or a negative error code 1099 */ 1100 static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) 1101 { 1102 struct drm_i915_private *dev_priv = stream->dev_priv; 1103 1104 /* We would wait indefinitely if periodic sampling is not enabled */ 1105 if (!dev_priv->perf.oa.periodic) 1106 return -EIO; 1107 1108 return wait_event_interruptible(dev_priv->perf.oa.poll_wq, 1109 oa_buffer_check_unlocked(dev_priv)); 1110 } 1111 1112 /** 1113 * i915_oa_poll_wait - call poll_wait() for an OA stream poll() 1114 * @stream: An i915-perf stream opened for OA metrics 1115 * @file: An i915 perf stream file 1116 * @wait: poll() state table 1117 * 1118 * For handling userspace polling on an i915 perf stream opened for OA metrics, 1119 * this starts a poll_wait with the wait queue that our hrtimer callback wakes 1120 * when it sees data ready to read in the circular OA buffer. 1121 */ 1122 static void i915_oa_poll_wait(struct i915_perf_stream *stream, 1123 struct file *file, 1124 poll_table *wait) 1125 { 1126 struct drm_i915_private *dev_priv = stream->dev_priv; 1127 1128 poll_wait(file, &dev_priv->perf.oa.poll_wq, wait); 1129 } 1130 1131 /** 1132 * i915_oa_read - just calls through to &i915_oa_ops->read 1133 * @stream: An i915-perf stream opened for OA metrics 1134 * @buf: destination buffer given by userspace 1135 * @count: the number of bytes userspace wants to read 1136 * @offset: (inout): the current position for writing into @buf 1137 * 1138 * Updates @offset according to the number of bytes successfully copied into 1139 * the userspace buffer. 1140 * 1141 * Returns: zero on success or a negative error code 1142 */ 1143 static int i915_oa_read(struct i915_perf_stream *stream, 1144 char __user *buf, 1145 size_t count, 1146 size_t *offset) 1147 { 1148 struct drm_i915_private *dev_priv = stream->dev_priv; 1149 1150 return dev_priv->perf.oa.ops.read(stream, buf, count, offset); 1151 } 1152 1153 /** 1154 * oa_get_render_ctx_id - determine and hold ctx hw id 1155 * @stream: An i915-perf stream opened for OA metrics 1156 * 1157 * Determine the render context hw id, and ensure it remains fixed for the 1158 * lifetime of the stream. This ensures that we don't have to worry about 1159 * updating the context ID in OACONTROL on the fly. 1160 * 1161 * Returns: zero on success or a negative error code 1162 */ 1163 static int oa_get_render_ctx_id(struct i915_perf_stream *stream) 1164 { 1165 struct drm_i915_private *dev_priv = stream->dev_priv; 1166 1167 if (i915.enable_execlists) 1168 dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id; 1169 else { 1170 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1171 struct intel_ring *ring; 1172 int ret; 1173 1174 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 1175 if (ret) 1176 return ret; 1177 1178 /* 1179 * As the ID is the gtt offset of the context's vma we 1180 * pin the vma to ensure the ID remains fixed. 1181 * 1182 * NB: implied RCS engine... 1183 */ 1184 ring = engine->context_pin(engine, stream->ctx); 1185 mutex_unlock(&dev_priv->drm.struct_mutex); 1186 if (IS_ERR(ring)) 1187 return PTR_ERR(ring); 1188 1189 1190 /* 1191 * Explicitly track the ID (instead of calling 1192 * i915_ggtt_offset() on the fly) considering the difference 1193 * with gen8+ and execlists 1194 */ 1195 dev_priv->perf.oa.specific_ctx_id = 1196 i915_ggtt_offset(stream->ctx->engine[engine->id].state); 1197 } 1198 1199 return 0; 1200 } 1201 1202 /** 1203 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold 1204 * @stream: An i915-perf stream opened for OA metrics 1205 * 1206 * In case anything needed doing to ensure the context HW ID would remain valid 1207 * for the lifetime of the stream, then that can be undone here. 1208 */ 1209 static void oa_put_render_ctx_id(struct i915_perf_stream *stream) 1210 { 1211 struct drm_i915_private *dev_priv = stream->dev_priv; 1212 1213 if (i915.enable_execlists) { 1214 dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; 1215 } else { 1216 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1217 1218 mutex_lock(&dev_priv->drm.struct_mutex); 1219 1220 dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; 1221 engine->context_unpin(engine, stream->ctx); 1222 1223 mutex_unlock(&dev_priv->drm.struct_mutex); 1224 } 1225 } 1226 1227 static void 1228 free_oa_buffer(struct drm_i915_private *i915) 1229 { 1230 mutex_lock(&i915->drm.struct_mutex); 1231 1232 i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj); 1233 i915_vma_unpin(i915->perf.oa.oa_buffer.vma); 1234 i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj); 1235 1236 i915->perf.oa.oa_buffer.vma = NULL; 1237 i915->perf.oa.oa_buffer.vaddr = NULL; 1238 1239 mutex_unlock(&i915->drm.struct_mutex); 1240 } 1241 1242 static void i915_oa_stream_destroy(struct i915_perf_stream *stream) 1243 { 1244 struct drm_i915_private *dev_priv = stream->dev_priv; 1245 1246 BUG_ON(stream != dev_priv->perf.oa.exclusive_stream); 1247 1248 /* 1249 * Unset exclusive_stream first, it might be checked while 1250 * disabling the metric set on gen8+. 1251 */ 1252 dev_priv->perf.oa.exclusive_stream = NULL; 1253 1254 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 1255 1256 free_oa_buffer(dev_priv); 1257 1258 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1259 intel_runtime_pm_put(dev_priv); 1260 1261 if (stream->ctx) 1262 oa_put_render_ctx_id(stream); 1263 1264 if (dev_priv->perf.oa.spurious_report_rs.missed) { 1265 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n", 1266 dev_priv->perf.oa.spurious_report_rs.missed); 1267 } 1268 } 1269 1270 static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv) 1271 { 1272 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); 1273 unsigned long flags; 1274 1275 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1276 1277 /* Pre-DevBDW: OABUFFER must be set with counters off, 1278 * before OASTATUS1, but after OASTATUS2 1279 */ 1280 I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */ 1281 dev_priv->perf.oa.oa_buffer.head = gtt_offset; 1282 1283 I915_WRITE(GEN7_OABUFFER, gtt_offset); 1284 1285 I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */ 1286 1287 /* Mark that we need updated tail pointers to read from... */ 1288 dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR; 1289 dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR; 1290 1291 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1292 1293 /* On Haswell we have to track which OASTATUS1 flags we've 1294 * already seen since they can't be cleared while periodic 1295 * sampling is enabled. 1296 */ 1297 dev_priv->perf.oa.gen7_latched_oastatus1 = 0; 1298 1299 /* NB: although the OA buffer will initially be allocated 1300 * zeroed via shmfs (and so this memset is redundant when 1301 * first allocating), we may re-init the OA buffer, either 1302 * when re-enabling a stream or in error/reset paths. 1303 * 1304 * The reason we clear the buffer for each re-init is for the 1305 * sanity check in gen7_append_oa_reports() that looks at the 1306 * report-id field to make sure it's non-zero which relies on 1307 * the assumption that new reports are being written to zeroed 1308 * memory... 1309 */ 1310 memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1311 1312 /* Maybe make ->pollin per-stream state if we support multiple 1313 * concurrent streams in the future. 1314 */ 1315 dev_priv->perf.oa.pollin = false; 1316 } 1317 1318 static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv) 1319 { 1320 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); 1321 unsigned long flags; 1322 1323 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1324 1325 I915_WRITE(GEN8_OASTATUS, 0); 1326 I915_WRITE(GEN8_OAHEADPTR, gtt_offset); 1327 dev_priv->perf.oa.oa_buffer.head = gtt_offset; 1328 1329 I915_WRITE(GEN8_OABUFFER_UDW, 0); 1330 1331 /* 1332 * PRM says: 1333 * 1334 * "This MMIO must be set before the OATAILPTR 1335 * register and after the OAHEADPTR register. This is 1336 * to enable proper functionality of the overflow 1337 * bit." 1338 */ 1339 I915_WRITE(GEN8_OABUFFER, gtt_offset | 1340 OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT); 1341 I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); 1342 1343 /* Mark that we need updated tail pointers to read from... */ 1344 dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR; 1345 dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR; 1346 1347 /* 1348 * Reset state used to recognise context switches, affecting which 1349 * reports we will forward to userspace while filtering for a single 1350 * context. 1351 */ 1352 dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID; 1353 1354 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); 1355 1356 /* 1357 * NB: although the OA buffer will initially be allocated 1358 * zeroed via shmfs (and so this memset is redundant when 1359 * first allocating), we may re-init the OA buffer, either 1360 * when re-enabling a stream or in error/reset paths. 1361 * 1362 * The reason we clear the buffer for each re-init is for the 1363 * sanity check in gen8_append_oa_reports() that looks at the 1364 * reason field to make sure it's non-zero which relies on 1365 * the assumption that new reports are being written to zeroed 1366 * memory... 1367 */ 1368 memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1369 1370 /* 1371 * Maybe make ->pollin per-stream state if we support multiple 1372 * concurrent streams in the future. 1373 */ 1374 dev_priv->perf.oa.pollin = false; 1375 } 1376 1377 static int alloc_oa_buffer(struct drm_i915_private *dev_priv) 1378 { 1379 struct drm_i915_gem_object *bo; 1380 struct i915_vma *vma; 1381 int ret; 1382 1383 if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma)) 1384 return -ENODEV; 1385 1386 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 1387 if (ret) 1388 return ret; 1389 1390 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); 1391 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); 1392 1393 bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE); 1394 if (IS_ERR(bo)) { 1395 DRM_ERROR("Failed to allocate OA buffer\n"); 1396 ret = PTR_ERR(bo); 1397 goto unlock; 1398 } 1399 1400 ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC); 1401 if (ret) 1402 goto err_unref; 1403 1404 /* PreHSW required 512K alignment, HSW requires 16M */ 1405 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); 1406 if (IS_ERR(vma)) { 1407 ret = PTR_ERR(vma); 1408 goto err_unref; 1409 } 1410 dev_priv->perf.oa.oa_buffer.vma = vma; 1411 1412 dev_priv->perf.oa.oa_buffer.vaddr = 1413 i915_gem_object_pin_map(bo, I915_MAP_WB); 1414 if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) { 1415 ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr); 1416 goto err_unpin; 1417 } 1418 1419 dev_priv->perf.oa.ops.init_oa_buffer(dev_priv); 1420 1421 DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n", 1422 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma), 1423 dev_priv->perf.oa.oa_buffer.vaddr); 1424 1425 goto unlock; 1426 1427 err_unpin: 1428 __i915_vma_unpin(vma); 1429 1430 err_unref: 1431 i915_gem_object_put(bo); 1432 1433 dev_priv->perf.oa.oa_buffer.vaddr = NULL; 1434 dev_priv->perf.oa.oa_buffer.vma = NULL; 1435 1436 unlock: 1437 mutex_unlock(&dev_priv->drm.struct_mutex); 1438 return ret; 1439 } 1440 1441 static void config_oa_regs(struct drm_i915_private *dev_priv, 1442 const struct i915_oa_reg *regs, 1443 int n_regs) 1444 { 1445 int i; 1446 1447 for (i = 0; i < n_regs; i++) { 1448 const struct i915_oa_reg *reg = regs + i; 1449 1450 I915_WRITE(reg->addr, reg->value); 1451 } 1452 } 1453 1454 static int hsw_enable_metric_set(struct drm_i915_private *dev_priv) 1455 { 1456 int ret = i915_oa_select_metric_set_hsw(dev_priv); 1457 int i; 1458 1459 if (ret) 1460 return ret; 1461 1462 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) | 1463 GT_NOA_ENABLE)); 1464 1465 /* PRM: 1466 * 1467 * OA unit is using “crclk” for its functionality. When trunk 1468 * level clock gating takes place, OA clock would be gated, 1469 * unable to count the events from non-render clock domain. 1470 * Render clock gating must be disabled when OA is enabled to 1471 * count the events from non-render domain. Unit level clock 1472 * gating for RCS should also be disabled. 1473 */ 1474 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1475 ~GEN7_DOP_CLOCK_GATE_ENABLE)); 1476 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | 1477 GEN6_CSUNIT_CLOCK_GATE_DISABLE)); 1478 1479 for (i = 0; i < dev_priv->perf.oa.n_mux_configs; i++) { 1480 config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs[i], 1481 dev_priv->perf.oa.mux_regs_lens[i]); 1482 } 1483 1484 /* It apparently takes a fairly long time for a new MUX 1485 * configuration to be be applied after these register writes. 1486 * This delay duration was derived empirically based on the 1487 * render_basic config but hopefully it covers the maximum 1488 * configuration latency. 1489 * 1490 * As a fallback, the checks in _append_oa_reports() to skip 1491 * invalid OA reports do also seem to work to discard reports 1492 * generated before this config has completed - albeit not 1493 * silently. 1494 * 1495 * Unfortunately this is essentially a magic number, since we 1496 * don't currently know of a reliable mechanism for predicting 1497 * how long the MUX config will take to apply and besides 1498 * seeing invalid reports we don't know of a reliable way to 1499 * explicitly check that the MUX config has landed. 1500 * 1501 * It's even possible we've miss characterized the underlying 1502 * problem - it just seems like the simplest explanation why 1503 * a delay at this location would mitigate any invalid reports. 1504 */ 1505 usleep_range(15000, 20000); 1506 1507 config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs, 1508 dev_priv->perf.oa.b_counter_regs_len); 1509 1510 return 0; 1511 } 1512 1513 static void hsw_disable_metric_set(struct drm_i915_private *dev_priv) 1514 { 1515 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) & 1516 ~GEN6_CSUNIT_CLOCK_GATE_DISABLE)); 1517 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) | 1518 GEN7_DOP_CLOCK_GATE_ENABLE)); 1519 1520 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & 1521 ~GT_NOA_ENABLE)); 1522 } 1523 1524 /* 1525 * NB: It must always remain pointer safe to run this even if the OA unit 1526 * has been disabled. 1527 * 1528 * It's fine to put out-of-date values into these per-context registers 1529 * in the case that the OA unit has been disabled. 1530 */ 1531 static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, 1532 u32 *reg_state) 1533 { 1534 struct drm_i915_private *dev_priv = ctx->i915; 1535 const struct i915_oa_reg *flex_regs = dev_priv->perf.oa.flex_regs; 1536 int n_flex_regs = dev_priv->perf.oa.flex_regs_len; 1537 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; 1538 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; 1539 /* The MMIO offsets for Flex EU registers aren't contiguous */ 1540 u32 flex_mmio[] = { 1541 i915_mmio_reg_offset(EU_PERF_CNTL0), 1542 i915_mmio_reg_offset(EU_PERF_CNTL1), 1543 i915_mmio_reg_offset(EU_PERF_CNTL2), 1544 i915_mmio_reg_offset(EU_PERF_CNTL3), 1545 i915_mmio_reg_offset(EU_PERF_CNTL4), 1546 i915_mmio_reg_offset(EU_PERF_CNTL5), 1547 i915_mmio_reg_offset(EU_PERF_CNTL6), 1548 }; 1549 int i; 1550 1551 reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL); 1552 reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent << 1553 GEN8_OA_TIMER_PERIOD_SHIFT) | 1554 (dev_priv->perf.oa.periodic ? 1555 GEN8_OA_TIMER_ENABLE : 0) | 1556 GEN8_OA_COUNTER_RESUME; 1557 1558 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) { 1559 u32 state_offset = ctx_flexeu0 + i * 2; 1560 u32 mmio = flex_mmio[i]; 1561 1562 /* 1563 * This arbitrary default will select the 'EU FPU0 Pipeline 1564 * Active' event. In the future it's anticipated that there 1565 * will be an explicit 'No Event' we can select, but not yet... 1566 */ 1567 u32 value = 0; 1568 int j; 1569 1570 for (j = 0; j < n_flex_regs; j++) { 1571 if (i915_mmio_reg_offset(flex_regs[j].addr) == mmio) { 1572 value = flex_regs[j].value; 1573 break; 1574 } 1575 } 1576 1577 reg_state[state_offset] = mmio; 1578 reg_state[state_offset+1] = value; 1579 } 1580 } 1581 1582 /* 1583 * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This 1584 * is only used by the kernel context. 1585 */ 1586 static int gen8_emit_oa_config(struct drm_i915_gem_request *req) 1587 { 1588 struct drm_i915_private *dev_priv = req->i915; 1589 const struct i915_oa_reg *flex_regs = dev_priv->perf.oa.flex_regs; 1590 int n_flex_regs = dev_priv->perf.oa.flex_regs_len; 1591 /* The MMIO offsets for Flex EU registers aren't contiguous */ 1592 u32 flex_mmio[] = { 1593 i915_mmio_reg_offset(EU_PERF_CNTL0), 1594 i915_mmio_reg_offset(EU_PERF_CNTL1), 1595 i915_mmio_reg_offset(EU_PERF_CNTL2), 1596 i915_mmio_reg_offset(EU_PERF_CNTL3), 1597 i915_mmio_reg_offset(EU_PERF_CNTL4), 1598 i915_mmio_reg_offset(EU_PERF_CNTL5), 1599 i915_mmio_reg_offset(EU_PERF_CNTL6), 1600 }; 1601 u32 *cs; 1602 int i; 1603 1604 cs = intel_ring_begin(req, n_flex_regs * 2 + 4); 1605 if (IS_ERR(cs)) 1606 return PTR_ERR(cs); 1607 1608 *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1); 1609 1610 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); 1611 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 1612 (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | 1613 GEN8_OA_COUNTER_RESUME; 1614 1615 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) { 1616 u32 mmio = flex_mmio[i]; 1617 1618 /* 1619 * This arbitrary default will select the 'EU FPU0 Pipeline 1620 * Active' event. In the future it's anticipated that there 1621 * will be an explicit 'No Event' we can select, but not 1622 * yet... 1623 */ 1624 u32 value = 0; 1625 int j; 1626 1627 for (j = 0; j < n_flex_regs; j++) { 1628 if (i915_mmio_reg_offset(flex_regs[j].addr) == mmio) { 1629 value = flex_regs[j].value; 1630 break; 1631 } 1632 } 1633 1634 *cs++ = mmio; 1635 *cs++ = value; 1636 } 1637 1638 *cs++ = MI_NOOP; 1639 intel_ring_advance(req, cs); 1640 1641 return 0; 1642 } 1643 1644 static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv) 1645 { 1646 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1647 struct i915_gem_timeline *timeline; 1648 struct drm_i915_gem_request *req; 1649 int ret; 1650 1651 lockdep_assert_held(&dev_priv->drm.struct_mutex); 1652 1653 i915_gem_retire_requests(dev_priv); 1654 1655 req = i915_gem_request_alloc(engine, dev_priv->kernel_context); 1656 if (IS_ERR(req)) 1657 return PTR_ERR(req); 1658 1659 ret = gen8_emit_oa_config(req); 1660 if (ret) { 1661 i915_add_request(req); 1662 return ret; 1663 } 1664 1665 /* Queue this switch after all other activity */ 1666 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { 1667 struct drm_i915_gem_request *prev; 1668 struct intel_timeline *tl; 1669 1670 tl = &timeline->engine[engine->id]; 1671 prev = i915_gem_active_raw(&tl->last_request, 1672 &dev_priv->drm.struct_mutex); 1673 if (prev) 1674 i915_sw_fence_await_sw_fence_gfp(&req->submit, 1675 &prev->submit, 1676 GFP_KERNEL); 1677 } 1678 1679 ret = i915_switch_context(req); 1680 i915_add_request(req); 1681 1682 return ret; 1683 } 1684 1685 /* 1686 * Manages updating the per-context aspects of the OA stream 1687 * configuration across all contexts. 1688 * 1689 * The awkward consideration here is that OACTXCONTROL controls the 1690 * exponent for periodic sampling which is primarily used for system 1691 * wide profiling where we'd like a consistent sampling period even in 1692 * the face of context switches. 1693 * 1694 * Our approach of updating the register state context (as opposed to 1695 * say using a workaround batch buffer) ensures that the hardware 1696 * won't automatically reload an out-of-date timer exponent even 1697 * transiently before a WA BB could be parsed. 1698 * 1699 * This function needs to: 1700 * - Ensure the currently running context's per-context OA state is 1701 * updated 1702 * - Ensure that all existing contexts will have the correct per-context 1703 * OA state if they are scheduled for use. 1704 * - Ensure any new contexts will be initialized with the correct 1705 * per-context OA state. 1706 * 1707 * Note: it's only the RCS/Render context that has any OA state. 1708 */ 1709 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, 1710 bool interruptible) 1711 { 1712 struct i915_gem_context *ctx; 1713 int ret; 1714 unsigned int wait_flags = I915_WAIT_LOCKED; 1715 1716 if (interruptible) { 1717 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 1718 if (ret) 1719 return ret; 1720 1721 wait_flags |= I915_WAIT_INTERRUPTIBLE; 1722 } else { 1723 mutex_lock(&dev_priv->drm.struct_mutex); 1724 } 1725 1726 /* Switch away from any user context. */ 1727 ret = gen8_switch_to_updated_kernel_context(dev_priv); 1728 if (ret) 1729 goto out; 1730 1731 /* 1732 * The OA register config is setup through the context image. This image 1733 * might be written to by the GPU on context switch (in particular on 1734 * lite-restore). This means we can't safely update a context's image, 1735 * if this context is scheduled/submitted to run on the GPU. 1736 * 1737 * We could emit the OA register config through the batch buffer but 1738 * this might leave small interval of time where the OA unit is 1739 * configured at an invalid sampling period. 1740 * 1741 * So far the best way to work around this issue seems to be draining 1742 * the GPU from any submitted work. 1743 */ 1744 ret = i915_gem_wait_for_idle(dev_priv, wait_flags); 1745 if (ret) 1746 goto out; 1747 1748 /* Update all contexts now that we've stalled the submission. */ 1749 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1750 struct intel_context *ce = &ctx->engine[RCS]; 1751 u32 *regs; 1752 1753 /* OA settings will be set upon first use */ 1754 if (!ce->state) 1755 continue; 1756 1757 regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); 1758 if (IS_ERR(regs)) { 1759 ret = PTR_ERR(regs); 1760 goto out; 1761 } 1762 1763 ce->state->obj->mm.dirty = true; 1764 regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); 1765 1766 gen8_update_reg_state_unlocked(ctx, regs); 1767 1768 i915_gem_object_unpin_map(ce->state->obj); 1769 } 1770 1771 out: 1772 mutex_unlock(&dev_priv->drm.struct_mutex); 1773 1774 return ret; 1775 } 1776 1777 static int gen8_enable_metric_set(struct drm_i915_private *dev_priv) 1778 { 1779 int ret = dev_priv->perf.oa.ops.select_metric_set(dev_priv); 1780 int i; 1781 1782 if (ret) 1783 return ret; 1784 1785 /* 1786 * We disable slice/unslice clock ratio change reports on SKL since 1787 * they are too noisy. The HW generates a lot of redundant reports 1788 * where the ratio hasn't really changed causing a lot of redundant 1789 * work to processes and increasing the chances we'll hit buffer 1790 * overruns. 1791 * 1792 * Although we don't currently use the 'disable overrun' OABUFFER 1793 * feature it's worth noting that clock ratio reports have to be 1794 * disabled before considering to use that feature since the HW doesn't 1795 * correctly block these reports. 1796 * 1797 * Currently none of the high-level metrics we have depend on knowing 1798 * this ratio to normalize. 1799 * 1800 * Note: This register is not power context saved and restored, but 1801 * that's OK considering that we disable RC6 while the OA unit is 1802 * enabled. 1803 * 1804 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to 1805 * be read back from automatically triggered reports, as part of the 1806 * RPT_ID field. 1807 */ 1808 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || 1809 IS_KABYLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { 1810 I915_WRITE(GEN8_OA_DEBUG, 1811 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 1812 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); 1813 } 1814 1815 /* 1816 * Update all contexts prior writing the mux configurations as we need 1817 * to make sure all slices/subslices are ON before writing to NOA 1818 * registers. 1819 */ 1820 ret = gen8_configure_all_contexts(dev_priv, true); 1821 if (ret) 1822 return ret; 1823 1824 I915_WRITE(GDT_CHICKEN_BITS, 0xA0); 1825 for (i = 0; i < dev_priv->perf.oa.n_mux_configs; i++) { 1826 config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs[i], 1827 dev_priv->perf.oa.mux_regs_lens[i]); 1828 } 1829 I915_WRITE(GDT_CHICKEN_BITS, 0x80); 1830 1831 config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs, 1832 dev_priv->perf.oa.b_counter_regs_len); 1833 1834 return 0; 1835 } 1836 1837 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) 1838 { 1839 /* Reset all contexts' slices/subslices configurations. */ 1840 gen8_configure_all_contexts(dev_priv, false); 1841 } 1842 1843 static void gen7_oa_enable(struct drm_i915_private *dev_priv) 1844 { 1845 /* 1846 * Reset buf pointers so we don't forward reports from before now. 1847 * 1848 * Think carefully if considering trying to avoid this, since it 1849 * also ensures status flags and the buffer itself are cleared 1850 * in error paths, and we have checks for invalid reports based 1851 * on the assumption that certain fields are written to zeroed 1852 * memory which this helps maintains. 1853 */ 1854 gen7_init_oa_buffer(dev_priv); 1855 1856 if (dev_priv->perf.oa.exclusive_stream->enabled) { 1857 struct i915_gem_context *ctx = 1858 dev_priv->perf.oa.exclusive_stream->ctx; 1859 u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; 1860 1861 bool periodic = dev_priv->perf.oa.periodic; 1862 u32 period_exponent = dev_priv->perf.oa.period_exponent; 1863 u32 report_format = dev_priv->perf.oa.oa_buffer.format; 1864 1865 I915_WRITE(GEN7_OACONTROL, 1866 (ctx_id & GEN7_OACONTROL_CTX_MASK) | 1867 (period_exponent << 1868 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | 1869 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | 1870 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | 1871 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | 1872 GEN7_OACONTROL_ENABLE); 1873 } else 1874 I915_WRITE(GEN7_OACONTROL, 0); 1875 } 1876 1877 static void gen8_oa_enable(struct drm_i915_private *dev_priv) 1878 { 1879 u32 report_format = dev_priv->perf.oa.oa_buffer.format; 1880 1881 /* 1882 * Reset buf pointers so we don't forward reports from before now. 1883 * 1884 * Think carefully if considering trying to avoid this, since it 1885 * also ensures status flags and the buffer itself are cleared 1886 * in error paths, and we have checks for invalid reports based 1887 * on the assumption that certain fields are written to zeroed 1888 * memory which this helps maintains. 1889 */ 1890 gen8_init_oa_buffer(dev_priv); 1891 1892 /* 1893 * Note: we don't rely on the hardware to perform single context 1894 * filtering and instead filter on the cpu based on the context-id 1895 * field of reports 1896 */ 1897 I915_WRITE(GEN8_OACONTROL, (report_format << 1898 GEN8_OA_REPORT_FORMAT_SHIFT) | 1899 GEN8_OA_COUNTER_ENABLE); 1900 } 1901 1902 /** 1903 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream 1904 * @stream: An i915 perf stream opened for OA metrics 1905 * 1906 * [Re]enables hardware periodic sampling according to the period configured 1907 * when opening the stream. This also starts a hrtimer that will periodically 1908 * check for data in the circular OA buffer for notifying userspace (e.g. 1909 * during a read() or poll()). 1910 */ 1911 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 1912 { 1913 struct drm_i915_private *dev_priv = stream->dev_priv; 1914 1915 dev_priv->perf.oa.ops.oa_enable(dev_priv); 1916 1917 if (dev_priv->perf.oa.periodic) 1918 hrtimer_start(&dev_priv->perf.oa.poll_check_timer, 1919 ns_to_ktime(POLL_PERIOD), 1920 HRTIMER_MODE_REL_PINNED); 1921 } 1922 1923 static void gen7_oa_disable(struct drm_i915_private *dev_priv) 1924 { 1925 I915_WRITE(GEN7_OACONTROL, 0); 1926 } 1927 1928 static void gen8_oa_disable(struct drm_i915_private *dev_priv) 1929 { 1930 I915_WRITE(GEN8_OACONTROL, 0); 1931 } 1932 1933 /** 1934 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 1935 * @stream: An i915 perf stream opened for OA metrics 1936 * 1937 * Stops the OA unit from periodically writing counter reports into the 1938 * circular OA buffer. This also stops the hrtimer that periodically checks for 1939 * data in the circular OA buffer, for notifying userspace. 1940 */ 1941 static void i915_oa_stream_disable(struct i915_perf_stream *stream) 1942 { 1943 struct drm_i915_private *dev_priv = stream->dev_priv; 1944 1945 dev_priv->perf.oa.ops.oa_disable(dev_priv); 1946 1947 if (dev_priv->perf.oa.periodic) 1948 hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); 1949 } 1950 1951 static const struct i915_perf_stream_ops i915_oa_stream_ops = { 1952 .destroy = i915_oa_stream_destroy, 1953 .enable = i915_oa_stream_enable, 1954 .disable = i915_oa_stream_disable, 1955 .wait_unlocked = i915_oa_wait_unlocked, 1956 .poll_wait = i915_oa_poll_wait, 1957 .read = i915_oa_read, 1958 }; 1959 1960 /** 1961 * i915_oa_stream_init - validate combined props for OA stream and init 1962 * @stream: An i915 perf stream 1963 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 1964 * @props: The property state that configures stream (individually validated) 1965 * 1966 * While read_properties_unlocked() validates properties in isolation it 1967 * doesn't ensure that the combination necessarily makes sense. 1968 * 1969 * At this point it has been determined that userspace wants a stream of 1970 * OA metrics, but still we need to further validate the combined 1971 * properties are OK. 1972 * 1973 * If the configuration makes sense then we can allocate memory for 1974 * a circular OA buffer and apply the requested metric set configuration. 1975 * 1976 * Returns: zero on success or a negative error code. 1977 */ 1978 static int i915_oa_stream_init(struct i915_perf_stream *stream, 1979 struct drm_i915_perf_open_param *param, 1980 struct perf_open_properties *props) 1981 { 1982 struct drm_i915_private *dev_priv = stream->dev_priv; 1983 int format_size; 1984 int ret; 1985 1986 /* If the sysfs metrics/ directory wasn't registered for some 1987 * reason then don't let userspace try their luck with config 1988 * IDs 1989 */ 1990 if (!dev_priv->perf.metrics_kobj) { 1991 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 1992 return -EINVAL; 1993 } 1994 1995 if (!(props->sample_flags & SAMPLE_OA_REPORT)) { 1996 DRM_DEBUG("Only OA report sampling supported\n"); 1997 return -EINVAL; 1998 } 1999 2000 if (!dev_priv->perf.oa.ops.init_oa_buffer) { 2001 DRM_DEBUG("OA unit not supported\n"); 2002 return -ENODEV; 2003 } 2004 2005 /* To avoid the complexity of having to accurately filter 2006 * counter reports and marshal to the appropriate client 2007 * we currently only allow exclusive access 2008 */ 2009 if (dev_priv->perf.oa.exclusive_stream) { 2010 DRM_DEBUG("OA unit already in use\n"); 2011 return -EBUSY; 2012 } 2013 2014 if (!props->metrics_set) { 2015 DRM_DEBUG("OA metric set not specified\n"); 2016 return -EINVAL; 2017 } 2018 2019 if (!props->oa_format) { 2020 DRM_DEBUG("OA report format not specified\n"); 2021 return -EINVAL; 2022 } 2023 2024 /* We set up some ratelimit state to potentially throttle any _NOTES 2025 * about spurious, invalid OA reports which we don't forward to 2026 * userspace. 2027 * 2028 * The initialization is associated with opening the stream (not driver 2029 * init) considering we print a _NOTE about any throttling when closing 2030 * the stream instead of waiting until driver _fini which no one would 2031 * ever see. 2032 * 2033 * Using the same limiting factors as printk_ratelimit() 2034 */ 2035 ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs, 2036 5 * HZ, 10); 2037 /* Since we use a DRM_NOTE for spurious reports it would be 2038 * inconsistent to let __ratelimit() automatically print a warning for 2039 * throttling. 2040 */ 2041 ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs, 2042 RATELIMIT_MSG_ON_RELEASE); 2043 2044 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 2045 2046 format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size; 2047 2048 stream->sample_flags |= SAMPLE_OA_REPORT; 2049 stream->sample_size += format_size; 2050 2051 dev_priv->perf.oa.oa_buffer.format_size = format_size; 2052 if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0)) 2053 return -EINVAL; 2054 2055 dev_priv->perf.oa.oa_buffer.format = 2056 dev_priv->perf.oa.oa_formats[props->oa_format].format; 2057 2058 dev_priv->perf.oa.metrics_set = props->metrics_set; 2059 2060 dev_priv->perf.oa.periodic = props->oa_periodic; 2061 if (dev_priv->perf.oa.periodic) 2062 dev_priv->perf.oa.period_exponent = props->oa_period_exponent; 2063 2064 if (stream->ctx) { 2065 ret = oa_get_render_ctx_id(stream); 2066 if (ret) 2067 return ret; 2068 } 2069 2070 /* PRM - observability performance counters: 2071 * 2072 * OACONTROL, performance counter enable, note: 2073 * 2074 * "When this bit is set, in order to have coherent counts, 2075 * RC6 power state and trunk clock gating must be disabled. 2076 * This can be achieved by programming MMIO registers as 2077 * 0xA094=0 and 0xA090[31]=1" 2078 * 2079 * In our case we are expecting that taking pm + FORCEWAKE 2080 * references will effectively disable RC6. 2081 */ 2082 intel_runtime_pm_get(dev_priv); 2083 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2084 2085 ret = alloc_oa_buffer(dev_priv); 2086 if (ret) 2087 goto err_oa_buf_alloc; 2088 2089 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv); 2090 if (ret) 2091 goto err_enable; 2092 2093 stream->ops = &i915_oa_stream_ops; 2094 2095 dev_priv->perf.oa.exclusive_stream = stream; 2096 2097 return 0; 2098 2099 err_enable: 2100 free_oa_buffer(dev_priv); 2101 2102 err_oa_buf_alloc: 2103 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2104 intel_runtime_pm_put(dev_priv); 2105 if (stream->ctx) 2106 oa_put_render_ctx_id(stream); 2107 2108 return ret; 2109 } 2110 2111 void i915_oa_init_reg_state(struct intel_engine_cs *engine, 2112 struct i915_gem_context *ctx, 2113 u32 *reg_state) 2114 { 2115 struct drm_i915_private *dev_priv = engine->i915; 2116 2117 if (engine->id != RCS) 2118 return; 2119 2120 if (!dev_priv->perf.initialized) 2121 return; 2122 2123 gen8_update_reg_state_unlocked(ctx, reg_state); 2124 } 2125 2126 /** 2127 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation 2128 * @stream: An i915 perf stream 2129 * @file: An i915 perf stream file 2130 * @buf: destination buffer given by userspace 2131 * @count: the number of bytes userspace wants to read 2132 * @ppos: (inout) file seek position (unused) 2133 * 2134 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to 2135 * ensure that if we've successfully copied any data then reporting that takes 2136 * precedence over any internal error status, so the data isn't lost. 2137 * 2138 * For example ret will be -ENOSPC whenever there is more buffered data than 2139 * can be copied to userspace, but that's only interesting if we weren't able 2140 * to copy some data because it implies the userspace buffer is too small to 2141 * receive a single record (and we never split records). 2142 * 2143 * Another case with ret == -EFAULT is more of a grey area since it would seem 2144 * like bad form for userspace to ask us to overrun its buffer, but the user 2145 * knows best: 2146 * 2147 * http://yarchive.net/comp/linux/partial_reads_writes.html 2148 * 2149 * Returns: The number of bytes copied or a negative error code on failure. 2150 */ 2151 static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, 2152 struct file *file, 2153 char __user *buf, 2154 size_t count, 2155 loff_t *ppos) 2156 { 2157 /* Note we keep the offset (aka bytes read) separate from any 2158 * error status so that the final check for whether we return 2159 * the bytes read with a higher precedence than any error (see 2160 * comment below) doesn't need to be handled/duplicated in 2161 * stream->ops->read() implementations. 2162 */ 2163 size_t offset = 0; 2164 int ret = stream->ops->read(stream, buf, count, &offset); 2165 2166 return offset ?: (ret ?: -EAGAIN); 2167 } 2168 2169 /** 2170 * i915_perf_read - handles read() FOP for i915 perf stream FDs 2171 * @file: An i915 perf stream file 2172 * @buf: destination buffer given by userspace 2173 * @count: the number of bytes userspace wants to read 2174 * @ppos: (inout) file seek position (unused) 2175 * 2176 * The entry point for handling a read() on a stream file descriptor from 2177 * userspace. Most of the work is left to the i915_perf_read_locked() and 2178 * &i915_perf_stream_ops->read but to save having stream implementations (of 2179 * which we might have multiple later) we handle blocking read here. 2180 * 2181 * We can also consistently treat trying to read from a disabled stream 2182 * as an IO error so implementations can assume the stream is enabled 2183 * while reading. 2184 * 2185 * Returns: The number of bytes copied or a negative error code on failure. 2186 */ 2187 static ssize_t i915_perf_read(struct file *file, 2188 char __user *buf, 2189 size_t count, 2190 loff_t *ppos) 2191 { 2192 struct i915_perf_stream *stream = file->private_data; 2193 struct drm_i915_private *dev_priv = stream->dev_priv; 2194 ssize_t ret; 2195 2196 /* To ensure it's handled consistently we simply treat all reads of a 2197 * disabled stream as an error. In particular it might otherwise lead 2198 * to a deadlock for blocking file descriptors... 2199 */ 2200 if (!stream->enabled) 2201 return -EIO; 2202 2203 if (!(file->f_flags & O_NONBLOCK)) { 2204 /* There's the small chance of false positives from 2205 * stream->ops->wait_unlocked. 2206 * 2207 * E.g. with single context filtering since we only wait until 2208 * oabuffer has >= 1 report we don't immediately know whether 2209 * any reports really belong to the current context 2210 */ 2211 do { 2212 ret = stream->ops->wait_unlocked(stream); 2213 if (ret) 2214 return ret; 2215 2216 mutex_lock(&dev_priv->perf.lock); 2217 ret = i915_perf_read_locked(stream, file, 2218 buf, count, ppos); 2219 mutex_unlock(&dev_priv->perf.lock); 2220 } while (ret == -EAGAIN); 2221 } else { 2222 mutex_lock(&dev_priv->perf.lock); 2223 ret = i915_perf_read_locked(stream, file, buf, count, ppos); 2224 mutex_unlock(&dev_priv->perf.lock); 2225 } 2226 2227 /* We allow the poll checking to sometimes report false positive POLLIN 2228 * events where we might actually report EAGAIN on read() if there's 2229 * not really any data available. In this situation though we don't 2230 * want to enter a busy loop between poll() reporting a POLLIN event 2231 * and read() returning -EAGAIN. Clearing the oa.pollin state here 2232 * effectively ensures we back off until the next hrtimer callback 2233 * before reporting another POLLIN event. 2234 */ 2235 if (ret >= 0 || ret == -EAGAIN) { 2236 /* Maybe make ->pollin per-stream state if we support multiple 2237 * concurrent streams in the future. 2238 */ 2239 dev_priv->perf.oa.pollin = false; 2240 } 2241 2242 return ret; 2243 } 2244 2245 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 2246 { 2247 struct drm_i915_private *dev_priv = 2248 container_of(hrtimer, typeof(*dev_priv), 2249 perf.oa.poll_check_timer); 2250 2251 if (oa_buffer_check_unlocked(dev_priv)) { 2252 dev_priv->perf.oa.pollin = true; 2253 wake_up(&dev_priv->perf.oa.poll_wq); 2254 } 2255 2256 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); 2257 2258 return HRTIMER_RESTART; 2259 } 2260 2261 /** 2262 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 2263 * @dev_priv: i915 device instance 2264 * @stream: An i915 perf stream 2265 * @file: An i915 perf stream file 2266 * @wait: poll() state table 2267 * 2268 * For handling userspace polling on an i915 perf stream, this calls through to 2269 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 2270 * will be woken for new stream data. 2271 * 2272 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2273 * with any non-file-operation driver hooks. 2274 * 2275 * Returns: any poll events that are ready without sleeping 2276 */ 2277 static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv, 2278 struct i915_perf_stream *stream, 2279 struct file *file, 2280 poll_table *wait) 2281 { 2282 unsigned int events = 0; 2283 2284 stream->ops->poll_wait(stream, file, wait); 2285 2286 /* Note: we don't explicitly check whether there's something to read 2287 * here since this path may be very hot depending on what else 2288 * userspace is polling, or on the timeout in use. We rely solely on 2289 * the hrtimer/oa_poll_check_timer_cb to notify us when there are 2290 * samples to read. 2291 */ 2292 if (dev_priv->perf.oa.pollin) 2293 events |= POLLIN; 2294 2295 return events; 2296 } 2297 2298 /** 2299 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream 2300 * @file: An i915 perf stream file 2301 * @wait: poll() state table 2302 * 2303 * For handling userspace polling on an i915 perf stream, this ensures 2304 * poll_wait() gets called with a wait queue that will be woken for new stream 2305 * data. 2306 * 2307 * Note: Implementation deferred to i915_perf_poll_locked() 2308 * 2309 * Returns: any poll events that are ready without sleeping 2310 */ 2311 static unsigned int i915_perf_poll(struct file *file, poll_table *wait) 2312 { 2313 struct i915_perf_stream *stream = file->private_data; 2314 struct drm_i915_private *dev_priv = stream->dev_priv; 2315 int ret; 2316 2317 mutex_lock(&dev_priv->perf.lock); 2318 ret = i915_perf_poll_locked(dev_priv, stream, file, wait); 2319 mutex_unlock(&dev_priv->perf.lock); 2320 2321 return ret; 2322 } 2323 2324 /** 2325 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl 2326 * @stream: A disabled i915 perf stream 2327 * 2328 * [Re]enables the associated capture of data for this stream. 2329 * 2330 * If a stream was previously enabled then there's currently no intention 2331 * to provide userspace any guarantee about the preservation of previously 2332 * buffered data. 2333 */ 2334 static void i915_perf_enable_locked(struct i915_perf_stream *stream) 2335 { 2336 if (stream->enabled) 2337 return; 2338 2339 /* Allow stream->ops->enable() to refer to this */ 2340 stream->enabled = true; 2341 2342 if (stream->ops->enable) 2343 stream->ops->enable(stream); 2344 } 2345 2346 /** 2347 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl 2348 * @stream: An enabled i915 perf stream 2349 * 2350 * Disables the associated capture of data for this stream. 2351 * 2352 * The intention is that disabling an re-enabling a stream will ideally be 2353 * cheaper than destroying and re-opening a stream with the same configuration, 2354 * though there are no formal guarantees about what state or buffered data 2355 * must be retained between disabling and re-enabling a stream. 2356 * 2357 * Note: while a stream is disabled it's considered an error for userspace 2358 * to attempt to read from the stream (-EIO). 2359 */ 2360 static void i915_perf_disable_locked(struct i915_perf_stream *stream) 2361 { 2362 if (!stream->enabled) 2363 return; 2364 2365 /* Allow stream->ops->disable() to refer to this */ 2366 stream->enabled = false; 2367 2368 if (stream->ops->disable) 2369 stream->ops->disable(stream); 2370 } 2371 2372 /** 2373 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 2374 * @stream: An i915 perf stream 2375 * @cmd: the ioctl request 2376 * @arg: the ioctl data 2377 * 2378 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2379 * with any non-file-operation driver hooks. 2380 * 2381 * Returns: zero on success or a negative error code. Returns -EINVAL for 2382 * an unknown ioctl request. 2383 */ 2384 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, 2385 unsigned int cmd, 2386 unsigned long arg) 2387 { 2388 switch (cmd) { 2389 case I915_PERF_IOCTL_ENABLE: 2390 i915_perf_enable_locked(stream); 2391 return 0; 2392 case I915_PERF_IOCTL_DISABLE: 2393 i915_perf_disable_locked(stream); 2394 return 0; 2395 } 2396 2397 return -EINVAL; 2398 } 2399 2400 /** 2401 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 2402 * @file: An i915 perf stream file 2403 * @cmd: the ioctl request 2404 * @arg: the ioctl data 2405 * 2406 * Implementation deferred to i915_perf_ioctl_locked(). 2407 * 2408 * Returns: zero on success or a negative error code. Returns -EINVAL for 2409 * an unknown ioctl request. 2410 */ 2411 static long i915_perf_ioctl(struct file *file, 2412 unsigned int cmd, 2413 unsigned long arg) 2414 { 2415 struct i915_perf_stream *stream = file->private_data; 2416 struct drm_i915_private *dev_priv = stream->dev_priv; 2417 long ret; 2418 2419 mutex_lock(&dev_priv->perf.lock); 2420 ret = i915_perf_ioctl_locked(stream, cmd, arg); 2421 mutex_unlock(&dev_priv->perf.lock); 2422 2423 return ret; 2424 } 2425 2426 /** 2427 * i915_perf_destroy_locked - destroy an i915 perf stream 2428 * @stream: An i915 perf stream 2429 * 2430 * Frees all resources associated with the given i915 perf @stream, disabling 2431 * any associated data capture in the process. 2432 * 2433 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2434 * with any non-file-operation driver hooks. 2435 */ 2436 static void i915_perf_destroy_locked(struct i915_perf_stream *stream) 2437 { 2438 if (stream->enabled) 2439 i915_perf_disable_locked(stream); 2440 2441 if (stream->ops->destroy) 2442 stream->ops->destroy(stream); 2443 2444 list_del(&stream->link); 2445 2446 if (stream->ctx) 2447 i915_gem_context_put_unlocked(stream->ctx); 2448 2449 kfree(stream); 2450 } 2451 2452 /** 2453 * i915_perf_release - handles userspace close() of a stream file 2454 * @inode: anonymous inode associated with file 2455 * @file: An i915 perf stream file 2456 * 2457 * Cleans up any resources associated with an open i915 perf stream file. 2458 * 2459 * NB: close() can't really fail from the userspace point of view. 2460 * 2461 * Returns: zero on success or a negative error code. 2462 */ 2463 static int i915_perf_release(struct inode *inode, struct file *file) 2464 { 2465 struct i915_perf_stream *stream = file->private_data; 2466 struct drm_i915_private *dev_priv = stream->dev_priv; 2467 2468 mutex_lock(&dev_priv->perf.lock); 2469 i915_perf_destroy_locked(stream); 2470 mutex_unlock(&dev_priv->perf.lock); 2471 2472 return 0; 2473 } 2474 2475 2476 static const struct file_operations fops = { 2477 .owner = THIS_MODULE, 2478 .llseek = no_llseek, 2479 .release = i915_perf_release, 2480 .poll = i915_perf_poll, 2481 .read = i915_perf_read, 2482 .unlocked_ioctl = i915_perf_ioctl, 2483 }; 2484 2485 2486 static struct i915_gem_context * 2487 lookup_context(struct drm_i915_private *dev_priv, 2488 struct drm_i915_file_private *file_priv, 2489 u32 ctx_user_handle) 2490 { 2491 struct i915_gem_context *ctx; 2492 int ret; 2493 2494 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 2495 if (ret) 2496 return ERR_PTR(ret); 2497 2498 ctx = i915_gem_context_lookup(file_priv, ctx_user_handle); 2499 if (!IS_ERR(ctx)) 2500 i915_gem_context_get(ctx); 2501 2502 mutex_unlock(&dev_priv->drm.struct_mutex); 2503 2504 return ctx; 2505 } 2506 2507 /** 2508 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD 2509 * @dev_priv: i915 device instance 2510 * @param: The open parameters passed to 'DRM_I915_PERF_OPEN` 2511 * @props: individually validated u64 property value pairs 2512 * @file: drm file 2513 * 2514 * See i915_perf_ioctl_open() for interface details. 2515 * 2516 * Implements further stream config validation and stream initialization on 2517 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex 2518 * taken to serialize with any non-file-operation driver hooks. 2519 * 2520 * Note: at this point the @props have only been validated in isolation and 2521 * it's still necessary to validate that the combination of properties makes 2522 * sense. 2523 * 2524 * In the case where userspace is interested in OA unit metrics then further 2525 * config validation and stream initialization details will be handled by 2526 * i915_oa_stream_init(). The code here should only validate config state that 2527 * will be relevant to all stream types / backends. 2528 * 2529 * Returns: zero on success or a negative error code. 2530 */ 2531 static int 2532 i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, 2533 struct drm_i915_perf_open_param *param, 2534 struct perf_open_properties *props, 2535 struct drm_file *file) 2536 { 2537 struct i915_gem_context *specific_ctx = NULL; 2538 struct i915_perf_stream *stream = NULL; 2539 unsigned long f_flags = 0; 2540 bool privileged_op = true; 2541 int stream_fd; 2542 int ret; 2543 2544 if (props->single_context) { 2545 u32 ctx_handle = props->ctx_handle; 2546 struct drm_i915_file_private *file_priv = file->driver_priv; 2547 2548 specific_ctx = lookup_context(dev_priv, file_priv, ctx_handle); 2549 if (IS_ERR(specific_ctx)) { 2550 ret = PTR_ERR(specific_ctx); 2551 if (ret != -EINTR) 2552 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n", 2553 ctx_handle); 2554 goto err; 2555 } 2556 } 2557 2558 /* 2559 * On Haswell the OA unit supports clock gating off for a specific 2560 * context and in this mode there's no visibility of metrics for the 2561 * rest of the system, which we consider acceptable for a 2562 * non-privileged client. 2563 * 2564 * For Gen8+ the OA unit no longer supports clock gating off for a 2565 * specific context and the kernel can't securely stop the counters 2566 * from updating as system-wide / global values. Even though we can 2567 * filter reports based on the included context ID we can't block 2568 * clients from seeing the raw / global counter values via 2569 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to 2570 * enable the OA unit by default. 2571 */ 2572 if (IS_HASWELL(dev_priv) && specific_ctx) 2573 privileged_op = false; 2574 2575 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option 2576 * we check a dev.i915.perf_stream_paranoid sysctl option 2577 * to determine if it's ok to access system wide OA counters 2578 * without CAP_SYS_ADMIN privileges. 2579 */ 2580 if (privileged_op && 2581 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { 2582 DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n"); 2583 ret = -EACCES; 2584 goto err_ctx; 2585 } 2586 2587 stream = kzalloc(sizeof(*stream), GFP_KERNEL); 2588 if (!stream) { 2589 ret = -ENOMEM; 2590 goto err_ctx; 2591 } 2592 2593 stream->dev_priv = dev_priv; 2594 stream->ctx = specific_ctx; 2595 2596 ret = i915_oa_stream_init(stream, param, props); 2597 if (ret) 2598 goto err_alloc; 2599 2600 /* we avoid simply assigning stream->sample_flags = props->sample_flags 2601 * to have _stream_init check the combination of sample flags more 2602 * thoroughly, but still this is the expected result at this point. 2603 */ 2604 if (WARN_ON(stream->sample_flags != props->sample_flags)) { 2605 ret = -ENODEV; 2606 goto err_flags; 2607 } 2608 2609 list_add(&stream->link, &dev_priv->perf.streams); 2610 2611 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC) 2612 f_flags |= O_CLOEXEC; 2613 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK) 2614 f_flags |= O_NONBLOCK; 2615 2616 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); 2617 if (stream_fd < 0) { 2618 ret = stream_fd; 2619 goto err_open; 2620 } 2621 2622 if (!(param->flags & I915_PERF_FLAG_DISABLED)) 2623 i915_perf_enable_locked(stream); 2624 2625 return stream_fd; 2626 2627 err_open: 2628 list_del(&stream->link); 2629 err_flags: 2630 if (stream->ops->destroy) 2631 stream->ops->destroy(stream); 2632 err_alloc: 2633 kfree(stream); 2634 err_ctx: 2635 if (specific_ctx) 2636 i915_gem_context_put_unlocked(specific_ctx); 2637 err: 2638 return ret; 2639 } 2640 2641 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) 2642 { 2643 return div_u64(1000000000ULL * (2ULL << exponent), 2644 dev_priv->perf.oa.timestamp_frequency); 2645 } 2646 2647 /** 2648 * read_properties_unlocked - validate + copy userspace stream open properties 2649 * @dev_priv: i915 device instance 2650 * @uprops: The array of u64 key value pairs given by userspace 2651 * @n_props: The number of key value pairs expected in @uprops 2652 * @props: The stream configuration built up while validating properties 2653 * 2654 * Note this function only validates properties in isolation it doesn't 2655 * validate that the combination of properties makes sense or that all 2656 * properties necessary for a particular kind of stream have been set. 2657 * 2658 * Note that there currently aren't any ordering requirements for properties so 2659 * we shouldn't validate or assume anything about ordering here. This doesn't 2660 * rule out defining new properties with ordering requirements in the future. 2661 */ 2662 static int read_properties_unlocked(struct drm_i915_private *dev_priv, 2663 u64 __user *uprops, 2664 u32 n_props, 2665 struct perf_open_properties *props) 2666 { 2667 u64 __user *uprop = uprops; 2668 int i; 2669 2670 memset(props, 0, sizeof(struct perf_open_properties)); 2671 2672 if (!n_props) { 2673 DRM_DEBUG("No i915 perf properties given\n"); 2674 return -EINVAL; 2675 } 2676 2677 /* Considering that ID = 0 is reserved and assuming that we don't 2678 * (currently) expect any configurations to ever specify duplicate 2679 * values for a particular property ID then the last _PROP_MAX value is 2680 * one greater than the maximum number of properties we expect to get 2681 * from userspace. 2682 */ 2683 if (n_props >= DRM_I915_PERF_PROP_MAX) { 2684 DRM_DEBUG("More i915 perf properties specified than exist\n"); 2685 return -EINVAL; 2686 } 2687 2688 for (i = 0; i < n_props; i++) { 2689 u64 oa_period, oa_freq_hz; 2690 u64 id, value; 2691 int ret; 2692 2693 ret = get_user(id, uprop); 2694 if (ret) 2695 return ret; 2696 2697 ret = get_user(value, uprop + 1); 2698 if (ret) 2699 return ret; 2700 2701 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { 2702 DRM_DEBUG("Unknown i915 perf property ID\n"); 2703 return -EINVAL; 2704 } 2705 2706 switch ((enum drm_i915_perf_property_id)id) { 2707 case DRM_I915_PERF_PROP_CTX_HANDLE: 2708 props->single_context = 1; 2709 props->ctx_handle = value; 2710 break; 2711 case DRM_I915_PERF_PROP_SAMPLE_OA: 2712 props->sample_flags |= SAMPLE_OA_REPORT; 2713 break; 2714 case DRM_I915_PERF_PROP_OA_METRICS_SET: 2715 if (value == 0 || 2716 value > dev_priv->perf.oa.n_builtin_sets) { 2717 DRM_DEBUG("Unknown OA metric set ID\n"); 2718 return -EINVAL; 2719 } 2720 props->metrics_set = value; 2721 break; 2722 case DRM_I915_PERF_PROP_OA_FORMAT: 2723 if (value == 0 || value >= I915_OA_FORMAT_MAX) { 2724 DRM_DEBUG("Out-of-range OA report format %llu\n", 2725 value); 2726 return -EINVAL; 2727 } 2728 if (!dev_priv->perf.oa.oa_formats[value].size) { 2729 DRM_DEBUG("Unsupported OA report format %llu\n", 2730 value); 2731 return -EINVAL; 2732 } 2733 props->oa_format = value; 2734 break; 2735 case DRM_I915_PERF_PROP_OA_EXPONENT: 2736 if (value > OA_EXPONENT_MAX) { 2737 DRM_DEBUG("OA timer exponent too high (> %u)\n", 2738 OA_EXPONENT_MAX); 2739 return -EINVAL; 2740 } 2741 2742 /* Theoretically we can program the OA unit to sample 2743 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns 2744 * for BXT. We don't allow such high sampling 2745 * frequencies by default unless root. 2746 */ 2747 2748 BUILD_BUG_ON(sizeof(oa_period) != 8); 2749 oa_period = oa_exponent_to_ns(dev_priv, value); 2750 2751 /* This check is primarily to ensure that oa_period <= 2752 * UINT32_MAX (before passing to do_div which only 2753 * accepts a u32 denominator), but we can also skip 2754 * checking anything < 1Hz which implicitly can't be 2755 * limited via an integer oa_max_sample_rate. 2756 */ 2757 if (oa_period <= NSEC_PER_SEC) { 2758 u64 tmp = NSEC_PER_SEC; 2759 do_div(tmp, oa_period); 2760 oa_freq_hz = tmp; 2761 } else 2762 oa_freq_hz = 0; 2763 2764 if (oa_freq_hz > i915_oa_max_sample_rate && 2765 !capable(CAP_SYS_ADMIN)) { 2766 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", 2767 i915_oa_max_sample_rate); 2768 return -EACCES; 2769 } 2770 2771 props->oa_periodic = true; 2772 props->oa_period_exponent = value; 2773 break; 2774 case DRM_I915_PERF_PROP_MAX: 2775 MISSING_CASE(id); 2776 return -EINVAL; 2777 } 2778 2779 uprop += 2; 2780 } 2781 2782 return 0; 2783 } 2784 2785 /** 2786 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD 2787 * @dev: drm device 2788 * @data: ioctl data copied from userspace (unvalidated) 2789 * @file: drm file 2790 * 2791 * Validates the stream open parameters given by userspace including flags 2792 * and an array of u64 key, value pair properties. 2793 * 2794 * Very little is assumed up front about the nature of the stream being 2795 * opened (for instance we don't assume it's for periodic OA unit metrics). An 2796 * i915-perf stream is expected to be a suitable interface for other forms of 2797 * buffered data written by the GPU besides periodic OA metrics. 2798 * 2799 * Note we copy the properties from userspace outside of the i915 perf 2800 * mutex to avoid an awkward lockdep with mmap_sem. 2801 * 2802 * Most of the implementation details are handled by 2803 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock 2804 * mutex for serializing with any non-file-operation driver hooks. 2805 * 2806 * Return: A newly opened i915 Perf stream file descriptor or negative 2807 * error code on failure. 2808 */ 2809 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 2810 struct drm_file *file) 2811 { 2812 struct drm_i915_private *dev_priv = dev->dev_private; 2813 struct drm_i915_perf_open_param *param = data; 2814 struct perf_open_properties props; 2815 u32 known_open_flags; 2816 int ret; 2817 2818 if (!dev_priv->perf.initialized) { 2819 DRM_DEBUG("i915 perf interface not available for this system\n"); 2820 return -ENOTSUPP; 2821 } 2822 2823 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | 2824 I915_PERF_FLAG_FD_NONBLOCK | 2825 I915_PERF_FLAG_DISABLED; 2826 if (param->flags & ~known_open_flags) { 2827 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n"); 2828 return -EINVAL; 2829 } 2830 2831 ret = read_properties_unlocked(dev_priv, 2832 u64_to_user_ptr(param->properties_ptr), 2833 param->num_properties, 2834 &props); 2835 if (ret) 2836 return ret; 2837 2838 mutex_lock(&dev_priv->perf.lock); 2839 ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file); 2840 mutex_unlock(&dev_priv->perf.lock); 2841 2842 return ret; 2843 } 2844 2845 /** 2846 * i915_perf_register - exposes i915-perf to userspace 2847 * @dev_priv: i915 device instance 2848 * 2849 * In particular OA metric sets are advertised under a sysfs metrics/ 2850 * directory allowing userspace to enumerate valid IDs that can be 2851 * used to open an i915-perf stream. 2852 */ 2853 void i915_perf_register(struct drm_i915_private *dev_priv) 2854 { 2855 if (!dev_priv->perf.initialized) 2856 return; 2857 2858 /* To be sure we're synchronized with an attempted 2859 * i915_perf_open_ioctl(); considering that we register after 2860 * being exposed to userspace. 2861 */ 2862 mutex_lock(&dev_priv->perf.lock); 2863 2864 dev_priv->perf.metrics_kobj = 2865 kobject_create_and_add("metrics", 2866 &dev_priv->drm.primary->kdev->kobj); 2867 if (!dev_priv->perf.metrics_kobj) 2868 goto exit; 2869 2870 if (IS_HASWELL(dev_priv)) { 2871 if (i915_perf_register_sysfs_hsw(dev_priv)) 2872 goto sysfs_error; 2873 } else if (IS_BROADWELL(dev_priv)) { 2874 if (i915_perf_register_sysfs_bdw(dev_priv)) 2875 goto sysfs_error; 2876 } else if (IS_CHERRYVIEW(dev_priv)) { 2877 if (i915_perf_register_sysfs_chv(dev_priv)) 2878 goto sysfs_error; 2879 } else if (IS_SKYLAKE(dev_priv)) { 2880 if (IS_SKL_GT2(dev_priv)) { 2881 if (i915_perf_register_sysfs_sklgt2(dev_priv)) 2882 goto sysfs_error; 2883 } else if (IS_SKL_GT3(dev_priv)) { 2884 if (i915_perf_register_sysfs_sklgt3(dev_priv)) 2885 goto sysfs_error; 2886 } else if (IS_SKL_GT4(dev_priv)) { 2887 if (i915_perf_register_sysfs_sklgt4(dev_priv)) 2888 goto sysfs_error; 2889 } else 2890 goto sysfs_error; 2891 } else if (IS_BROXTON(dev_priv)) { 2892 if (i915_perf_register_sysfs_bxt(dev_priv)) 2893 goto sysfs_error; 2894 } else if (IS_KABYLAKE(dev_priv)) { 2895 if (IS_KBL_GT2(dev_priv)) { 2896 if (i915_perf_register_sysfs_kblgt2(dev_priv)) 2897 goto sysfs_error; 2898 } else if (IS_KBL_GT3(dev_priv)) { 2899 if (i915_perf_register_sysfs_kblgt3(dev_priv)) 2900 goto sysfs_error; 2901 } else 2902 goto sysfs_error; 2903 } else if (IS_GEMINILAKE(dev_priv)) { 2904 if (i915_perf_register_sysfs_glk(dev_priv)) 2905 goto sysfs_error; 2906 } 2907 2908 goto exit; 2909 2910 sysfs_error: 2911 kobject_put(dev_priv->perf.metrics_kobj); 2912 dev_priv->perf.metrics_kobj = NULL; 2913 2914 exit: 2915 mutex_unlock(&dev_priv->perf.lock); 2916 } 2917 2918 /** 2919 * i915_perf_unregister - hide i915-perf from userspace 2920 * @dev_priv: i915 device instance 2921 * 2922 * i915-perf state cleanup is split up into an 'unregister' and 2923 * 'deinit' phase where the interface is first hidden from 2924 * userspace by i915_perf_unregister() before cleaning up 2925 * remaining state in i915_perf_fini(). 2926 */ 2927 void i915_perf_unregister(struct drm_i915_private *dev_priv) 2928 { 2929 if (!dev_priv->perf.metrics_kobj) 2930 return; 2931 2932 if (IS_HASWELL(dev_priv)) 2933 i915_perf_unregister_sysfs_hsw(dev_priv); 2934 else if (IS_BROADWELL(dev_priv)) 2935 i915_perf_unregister_sysfs_bdw(dev_priv); 2936 else if (IS_CHERRYVIEW(dev_priv)) 2937 i915_perf_unregister_sysfs_chv(dev_priv); 2938 else if (IS_SKYLAKE(dev_priv)) { 2939 if (IS_SKL_GT2(dev_priv)) 2940 i915_perf_unregister_sysfs_sklgt2(dev_priv); 2941 else if (IS_SKL_GT3(dev_priv)) 2942 i915_perf_unregister_sysfs_sklgt3(dev_priv); 2943 else if (IS_SKL_GT4(dev_priv)) 2944 i915_perf_unregister_sysfs_sklgt4(dev_priv); 2945 } else if (IS_BROXTON(dev_priv)) 2946 i915_perf_unregister_sysfs_bxt(dev_priv); 2947 else if (IS_KABYLAKE(dev_priv)) { 2948 if (IS_KBL_GT2(dev_priv)) 2949 i915_perf_unregister_sysfs_kblgt2(dev_priv); 2950 else if (IS_KBL_GT3(dev_priv)) 2951 i915_perf_unregister_sysfs_kblgt3(dev_priv); 2952 } else if (IS_GEMINILAKE(dev_priv)) 2953 i915_perf_unregister_sysfs_glk(dev_priv); 2954 2955 2956 kobject_put(dev_priv->perf.metrics_kobj); 2957 dev_priv->perf.metrics_kobj = NULL; 2958 } 2959 2960 static struct ctl_table oa_table[] = { 2961 { 2962 .procname = "perf_stream_paranoid", 2963 .data = &i915_perf_stream_paranoid, 2964 .maxlen = sizeof(i915_perf_stream_paranoid), 2965 .mode = 0644, 2966 .proc_handler = proc_dointvec_minmax, 2967 .extra1 = &zero, 2968 .extra2 = &one, 2969 }, 2970 { 2971 .procname = "oa_max_sample_rate", 2972 .data = &i915_oa_max_sample_rate, 2973 .maxlen = sizeof(i915_oa_max_sample_rate), 2974 .mode = 0644, 2975 .proc_handler = proc_dointvec_minmax, 2976 .extra1 = &zero, 2977 .extra2 = &oa_sample_rate_hard_limit, 2978 }, 2979 {} 2980 }; 2981 2982 static struct ctl_table i915_root[] = { 2983 { 2984 .procname = "i915", 2985 .maxlen = 0, 2986 .mode = 0555, 2987 .child = oa_table, 2988 }, 2989 {} 2990 }; 2991 2992 static struct ctl_table dev_root[] = { 2993 { 2994 .procname = "dev", 2995 .maxlen = 0, 2996 .mode = 0555, 2997 .child = i915_root, 2998 }, 2999 {} 3000 }; 3001 3002 /** 3003 * i915_perf_init - initialize i915-perf state on module load 3004 * @dev_priv: i915 device instance 3005 * 3006 * Initializes i915-perf state without exposing anything to userspace. 3007 * 3008 * Note: i915-perf initialization is split into an 'init' and 'register' 3009 * phase with the i915_perf_register() exposing state to userspace. 3010 */ 3011 void i915_perf_init(struct drm_i915_private *dev_priv) 3012 { 3013 dev_priv->perf.oa.n_builtin_sets = 0; 3014 3015 if (IS_HASWELL(dev_priv)) { 3016 dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer; 3017 dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set; 3018 dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set; 3019 dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable; 3020 dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable; 3021 dev_priv->perf.oa.ops.read = gen7_oa_read; 3022 dev_priv->perf.oa.ops.oa_hw_tail_read = 3023 gen7_oa_hw_tail_read; 3024 3025 dev_priv->perf.oa.timestamp_frequency = 12500000; 3026 3027 dev_priv->perf.oa.oa_formats = hsw_oa_formats; 3028 3029 dev_priv->perf.oa.n_builtin_sets = 3030 i915_oa_n_builtin_metric_sets_hsw; 3031 } else if (i915.enable_execlists) { 3032 /* Note: that although we could theoretically also support the 3033 * legacy ringbuffer mode on BDW (and earlier iterations of 3034 * this driver, before upstreaming did this) it didn't seem 3035 * worth the complexity to maintain now that BDW+ enable 3036 * execlist mode by default. 3037 */ 3038 3039 if (IS_GEN8(dev_priv)) { 3040 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120; 3041 dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce; 3042 3043 dev_priv->perf.oa.timestamp_frequency = 12500000; 3044 3045 dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25); 3046 3047 if (IS_BROADWELL(dev_priv)) { 3048 dev_priv->perf.oa.n_builtin_sets = 3049 i915_oa_n_builtin_metric_sets_bdw; 3050 dev_priv->perf.oa.ops.select_metric_set = 3051 i915_oa_select_metric_set_bdw; 3052 } else if (IS_CHERRYVIEW(dev_priv)) { 3053 dev_priv->perf.oa.n_builtin_sets = 3054 i915_oa_n_builtin_metric_sets_chv; 3055 dev_priv->perf.oa.ops.select_metric_set = 3056 i915_oa_select_metric_set_chv; 3057 } 3058 } else if (IS_GEN9(dev_priv)) { 3059 dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; 3060 dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; 3061 3062 dev_priv->perf.oa.timestamp_frequency = 12000000; 3063 3064 dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); 3065 3066 if (IS_SKL_GT2(dev_priv)) { 3067 dev_priv->perf.oa.n_builtin_sets = 3068 i915_oa_n_builtin_metric_sets_sklgt2; 3069 dev_priv->perf.oa.ops.select_metric_set = 3070 i915_oa_select_metric_set_sklgt2; 3071 } else if (IS_SKL_GT3(dev_priv)) { 3072 dev_priv->perf.oa.n_builtin_sets = 3073 i915_oa_n_builtin_metric_sets_sklgt3; 3074 dev_priv->perf.oa.ops.select_metric_set = 3075 i915_oa_select_metric_set_sklgt3; 3076 } else if (IS_SKL_GT4(dev_priv)) { 3077 dev_priv->perf.oa.n_builtin_sets = 3078 i915_oa_n_builtin_metric_sets_sklgt4; 3079 dev_priv->perf.oa.ops.select_metric_set = 3080 i915_oa_select_metric_set_sklgt4; 3081 } else if (IS_BROXTON(dev_priv)) { 3082 dev_priv->perf.oa.timestamp_frequency = 19200000; 3083 3084 dev_priv->perf.oa.n_builtin_sets = 3085 i915_oa_n_builtin_metric_sets_bxt; 3086 dev_priv->perf.oa.ops.select_metric_set = 3087 i915_oa_select_metric_set_bxt; 3088 } else if (IS_KBL_GT2(dev_priv)) { 3089 dev_priv->perf.oa.n_builtin_sets = 3090 i915_oa_n_builtin_metric_sets_kblgt2; 3091 dev_priv->perf.oa.ops.select_metric_set = 3092 i915_oa_select_metric_set_kblgt2; 3093 } else if (IS_KBL_GT3(dev_priv)) { 3094 dev_priv->perf.oa.n_builtin_sets = 3095 i915_oa_n_builtin_metric_sets_kblgt3; 3096 dev_priv->perf.oa.ops.select_metric_set = 3097 i915_oa_select_metric_set_kblgt3; 3098 } else if (IS_GEMINILAKE(dev_priv)) { 3099 dev_priv->perf.oa.timestamp_frequency = 19200000; 3100 3101 dev_priv->perf.oa.n_builtin_sets = 3102 i915_oa_n_builtin_metric_sets_glk; 3103 dev_priv->perf.oa.ops.select_metric_set = 3104 i915_oa_select_metric_set_glk; 3105 } 3106 } 3107 3108 if (dev_priv->perf.oa.n_builtin_sets) { 3109 dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer; 3110 dev_priv->perf.oa.ops.enable_metric_set = 3111 gen8_enable_metric_set; 3112 dev_priv->perf.oa.ops.disable_metric_set = 3113 gen8_disable_metric_set; 3114 dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable; 3115 dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable; 3116 dev_priv->perf.oa.ops.read = gen8_oa_read; 3117 dev_priv->perf.oa.ops.oa_hw_tail_read = 3118 gen8_oa_hw_tail_read; 3119 3120 dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats; 3121 } 3122 } 3123 3124 if (dev_priv->perf.oa.n_builtin_sets) { 3125 hrtimer_init(&dev_priv->perf.oa.poll_check_timer, 3126 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3127 dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb; 3128 init_waitqueue_head(&dev_priv->perf.oa.poll_wq); 3129 3130 INIT_LIST_HEAD(&dev_priv->perf.streams); 3131 mutex_init(&dev_priv->perf.lock); 3132 spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock); 3133 3134 oa_sample_rate_hard_limit = 3135 dev_priv->perf.oa.timestamp_frequency / 2; 3136 dev_priv->perf.sysctl_header = register_sysctl_table(dev_root); 3137 3138 dev_priv->perf.initialized = true; 3139 } 3140 } 3141 3142 /** 3143 * i915_perf_fini - Counter part to i915_perf_init() 3144 * @dev_priv: i915 device instance 3145 */ 3146 void i915_perf_fini(struct drm_i915_private *dev_priv) 3147 { 3148 if (!dev_priv->perf.initialized) 3149 return; 3150 3151 unregister_sysctl_table(dev_priv->perf.sysctl_header); 3152 3153 memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops)); 3154 3155 dev_priv->perf.initialized = false; 3156 } 3157