1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ 32 33 #include <uapi/drm/i915_drm.h> 34 #include <uapi/drm/drm_fourcc.h> 35 36 #include <linux/io-mapping.h> 37 #include <linux/i2c.h> 38 #include <linux/i2c-algo-bit.h> 39 #include <linux/backlight.h> 40 #include <linux/hashtable.h> 41 #include <linux/intel-iommu.h> 42 #include <linux/kref.h> 43 #include <linux/pm_qos.h> 44 #include <linux/reservation.h> 45 #include <linux/shmem_fs.h> 46 47 #include <drm/drmP.h> 48 #include <drm/intel-gtt.h> 49 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ 50 #include <drm/drm_gem.h> 51 #include <drm/drm_auth.h> 52 #include <drm/drm_cache.h> 53 54 #include "i915_params.h" 55 #include "i915_reg.h" 56 #include "i915_utils.h" 57 58 #include "intel_bios.h" 59 #include "intel_dpll_mgr.h" 60 #include "intel_uc.h" 61 #include "intel_lrc.h" 62 #include "intel_ringbuffer.h" 63 64 #include "i915_gem.h" 65 #include "i915_gem_context.h" 66 #include "i915_gem_fence_reg.h" 67 #include "i915_gem_object.h" 68 #include "i915_gem_gtt.h" 69 #include "i915_gem_render_state.h" 70 #include "i915_gem_request.h" 71 #include "i915_gem_timeline.h" 72 73 #include "i915_vma.h" 74 75 #include "intel_gvt.h" 76 77 /* General customization: 78 */ 79 80 #define DRIVER_NAME "i915" 81 #define DRIVER_DESC "Intel Graphics" 82 #define DRIVER_DATE "20170403" 83 #define DRIVER_TIMESTAMP 1491198738 84 85 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and 86 * WARN_ON()) for hw state sanity checks to check for unexpected conditions 87 * which may not necessarily be a user visible problem. This will either 88 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to 89 * enable distros and users to tailor their preferred amount of i915 abrt 90 * spam. 91 */ 92 #define I915_STATE_WARN(condition, format...) 
({ \ 93 int __ret_warn_on = !!(condition); \ 94 if (unlikely(__ret_warn_on)) \ 95 if (!WARN(i915.verbose_state_checks, format)) \ 96 DRM_ERROR(format); \ 97 unlikely(__ret_warn_on); \ 98 }) 99 100 #define I915_STATE_WARN_ON(x) \ 101 I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")") 102 103 bool __i915_inject_load_failure(const char *func, int line); 104 #define i915_inject_load_failure() \ 105 __i915_inject_load_failure(__func__, __LINE__) 106 107 typedef struct { 108 uint32_t val; 109 } uint_fixed_16_16_t; 110 111 #define FP_16_16_MAX ({ \ 112 uint_fixed_16_16_t fp; \ 113 fp.val = UINT_MAX; \ 114 fp; \ 115 }) 116 117 static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val) 118 { 119 uint_fixed_16_16_t fp; 120 121 WARN_ON(val >> 16); 122 123 fp.val = val << 16; 124 return fp; 125 } 126 127 static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp) 128 { 129 return DIV_ROUND_UP(fp.val, 1 << 16); 130 } 131 132 static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp) 133 { 134 return fp.val >> 16; 135 } 136 137 static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1, 138 uint_fixed_16_16_t min2) 139 { 140 uint_fixed_16_16_t min; 141 142 min.val = min(min1.val, min2.val); 143 return min; 144 } 145 146 static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1, 147 uint_fixed_16_16_t max2) 148 { 149 uint_fixed_16_16_t max; 150 151 max.val = max(max1.val, max2.val); 152 return max; 153 } 154 155 static inline uint_fixed_16_16_t fixed_16_16_div_round_up(uint32_t val, 156 uint32_t d) 157 { 158 uint_fixed_16_16_t fp, res; 159 160 fp = u32_to_fixed_16_16(val); 161 res.val = DIV_ROUND_UP(fp.val, d); 162 return res; 163 } 164 165 static inline uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t val, 166 uint32_t d) 167 { 168 uint_fixed_16_16_t res; 169 uint64_t interm_val; 170 171 interm_val = (uint64_t)val << 16; 172 interm_val = DIV_ROUND_UP_ULL(interm_val, d); 173 WARN_ON(interm_val >> 32); 174 res.val = (uint32_t) interm_val; 175 176 return res; 177 } 178 179 static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val, 180 uint_fixed_16_16_t mul) 181 { 182 uint64_t intermediate_val; 183 uint_fixed_16_16_t fp; 184 185 intermediate_val = (uint64_t) val * mul.val; 186 WARN_ON(intermediate_val >> 32); 187 fp.val = (uint32_t) intermediate_val; 188 return fp; 189 } 190 191 static inline const char *yesno(bool v) 192 { 193 return v ? "yes" : "no"; 194 } 195 196 static inline const char *onoff(bool v) 197 { 198 return v ? "on" : "off"; 199 } 200 201 static inline const char *enableddisabled(bool v) 202 { 203 return v ? 
"enabled" : "disabled"; 204 } 205 206 enum pipe { 207 INVALID_PIPE = -1, 208 PIPE_A = 0, 209 PIPE_B, 210 PIPE_C, 211 _PIPE_EDP, 212 I915_MAX_PIPES = _PIPE_EDP 213 }; 214 #define pipe_name(p) ((p) + 'A') 215 216 enum transcoder { 217 TRANSCODER_A = 0, 218 TRANSCODER_B, 219 TRANSCODER_C, 220 TRANSCODER_EDP, 221 TRANSCODER_DSI_A, 222 TRANSCODER_DSI_C, 223 I915_MAX_TRANSCODERS 224 }; 225 226 static inline const char *transcoder_name(enum transcoder transcoder) 227 { 228 switch (transcoder) { 229 case TRANSCODER_A: 230 return "A"; 231 case TRANSCODER_B: 232 return "B"; 233 case TRANSCODER_C: 234 return "C"; 235 case TRANSCODER_EDP: 236 return "EDP"; 237 case TRANSCODER_DSI_A: 238 return "DSI A"; 239 case TRANSCODER_DSI_C: 240 return "DSI C"; 241 default: 242 return "<invalid>"; 243 } 244 } 245 246 static inline bool transcoder_is_dsi(enum transcoder transcoder) 247 { 248 return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C; 249 } 250 251 /* 252 * Global legacy plane identifier. Valid only for primary/sprite 253 * planes on pre-g4x, and only for primary planes on g4x+. 254 */ 255 enum plane { 256 PLANE_A, 257 PLANE_B, 258 PLANE_C, 259 }; 260 #define plane_name(p) ((p) + 'A') 261 262 #define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') 263 264 /* 265 * Per-pipe plane identifier. 266 * I915_MAX_PLANES in the enum below is the maximum (across all platforms) 267 * number of planes per CRTC. Not all platforms really have this many planes, 268 * which means some arrays of size I915_MAX_PLANES may have unused entries 269 * between the topmost sprite plane and the cursor plane. 270 * 271 * This is expected to be passed to various register macros 272 * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care. 273 */ 274 enum plane_id { 275 PLANE_PRIMARY, 276 PLANE_SPRITE0, 277 PLANE_SPRITE1, 278 PLANE_SPRITE2, 279 PLANE_CURSOR, 280 I915_MAX_PLANES, 281 }; 282 283 #define for_each_plane_id_on_crtc(__crtc, __p) \ 284 for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ 285 for_each_if ((__crtc)->plane_ids_mask & BIT(__p)) 286 287 enum port { 288 PORT_NONE = -1, 289 PORT_A = 0, 290 PORT_B, 291 PORT_C, 292 PORT_D, 293 PORT_E, 294 I915_MAX_PORTS 295 }; 296 #define port_name(p) ((p) + 'A') 297 298 #define I915_NUM_PHYS_VLV 2 299 300 enum dpio_channel { 301 DPIO_CH0, 302 DPIO_CH1 303 }; 304 305 enum dpio_phy { 306 DPIO_PHY0, 307 DPIO_PHY1, 308 DPIO_PHY2, 309 }; 310 311 enum intel_display_power_domain { 312 POWER_DOMAIN_PIPE_A, 313 POWER_DOMAIN_PIPE_B, 314 POWER_DOMAIN_PIPE_C, 315 POWER_DOMAIN_PIPE_A_PANEL_FITTER, 316 POWER_DOMAIN_PIPE_B_PANEL_FITTER, 317 POWER_DOMAIN_PIPE_C_PANEL_FITTER, 318 POWER_DOMAIN_TRANSCODER_A, 319 POWER_DOMAIN_TRANSCODER_B, 320 POWER_DOMAIN_TRANSCODER_C, 321 POWER_DOMAIN_TRANSCODER_EDP, 322 POWER_DOMAIN_TRANSCODER_DSI_A, 323 POWER_DOMAIN_TRANSCODER_DSI_C, 324 POWER_DOMAIN_PORT_DDI_A_LANES, 325 POWER_DOMAIN_PORT_DDI_B_LANES, 326 POWER_DOMAIN_PORT_DDI_C_LANES, 327 POWER_DOMAIN_PORT_DDI_D_LANES, 328 POWER_DOMAIN_PORT_DDI_E_LANES, 329 POWER_DOMAIN_PORT_DDI_A_IO, 330 POWER_DOMAIN_PORT_DDI_B_IO, 331 POWER_DOMAIN_PORT_DDI_C_IO, 332 POWER_DOMAIN_PORT_DDI_D_IO, 333 POWER_DOMAIN_PORT_DDI_E_IO, 334 POWER_DOMAIN_PORT_DSI, 335 POWER_DOMAIN_PORT_CRT, 336 POWER_DOMAIN_PORT_OTHER, 337 POWER_DOMAIN_VGA, 338 POWER_DOMAIN_AUDIO, 339 POWER_DOMAIN_PLLS, 340 POWER_DOMAIN_AUX_A, 341 POWER_DOMAIN_AUX_B, 342 POWER_DOMAIN_AUX_C, 343 POWER_DOMAIN_AUX_D, 344 POWER_DOMAIN_GMBUS, 345 POWER_DOMAIN_MODESET, 346 POWER_DOMAIN_INIT, 347 348 
POWER_DOMAIN_NUM, 349 }; 350 351 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 352 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ 353 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) 354 #define POWER_DOMAIN_TRANSCODER(tran) \ 355 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ 356 (tran) + POWER_DOMAIN_TRANSCODER_A) 357 358 enum hpd_pin { 359 HPD_NONE = 0, 360 HPD_TV = HPD_NONE, /* TV is known to be unreliable */ 361 HPD_CRT, 362 HPD_SDVO_B, 363 HPD_SDVO_C, 364 HPD_PORT_A, 365 HPD_PORT_B, 366 HPD_PORT_C, 367 HPD_PORT_D, 368 HPD_PORT_E, 369 HPD_NUM_PINS 370 }; 371 372 #define for_each_hpd_pin(__pin) \ 373 for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) 374 375 #define HPD_STORM_DEFAULT_THRESHOLD 5 376 377 struct i915_hotplug { 378 struct work_struct hotplug_work; 379 380 struct { 381 unsigned long last_jiffies; 382 int count; 383 enum { 384 HPD_ENABLED = 0, 385 HPD_DISABLED = 1, 386 HPD_MARK_DISABLED = 2 387 } state; 388 } stats[HPD_NUM_PINS]; 389 u32 event_bits; 390 struct delayed_work reenable_work; 391 392 struct intel_digital_port *irq_port[I915_MAX_PORTS]; 393 u32 long_port_mask; 394 u32 short_port_mask; 395 struct work_struct dig_port_work; 396 397 struct work_struct poll_init_work; 398 bool poll_enabled; 399 400 unsigned int hpd_storm_threshold; 401 402 /* 403 * if we get a HPD irq from DP and a HPD irq from non-DP 404 * the non-DP HPD could block the workqueue on a mode config 405 * mutex getting, that userspace may have taken. However 406 * userspace is waiting on the DP workqueue to run which is 407 * blocked behind the non-DP one. 408 */ 409 struct workqueue_struct *dp_wq; 410 }; 411 412 #define I915_GEM_GPU_DOMAINS \ 413 (I915_GEM_DOMAIN_RENDER | \ 414 I915_GEM_DOMAIN_SAMPLER | \ 415 I915_GEM_DOMAIN_COMMAND | \ 416 I915_GEM_DOMAIN_INSTRUCTION | \ 417 I915_GEM_DOMAIN_VERTEX) 418 419 #define for_each_pipe(__dev_priv, __p) \ 420 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) 421 #define for_each_pipe_masked(__dev_priv, __p, __mask) \ 422 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \ 423 for_each_if ((__mask) & (1 << (__p))) 424 #define for_each_universal_plane(__dev_priv, __pipe, __p) \ 425 for ((__p) = 0; \ 426 (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ 427 (__p)++) 428 #define for_each_sprite(__dev_priv, __p, __s) \ 429 for ((__s) = 0; \ 430 (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ 431 (__s)++) 432 433 #define for_each_port_masked(__port, __ports_mask) \ 434 for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ 435 for_each_if ((__ports_mask) & (1 << (__port))) 436 437 #define for_each_crtc(dev, crtc) \ 438 list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) 439 440 #define for_each_intel_plane(dev, intel_plane) \ 441 list_for_each_entry(intel_plane, \ 442 &(dev)->mode_config.plane_list, \ 443 base.head) 444 445 #define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \ 446 list_for_each_entry(intel_plane, \ 447 &(dev)->mode_config.plane_list, \ 448 base.head) \ 449 for_each_if ((plane_mask) & \ 450 (1 << drm_plane_index(&intel_plane->base))) 451 452 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ 453 list_for_each_entry(intel_plane, \ 454 &(dev)->mode_config.plane_list, \ 455 base.head) \ 456 for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe) 457 458 #define for_each_intel_crtc(dev, intel_crtc) \ 459 list_for_each_entry(intel_crtc, \ 460 &(dev)->mode_config.crtc_list, \ 461 base.head) 462 463 #define 
for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \ 464 list_for_each_entry(intel_crtc, \ 465 &(dev)->mode_config.crtc_list, \ 466 base.head) \ 467 for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base))) 468 469 #define for_each_intel_encoder(dev, intel_encoder) \ 470 list_for_each_entry(intel_encoder, \ 471 &(dev)->mode_config.encoder_list, \ 472 base.head) 473 474 #define for_each_intel_connector_iter(intel_connector, iter) \ 475 while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter)))) 476 477 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 478 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 479 for_each_if ((intel_encoder)->base.crtc == (__crtc)) 480 481 #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ 482 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ 483 for_each_if ((intel_connector)->base.encoder == (__encoder)) 484 485 #define for_each_power_domain(domain, mask) \ 486 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ 487 for_each_if (BIT_ULL(domain) & (mask)) 488 489 #define for_each_power_well(__dev_priv, __power_well) \ 490 for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ 491 (__power_well) - (__dev_priv)->power_domains.power_wells < \ 492 (__dev_priv)->power_domains.power_well_count; \ 493 (__power_well)++) 494 495 #define for_each_power_well_rev(__dev_priv, __power_well) \ 496 for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ 497 (__dev_priv)->power_domains.power_well_count - 1; \ 498 (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ 499 (__power_well)--) 500 501 #define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ 502 for_each_power_well(__dev_priv, __power_well) \ 503 for_each_if ((__power_well)->domains & (__domain_mask)) 504 505 #define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \ 506 for_each_power_well_rev(__dev_priv, __power_well) \ 507 for_each_if ((__power_well)->domains & (__domain_mask)) 508 509 #define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \ 510 for ((__i) = 0; \ 511 (__i) < (__state)->base.dev->mode_config.num_total_plane && \ 512 ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \ 513 (plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \ 514 (__i)++) \ 515 for_each_if (plane_state) 516 517 struct drm_i915_private; 518 struct i915_mm_struct; 519 struct i915_mmu_object; 520 521 struct drm_i915_file_private { 522 struct drm_i915_private *dev_priv; 523 struct drm_file *file; 524 525 struct { 526 spinlock_t lock; 527 struct list_head request_list; 528 /* 20ms is a fairly arbitrary limit (greater than the average frame time) 529 * chosen to prevent the CPU getting more than a frame ahead of the GPU 530 * (when using lax throttling for the frontbuffer). We also use it to 531 * offer free GPU waitboosts for severely congested workloads. 532 */ 533 #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20) 534 } mm; 535 struct idr context_idr; 536 537 struct intel_rps_client { 538 struct list_head link; 539 unsigned boosts; 540 } rps; 541 542 unsigned int bsd_engine; 543 544 /* Client can have a maximum of 3 contexts banned before 545 * it is denied of creating new contexts. 
As one context 546 * ban needs 4 consecutive hangs, and more if there is 547 * progress in between, this is a last resort stop gap measure 548 * to limit the badly behaving clients access to gpu. 549 */ 550 #define I915_MAX_CLIENT_CONTEXT_BANS 3 551 int context_bans; 552 }; 553 554 /* Used by dp and fdi links */ 555 struct intel_link_m_n { 556 uint32_t tu; 557 uint32_t gmch_m; 558 uint32_t gmch_n; 559 uint32_t link_m; 560 uint32_t link_n; 561 }; 562 563 void intel_link_compute_m_n(int bpp, int nlanes, 564 int pixel_clock, int link_clock, 565 struct intel_link_m_n *m_n); 566 567 /* Interface history: 568 * 569 * 1.1: Original. 570 * 1.2: Add Power Management 571 * 1.3: Add vblank support 572 * 1.4: Fix cmdbuffer path, add heap destroy 573 * 1.5: Add vblank pipe configuration 574 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 575 * - Support vertical blank on secondary display pipe 576 */ 577 #define DRIVER_MAJOR 1 578 #define DRIVER_MINOR 6 579 #define DRIVER_PATCHLEVEL 0 580 581 struct opregion_header; 582 struct opregion_acpi; 583 struct opregion_swsci; 584 struct opregion_asle; 585 586 struct intel_opregion { 587 struct opregion_header *header; 588 struct opregion_acpi *acpi; 589 struct opregion_swsci *swsci; 590 u32 swsci_gbda_sub_functions; 591 u32 swsci_sbcb_sub_functions; 592 struct opregion_asle *asle; 593 void *rvda; 594 const void *vbt; 595 u32 vbt_size; 596 u32 *lid_state; 597 struct work_struct asle_work; 598 }; 599 #define OPREGION_SIZE (8*1024) 600 601 struct intel_overlay; 602 struct intel_overlay_error_state; 603 604 struct sdvo_device_mapping { 605 u8 initialized; 606 u8 dvo_port; 607 u8 slave_addr; 608 u8 dvo_wiring; 609 u8 i2c_pin; 610 u8 ddc_pin; 611 }; 612 613 struct intel_connector; 614 struct intel_encoder; 615 struct intel_atomic_state; 616 struct intel_crtc_state; 617 struct intel_initial_plane_config; 618 struct intel_crtc; 619 struct intel_limit; 620 struct dpll; 621 struct intel_cdclk_state; 622 623 struct drm_i915_display_funcs { 624 void (*get_cdclk)(struct drm_i915_private *dev_priv, 625 struct intel_cdclk_state *cdclk_state); 626 void (*set_cdclk)(struct drm_i915_private *dev_priv, 627 const struct intel_cdclk_state *cdclk_state); 628 int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane); 629 int (*compute_pipe_wm)(struct intel_crtc_state *cstate); 630 int (*compute_intermediate_wm)(struct drm_device *dev, 631 struct intel_crtc *intel_crtc, 632 struct intel_crtc_state *newstate); 633 void (*initial_watermarks)(struct intel_atomic_state *state, 634 struct intel_crtc_state *cstate); 635 void (*atomic_update_watermarks)(struct intel_atomic_state *state, 636 struct intel_crtc_state *cstate); 637 void (*optimize_watermarks)(struct intel_atomic_state *state, 638 struct intel_crtc_state *cstate); 639 int (*compute_global_watermarks)(struct drm_atomic_state *state); 640 void (*update_wm)(struct intel_crtc *crtc); 641 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 642 /* Returns the active state of the crtc, and if the crtc is active, 643 * fills out the pipe-config with the hw state. 
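	 *
	 * Illustrative use during hardware state readout (a sketch, not a
	 * verbatim call site):
	 *
	 *   active = dev_priv->display.get_pipe_config(crtc, pipe_config);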
*/ 644 bool (*get_pipe_config)(struct intel_crtc *, 645 struct intel_crtc_state *); 646 void (*get_initial_plane_config)(struct intel_crtc *, 647 struct intel_initial_plane_config *); 648 int (*crtc_compute_clock)(struct intel_crtc *crtc, 649 struct intel_crtc_state *crtc_state); 650 void (*crtc_enable)(struct intel_crtc_state *pipe_config, 651 struct drm_atomic_state *old_state); 652 void (*crtc_disable)(struct intel_crtc_state *old_crtc_state, 653 struct drm_atomic_state *old_state); 654 void (*update_crtcs)(struct drm_atomic_state *state, 655 unsigned int *crtc_vblank_mask); 656 void (*audio_codec_enable)(struct drm_connector *connector, 657 struct intel_encoder *encoder, 658 const struct drm_display_mode *adjusted_mode); 659 void (*audio_codec_disable)(struct intel_encoder *encoder); 660 void (*fdi_link_train)(struct intel_crtc *crtc, 661 const struct intel_crtc_state *crtc_state); 662 void (*init_clock_gating)(struct drm_i915_private *dev_priv); 663 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 664 struct drm_framebuffer *fb, 665 struct drm_i915_gem_object *obj, 666 struct drm_i915_gem_request *req, 667 uint32_t flags); 668 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); 669 /* clock updates for mode set */ 670 /* cursor updates */ 671 /* render clock increase/decrease */ 672 /* display clock increase/decrease */ 673 /* pll clock increase/decrease */ 674 675 void (*load_csc_matrix)(struct drm_crtc_state *crtc_state); 676 void (*load_luts)(struct drm_crtc_state *crtc_state); 677 }; 678 679 enum forcewake_domain_id { 680 FW_DOMAIN_ID_RENDER = 0, 681 FW_DOMAIN_ID_BLITTER, 682 FW_DOMAIN_ID_MEDIA, 683 684 FW_DOMAIN_ID_COUNT 685 }; 686 687 enum forcewake_domains { 688 FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER), 689 FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER), 690 FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA), 691 FORCEWAKE_ALL = (FORCEWAKE_RENDER | 692 FORCEWAKE_BLITTER | 693 FORCEWAKE_MEDIA) 694 }; 695 696 #define FW_REG_READ (1) 697 #define FW_REG_WRITE (2) 698 699 enum decoupled_power_domain { 700 GEN9_DECOUPLED_PD_BLITTER = 0, 701 GEN9_DECOUPLED_PD_RENDER, 702 GEN9_DECOUPLED_PD_MEDIA, 703 GEN9_DECOUPLED_PD_ALL 704 }; 705 706 enum decoupled_ops { 707 GEN9_DECOUPLED_OP_WRITE = 0, 708 GEN9_DECOUPLED_OP_READ 709 }; 710 711 enum forcewake_domains 712 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, 713 i915_reg_t reg, unsigned int op); 714 715 struct intel_uncore_funcs { 716 void (*force_wake_get)(struct drm_i915_private *dev_priv, 717 enum forcewake_domains domains); 718 void (*force_wake_put)(struct drm_i915_private *dev_priv, 719 enum forcewake_domains domains); 720 721 uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, 722 i915_reg_t r, bool trace); 723 uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, 724 i915_reg_t r, bool trace); 725 uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, 726 i915_reg_t r, bool trace); 727 uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, 728 i915_reg_t r, bool trace); 729 730 void (*mmio_writeb)(struct drm_i915_private *dev_priv, 731 i915_reg_t r, uint8_t val, bool trace); 732 void (*mmio_writew)(struct drm_i915_private *dev_priv, 733 i915_reg_t r, uint16_t val, bool trace); 734 void (*mmio_writel)(struct drm_i915_private *dev_priv, 735 i915_reg_t r, uint32_t val, bool trace); 736 }; 737 738 struct intel_forcewake_range { 739 u32 start; 740 u32 end; 741 742 enum forcewake_domains domains; 743 }; 744 745 struct intel_uncore { 746 spinlock_t lock; /** lock is also taken in irq 
contexts. */ 747 748 const struct intel_forcewake_range *fw_domains_table; 749 unsigned int fw_domains_table_entries; 750 751 struct notifier_block pmic_bus_access_nb; 752 struct intel_uncore_funcs funcs; 753 754 unsigned fifo_count; 755 756 enum forcewake_domains fw_domains; 757 enum forcewake_domains fw_domains_active; 758 759 u32 fw_set; 760 u32 fw_clear; 761 u32 fw_reset; 762 763 struct intel_uncore_forcewake_domain { 764 enum forcewake_domain_id id; 765 enum forcewake_domains mask; 766 unsigned wake_count; 767 struct hrtimer timer; 768 i915_reg_t reg_set; 769 i915_reg_t reg_ack; 770 } fw_domain[FW_DOMAIN_ID_COUNT]; 771 772 int unclaimed_mmio_check; 773 }; 774 775 #define __mask_next_bit(mask) ({ \ 776 int __idx = ffs(mask) - 1; \ 777 mask &= ~BIT(__idx); \ 778 __idx; \ 779 }) 780 781 /* Iterate over initialised fw domains */ 782 #define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \ 783 for (tmp__ = (mask__); \ 784 tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;) 785 786 #define for_each_fw_domain(domain__, dev_priv__, tmp__) \ 787 for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__) 788 789 #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) 790 #define CSR_VERSION_MAJOR(version) ((version) >> 16) 791 #define CSR_VERSION_MINOR(version) ((version) & 0xffff) 792 793 struct intel_csr { 794 struct work_struct work; 795 const char *fw_path; 796 uint32_t *dmc_payload; 797 uint32_t dmc_fw_size; 798 uint32_t version; 799 uint32_t mmio_count; 800 i915_reg_t mmioaddr[8]; 801 uint32_t mmiodata[8]; 802 uint32_t dc_state; 803 uint32_t allowed_dc_mask; 804 }; 805 806 #define DEV_INFO_FOR_EACH_FLAG(func) \ 807 func(is_mobile); \ 808 func(is_lp); \ 809 func(is_alpha_support); \ 810 /* Keep has_* in alphabetical order */ \ 811 func(has_64bit_reloc); \ 812 func(has_aliasing_ppgtt); \ 813 func(has_csr); \ 814 func(has_ddi); \ 815 func(has_decoupled_mmio); \ 816 func(has_dp_mst); \ 817 func(has_fbc); \ 818 func(has_fpga_dbg); \ 819 func(has_full_ppgtt); \ 820 func(has_full_48bit_ppgtt); \ 821 func(has_gmbus_irq); \ 822 func(has_gmch_display); \ 823 func(has_guc); \ 824 func(has_hotplug); \ 825 func(has_hw_contexts); \ 826 func(has_l3_dpf); \ 827 func(has_llc); \ 828 func(has_logical_ring_contexts); \ 829 func(has_overlay); \ 830 func(has_pipe_cxsr); \ 831 func(has_pooled_eu); \ 832 func(has_psr); \ 833 func(has_rc6); \ 834 func(has_rc6p); \ 835 func(has_resource_streamer); \ 836 func(has_runtime_pm); \ 837 func(has_snoop); \ 838 func(unfenced_needs_alignment); \ 839 func(cursor_needs_physical); \ 840 func(hws_needs_physical); \ 841 func(overlay_needs_physical); \ 842 func(supports_tv); 843 844 struct sseu_dev_info { 845 u8 slice_mask; 846 u8 subslice_mask; 847 u8 eu_total; 848 u8 eu_per_subslice; 849 u8 min_eu_in_pool; 850 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? 
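	 *
	 * For example, (subslice_7eu[2] & BIT(1)) != 0 means that subslice 1
	 * of slice 2 has all 7 EUs present.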
*/ 851 u8 subslice_7eu[3]; 852 u8 has_slice_pg:1; 853 u8 has_subslice_pg:1; 854 u8 has_eu_pg:1; 855 }; 856 857 static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) 858 { 859 return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask); 860 } 861 862 /* Keep in gen based order, and chronological order within a gen */ 863 enum intel_platform { 864 INTEL_PLATFORM_UNINITIALIZED = 0, 865 INTEL_I830, 866 INTEL_I845G, 867 INTEL_I85X, 868 INTEL_I865G, 869 INTEL_I915G, 870 INTEL_I915GM, 871 INTEL_I945G, 872 INTEL_I945GM, 873 INTEL_G33, 874 INTEL_PINEVIEW, 875 INTEL_I965G, 876 INTEL_I965GM, 877 INTEL_G45, 878 INTEL_GM45, 879 INTEL_IRONLAKE, 880 INTEL_SANDYBRIDGE, 881 INTEL_IVYBRIDGE, 882 INTEL_VALLEYVIEW, 883 INTEL_HASWELL, 884 INTEL_BROADWELL, 885 INTEL_CHERRYVIEW, 886 INTEL_SKYLAKE, 887 INTEL_BROXTON, 888 INTEL_KABYLAKE, 889 INTEL_GEMINILAKE, 890 INTEL_MAX_PLATFORMS 891 }; 892 893 struct intel_device_info { 894 u32 display_mmio_offset; 895 u16 device_id; 896 u8 num_pipes; 897 u8 num_sprites[I915_MAX_PIPES]; 898 u8 num_scalers[I915_MAX_PIPES]; 899 u8 gen; 900 u16 gen_mask; 901 enum intel_platform platform; 902 u8 ring_mask; /* Rings supported by the HW */ 903 u8 num_rings; 904 #define DEFINE_FLAG(name) u8 name:1 905 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); 906 #undef DEFINE_FLAG 907 u16 ddb_size; /* in blocks */ 908 /* Register offsets for the various display pipes and transcoders */ 909 int pipe_offsets[I915_MAX_TRANSCODERS]; 910 int trans_offsets[I915_MAX_TRANSCODERS]; 911 int palette_offsets[I915_MAX_PIPES]; 912 int cursor_offsets[I915_MAX_PIPES]; 913 914 /* Slice/subslice/EU info */ 915 struct sseu_dev_info sseu; 916 917 struct color_luts { 918 u16 degamma_lut_size; 919 u16 gamma_lut_size; 920 } color; 921 }; 922 923 struct intel_display_error_state; 924 925 struct i915_gpu_state { 926 struct kref ref; 927 struct timeval time; 928 struct timeval boottime; 929 struct timeval uptime; 930 931 struct drm_i915_private *i915; 932 933 char error_msg[128]; 934 bool simulated; 935 bool awake; 936 bool wakelock; 937 bool suspended; 938 int iommu; 939 u32 reset_count; 940 u32 suspend_count; 941 struct intel_device_info device_info; 942 struct i915_params params; 943 944 /* Generic register state */ 945 u32 eir; 946 u32 pgtbl_er; 947 u32 ier; 948 u32 gtier[4], ngtier; 949 u32 ccid; 950 u32 derrmr; 951 u32 forcewake; 952 u32 error; /* gen6+ */ 953 u32 err_int; /* gen7 */ 954 u32 fault_data0; /* gen8, gen9 */ 955 u32 fault_data1; /* gen8, gen9 */ 956 u32 done_reg; 957 u32 gac_eco; 958 u32 gam_ecochk; 959 u32 gab_ctl; 960 u32 gfx_mode; 961 962 u32 nfence; 963 u64 fence[I915_MAX_NUM_FENCES]; 964 struct intel_overlay_error_state *overlay; 965 struct intel_display_error_state *display; 966 struct drm_i915_error_object *semaphore; 967 struct drm_i915_error_object *guc_log; 968 969 struct drm_i915_error_engine { 970 int engine_id; 971 /* Software tracked state */ 972 bool waiting; 973 int num_waiters; 974 unsigned long hangcheck_timestamp; 975 bool hangcheck_stalled; 976 enum intel_engine_hangcheck_action hangcheck_action; 977 struct i915_address_space *vm; 978 int num_requests; 979 980 /* position of active request inside the ring */ 981 u32 rq_head, rq_post, rq_tail; 982 983 /* our own tracking of ring head and tail */ 984 u32 cpu_ring_head; 985 u32 cpu_ring_tail; 986 987 u32 last_seqno; 988 989 /* Register state */ 990 u32 start; 991 u32 tail; 992 u32 head; 993 u32 ctl; 994 u32 mode; 995 u32 hws; 996 u32 ipeir; 997 u32 ipehr; 998 u32 bbstate; 999 u32 instpm; 1000 u32 instps; 1001 
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 handle;
			u32 hw_id;
			int ban_score;
			int active;
			int guilty;
		} context;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object *wa_ctx;

		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 context;
			int ban_score;
			u32 seqno;
			u32 head;
			u32 tail;
		} *requests, execlist[2];

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock.
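	 *
	 * The documented nesting, as an illustrative sketch (not a verbatim
	 * call site):
	 *
	 *   mutex_lock(&dev_priv->drm.struct_mutex);
	 *   mutex_lock(&fbc->lock);
	 *   mutex_lock(&dev_priv->mm.stolen_lock);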
*/ 1097 struct mutex lock; 1098 unsigned threshold; 1099 unsigned int possible_framebuffer_bits; 1100 unsigned int busy_bits; 1101 unsigned int visible_pipes_mask; 1102 struct intel_crtc *crtc; 1103 1104 struct drm_mm_node compressed_fb; 1105 struct drm_mm_node *compressed_llb; 1106 1107 bool false_color; 1108 1109 bool enabled; 1110 bool active; 1111 1112 bool underrun_detected; 1113 struct work_struct underrun_work; 1114 1115 struct intel_fbc_state_cache { 1116 struct i915_vma *vma; 1117 1118 struct { 1119 unsigned int mode_flags; 1120 uint32_t hsw_bdw_pixel_rate; 1121 } crtc; 1122 1123 struct { 1124 unsigned int rotation; 1125 int src_w; 1126 int src_h; 1127 bool visible; 1128 } plane; 1129 1130 struct { 1131 const struct drm_format_info *format; 1132 unsigned int stride; 1133 } fb; 1134 } state_cache; 1135 1136 struct intel_fbc_reg_params { 1137 struct i915_vma *vma; 1138 1139 struct { 1140 enum pipe pipe; 1141 enum plane plane; 1142 unsigned int fence_y_offset; 1143 } crtc; 1144 1145 struct { 1146 const struct drm_format_info *format; 1147 unsigned int stride; 1148 } fb; 1149 1150 int cfb_size; 1151 } params; 1152 1153 struct intel_fbc_work { 1154 bool scheduled; 1155 u32 scheduled_vblank; 1156 struct work_struct work; 1157 } work; 1158 1159 const char *no_fbc_reason; 1160 }; 1161 1162 /* 1163 * HIGH_RR is the highest eDP panel refresh rate read from EDID 1164 * LOW_RR is the lowest eDP panel refresh rate found from EDID 1165 * parsing for same resolution. 1166 */ 1167 enum drrs_refresh_rate_type { 1168 DRRS_HIGH_RR, 1169 DRRS_LOW_RR, 1170 DRRS_MAX_RR, /* RR count */ 1171 }; 1172 1173 enum drrs_support_type { 1174 DRRS_NOT_SUPPORTED = 0, 1175 STATIC_DRRS_SUPPORT = 1, 1176 SEAMLESS_DRRS_SUPPORT = 2 1177 }; 1178 1179 struct intel_dp; 1180 struct i915_drrs { 1181 struct mutex mutex; 1182 struct delayed_work work; 1183 struct intel_dp *dp; 1184 unsigned busy_frontbuffer_bits; 1185 enum drrs_refresh_rate_type refresh_rate_type; 1186 enum drrs_support_type type; 1187 }; 1188 1189 struct i915_psr { 1190 struct mutex lock; 1191 bool sink_support; 1192 bool source_ok; 1193 struct intel_dp *enabled; 1194 bool active; 1195 struct delayed_work work; 1196 unsigned busy_frontbuffer_bits; 1197 bool psr2_support; 1198 bool aux_frame_sync; 1199 bool link_standby; 1200 bool y_cord_support; 1201 bool colorimetry_support; 1202 bool alpm; 1203 }; 1204 1205 enum intel_pch { 1206 PCH_NONE = 0, /* No PCH present */ 1207 PCH_IBX, /* Ibexpeak PCH */ 1208 PCH_CPT, /* Cougarpoint PCH */ 1209 PCH_LPT, /* Lynxpoint PCH */ 1210 PCH_SPT, /* Sunrisepoint PCH */ 1211 PCH_KBP, /* Kabypoint PCH */ 1212 PCH_NOP, 1213 }; 1214 1215 enum intel_sbi_destination { 1216 SBI_ICLK, 1217 SBI_MPHY, 1218 }; 1219 1220 #define QUIRK_PIPEA_FORCE (1<<0) 1221 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 1222 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 1223 #define QUIRK_BACKLIGHT_PRESENT (1<<3) 1224 #define QUIRK_PIPEB_FORCE (1<<4) 1225 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 1226 1227 struct intel_fbdev; 1228 struct intel_fbc_work; 1229 1230 struct intel_gmbus { 1231 struct i2c_adapter adapter; 1232 #define GMBUS_FORCE_BIT_RETRY (1U << 31) 1233 u32 force_bit; 1234 u32 reg0; 1235 i915_reg_t gpio_reg; 1236 struct i2c_algo_bit_data bit_algo; 1237 struct drm_i915_private *dev_priv; 1238 }; 1239 1240 struct i915_suspend_saved_registers { 1241 u32 saveDSPARB; 1242 u32 saveFBC_CONTROL; 1243 u32 saveCACHE_MODE_0; 1244 u32 saveMI_ARB_STATE; 1245 u32 saveSWF0[16]; 1246 u32 saveSWF1[16]; 1247 u32 saveSWF3[3]; 1248 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 1249 
u32 savePCH_PORT_HOTPLUG; 1250 u16 saveGCDGMBUS; 1251 }; 1252 1253 struct vlv_s0ix_state { 1254 /* GAM */ 1255 u32 wr_watermark; 1256 u32 gfx_prio_ctrl; 1257 u32 arb_mode; 1258 u32 gfx_pend_tlb0; 1259 u32 gfx_pend_tlb1; 1260 u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM]; 1261 u32 media_max_req_count; 1262 u32 gfx_max_req_count; 1263 u32 render_hwsp; 1264 u32 ecochk; 1265 u32 bsd_hwsp; 1266 u32 blt_hwsp; 1267 u32 tlb_rd_addr; 1268 1269 /* MBC */ 1270 u32 g3dctl; 1271 u32 gsckgctl; 1272 u32 mbctl; 1273 1274 /* GCP */ 1275 u32 ucgctl1; 1276 u32 ucgctl3; 1277 u32 rcgctl1; 1278 u32 rcgctl2; 1279 u32 rstctl; 1280 u32 misccpctl; 1281 1282 /* GPM */ 1283 u32 gfxpause; 1284 u32 rpdeuhwtc; 1285 u32 rpdeuc; 1286 u32 ecobus; 1287 u32 pwrdwnupctl; 1288 u32 rp_down_timeout; 1289 u32 rp_deucsw; 1290 u32 rcubmabdtmr; 1291 u32 rcedata; 1292 u32 spare2gh; 1293 1294 /* Display 1 CZ domain */ 1295 u32 gt_imr; 1296 u32 gt_ier; 1297 u32 pm_imr; 1298 u32 pm_ier; 1299 u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM]; 1300 1301 /* GT SA CZ domain */ 1302 u32 tilectl; 1303 u32 gt_fifoctl; 1304 u32 gtlc_wake_ctrl; 1305 u32 gtlc_survive; 1306 u32 pmwgicz; 1307 1308 /* Display 2 CZ domain */ 1309 u32 gu_ctl0; 1310 u32 gu_ctl1; 1311 u32 pcbr; 1312 u32 clock_gate_dis2; 1313 }; 1314 1315 struct intel_rps_ei { 1316 ktime_t ktime; 1317 u32 render_c0; 1318 u32 media_c0; 1319 }; 1320 1321 struct intel_gen6_power_mgmt { 1322 /* 1323 * work, interrupts_enabled and pm_iir are protected by 1324 * dev_priv->irq_lock 1325 */ 1326 struct work_struct work; 1327 bool interrupts_enabled; 1328 u32 pm_iir; 1329 1330 /* PM interrupt bits that should never be masked */ 1331 u32 pm_intrmsk_mbz; 1332 1333 /* Frequencies are stored in potentially platform dependent multiples. 1334 * In other words, *_freq needs to be multiplied by X to be interesting. 1335 * Soft limits are those which are used for the dynamic reclocking done 1336 * by the driver (raise frequencies under heavy loads, and lower for 1337 * lighter loads). Hard limits are those imposed by the hardware. 1338 * 1339 * A distinction is made for overclocking, which is never enabled by 1340 * default, and is considered to be above the hard limit if it's 1341 * possible at all. 1342 */ 1343 u8 cur_freq; /* Current frequency (cached, may not == HW) */ 1344 u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ 1345 u8 max_freq_softlimit; /* Max frequency permitted by the driver */ 1346 u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ 1347 u8 min_freq; /* AKA RPn. Minimum frequency */ 1348 u8 boost_freq; /* Frequency to request when wait boosting */ 1349 u8 idle_freq; /* Frequency to request when we are idle */ 1350 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ 1351 u8 rp1_freq; /* "less than" RP0 power/freqency */ 1352 u8 rp0_freq; /* Non-overclocked max frequency. */ 1353 u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ 1354 1355 u8 up_threshold; /* Current %busy required to uplock */ 1356 u8 down_threshold; /* Current %busy required to downclock */ 1357 1358 int last_adj; 1359 enum { LOW_POWER, BETWEEN, HIGH_POWER } power; 1360 1361 spinlock_t client_lock; 1362 struct list_head clients; 1363 bool client_boost; 1364 1365 bool enabled; 1366 struct delayed_work autoenable_work; 1367 unsigned boosts; 1368 1369 /* manual wa residency calculations */ 1370 struct intel_rps_ei ei; 1371 1372 /* 1373 * Protects RPS/RC6 register access and PCU communication. 1374 * Must be taken after struct_mutex if nested. 
Note that this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	u64 domains;
	/* unique identifier for this power well */
	unsigned long id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU). These objects may or may
	 * not actually have any pages attached.
	 */
	struct list_head unbound_list;

	/** List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;

	/** Usable portion of the GTT for GEM */
	dma_addr_t stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/* the indicator for dispatch video commands on two BSD rings */
	atomic_t bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	u64 object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */

#define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_BACKOFF bit is set
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;

	/**
	 * flags: Control various stages of the GPU reset
	 *
	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
	 * other users acquiring the struct_mutex. To do this we set the
	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
	 * and then check for that bit before acquiring the struct_mutex (in
	 * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
	 * secondary role in preventing two concurrent global reset attempts.
	 *
	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
	 * but it may be held by some long running waiter (that we cannot
	 * interrupt without causing trouble). Once we are ready to do the GPU
	 * reset, we set the I915_RESET_HANDOFF bit and wake up any waiters. If
	 * they already hold the struct_mutex and want to participate they can
	 * inspect the bit and do the reset directly, otherwise the worker
	 * waits for the struct_mutex.
	 *
	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
	 * i915_gem_request_alloc(), this bit is checked and the sequence
	 * aborted (with -EIO reported to userspace) if set.
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_HANDOFF	1
#define I915_WEDGED		(BITS_PER_LONG - 1)

	/**
	 * Waitqueue to signal when a hang is detected. Used for waiters
	 * to release the struct_mutex for the reset to proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For missed irq/seqno simulation. */
	unsigned long test_irq_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30

#define DDC_PIN_B  0x05
#define DDC_PIN_C  0x04
#define DDC_PIN_D  0x06

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
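	 *
	 * For example, a VBT that specifies level shift 6 for port B ends up
	 * as ddi_port_info[PORT_B].hdmi_level_shift == 6, which the DDI code
	 * then uses to pick the HDMI entry in its buffer translation tables
	 * (an illustrative sketch of the flow, not a verbatim description of
	 * the call sites).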
1653 */ 1654 #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff 1655 uint8_t hdmi_level_shift; 1656 1657 uint8_t supports_dvi:1; 1658 uint8_t supports_hdmi:1; 1659 uint8_t supports_dp:1; 1660 uint8_t supports_edp:1; 1661 1662 uint8_t alternate_aux_channel; 1663 uint8_t alternate_ddc_pin; 1664 1665 uint8_t dp_boost_level; 1666 uint8_t hdmi_boost_level; 1667 }; 1668 1669 enum psr_lines_to_wait { 1670 PSR_0_LINES_TO_WAIT = 0, 1671 PSR_1_LINE_TO_WAIT, 1672 PSR_4_LINES_TO_WAIT, 1673 PSR_8_LINES_TO_WAIT 1674 }; 1675 1676 struct intel_vbt_data { 1677 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1678 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1679 1680 /* Feature bits */ 1681 unsigned int int_tv_support:1; 1682 unsigned int lvds_dither:1; 1683 unsigned int lvds_vbt:1; 1684 unsigned int int_crt_support:1; 1685 unsigned int lvds_use_ssc:1; 1686 unsigned int display_clock_mode:1; 1687 unsigned int fdi_rx_polarity_inverted:1; 1688 unsigned int panel_type:4; 1689 int lvds_ssc_freq; 1690 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 1691 1692 enum drrs_support_type drrs_type; 1693 1694 struct { 1695 int rate; 1696 int lanes; 1697 int preemphasis; 1698 int vswing; 1699 bool low_vswing; 1700 bool initialized; 1701 bool support; 1702 int bpp; 1703 struct edp_power_seq pps; 1704 } edp; 1705 1706 struct { 1707 bool full_link; 1708 bool require_aux_wakeup; 1709 int idle_frames; 1710 enum psr_lines_to_wait lines_to_wait; 1711 int tp1_wakeup_time; 1712 int tp2_tp3_wakeup_time; 1713 } psr; 1714 1715 struct { 1716 u16 pwm_freq_hz; 1717 bool present; 1718 bool active_low_pwm; 1719 u8 min_brightness; /* min_brightness/255 of max */ 1720 u8 controller; /* brightness controller number */ 1721 enum intel_backlight_type type; 1722 } backlight; 1723 1724 /* MIPI DSI */ 1725 struct { 1726 u16 panel_id; 1727 struct mipi_config *config; 1728 struct mipi_pps_data *pps; 1729 u8 seq_version; 1730 u32 size; 1731 u8 *data; 1732 const u8 *sequence[MIPI_SEQ_MAX]; 1733 } dsi; 1734 1735 int crt_ddc_pin; 1736 1737 int child_dev_num; 1738 union child_device_config *child_dev; 1739 1740 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; 1741 struct sdvo_device_mapping sdvo_mappings[2]; 1742 }; 1743 1744 enum intel_ddb_partitioning { 1745 INTEL_DDB_PART_1_2, 1746 INTEL_DDB_PART_5_6, /* IVB+ */ 1747 }; 1748 1749 struct intel_wm_level { 1750 bool enable; 1751 uint32_t pri_val; 1752 uint32_t spr_val; 1753 uint32_t cur_val; 1754 uint32_t fbc_val; 1755 }; 1756 1757 struct ilk_wm_values { 1758 uint32_t wm_pipe[3]; 1759 uint32_t wm_lp[3]; 1760 uint32_t wm_lp_spr[3]; 1761 uint32_t wm_linetime[3]; 1762 bool enable_fbc_wm; 1763 enum intel_ddb_partitioning partitioning; 1764 }; 1765 1766 struct vlv_pipe_wm { 1767 uint16_t plane[I915_MAX_PLANES]; 1768 }; 1769 1770 struct vlv_sr_wm { 1771 uint16_t plane; 1772 uint16_t cursor; 1773 }; 1774 1775 struct vlv_wm_ddl_values { 1776 uint8_t plane[I915_MAX_PLANES]; 1777 }; 1778 1779 struct vlv_wm_values { 1780 struct vlv_pipe_wm pipe[3]; 1781 struct vlv_sr_wm sr; 1782 struct vlv_wm_ddl_values ddl[3]; 1783 uint8_t level; 1784 bool cxsr; 1785 }; 1786 1787 struct skl_ddb_entry { 1788 uint16_t start, end; /* in number of blocks, 'end' is exclusive */ 1789 }; 1790 1791 static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) 1792 { 1793 return entry->end - entry->start; 1794 } 1795 1796 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, 1797 const struct skl_ddb_entry *e2) 1798 { 1799 if (e1->start == e2->start && e1->end == 
e2->end)
		return true;

	return false;
}

struct skl_ddb_allocation {
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_values {
	unsigned dirty_pipes;
	struct skl_ddb_allocation ddb;
};

struct skl_wm_level {
	bool plane_en;
	uint16_t plane_res_b;
	uint8_t plane_res_l;
};

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	atomic_t wakeref_count;
	bool suspended;
	bool irqs_enabled;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
	int skipped;
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity
	 * or scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	i915_reg_t addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

/*
 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
 * allowing it for RCS as we don't foresee any requirement of having
 * a whitelist for other engines. When it is really required for
 * other engines then the limit needs to be increased.
 */
#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
	u32 hw_whitelist_count[I915_NUM_ENGINES];
};

struct i915_virtual_gpu {
	bool active;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

struct i915_oa_format {
	u32 format;
	int size;
};

struct i915_oa_reg {
	i915_reg_t addr;
	u32 value;
};

struct i915_perf_stream;

/**
 * struct i915_perf_stream_ops - the OPs to support a specific stream type
 */
struct i915_perf_stream_ops {
	/**
	 * @enable: Enables the collection of HW samples, either in response to
	 * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
	 * without `I915_PERF_FLAG_DISABLED`.
	 */
	void (*enable)(struct i915_perf_stream *stream);

	/**
	 * @disable: Disables the collection of HW samples, either in response
	 * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
	 * the stream.
	 */
	void (*disable)(struct i915_perf_stream *stream);

	/**
	 * @poll_wait: Call poll_wait, passing a wait queue that will be woken
	 * once there is something ready to read() for the stream
	 */
	void (*poll_wait)(struct i915_perf_stream *stream,
			  struct file *file,
			  poll_table *wait);

	/**
	 * @wait_unlocked: For handling a blocking read, wait until there is
	 * something ready to read() for the stream. E.g. wait on the same
	 * wait queue that would be passed to poll_wait().
	 */
	int (*wait_unlocked)(struct i915_perf_stream *stream);

	/**
	 * @read: Copy buffered metrics as records to userspace
	 * **buf**: the userspace destination buffer
	 * **count**: the number of bytes to copy, requested by userspace
	 * **offset**: zero at the start of the read, updated as the read
	 * proceeds; it represents how many bytes have been copied so far and
	 * the buffer offset for copying the next record.
	 *
	 * Copy as many buffered i915 perf samples and records for this stream
	 * to userspace as will fit in the given buffer.
	 *
	 * Only write complete records; return -%ENOSPC if there isn't room
	 * for a complete record.
	 *
	 * Return any error condition that results in a short read such as
	 * -%ENOSPC or -%EFAULT, even though these may be squashed before
	 * returning to userspace.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/**
	 * @destroy: Cleanup any stream specific resources.
	 *
	 * The stream will always be disabled before this is called.
	 */
	void (*destroy)(struct i915_perf_stream *stream);
};

/**
 * struct i915_perf_stream - state for a single open stream FD
 */
struct i915_perf_stream {
	/**
	 * @dev_priv: i915 drm device
	 */
	struct drm_i915_private *dev_priv;

	/**
	 * @link: Links the stream into ``&drm_i915_private->streams``
	 */
	struct list_head link;

	/**
	 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
	 * properties given when opening a stream, representing the contents
	 * of a single sample as read() by userspace.
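	 *
	 * For example, a stream opened with the DRM_I915_PERF_PROP_SAMPLE_OA
	 * property gets the corresponding sample flag set, and each record
	 * returned by read() then carries a raw OA report.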
2018 */ 2019 u32 sample_flags; 2020 2021 /** 2022 * @sample_size: Considering the configured contents of a sample 2023 * combined with the required header size, this is the total size 2024 * of a single sample record. 2025 */ 2026 int sample_size; 2027 2028 /** 2029 * @ctx: %NULL if measuring system-wide across all contexts, otherwise 2030 * the specific context being monitored. 2031 */ 2032 struct i915_gem_context *ctx; 2033 2034 /** 2035 * @enabled: Whether the stream is currently enabled, considering 2036 * whether the stream was opened in a disabled state and based 2037 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls. 2038 */ 2039 bool enabled; 2040 2041 /** 2042 * @ops: The callbacks providing the implementation of this specific 2043 * type of configured stream. 2044 */ 2045 const struct i915_perf_stream_ops *ops; 2046 }; 2047 2048 /** 2049 * struct i915_oa_ops - Gen specific implementation of an OA unit stream 2050 */ 2051 struct i915_oa_ops { 2052 /** 2053 * @init_oa_buffer: Resets the head and tail pointers of the 2054 * circular buffer for periodic OA reports. 2055 * 2056 * Called when first opening a stream for OA metrics, but also may be 2057 * called in response to an OA buffer overflow or other error 2058 * condition. 2059 * 2060 * Note it may be necessary to clear the full OA buffer here as part of 2061 * maintaining the invariant that new reports must be written to 2062 * zeroed memory for us to be able to reliably detect if an expected 2063 * report has not yet landed in memory. (At least on Haswell the OA 2064 * buffer tail pointer is not synchronized with reports being visible 2065 * to the CPU) 2066 */ 2067 void (*init_oa_buffer)(struct drm_i915_private *dev_priv); 2068 2069 /** 2070 * @enable_metric_set: Applies any MUX configuration to set up the 2071 * Boolean and Custom (B/C) counters that are part of the counter 2072 * reports being sampled. May apply system constraints such as 2073 * disabling EU clock gating as required. 2074 */ 2075 int (*enable_metric_set)(struct drm_i915_private *dev_priv); 2076 2077 /** 2078 * @disable_metric_set: Remove system constraints associated with using 2079 * the OA unit. 2080 */ 2081 void (*disable_metric_set)(struct drm_i915_private *dev_priv); 2082 2083 /** 2084 * @oa_enable: Enable periodic sampling 2085 */ 2086 void (*oa_enable)(struct drm_i915_private *dev_priv); 2087 2088 /** 2089 * @oa_disable: Disable periodic sampling 2090 */ 2091 void (*oa_disable)(struct drm_i915_private *dev_priv); 2092 2093 /** 2094 * @read: Copy data from the circular OA buffer into a given userspace 2095 * buffer. 2096 */ 2097 int (*read)(struct i915_perf_stream *stream, 2098 char __user *buf, 2099 size_t count, 2100 size_t *offset); 2101 2102 /** 2103 * @oa_buffer_is_empty: Check if OA buffer empty (false positives OK) 2104 * 2105 * This is either called via fops or the poll check hrtimer (atomic 2106 * ctx) without any locks taken. 2107 * 2108 * It's safe to read OA config state here unlocked, assuming that this 2109 * is only called while the stream is enabled, during which the global OA 2110 * configuration can't be modified. 2111 * 2112 * Efficiency is more important than avoiding some false positives 2113 * here, which will be handled gracefully - likely resulting in an 2114 * %EAGAIN error for userspace.
2115 */ 2116 bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv); 2117 }; 2118 2119 struct intel_cdclk_state { 2120 unsigned int cdclk, vco, ref; 2121 }; 2122 2123 struct drm_i915_private { 2124 struct drm_device drm; 2125 2126 struct kmem_cache *objects; 2127 struct kmem_cache *vmas; 2128 struct kmem_cache *requests; 2129 struct kmem_cache *dependencies; 2130 2131 const struct intel_device_info info; 2132 2133 void __iomem *regs; 2134 2135 struct intel_uncore uncore; 2136 2137 struct i915_virtual_gpu vgpu; 2138 2139 struct intel_gvt *gvt; 2140 2141 struct intel_huc huc; 2142 struct intel_guc guc; 2143 2144 struct intel_csr csr; 2145 2146 struct intel_gmbus gmbus[GMBUS_NUM_PINS]; 2147 2148 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 2149 * controller on different i2c buses. */ 2150 struct mutex gmbus_mutex; 2151 2152 /** 2153 * Base address of the gmbus and gpio block. 2154 */ 2155 uint32_t gpio_mmio_base; 2156 2157 /* MMIO base address for MIPI regs */ 2158 uint32_t mipi_mmio_base; 2159 2160 uint32_t psr_mmio_base; 2161 2162 uint32_t pps_mmio_base; 2163 2164 wait_queue_head_t gmbus_wait_queue; 2165 2166 struct pci_dev *bridge_dev; 2167 struct i915_gem_context *kernel_context; 2168 struct intel_engine_cs *engine[I915_NUM_ENGINES]; 2169 struct i915_vma *semaphore; 2170 2171 struct drm_dma_handle *status_page_dmah; 2172 struct resource mch_res; 2173 2174 /* protects the irq masks */ 2175 spinlock_t irq_lock; 2176 2177 /* protects the mmio flip data */ 2178 spinlock_t mmio_flip_lock; 2179 2180 bool display_irqs_enabled; 2181 2182 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 2183 struct pm_qos_request pm_qos; 2184 2185 /* Sideband mailbox protection */ 2186 struct mutex sb_lock; 2187 2188 /** Cached value of IMR to avoid reads in updating the bitfield */ 2189 union { 2190 u32 irq_mask; 2191 u32 de_irq_mask[I915_MAX_PIPES]; 2192 }; 2193 u32 gt_irq_mask; 2194 u32 pm_imr; 2195 u32 pm_ier; 2196 u32 pm_rps_events; 2197 u32 pm_guc_events; 2198 u32 pipestat_irq_mask[I915_MAX_PIPES]; 2199 2200 struct i915_hotplug hotplug; 2201 struct intel_fbc fbc; 2202 struct i915_drrs drrs; 2203 struct intel_opregion opregion; 2204 struct intel_vbt_data vbt; 2205 2206 bool preserve_bios_swizzle; 2207 2208 /* overlay */ 2209 struct intel_overlay *overlay; 2210 2211 /* backlight registers and fields in struct intel_panel */ 2212 struct mutex backlight_lock; 2213 2214 /* LVDS info */ 2215 bool no_aux_handshake; 2216 2217 /* protects panel power sequencer state */ 2218 struct mutex pps_mutex; 2219 2220 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 2221 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 2222 2223 unsigned int fsb_freq, mem_freq, is_ddr3; 2224 unsigned int skl_preferred_vco_freq; 2225 unsigned int max_cdclk_freq; 2226 2227 unsigned int max_dotclk_freq; 2228 unsigned int rawclk_freq; 2229 unsigned int hpll_freq; 2230 unsigned int czclk_freq; 2231 2232 struct { 2233 /* 2234 * The current logical cdclk state. 2235 * See intel_atomic_state.cdclk.logical 2236 * 2237 * For reading holding any crtc lock is sufficient, 2238 * for writing must hold all of them. 2239 */ 2240 struct intel_cdclk_state logical; 2241 /* 2242 * The current actual cdclk state. 2243 * See intel_atomic_state.cdclk.actual 2244 */ 2245 struct intel_cdclk_state actual; 2246 /* The current hardware cdclk state */ 2247 struct intel_cdclk_state hw; 2248 } cdclk; 2249 2250 /** 2251 * wq - Driver workqueue for GEM. 
2252 * 2253 * NOTE: Work items scheduled here are not allowed to grab any modeset 2254 * locks, for otherwise the flushing done in the pageflip code will 2255 * result in deadlocks. 2256 */ 2257 struct workqueue_struct *wq; 2258 2259 /* Display functions */ 2260 struct drm_i915_display_funcs display; 2261 2262 /* PCH chipset type */ 2263 enum intel_pch pch_type; 2264 unsigned short pch_id; 2265 2266 unsigned long quirks; 2267 2268 enum modeset_restore modeset_restore; 2269 struct mutex modeset_restore_lock; 2270 struct drm_atomic_state *modeset_restore_state; 2271 struct drm_modeset_acquire_ctx reset_ctx; 2272 2273 struct list_head vm_list; /* Global list of all address spaces */ 2274 struct i915_ggtt ggtt; /* VM representing the global address space */ 2275 2276 struct i915_gem_mm mm; 2277 DECLARE_HASHTABLE(mm_structs, 7); 2278 struct mutex mm_lock; 2279 2280 /* The hw wants to have a stable context identifier for the lifetime 2281 * of the context (for OA, PASID, faults, etc). This is limited 2282 * in execlists to 21 bits. 2283 */ 2284 struct ida context_hw_ida; 2285 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ 2286 2287 /* Kernel Modesetting */ 2288 2289 struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 2290 struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; 2291 wait_queue_head_t pending_flip_queue; 2292 2293 #ifdef CONFIG_DEBUG_FS 2294 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; 2295 #endif 2296 2297 /* dpll and cdclk state is protected by connection_mutex */ 2298 int num_shared_dpll; 2299 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 2300 const struct intel_dpll_mgr *dpll_mgr; 2301 2302 /* 2303 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. 2304 * Must be global rather than per dpll, because on some platforms 2305 * plls share registers. 2306 */ 2307 struct mutex dpll_lock; 2308 2309 unsigned int active_crtcs; 2310 unsigned int min_pixclk[I915_MAX_PIPES]; 2311 2312 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 2313 2314 struct i915_workarounds workarounds; 2315 2316 struct i915_frontbuffer_tracking fb_tracking; 2317 2318 struct intel_atomic_helper { 2319 struct llist_head free_list; 2320 struct work_struct free_work; 2321 } atomic_helper; 2322 2323 u16 orig_clock; 2324 2325 bool mchbar_need_disable; 2326 2327 struct intel_l3_parity l3_parity; 2328 2329 /* Cannot be determined by PCIID. You must always read a register. */ 2330 u32 edram_cap; 2331 2332 /* gen6+ rps state */ 2333 struct intel_gen6_power_mgmt rps; 2334 2335 /* ilk-only ips/rps state. 
Everything in here is protected by the global 2336 * mchdev_lock in intel_pm.c */ 2337 struct intel_ilk_power_mgmt ips; 2338 2339 struct i915_power_domains power_domains; 2340 2341 struct i915_psr psr; 2342 2343 struct i915_gpu_error gpu_error; 2344 2345 struct drm_i915_gem_object *vlv_pctx; 2346 2347 #ifdef CONFIG_DRM_FBDEV_EMULATION 2348 /* fbdev registered on this device */ 2349 struct intel_fbdev *fbdev; 2350 struct work_struct fbdev_suspend_work; 2351 #endif 2352 2353 struct drm_property *broadcast_rgb_property; 2354 struct drm_property *force_audio_property; 2355 2356 /* hda/i915 audio component */ 2357 struct i915_audio_component *audio_component; 2358 bool audio_component_registered; 2359 /** 2360 * av_mutex - mutex for audio/video sync 2361 * 2362 */ 2363 struct mutex av_mutex; 2364 2365 uint32_t hw_context_size; 2366 struct list_head context_list; 2367 2368 u32 fdi_rx_config; 2369 2370 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ 2371 u32 chv_phy_control; 2372 /* 2373 * Shadows for CHV DPLL_MD regs to keep the state 2374 * checker somewhat working in the presence of hardware 2375 * crappiness (can't read out DPLL_MD for pipes B & C). 2376 */ 2377 u32 chv_dpll_md[I915_MAX_PIPES]; 2378 u32 bxt_phy_grc; 2379 2380 u32 suspend_count; 2381 bool suspended_to_idle; 2382 struct i915_suspend_saved_registers regfile; 2383 struct vlv_s0ix_state vlv_s0ix_state; 2384 2385 enum { 2386 I915_SAGV_UNKNOWN = 0, 2387 I915_SAGV_DISABLED, 2388 I915_SAGV_ENABLED, 2389 I915_SAGV_NOT_CONTROLLED 2390 } sagv_status; 2391 2392 struct { 2393 /* 2394 * Raw watermark latency values: 2395 * in 0.1us units for WM0, 2396 * in 0.5us units for WM1+. 2397 */ 2398 /* primary */ 2399 uint16_t pri_latency[5]; 2400 /* sprite */ 2401 uint16_t spr_latency[5]; 2402 /* cursor */ 2403 uint16_t cur_latency[5]; 2404 /* 2405 * Raw watermark memory latency values 2406 * for SKL for all 8 levels 2407 * in 1us units. 2408 */ 2409 uint16_t skl_latency[8]; 2410 2411 /* current hardware state */ 2412 union { 2413 struct ilk_wm_values hw; 2414 struct skl_wm_values skl_hw; 2415 struct vlv_wm_values vlv; 2416 }; 2417 2418 uint8_t max_level; 2419 2420 /* 2421 * Should be held around atomic WM register writing; also 2422 * protects intel_crtc->wm.active and 2423 * cstate->wm.need_postvbl_update. 2424 */ 2425 struct mutex wm_mutex; 2426 2427 /* 2428 * Set during HW readout of watermarks/DDB. Some platforms 2429 * need to know when we're still using BIOS-provided values 2430 * (which we don't fully trust).
2431 */ 2432 bool distrust_bios_wm; 2433 } wm; 2434 2435 struct i915_runtime_pm pm; 2436 2437 struct { 2438 bool initialized; 2439 2440 struct kobject *metrics_kobj; 2441 struct ctl_table_header *sysctl_header; 2442 2443 struct mutex lock; 2444 struct list_head streams; 2445 2446 spinlock_t hook_lock; 2447 2448 struct { 2449 struct i915_perf_stream *exclusive_stream; 2450 2451 u32 specific_ctx_id; 2452 2453 struct hrtimer poll_check_timer; 2454 wait_queue_head_t poll_wq; 2455 bool pollin; 2456 2457 bool periodic; 2458 int period_exponent; 2459 int timestamp_frequency; 2460 2461 int tail_margin; 2462 2463 int metrics_set; 2464 2465 const struct i915_oa_reg *mux_regs; 2466 int mux_regs_len; 2467 const struct i915_oa_reg *b_counter_regs; 2468 int b_counter_regs_len; 2469 2470 struct { 2471 struct i915_vma *vma; 2472 u8 *vaddr; 2473 int format; 2474 int format_size; 2475 } oa_buffer; 2476 2477 u32 gen7_latched_oastatus1; 2478 2479 struct i915_oa_ops ops; 2480 const struct i915_oa_format *oa_formats; 2481 int n_builtin_sets; 2482 } oa; 2483 } perf; 2484 2485 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 2486 struct { 2487 void (*resume)(struct drm_i915_private *); 2488 void (*cleanup_engine)(struct intel_engine_cs *engine); 2489 2490 struct list_head timelines; 2491 struct i915_gem_timeline global_timeline; 2492 u32 active_requests; 2493 2494 /** 2495 * Is the GPU currently considered idle, or busy executing 2496 * userspace requests? Whilst idle, we allow runtime power 2497 * management to power down the hardware and display clocks. 2498 * In order to reduce the effect on performance, there 2499 * is a slight delay before we do so. 2500 */ 2501 bool awake; 2502 2503 /** 2504 * We leave the user IRQ off as much as possible, 2505 * but this means that requests will finish and never 2506 * be retired once the system goes idle. Set a timer to 2507 * fire periodically while the ring is running. When it 2508 * fires, go retire requests. 2509 */ 2510 struct delayed_work retire_work; 2511 2512 /** 2513 * When we detect an idle GPU, we want to turn on 2514 * powersaving features. So once we see that there 2515 * are no more requests outstanding and no more 2516 * arrive within a small period of time, we fire 2517 * off the idle_work. 2518 */ 2519 struct delayed_work idle_work; 2520 2521 ktime_t last_init_time; 2522 } gt; 2523 2524 /* perform PHY state sanity checks? */ 2525 bool chv_phy_assert[2]; 2526 2527 bool ipc_enabled; 2528 2529 /* Used to save the pipe-to-encoder mapping for audio */ 2530 struct intel_encoder *av_enc_map[I915_MAX_PIPES]; 2531 2532 /* necessary resource sharing with HDMI LPE audio driver. */ 2533 struct { 2534 struct platform_device *platdev; 2535 int irq; 2536 } lpe_audio; 2537 2538 /* 2539 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 2540 * will be rejected. Instead look for a better place. 
2541 */ 2542 }; 2543 2544 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 2545 { 2546 return container_of(dev, struct drm_i915_private, drm); 2547 } 2548 2549 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 2550 { 2551 return to_i915(dev_get_drvdata(kdev)); 2552 } 2553 2554 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 2555 { 2556 return container_of(guc, struct drm_i915_private, guc); 2557 } 2558 2559 static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) 2560 { 2561 return container_of(huc, struct drm_i915_private, huc); 2562 } 2563 2564 /* Simple iterator over all initialised engines */ 2565 #define for_each_engine(engine__, dev_priv__, id__) \ 2566 for ((id__) = 0; \ 2567 (id__) < I915_NUM_ENGINES; \ 2568 (id__)++) \ 2569 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 2570 2571 /* Iterator over subset of engines selected by mask */ 2572 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ 2573 for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \ 2574 tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; ) 2575 2576 enum hdmi_force_audio { 2577 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 2578 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2579 HDMI_AUDIO_AUTO, /* trust EDID */ 2580 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2581 }; 2582 2583 #define I915_GTT_OFFSET_NONE ((u32)-1) 2584 2585 /* 2586 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2587 * considered to be the frontbuffer for the given plane interface-wise. This 2588 * doesn't mean that the hw necessarily already scans it out, but that any 2589 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2590 * 2591 * We have one bit per pipe and per scanout plane type. 
2592 */ 2593 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 2594 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2595 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2596 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2597 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2598 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2599 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ 2600 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2601 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2602 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2603 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2604 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2605 2606 /* 2607 * Optimised SGL iterator for GEM objects 2608 */ 2609 static __always_inline struct sgt_iter { 2610 struct scatterlist *sgp; 2611 union { 2612 unsigned long pfn; 2613 dma_addr_t dma; 2614 }; 2615 unsigned int curr; 2616 unsigned int max; 2617 } __sgt_iter(struct scatterlist *sgl, bool dma) { 2618 struct sgt_iter s = { .sgp = sgl }; 2619 2620 if (s.sgp) { 2621 s.max = s.curr = s.sgp->offset; 2622 s.max += s.sgp->length; 2623 if (dma) 2624 s.dma = sg_dma_address(s.sgp); 2625 else 2626 s.pfn = page_to_pfn(sg_page(s.sgp)); 2627 } 2628 2629 return s; 2630 } 2631 2632 static inline struct scatterlist *____sg_next(struct scatterlist *sg) 2633 { 2634 ++sg; 2635 if (unlikely(sg_is_chain(sg))) 2636 sg = sg_chain_ptr(sg); 2637 return sg; 2638 } 2639 2640 /** 2641 * __sg_next - return the next scatterlist entry in a list 2642 * @sg: The current sg entry 2643 * 2644 * Description: 2645 * If the entry is the last, return NULL; otherwise, step to the next 2646 * element in the array (@sg@+1). If that's a chain pointer, follow it; 2647 * otherwise just return the pointer to the current element. 2648 **/ 2649 static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2650 { 2651 #ifdef CONFIG_DEBUG_SG 2652 BUG_ON(sg->sg_magic != SG_MAGIC); 2653 #endif 2654 return sg_is_last(sg) ? NULL : ____sg_next(sg); 2655 } 2656 2657 /** 2658 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table 2659 * @__dmap: DMA address (output) 2660 * @__iter: 'struct sgt_iter' (iterator state, internal) 2661 * @__sgt: sg_table to iterate over (input) 2662 */ 2663 #define for_each_sgt_dma(__dmap, __iter, __sgt) \ 2664 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ 2665 ((__dmap) = (__iter).dma + (__iter).curr); \ 2666 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2667 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) 2668 2669 /** 2670 * for_each_sgt_page - iterate over the pages of the given sg_table 2671 * @__pp: page pointer (output) 2672 * @__iter: 'struct sgt_iter' (iterator state, internal) 2673 * @__sgt: sg_table to iterate over (input) 2674 */ 2675 #define for_each_sgt_page(__pp, __iter, __sgt) \ 2676 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ 2677 ((__pp) = (__iter).pfn == 0 ? 
NULL : \ 2678 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ 2679 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2680 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) 2681 2682 static inline const struct intel_device_info * 2683 intel_info(const struct drm_i915_private *dev_priv) 2684 { 2685 return &dev_priv->info; 2686 } 2687 2688 #define INTEL_INFO(dev_priv) intel_info((dev_priv)) 2689 2690 #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) 2691 #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) 2692 2693 #define REVID_FOREVER 0xff 2694 #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) 2695 2696 #define GEN_FOREVER (0) 2697 /* 2698 * Returns true if Gen is in inclusive range [Start, End]. 2699 * 2700 * Use GEN_FOREVER for unbound start and or end. 2701 */ 2702 #define IS_GEN(dev_priv, s, e) ({ \ 2703 unsigned int __s = (s), __e = (e); \ 2704 BUILD_BUG_ON(!__builtin_constant_p(s)); \ 2705 BUILD_BUG_ON(!__builtin_constant_p(e)); \ 2706 if ((__s) != GEN_FOREVER) \ 2707 __s = (s) - 1; \ 2708 if ((__e) == GEN_FOREVER) \ 2709 __e = BITS_PER_LONG - 1; \ 2710 else \ 2711 __e = (e) - 1; \ 2712 !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \ 2713 }) 2714 2715 /* 2716 * Return true if revision is in range [since,until] inclusive. 2717 * 2718 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 2719 */ 2720 #define IS_REVID(p, since, until) \ 2721 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2722 2723 #define IS_I830(dev_priv) ((dev_priv)->info.platform == INTEL_I830) 2724 #define IS_I845G(dev_priv) ((dev_priv)->info.platform == INTEL_I845G) 2725 #define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X) 2726 #define IS_I865G(dev_priv) ((dev_priv)->info.platform == INTEL_I865G) 2727 #define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G) 2728 #define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM) 2729 #define IS_I945G(dev_priv) ((dev_priv)->info.platform == INTEL_I945G) 2730 #define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM) 2731 #define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G) 2732 #define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM) 2733 #define IS_G45(dev_priv) ((dev_priv)->info.platform == INTEL_G45) 2734 #define IS_GM45(dev_priv) ((dev_priv)->info.platform == INTEL_GM45) 2735 #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) 2736 #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) 2737 #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) 2738 #define IS_PINEVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_PINEVIEW) 2739 #define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33) 2740 #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) 2741 #define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE) 2742 #define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \ 2743 INTEL_DEVID(dev_priv) == 0x0152 || \ 2744 INTEL_DEVID(dev_priv) == 0x015a) 2745 #define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW) 2746 #define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW) 2747 #define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL) 2748 #define IS_BROADWELL(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWELL) 2749 #define IS_SKYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_SKYLAKE) 2750 #define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON) 
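/*
 * Illustrative sketch, not part of the original header: how the gen/revision
 * range helpers above are typically combined. IS_GEN() takes an inclusive
 * [start, end] gen range with GEN_FOREVER as an open bound, while IS_REVID()
 * gates on the PCI revision. The helper name and the revision bound below are
 * hypothetical and only show the intended usage pattern.
 */
static inline bool
i915_example_needs_gen9_wa(struct drm_i915_private *dev_priv)
{
	/* Open-ended range: gen9 and anything newer. */
	if (!IS_GEN(dev_priv, 9, GEN_FOREVER))
		return false;

	/* Hypothetical: restrict further to early Skylake revisions. */
	return IS_SKYLAKE(dev_priv) && IS_REVID(dev_priv, 0, 0x3);
}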
2751 #define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE) 2752 #define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE) 2753 #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) 2754 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 2755 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 2756 #define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \ 2757 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \ 2758 (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \ 2759 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)) 2760 /* ULX machines are also considered ULT. */ 2761 #define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ 2762 (INTEL_DEVID(dev_priv) & 0xf) == 0xe) 2763 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 2764 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2765 #define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ 2766 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) 2767 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 2768 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2769 /* ULX machines are also considered ULT. */ 2770 #define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ 2771 INTEL_DEVID(dev_priv) == 0x0A1E) 2772 #define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \ 2773 INTEL_DEVID(dev_priv) == 0x1913 || \ 2774 INTEL_DEVID(dev_priv) == 0x1916 || \ 2775 INTEL_DEVID(dev_priv) == 0x1921 || \ 2776 INTEL_DEVID(dev_priv) == 0x1926) 2777 #define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \ 2778 INTEL_DEVID(dev_priv) == 0x1915 || \ 2779 INTEL_DEVID(dev_priv) == 0x191E) 2780 #define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \ 2781 INTEL_DEVID(dev_priv) == 0x5913 || \ 2782 INTEL_DEVID(dev_priv) == 0x5916 || \ 2783 INTEL_DEVID(dev_priv) == 0x5921 || \ 2784 INTEL_DEVID(dev_priv) == 0x5926) 2785 #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2786 INTEL_DEVID(dev_priv) == 0x5915 || \ 2787 INTEL_DEVID(dev_priv) == 0x591E) 2788 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2789 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2790 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2791 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030) 2792 2793 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) 2794 2795 #define SKL_REVID_A0 0x0 2796 #define SKL_REVID_B0 0x1 2797 #define SKL_REVID_C0 0x2 2798 #define SKL_REVID_D0 0x3 2799 #define SKL_REVID_E0 0x4 2800 #define SKL_REVID_F0 0x5 2801 #define SKL_REVID_G0 0x6 2802 #define SKL_REVID_H0 0x7 2803 2804 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2805 2806 #define BXT_REVID_A0 0x0 2807 #define BXT_REVID_A1 0x1 2808 #define BXT_REVID_B0 0x3 2809 #define BXT_REVID_B_LAST 0x8 2810 #define BXT_REVID_C0 0x9 2811 2812 #define IS_BXT_REVID(dev_priv, since, until) \ 2813 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) 2814 2815 #define KBL_REVID_A0 0x0 2816 #define KBL_REVID_B0 0x1 2817 #define KBL_REVID_C0 0x2 2818 #define KBL_REVID_D0 0x3 2819 #define KBL_REVID_E0 0x4 2820 2821 #define IS_KBL_REVID(dev_priv, since, until) \ 2822 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2823 2824 #define GLK_REVID_A0 0x0 2825 #define GLK_REVID_A1 0x1 2826 2827 #define IS_GLK_REVID(dev_priv, since, until) \ 2828 (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2829 2830 /* 2831 * The genX designation typically refers to the render engine, so render 2832 * capability related checks should use IS_GEN, while display and other checks 2833 * have their own (e.g. 
HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2834 * chips, etc.). 2835 */ 2836 #define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1))) 2837 #define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2))) 2838 #define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3))) 2839 #define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4))) 2840 #define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5))) 2841 #define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6))) 2842 #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) 2843 #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) 2844 2845 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) 2846 #define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv)) 2847 #define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv)) 2848 2849 #define ENGINE_MASK(id) BIT(id) 2850 #define RENDER_RING ENGINE_MASK(RCS) 2851 #define BSD_RING ENGINE_MASK(VCS) 2852 #define BLT_RING ENGINE_MASK(BCS) 2853 #define VEBOX_RING ENGINE_MASK(VECS) 2854 #define BSD2_RING ENGINE_MASK(VCS2) 2855 #define ALL_ENGINES (~0) 2856 2857 #define HAS_ENGINE(dev_priv, id) \ 2858 (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id))) 2859 2860 #define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) 2861 #define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) 2862 #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) 2863 #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) 2864 2865 #define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc) 2866 #define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop) 2867 #define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED)) 2868 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ 2869 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) 2870 2871 #define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical) 2872 2873 #define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts) 2874 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ 2875 ((dev_priv)->info.has_logical_ring_contexts) 2876 #define USES_PPGTT(dev_priv) (i915.enable_ppgtt) 2877 #define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2) 2878 #define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3) 2879 2880 #define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) 2881 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ 2882 ((dev_priv)->info.overlay_needs_physical) 2883 2884 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2885 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) 2886 2887 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2888 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 2889 (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) 2890 2891 /* 2892 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2893 * even when in MSI mode. This results in spurious interrupt warnings if the 2894 * legacy irq no. is shared with another device. The kernel then disables that 2895 * interrupt source and so prevents the other device from working properly. 2896 */ 2897 #define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5) 2898 #define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq) 2899 2900 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2901 * rows, which changed the alignment requirements and fence programming. 
2902 */ 2903 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ 2904 !(IS_I915G(dev_priv) || \ 2905 IS_I915GM(dev_priv))) 2906 #define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv) 2907 #define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug) 2908 2909 #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) 2910 #define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr) 2911 #define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc) 2912 2913 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 2914 2915 #define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst) 2916 2917 #define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) 2918 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg) 2919 #define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr) 2920 #define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6) 2921 #define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p) 2922 2923 #define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr) 2924 2925 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) 2926 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) 2927 2928 /* 2929 * For now, anything with a GuC requires uCode loading, and then supports 2930 * command submission once loaded. But these are logically independent 2931 * properties, so we have separate macros to test them. 2932 */ 2933 #define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc) 2934 #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) 2935 #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) 2936 #define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) 2937 2938 #define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer) 2939 2940 #define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) 2941 2942 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2943 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2944 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2945 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2946 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2947 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2948 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2949 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2950 #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 2951 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2952 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2953 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2954 2955 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) 2956 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) 2957 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) 2958 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) 2959 #define HAS_PCH_LPT_LP(dev_priv) \ 2960 ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2961 #define HAS_PCH_LPT_H(dev_priv) \ 2962 ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) 2963 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) 2964 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) 2965 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) 2966 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) 2967 2968 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display) 2969 2970 #define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv)) 2971 2972 /* DPF == dynamic parity feature */ 2973 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf) 2974 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? 
\ 2975 2 : HAS_L3_DPF(dev_priv)) 2976 2977 #define GT_FREQUENCY_MULTIPLIER 50 2978 #define GEN9_FREQ_SCALER 3 2979 2980 #define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio) 2981 2982 #include "i915_trace.h" 2983 2984 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) 2985 { 2986 #ifdef CONFIG_INTEL_IOMMU 2987 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped) 2988 return true; 2989 #endif 2990 return false; 2991 } 2992 2993 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 2994 int enable_ppgtt); 2995 2996 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value); 2997 2998 /* i915_drv.c */ 2999 void __printf(3, 4) 3000 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 3001 const char *fmt, ...); 3002 3003 #define i915_report_error(dev_priv, fmt, ...) \ 3004 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 3005 3006 #ifdef CONFIG_COMPAT 3007 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 3008 unsigned long arg); 3009 #else 3010 #define i915_compat_ioctl NULL 3011 #endif 3012 extern const struct dev_pm_ops i915_pm_ops; 3013 3014 extern int i915_driver_load(struct pci_dev *pdev, 3015 const struct pci_device_id *ent); 3016 extern void i915_driver_unload(struct drm_device *dev); 3017 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); 3018 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); 3019 extern void i915_reset(struct drm_i915_private *dev_priv); 3020 extern int intel_guc_reset(struct drm_i915_private *dev_priv); 3021 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 3022 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); 3023 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 3024 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 3025 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 3026 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 3027 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 3028 3029 int intel_engines_init_early(struct drm_i915_private *dev_priv); 3030 int intel_engines_init(struct drm_i915_private *dev_priv); 3031 3032 /* intel_hotplug.c */ 3033 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, 3034 u32 pin_mask, u32 long_mask); 3035 void intel_hpd_init(struct drm_i915_private *dev_priv); 3036 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 3037 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 3038 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 3039 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 3040 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 3041 3042 /* i915_irq.c */ 3043 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) 3044 { 3045 unsigned long delay; 3046 3047 if (unlikely(!i915.enable_hangcheck)) 3048 return; 3049 3050 /* Don't continually defer the hangcheck so that it is always run at 3051 * least once after work has been scheduled on any ring. Otherwise, 3052 * we will ignore a hung ring if a second ring is kept busy. 
3053 */ 3054 3055 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); 3056 queue_delayed_work(system_long_wq, 3057 &dev_priv->gpu_error.hangcheck_work, delay); 3058 } 3059 3060 __printf(3, 4) 3061 void i915_handle_error(struct drm_i915_private *dev_priv, 3062 u32 engine_mask, 3063 const char *fmt, ...); 3064 3065 extern void intel_irq_init(struct drm_i915_private *dev_priv); 3066 int intel_irq_install(struct drm_i915_private *dev_priv); 3067 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 3068 3069 extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); 3070 extern void intel_uncore_init(struct drm_i915_private *dev_priv); 3071 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 3072 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 3073 extern void intel_uncore_fini(struct drm_i915_private *dev_priv); 3074 extern void intel_uncore_suspend(struct drm_i915_private *dev_priv); 3075 extern void intel_uncore_resume_early(struct drm_i915_private *dev_priv); 3076 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 3077 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 3078 enum forcewake_domains domains); 3079 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 3080 enum forcewake_domains domains); 3081 /* Like above but the caller must manage the uncore.lock itself. 3082 * Must be used with I915_READ_FW and friends. 3083 */ 3084 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 3085 enum forcewake_domains domains); 3086 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 3087 enum forcewake_domains domains); 3088 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 3089 3090 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 3091 3092 int intel_wait_for_register(struct drm_i915_private *dev_priv, 3093 i915_reg_t reg, 3094 const u32 mask, 3095 const u32 value, 3096 const unsigned long timeout_ms); 3097 int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, 3098 i915_reg_t reg, 3099 const u32 mask, 3100 const u32 value, 3101 const unsigned long timeout_ms); 3102 3103 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) 3104 { 3105 return dev_priv->gvt; 3106 } 3107 3108 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) 3109 { 3110 return dev_priv->vgpu.active; 3111 } 3112 3113 void 3114 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 3115 u32 status_mask); 3116 3117 void 3118 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 3119 u32 status_mask); 3120 3121 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 3122 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 3123 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 3124 uint32_t mask, 3125 uint32_t bits); 3126 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 3127 uint32_t interrupt_mask, 3128 uint32_t enabled_irq_mask); 3129 static inline void 3130 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 3131 { 3132 ilk_update_display_irq(dev_priv, bits, bits); 3133 } 3134 static inline void 3135 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 3136 { 3137 ilk_update_display_irq(dev_priv, bits, 0); 3138 } 3139 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 3140 enum pipe pipe, 3141 
uint32_t interrupt_mask, 3142 uint32_t enabled_irq_mask); 3143 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 3144 enum pipe pipe, uint32_t bits) 3145 { 3146 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 3147 } 3148 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 3149 enum pipe pipe, uint32_t bits) 3150 { 3151 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 3152 } 3153 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 3154 uint32_t interrupt_mask, 3155 uint32_t enabled_irq_mask); 3156 static inline void 3157 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 3158 { 3159 ibx_display_interrupt_update(dev_priv, bits, bits); 3160 } 3161 static inline void 3162 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 3163 { 3164 ibx_display_interrupt_update(dev_priv, bits, 0); 3165 } 3166 3167 /* i915_gem.c */ 3168 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 3169 struct drm_file *file_priv); 3170 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 3171 struct drm_file *file_priv); 3172 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 3173 struct drm_file *file_priv); 3174 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 3175 struct drm_file *file_priv); 3176 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 3177 struct drm_file *file_priv); 3178 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 3179 struct drm_file *file_priv); 3180 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 3181 struct drm_file *file_priv); 3182 int i915_gem_execbuffer(struct drm_device *dev, void *data, 3183 struct drm_file *file_priv); 3184 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 3185 struct drm_file *file_priv); 3186 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 3187 struct drm_file *file_priv); 3188 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3189 struct drm_file *file); 3190 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3191 struct drm_file *file); 3192 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 3193 struct drm_file *file_priv); 3194 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 3195 struct drm_file *file_priv); 3196 int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, 3197 struct drm_file *file_priv); 3198 int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 3199 struct drm_file *file_priv); 3200 void i915_gem_init_userptr(struct drm_i915_private *dev_priv); 3201 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 3202 struct drm_file *file); 3203 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 3204 struct drm_file *file_priv); 3205 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 3206 struct drm_file *file_priv); 3207 void i915_gem_sanitize(struct drm_i915_private *i915); 3208 int i915_gem_load_init(struct drm_i915_private *dev_priv); 3209 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv); 3210 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 3211 int i915_gem_freeze(struct drm_i915_private *dev_priv); 3212 int i915_gem_freeze_late(struct drm_i915_private *dev_priv); 3213 3214 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv); 3215 void i915_gem_object_free(struct drm_i915_gem_object *obj); 3216 void i915_gem_object_init(struct drm_i915_gem_object *obj, 3217 const struct 
drm_i915_gem_object_ops *ops); 3218 struct drm_i915_gem_object * 3219 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size); 3220 struct drm_i915_gem_object * 3221 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, 3222 const void *data, size_t size); 3223 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); 3224 void i915_gem_free_object(struct drm_gem_object *obj); 3225 3226 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) 3227 { 3228 /* A single pass should suffice to release all the freed objects (along 3229 * most call paths) , but be a little more paranoid in that freeing 3230 * the objects does take a little amount of time, during which the rcu 3231 * callbacks could have added new objects into the freed list, and 3232 * armed the work again. 3233 */ 3234 do { 3235 rcu_barrier(); 3236 } while (flush_work(&i915->mm.free_work)); 3237 } 3238 3239 struct i915_vma * __must_check 3240 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 3241 const struct i915_ggtt_view *view, 3242 u64 size, 3243 u64 alignment, 3244 u64 flags); 3245 3246 int i915_gem_object_unbind(struct drm_i915_gem_object *obj); 3247 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 3248 3249 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); 3250 3251 static inline int __sg_page_count(const struct scatterlist *sg) 3252 { 3253 return sg->length >> PAGE_SHIFT; 3254 } 3255 3256 struct scatterlist * 3257 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 3258 unsigned int n, unsigned int *offset); 3259 3260 struct page * 3261 i915_gem_object_get_page(struct drm_i915_gem_object *obj, 3262 unsigned int n); 3263 3264 struct page * 3265 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 3266 unsigned int n); 3267 3268 dma_addr_t 3269 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 3270 unsigned long n); 3271 3272 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 3273 struct sg_table *pages); 3274 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 3275 3276 static inline int __must_check 3277 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 3278 { 3279 might_lock(&obj->mm.lock); 3280 3281 if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) 3282 return 0; 3283 3284 return __i915_gem_object_get_pages(obj); 3285 } 3286 3287 static inline void 3288 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 3289 { 3290 GEM_BUG_ON(!obj->mm.pages); 3291 3292 atomic_inc(&obj->mm.pages_pin_count); 3293 } 3294 3295 static inline bool 3296 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) 3297 { 3298 return atomic_read(&obj->mm.pages_pin_count); 3299 } 3300 3301 static inline void 3302 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 3303 { 3304 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 3305 GEM_BUG_ON(!obj->mm.pages); 3306 3307 atomic_dec(&obj->mm.pages_pin_count); 3308 } 3309 3310 static inline void 3311 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 3312 { 3313 __i915_gem_object_unpin_pages(obj); 3314 } 3315 3316 enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */ 3317 I915_MM_NORMAL = 0, 3318 I915_MM_SHRINKER 3319 }; 3320 3321 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 3322 enum i915_mm_subclass subclass); 3323 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); 3324 3325 enum i915_map_type { 3326 I915_MAP_WB = 0, 3327 I915_MAP_WC, 3328 }; 
3329 3330 /** 3331 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 3332 * @obj: the object to map into kernel address space 3333 * @type: the type of mapping, used to select pgprot_t 3334 * 3335 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 3336 * pages and then returns a contiguous mapping of the backing storage into 3337 * the kernel address space. Based on the @type of mapping, the PTE will be 3338 * set to either WriteBack or WriteCombine (via pgprot_t). 3339 * 3340 * The caller is responsible for calling i915_gem_object_unpin_map() when the 3341 * mapping is no longer required. 3342 * 3343 * Returns the pointer through which to access the mapped object, or an 3344 * ERR_PTR() on error. 3345 */ 3346 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 3347 enum i915_map_type type); 3348 3349 /** 3350 * i915_gem_object_unpin_map - releases an earlier mapping 3351 * @obj: the object to unmap 3352 * 3353 * After pinning the object and mapping its pages, once you are finished 3354 * with your access, call i915_gem_object_unpin_map() to release the pin 3355 * upon the mapping. Once the pin count reaches zero, that mapping may be 3356 * removed. 3357 */ 3358 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3359 { 3360 i915_gem_object_unpin_pages(obj); 3361 } 3362 3363 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 3364 unsigned int *needs_clflush); 3365 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 3366 unsigned int *needs_clflush); 3367 #define CLFLUSH_BEFORE BIT(0) 3368 #define CLFLUSH_AFTER BIT(1) 3369 #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) 3370 3371 static inline void 3372 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) 3373 { 3374 i915_gem_object_unpin_pages(obj); 3375 } 3376 3377 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3378 void i915_vma_move_to_active(struct i915_vma *vma, 3379 struct drm_i915_gem_request *req, 3380 unsigned int flags); 3381 int i915_gem_dumb_create(struct drm_file *file_priv, 3382 struct drm_device *dev, 3383 struct drm_mode_create_dumb *args); 3384 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3385 uint32_t handle, uint64_t *offset); 3386 int i915_gem_mmap_gtt_version(void); 3387 3388 void i915_gem_track_fb(struct drm_i915_gem_object *old, 3389 struct drm_i915_gem_object *new, 3390 unsigned frontbuffer_bits); 3391 3392 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 3393 3394 struct drm_i915_gem_request * 3395 i915_gem_find_active_request(struct intel_engine_cs *engine); 3396 3397 void i915_gem_retire_requests(struct drm_i915_private *dev_priv); 3398 3399 static inline bool i915_reset_backoff(struct i915_gpu_error *error) 3400 { 3401 return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags)); 3402 } 3403 3404 static inline bool i915_reset_handoff(struct i915_gpu_error *error) 3405 { 3406 return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags)); 3407 } 3408 3409 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 3410 { 3411 return unlikely(test_bit(I915_WEDGED, &error->flags)); 3412 } 3413 3414 static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error) 3415 { 3416 return i915_reset_backoff(error) | i915_terminally_wedged(error); 3417 } 3418 3419 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3420 { 3421 return 
READ_ONCE(error->reset_count); 3422 } 3423 3424 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); 3425 void i915_gem_reset(struct drm_i915_private *dev_priv); 3426 void i915_gem_reset_finish(struct drm_i915_private *dev_priv); 3427 void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3428 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); 3429 3430 void i915_gem_init_mmio(struct drm_i915_private *i915); 3431 int __must_check i915_gem_init(struct drm_i915_private *dev_priv); 3432 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); 3433 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); 3434 void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); 3435 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, 3436 unsigned int flags); 3437 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); 3438 void i915_gem_resume(struct drm_i915_private *dev_priv); 3439 int i915_gem_fault(struct vm_fault *vmf); 3440 int i915_gem_object_wait(struct drm_i915_gem_object *obj, 3441 unsigned int flags, 3442 long timeout, 3443 struct intel_rps_client *rps); 3444 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 3445 unsigned int flags, 3446 int priority); 3447 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX 3448 3449 int __must_check 3450 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 3451 bool write); 3452 int __must_check 3453 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3454 struct i915_vma * __must_check 3455 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3456 u32 alignment, 3457 const struct i915_ggtt_view *view); 3458 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); 3459 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3460 int align); 3461 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 3462 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3463 3464 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3465 enum i915_cache_level cache_level); 3466 3467 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3468 struct dma_buf *dma_buf); 3469 3470 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3471 struct drm_gem_object *gem_obj, int flags); 3472 3473 static inline struct i915_hw_ppgtt * 3474 i915_vm_to_ppgtt(struct i915_address_space *vm) 3475 { 3476 return container_of(vm, struct i915_hw_ppgtt, base); 3477 } 3478 3479 /* i915_gem_fence_reg.c */ 3480 int __must_check i915_vma_get_fence(struct i915_vma *vma); 3481 int __must_check i915_vma_put_fence(struct i915_vma *vma); 3482 3483 void i915_gem_revoke_fences(struct drm_i915_private *dev_priv); 3484 void i915_gem_restore_fences(struct drm_i915_private *dev_priv); 3485 3486 void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); 3487 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, 3488 struct sg_table *pages); 3489 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, 3490 struct sg_table *pages); 3491 3492 static inline struct i915_gem_context * 3493 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) 3494 { 3495 struct i915_gem_context *ctx; 3496 3497 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); 3498 3499 ctx = idr_find(&file_priv->context_idr, id); 3500 if (!ctx) 3501 return ERR_PTR(-ENOENT); 3502 3503 return ctx; 3504 } 3505 3506 static inline struct 
i915_gem_context * 3507 i915_gem_context_get(struct i915_gem_context *ctx) 3508 { 3509 kref_get(&ctx->ref); 3510 return ctx; 3511 } 3512 3513 static inline void i915_gem_context_put(struct i915_gem_context *ctx) 3514 { 3515 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 3516 kref_put(&ctx->ref, i915_gem_context_free); 3517 } 3518 3519 static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx) 3520 { 3521 struct mutex *lock = &ctx->i915->drm.struct_mutex; 3522 3523 if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock)) 3524 mutex_unlock(lock); 3525 } 3526 3527 static inline struct intel_timeline * 3528 i915_gem_context_lookup_timeline(struct i915_gem_context *ctx, 3529 struct intel_engine_cs *engine) 3530 { 3531 struct i915_address_space *vm; 3532 3533 vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base; 3534 return &vm->timeline.engine[engine->id]; 3535 } 3536 3537 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 3538 struct drm_file *file); 3539 3540 /* i915_gem_evict.c */ 3541 int __must_check i915_gem_evict_something(struct i915_address_space *vm, 3542 u64 min_size, u64 alignment, 3543 unsigned cache_level, 3544 u64 start, u64 end, 3545 unsigned flags); 3546 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, 3547 struct drm_mm_node *node, 3548 unsigned int flags); 3549 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3550 3551 /* belongs in i915_gem_gtt.h */ 3552 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) 3553 { 3554 wmb(); 3555 if (INTEL_GEN(dev_priv) < 6) 3556 intel_gtt_chipset_flush(); 3557 } 3558 3559 /* i915_gem_stolen.c */ 3560 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3561 struct drm_mm_node *node, u64 size, 3562 unsigned alignment); 3563 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3564 struct drm_mm_node *node, u64 size, 3565 unsigned alignment, u64 start, 3566 u64 end); 3567 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3568 struct drm_mm_node *node); 3569 int i915_gem_init_stolen(struct drm_i915_private *dev_priv); 3570 void i915_gem_cleanup_stolen(struct drm_device *dev); 3571 struct drm_i915_gem_object * 3572 i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size); 3573 struct drm_i915_gem_object * 3574 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, 3575 u32 stolen_offset, 3576 u32 gtt_offset, 3577 u32 size); 3578 3579 /* i915_gem_internal.c */ 3580 struct drm_i915_gem_object * 3581 i915_gem_object_create_internal(struct drm_i915_private *dev_priv, 3582 phys_addr_t size); 3583 3584 /* i915_gem_shrinker.c */ 3585 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3586 unsigned long target, 3587 unsigned flags); 3588 #define I915_SHRINK_PURGEABLE 0x1 3589 #define I915_SHRINK_UNBOUND 0x2 3590 #define I915_SHRINK_BOUND 0x4 3591 #define I915_SHRINK_ACTIVE 0x8 3592 #define I915_SHRINK_VMAPS 0x10 3593 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3594 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3595 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); 3596 3597 3598 /* i915_gem_tiling.c */ 3599 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3600 { 3601 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3602 3603 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3604 

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
			unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
			     unsigned int tiling, unsigned int stride);

/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif

/* i915_gpu_error.c */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_gpu_state *gpu);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}

struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);

static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}

void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
	if (gpu)
		kref_put(&gpu->ref, __i915_gpu_state_free);
}

struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    u32 engine_mask,
					    const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return NULL;
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

#endif

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *batch_obj,
			    struct drm_i915_gem_object *shadow_batch_obj,
			    u32 batch_start_offset,
			    u32 batch_len,
			    bool is_master);

/* i915_perf.c */
extern void i915_perf_init(struct drm_i915_private *dev_priv);
extern void i915_perf_fini(struct drm_i915_private *dev_priv);
extern void i915_perf_register(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister(struct drm_i915_private *dev_priv);

/* i915_suspend.c */
extern int i915_save_state(struct drm_i915_private *dev_priv);
extern int i915_restore_state(struct drm_i915_private *dev_priv);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);

/* intel_lpe_audio.c */
int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
			    void *eld, int port, int pipe, int tmds_clk_speed,
			    bool dp_output, int link_rate);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_i915_private *dev_priv);

/* intel_bios.c */
void intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
				  enum port port);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
extern void intel_opregion_register(struct drm_i915_private *dev_priv);
extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
					 pci_power_t state);
extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
#else
static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
{
	return 0;
}
static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
{
	return -ENODEV;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}

const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump(struct drm_i915_private *dev_priv);

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
				       bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
					     uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				     uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void vlv_phy_reset_lanes(struct intel_encoder *encoder);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg);

#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
	u32 upper, lower, old_upper, loop = 0; \
	upper = I915_READ(upper_reg); \
	do { \
		old_upper = upper; \
		lower = I915_READ(lower_reg); \
		upper = I915_READ(upper_reg); \
	} while (upper != old_upper && loop++ < 2); \
	(u64)upper << 32 | lower; })

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)

#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
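
/* Illustrative sketch (not part of the driver API): filling in the skeleton
 * from the comment above with an actual untraced access. The register and
 * forcewake domain chosen here are only examples; real callers must select
 * the domains required by the registers they touch, or consult
 * intel_uncore_forcewake_for_reg().
 *
 *	u32 val;
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_RENDER);
 *
 *	val = I915_READ_FW(GEN6_RP_STATE_CAP);
 *
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_RENDER);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */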

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return VLV_VGACNTRL;
	else if (INTEL_GEN(dev_priv) >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
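
/* Illustrative sketch (not part of the driver API): a caller that must allow
 * a settle time between powering something on (event A) and first using it
 * (event B) records the jiffies timestamp at A and waits out only whatever
 * remains of that period just before B. The variable name and the 100ms
 * delay are hypothetical.
 *
 *	unsigned long power_on_timestamp;
 *
 *	power_on_timestamp = jiffies;			(event A)
 *	...
 *	wait_remaining_ms_from_jiffies(power_on_timestamp, 100);
 *	...						(event B, >= 100ms later)
 */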

static inline bool
__i915_request_irq_complete(const struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 seqno;

	/* Note that the engine may have wrapped around the seqno, and
	 * so our request->global_seqno will be ahead of the hardware,
	 * even though it completed the request before wrapping. We catch
	 * this by kicking all the waiters before resetting the seqno
	 * in hardware, and also signal the fence.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
		return true;

	/* The request was dequeued before we were awoken. We check after
	 * inspecting the hw to confirm that this was the same request
	 * that generated the HWS update. The memory barriers within
	 * the request execution are sufficient to ensure that a check
	 * after reading the value from hw matches this request.
	 */
	seqno = i915_gem_request_global_seqno(req);
	if (!seqno)
		return false;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (__i915_gem_request_completed(req, seqno))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come.)
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial. The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger a
		 * barrier on the next pass, and the read may not see the
		 * seqno update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have inspected
		 * the seqno before we believe it coherent since they see
		 * irq_posted == false but we are still running).
		 */
		spin_lock_irq(&b->irq_lock);
		if (b->irq_wait && b->irq_wait->tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whoever made the change. We only have
			 * to worry about when we steal the irq-posted for
			 * ourself.
			 */
			wake_up_process(b->irq_wait->tsk);
		spin_unlock_irq(&b->irq_lock);

		if (__i915_gem_request_completed(req, seqno))
			return true;
	}

	return false;
}

void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
 * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
 * perform the operation. To check beforehand, pass in the parameters to
 * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
 * you only need to pass in the minor offsets, page-aligned pointers are
 * always valid.
 *
 * For just checking for SSE4.1, in the foreknowledge that the future use
 * will be correctly aligned, just use i915_has_memcpy_from_wc().
 */
#define i915_can_memcpy_from_wc(dst, src, len) \
	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)

#define i915_has_memcpy_from_wc() \
	i915_memcpy_from_wc(NULL, NULL, 0)
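
/* Illustrative sketch (not part of the driver API): probe whether the
 * accelerated copy is usable for a given alignment and fall back to a plain
 * memcpy otherwise. The dst/src/len names here are hypothetical locals.
 *
 *	if (i915_can_memcpy_from_wc(dst, src, len))
 *		i915_memcpy_from_wc(dst, src, len);
 *	else
 *		memcpy(dst, src, len);
 */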

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
{
	return (obj->cache_level != I915_CACHE_NONE ||
		HAS_LLC(to_i915(obj->base.dev)));
}

#endif