/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>

#include "i915_params.h"
#include "i915_reg.h"

#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20161108"
#define DRIVER_TIMESTAMP	1478587895

#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long)(x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
 * enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915.verbose_state_checks, format))		\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
};

static inline const char *transcoder_name(enum transcoder transcoder)
{
	switch (transcoder) {
	case TRANSCODER_A:
		return "A";
	case TRANSCODER_B:
		return "B";
	case TRANSCODER_C:
		return "C";
	case TRANSCODER_EDP:
		return "EDP";
	case TRANSCODER_DSI_A:
		return "DSI A";
	case TRANSCODER_DSI_C:
		return "DSI C";
	default:
		return "<invalid>";
	}
}

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}
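
/*
 * Illustrative sketch (not part of the driver): how the helpers above are
 * typically combined in debug output and hw state checks. The crtc fields
 * dereferenced here are assumptions for the example; the block is compiled
 * out.
 */
#if 0
static void example_report_pipe(struct intel_crtc *crtc)
{
	/* pipe_name() turns the pipe enum into a human readable letter. */
	DRM_DEBUG_KMS("checking pipe %c, transcoder %s\n",
		      pipe_name(crtc->pipe),
		      transcoder_name(crtc->config->cpu_transcoder));

	/* I915_STATE_WARN_ON() flags unexpected hw state without oopsing. */
	I915_STATE_WARN_ON(crtc->active && !crtc->base.state->active);
}
#endif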
/*
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC.  Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
 */
enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_NONE = -1,
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_TRANSCODER_DSI_A,
	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
	POWER_DOMAIN_PORT_DDI_D_LANES,
	POWER_DOMAIN_PORT_DDI_E_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_MODESET,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,		/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	/*
	 * If we get a HPD irq from DP and a HPD irq from non-DP, the non-DP
	 * HPD could block the workqueue while trying to take a mode config
	 * mutex that userspace may already hold. However, userspace is
	 * waiting on the DP workqueue to run, which is blocked behind the
	 * non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
		for_each_if ((__mask) & (1 << (__p)))
#define for_each_universal_plane(__dev_priv, __pipe, __p)		\
	for ((__p) = 0;							\
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;	\
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s)				\
	for ((__s) = 0;							\
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];	\
	     (__s)++)

#define for_each_port_masked(__port, __ports_mask) \
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
		for_each_if ((__ports_mask) & (1 << (__port)))

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane,			\
			    &(dev)->mode_config.plane_list,	\
			    base.head)

#define for_each_intel_plane_mask(dev, intel_plane, plane_mask)		\
	list_for_each_entry(intel_plane,				\
			    &(dev)->mode_config.plane_list,		\
			    base.head)					\
		for_each_if ((plane_mask) &				\
			     (1 << drm_plane_index(&intel_plane->base)))

#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
	list_for_each_entry(intel_plane,				\
			    &(dev)->mode_config.plane_list,		\
			    base.head)					\
		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)

#define for_each_intel_crtc(dev, intel_crtc)				\
	list_for_each_entry(intel_crtc,					\
			    &(dev)->mode_config.crtc_list,		\
			    base.head)

#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)		\
	list_for_each_entry(intel_crtc,					\
			    &(dev)->mode_config.crtc_list,		\
			    base.head)					\
		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))

#define for_each_intel_encoder(dev, intel_encoder)		\
	list_for_each_entry(intel_encoder,			\
			    &(dev)->mode_config.encoder_list,	\
			    base.head)

#define for_each_intel_connector(dev, intel_connector)		\
	list_for_each_entry(intel_connector,			\
			    &(dev)->mode_config.connector_list,	\
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		for_each_if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		for_each_if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		for_each_if ((1 << (domain)) & (mask))
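
/*
 * Illustrative sketch (not part of the driver): typical use of the iterator
 * macros above.  The example_count_planes() helper is hypothetical and the
 * block is compiled out.
 */
#if 0
static int example_count_planes(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	enum pipe pipe;
	int planes = 0;

	/* Walk every pipe the device exposes ... */
	for_each_pipe(dev_priv, pipe)
		DRM_DEBUG_KMS("pipe %c present\n", pipe_name(pipe));

	/* ... and every plane registered with the mode config. */
	for_each_intel_plane(&dev_priv->drm, plane)
		planes++;

	return planes;
}
#endif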
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	unsigned int bsd_engine;
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle *asle;
	void *rvda;
	const void *vbt;
	u32 vbt_size;
	u32 *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_fence_reg {
	struct list_head link;
	struct drm_i915_private *i915;
	struct i915_vma *vma;
	int pin_count;
	int id;
	/**
	 * Whether the tiling parameters for the currently
	 * associated fence register have changed. Note that
	 * for the purposes of tracking tiling changes we also
	 * treat the unfenced register, the register slot that
	 * the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	bool dirty;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
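
/*
 * Illustrative sketch (not part of the driver): how an encoder might fill in
 * an intel_link_m_n block with intel_link_compute_m_n(), declared above.
 * The bpp/lane/clock values and the dp_m_n destination field are assumptions
 * for the example; the block is compiled out.
 */
#if 0
static void example_compute_dp_m_n(struct intel_crtc_state *pipe_config)
{
	int bpp = 24;			/* bits per pixel on the link */
	int lane_count = 4;		/* DP lanes in use */
	int pixel_clock = 148500;	/* kHz, e.g. 1080p60 */
	int link_clock = 270000;	/* kHz, i.e. HBR */

	intel_link_compute_m_n(bpp, lane_count, pixel_clock, link_clock,
			       &pipe_config->dp_m_n);
}
#endif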
struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state,
			     unsigned int *crtc_vblank_mask);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   const struct drm_display_mode *adjusted_mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_request *req,
			  uint32_t flags);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op);

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint32_t val, bool trace);
};

struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};
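
/*
 * Illustrative sketch (not part of the driver): querying which forcewake
 * domains a register access needs with intel_uncore_forcewake_for_reg(),
 * declared above.  GEN6_RP_CONTROL is only an example register; the block is
 * compiled out.
 */
#if 0
static void example_forcewake_query(struct drm_i915_private *dev_priv)
{
	enum forcewake_domains fw;

	fw = intel_uncore_forcewake_for_reg(dev_priv, GEN6_RP_CONTROL,
					    FW_REG_READ | FW_REG_WRITE);
	if (fw & FORCEWAKE_RENDER)
		DRM_DEBUG_DRIVER("render well must be awake for this reg\n");
}
#endif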
struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;

	enum forcewake_domains fw_domains;
	enum forcewake_domains fw_domains_active;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned wake_count;
		struct hrtimer timer;
		i915_reg_t reg_set;
		u32 val_set;
		u32 val_clear;
		i915_reg_t reg_ack;
		i915_reg_t reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];

	int unclaimed_mmio_check;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
	     (domain__)++) \
		for_each_if ((mask__) & (domain__)->mask)

#define for_each_fw_domain(domain__, dev_priv__) \
	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

#define DEV_INFO_FOR_EACH_FLAG(func) \
	/* Keep is_* in chronological order */ \
	func(is_mobile); \
	func(is_i85x); \
	func(is_i915g); \
	func(is_i945gm); \
	func(is_g33); \
	func(is_g4x); \
	func(is_pineview); \
	func(is_broadwater); \
	func(is_crestline); \
	func(is_ivybridge); \
	func(is_valleyview); \
	func(is_cherryview); \
	func(is_haswell); \
	func(is_broadwell); \
	func(is_skylake); \
	func(is_broxton); \
	func(is_kabylake); \
	func(is_preliminary); \
	/* Keep has_* in alphabetical order */ \
	func(has_64bit_reloc); \
	func(has_csr); \
	func(has_ddi); \
	func(has_dp_mst); \
	func(has_fbc); \
	func(has_fpga_dbg); \
	func(has_gmbus_irq); \
	func(has_gmch_display); \
	func(has_guc); \
	func(has_hotplug); \
	func(has_hw_contexts); \
	func(has_l3_dpf); \
	func(has_llc); \
	func(has_logical_ring_contexts); \
	func(has_overlay); \
	func(has_pipe_cxsr); \
	func(has_pooled_eu); \
	func(has_psr); \
	func(has_rc6); \
	func(has_rc6p); \
	func(has_resource_streamer); \
	func(has_runtime_pm); \
	func(has_snoop); \
	func(cursor_needs_physical); \
	func(hws_needs_physical); \
	func(overlay_needs_physical); \
	func(supports_tv)
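
/*
 * Illustrative sketch (not part of the driver): DEV_INFO_FOR_EACH_FLAG() is
 * an X-macro; passing it a one-argument functor stamps out code per flag,
 * similar to how the debugfs code dumps capabilities.  The functor and helper
 * names here are hypothetical and the block is compiled out.
 */
#if 0
#define EXAMPLE_PRINT_FLAG(name) \
	printk(KERN_DEBUG #name ": %s\n", yesno(info->name))

static void example_dump_device_flags(const struct intel_device_info *info)
{
	DEV_INFO_FOR_EACH_FLAG(EXAMPLE_PRINT_FLAG);
}
#undef EXAMPLE_PRINT_FLAG
#endif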
struct sseu_dev_info {
	u8 slice_mask;
	u8 subslice_mask;
	u8 eu_total;
	u8 eu_per_subslice;
	u8 min_eu_in_pool;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
{
	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}

struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u16 gen_mask;
	u8 ring_mask; /* Rings supported by the HW */
	u8 num_rings;
#define DEFINE_FLAG(name) u8 name:1
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
	u16 ddb_size; /* in blocks */
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	struct sseu_dev_info sseu;

	struct color_luts {
		u16 degamma_lut_size;
		u16 gamma_lut_size;
	} color;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;
	struct timeval boottime;
	struct timeval uptime;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error;	/* gen6+ */
	u32 err_int;	/* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore;
	struct drm_i915_error_object *guc_log;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool waiting;
		int num_waiters;
		int hangcheck_score;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		int num_requests;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object *wa_ctx;

		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 context;
			u32 seqno;
			u32 head;
			u32 tail;
		} *requests, execlist[2];

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting any more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
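
/*
 * Illustrative sketch (not part of the driver): the ban policy the fields in
 * i915_ctx_hang_stats describe.  A context that is blamed for another hang
 * within ban_period_seconds of the previous one gets banned.  The helper name
 * and the exact bookkeeping are hypothetical; the block is compiled out.
 */
#if 0
static bool example_context_should_be_banned(struct i915_ctx_hang_stats *hs)
{
	unsigned long elapsed = get_seconds() - hs->guilty_ts;

	if (hs->banned)
		return true;

	/* A repeat offence inside the ban window => permanent ban. */
	return hs->batch_active && elapsed <= hs->ban_period_seconds;
}
#endif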
/**
 * struct i915_gem_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct i915_gem_context {
	struct kref ref;
	struct drm_i915_private *i915;
	struct drm_i915_file_private *file_priv;
	struct i915_hw_ppgtt *ppgtt;
	struct pid *pid;
	const char *name;

	struct i915_ctx_hang_stats hang_stats;

	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	BIT(1)

	/* Unique identifier for this context, used by the hw for tracking */
	unsigned int hw_id;
	u32 user_handle;

	u32 ggtt_alignment;

	struct intel_context {
		struct i915_vma *state;
		struct intel_ring *ring;
		uint32_t *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;
		bool initialised;
	} engine[I915_NUM_ENGINES];
	u32 ring_size;
	u32 desc_template;
	struct atomic_notifier_head status_notifier;
	bool execlists_force_single_submission;

	struct list_head link;

	u8 remap_slice;
	bool closed:1;
};

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;

	bool underrun_detected;
	struct work_struct underrun_work;

	struct intel_fbc_state_cache {
		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
		} plane;

		struct {
			u64 ilk_ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
			unsigned int tiling_mode;
		} fb;
	} state_cache;

	struct intel_fbc_reg_params {
		struct {
			enum pipe pipe;
			enum plane plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			u64 ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
		} fb;

		int cfb_size;
	} params;

	struct intel_fbc_work {
		bool scheduled;
		u32 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;
};
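
/*
 * Illustrative sketch (not part of the driver): each i915_gem_context keeps
 * one intel_context slot per engine; submission code indexes it with the
 * engine id and pins the backing state before use.  The helper below is
 * hypothetical and only shows the lookup pattern; the block is compiled out.
 */
#if 0
static struct intel_context *
example_lookup_engine_state(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	/* pin_count > 0 means the context image is already resident. */
	if (ce->pin_count)
		DRM_DEBUG_DRIVER("context already pinned on %s\n",
				 engine->name);

	return ce;
}
#endif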
/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kabypoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};
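
/*
 * Illustrative sketch (not part of the driver): the QUIRK_* bits above are
 * or'ed into dev_priv->quirks at init time and then tested wherever the
 * quirky behaviour matters.  The helper below is hypothetical; the block is
 * compiled out.
 */
#if 0
static u32 example_scale_brightness(struct drm_i915_private *dev_priv,
				    u32 level, u32 max)
{
	/* Some panels wire the backlight PWM backwards; invert when quirked. */
	if (dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
		return max - level;

	return level;
}
#endif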
struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked */
	u32 pm_intr_keep;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 boost_freq;		/* Frequency to request when wait boosting */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	u8 up_threshold;	/* Current %busy required to upclock */
	u8 down_threshold;	/* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	spinlock_t client_lock;
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work autoenable_work;
	unsigned boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};
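
/*
 * Illustrative sketch (not part of the driver): a minimal i915_power_well_ops
 * table for a well that is always powered, roughly the shape of the driver's
 * "always-on" well handling.  The function names here are hypothetical; the
 * block is compiled out.
 */
#if 0
static void example_noop_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
}

static bool example_power_well_enabled(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	return true;
}

static const struct i915_power_well_ops example_always_on_ops = {
	.sync_hw = example_noop_power_well,
	.enable = example_noop_power_well,
	.disable = example_noop_power_well,
	.is_enabled = example_power_well_enabled,
};
#endif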
/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	/* unique identifier for this power well */
	unsigned long id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU). These objects may or may
	 * not actually have any pages attached.
	 */
	struct list_head unbound_list;

	/** List of all objects in gtt_space, currently mmaped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/* the indicator for dispatch video commands on two BSD rings */
	atomic_t bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	u64 object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that the hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;

	unsigned long flags;
#define I915_RESET_IN_PROGRESS	0
#define I915_WEDGED		(BITS_PER_LONG - 1)

	/**
	 * Waitqueue to signal when a hang is detected. Used by waiters
	 * to release the struct_mutex for the reset to proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For missed irq/seqno simulation. */
	unsigned long test_irq_rings;
};
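
/*
 * Illustrative sketch (not part of the driver): the reset/wedged state above
 * is queried by testing bits in gpu_error.flags; this mirrors the style of
 * the driver's reset-in-progress/terminally-wedged helpers, but the names
 * here are hypothetical and the block is compiled out.
 */
#if 0
static bool example_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
}

static bool example_terminally_wedged(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_WEDGED, &error->flags));
}
#endif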
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30

#define DDC_PIN_B  0x05
#define DDC_PIN_C  0x04
#define DDC_PIN_D  0x06

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;

	uint8_t alternate_aux_channel;
	uint8_t alternate_ddc_pin;

	uint8_t dp_boost_level;
	uint8_t hdmi_boost_level;
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct vlv_pipe_wm {
	uint16_t primary;
	uint16_t sprite[2];
	uint8_t cursor;
};

struct vlv_sr_wm {
	uint16_t plane;
	uint8_t cursor;
};

struct vlv_wm_values {
	struct vlv_pipe_wm pipe[3];
	struct vlv_sr_wm sr;
	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
	uint8_t level;
	bool cxsr;
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}

struct skl_ddb_allocation {
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_values {
	unsigned dirty_pipes;
	struct skl_ddb_allocation ddb;
};

struct skl_wm_level {
	bool plane_en;
	uint16_t plane_res_b;
	uint8_t plane_res_l;
};

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	atomic_t wakeref_count;
	bool suspended;
	bool irqs_enabled;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};
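
/*
 * Illustrative sketch (not part of the driver): any code path that touches
 * registers while the device may be runtime suspended brackets the access
 * with a wakeref, using the intel_runtime_pm_get()/_put() helpers declared
 * elsewhere in the driver.  The helper name and register are examples only;
 * the block is compiled out.
 */
#if 0
static u32 example_read_reg_with_wakeref(struct drm_i915_private *dev_priv,
					 i915_reg_t reg)
{
	u32 val;

	intel_runtime_pm_get(dev_priv);	/* wake the device (back to D0) */
	val = I915_READ(reg);
	intel_runtime_pm_put(dev_priv);	/* allow autosuspend again */

	return val;
}
#endif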
struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity
	 * or scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	i915_reg_t addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

/*
 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
 * allowing it for RCS as we don't foresee any requirement of having
 * a whitelist for other engines. When it is really required for
 * other engines then the limit needs to be increased.
 */
#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
	u32 hw_whitelist_count[I915_NUM_ENGINES];
};

struct i915_virtual_gpu {
	bool active;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

struct drm_i915_private {
	struct drm_device drm;

	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *requests;

	const struct intel_device_info info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_guc guc;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	uint32_t psr_mmio_base;

	uint32_t pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct i915_gem_context *kernel_context;
	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct i915_vma *semaphore;

	struct drm_dma_handle *status_page_dmah;
	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* protects the mmio flip data */
	spinlock_t mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct mutex sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_imr;
	u32 pm_ier;
	u32 pm_rps_events;
	u32 pm_guc_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
	unsigned int max_dotclk_freq;
	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int czclk_freq;

	struct {
		unsigned int vco, ref;
	} cdclk_pll;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;
	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct mutex mm_lock;

	/* The hw wants to have a stable context identifier for the lifetime
	 * of the context (for OA, PASID, faults, etc). This is limited
	 * in execlists to 21 bits.
	 */
	struct ida context_hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */

	/* Kernel Modesetting */

	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	/* dpll and cdclk state is protected by connection_mutex */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	const struct intel_dpll_mgr *dpll_mgr;

	/*
	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms
	 * plls share registers.
	 */
	struct mutex dpll_lock;

	unsigned int active_crtcs;
	unsigned int min_pixclk[I915_MAX_PIPES];

	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	u32 edram_cap;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* the fbdev registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;
#endif

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 *
	 */
	struct mutex av_mutex;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool suspended_to_idle;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		uint16_t skl_latency[8];

		/*
		 * The skl_wm_values structure is a bit too big for stack
		 * allocation, so we keep the staging struct where we store
		 * intermediate results here instead.
		 */
		struct skl_wm_values skl_results;

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
		};

		uint8_t max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * cstate->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;

		/*
		 * Set during HW readout of watermarks/DDB. Some platforms
		 * need to know when we're still using BIOS-provided values
		 * (which we don't fully trust).
2083 */ 2084 bool distrust_bios_wm; 2085 } wm; 2086 2087 struct i915_runtime_pm pm; 2088 2089 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 2090 struct { 2091 void (*resume)(struct drm_i915_private *); 2092 void (*cleanup_engine)(struct intel_engine_cs *engine); 2093 2094 struct list_head timelines; 2095 struct i915_gem_timeline global_timeline; 2096 u32 active_requests; 2097 2098 /** 2099 * Is the GPU currently considered idle, or busy executing 2100 * userspace requests? Whilst idle, we allow runtime power 2101 * management to power down the hardware and display clocks. 2102 * In order to reduce the effect on performance, there 2103 * is a slight delay before we do so. 2104 */ 2105 bool awake; 2106 2107 /** 2108 * We leave the user IRQ off as much as possible, 2109 * but this means that requests will finish and never 2110 * be retired once the system goes idle. Set a timer to 2111 * fire periodically while the ring is running. When it 2112 * fires, go retire requests. 2113 */ 2114 struct delayed_work retire_work; 2115 2116 /** 2117 * When we detect an idle GPU, we want to turn on 2118 * powersaving features. So once we see that there 2119 * are no more requests outstanding and no more 2120 * arrive within a small period of time, we fire 2121 * off the idle_work. 2122 */ 2123 struct delayed_work idle_work; 2124 2125 ktime_t last_init_time; 2126 } gt; 2127 2128 /* perform PHY state sanity checks? */ 2129 bool chv_phy_assert[2]; 2130 2131 /* Used to save the pipe-to-encoder mapping for audio */ 2132 struct intel_encoder *av_enc_map[I915_MAX_PIPES]; 2133 2134 /* 2135 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 2136 * will be rejected. Instead look for a better place. 2137 */ 2138 }; 2139 2140 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 2141 { 2142 return container_of(dev, struct drm_i915_private, drm); 2143 } 2144 2145 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 2146 { 2147 return to_i915(dev_get_drvdata(kdev)); 2148 } 2149 2150 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 2151 { 2152 return container_of(guc, struct drm_i915_private, guc); 2153 } 2154 2155 /* Simple iterator over all initialised engines */ 2156 #define for_each_engine(engine__, dev_priv__, id__) \ 2157 for ((id__) = 0; \ 2158 (id__) < I915_NUM_ENGINES; \ 2159 (id__)++) \ 2160 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 2161 2162 #define __mask_next_bit(mask) ({ \ 2163 int __idx = ffs(mask) - 1; \ 2164 mask &= ~BIT(__idx); \ 2165 __idx; \ 2166 }) 2167 2168 /* Iterator over subset of engines selected by mask */ 2169 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ 2170 for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \ 2171 tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; ) 2172 2173 enum hdmi_force_audio { 2174 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 2175 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2176 HDMI_AUDIO_AUTO, /* trust EDID */ 2177 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2178 }; 2179 2180 #define I915_GTT_OFFSET_NONE ((u32)-1) 2181 2182 struct drm_i915_gem_object_ops { 2183 unsigned int flags; 2184 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1 2185 #define I915_GEM_OBJECT_IS_SHRINKABLE 0x2 2186 2187 /* Interface between the GEM object and its backing storage. 
2188 * get_pages() is called once prior to the use of the associated set 2189 * of pages before to binding them into the GTT, and put_pages() is 2190 * called after we no longer need them. As we expect there to be 2191 * associated cost with migrating pages between the backing storage 2192 * and making them available for the GPU (e.g. clflush), we may hold 2193 * onto the pages after they are no longer referenced by the GPU 2194 * in case they may be used again shortly (for example migrating the 2195 * pages to a different memory domain within the GTT). put_pages() 2196 * will therefore most likely be called when the object itself is 2197 * being released or under memory pressure (where we attempt to 2198 * reap pages for the shrinker). 2199 */ 2200 struct sg_table *(*get_pages)(struct drm_i915_gem_object *); 2201 void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); 2202 2203 int (*dmabuf_export)(struct drm_i915_gem_object *); 2204 void (*release)(struct drm_i915_gem_object *); 2205 }; 2206 2207 /* 2208 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2209 * considered to be the frontbuffer for the given plane interface-wise. This 2210 * doesn't mean that the hw necessarily already scans it out, but that any 2211 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2212 * 2213 * We have one bit per pipe and per scanout plane type. 2214 */ 2215 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 2216 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2217 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2218 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2219 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2220 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2221 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ 2222 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2223 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2224 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2225 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2226 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2227 2228 struct drm_i915_gem_object { 2229 struct drm_gem_object base; 2230 2231 const struct drm_i915_gem_object_ops *ops; 2232 2233 /** List of VMAs backed by this object */ 2234 struct list_head vma_list; 2235 struct rb_root vma_tree; 2236 2237 /** Stolen memory for this object, instead of being backed by shmem. */ 2238 struct drm_mm_node *stolen; 2239 struct list_head global_link; 2240 union { 2241 struct rcu_head rcu; 2242 struct llist_node freed; 2243 }; 2244 2245 /** 2246 * Whether the object is currently in the GGTT mmap. 2247 */ 2248 struct list_head userfault_link; 2249 2250 /** Used in execbuf to temporarily hold a ref */ 2251 struct list_head obj_exec_link; 2252 2253 struct list_head batch_pool_link; 2254 2255 unsigned long flags; 2256 2257 /** 2258 * Have we taken a reference for the object for incomplete GPU 2259 * activity? 2260 */ 2261 #define I915_BO_ACTIVE_REF 0 2262 2263 /* 2264 * Is the object to be mapped as read-only to the GPU 2265 * Only honoured if hardware has relevant pte bit 2266 */ 2267 unsigned long gt_ro:1; 2268 unsigned int cache_level:3; 2269 unsigned int cache_dirty:1; 2270 2271 atomic_t frontbuffer_bits; 2272 unsigned int frontbuffer_ggtt_origin; /* write once */ 2273 2274 /** Current tiling stride for the object, if it's tiled. 
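 * Tiling mode and stride are packed into a single word: strides of tiled
 * objects are taken to be multiples of FENCE_MINIMUM_STRIDE, which leaves
 * the low bits free for the tiling mode (assumed encoding; see the
 * accessors further below). A sketch of how they are unpacked:
 *
 *	tiling = obj->tiling_and_stride & TILING_MASK;
 *	stride = obj->tiling_and_stride & STRIDE_MASK;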
*/ 2275 unsigned int tiling_and_stride; 2276 #define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */ 2277 #define TILING_MASK (FENCE_MINIMUM_STRIDE-1) 2278 #define STRIDE_MASK (~TILING_MASK) 2279 2280 /** Count of VMA actually bound by this object */ 2281 unsigned int bind_count; 2282 unsigned int active_count; 2283 unsigned int pin_display; 2284 2285 struct { 2286 struct mutex lock; /* protects the pages and their use */ 2287 atomic_t pages_pin_count; 2288 2289 struct sg_table *pages; 2290 void *mapping; 2291 2292 struct i915_gem_object_page_iter { 2293 struct scatterlist *sg_pos; 2294 unsigned int sg_idx; /* in pages, but 32bit eek! */ 2295 2296 struct radix_tree_root radix; 2297 struct mutex lock; /* protects this cache */ 2298 } get_page; 2299 2300 /** 2301 * Advice: are the backing pages purgeable? 2302 */ 2303 unsigned int madv:2; 2304 2305 /** 2306 * This is set if the object has been written to since the 2307 * pages were last acquired. 2308 */ 2309 bool dirty:1; 2310 2311 /** 2312 * This is set if the object has been pinned due to unknown 2313 * swizzling. 2314 */ 2315 bool quirked:1; 2316 } mm; 2317 2318 /** Breadcrumb of last rendering to the buffer. 2319 * There can only be one writer, but we allow for multiple readers. 2320 * If there is a writer that necessarily implies that all other 2321 * read requests are complete - but we may only be lazily clearing 2322 * the read requests. A read request is naturally the most recent 2323 * request on a ring, so we may have two different write and read 2324 * requests on one ring where the write request is older than the 2325 * read request. This allows for the CPU to read from an active 2326 * buffer by only waiting for the write to complete. 2327 */ 2328 struct reservation_object *resv; 2329 2330 /** References from framebuffers, locks out tiling changes. */ 2331 unsigned long framebuffer_references; 2332 2333 /** Record of address bit 17 of each page at last unbind. */ 2334 unsigned long *bit_17; 2335 2336 struct i915_gem_userptr { 2337 uintptr_t ptr; 2338 unsigned read_only :1; 2339 2340 struct i915_mm_struct *mm; 2341 struct i915_mmu_object *mmu_object; 2342 struct work_struct *work; 2343 } userptr; 2344 2345 /** for phys allocated objects */ 2346 struct drm_dma_handle *phys_handle; 2347 2348 struct reservation_object __builtin_resv; 2349 }; 2350 2351 static inline struct drm_i915_gem_object * 2352 to_intel_bo(struct drm_gem_object *gem) 2353 { 2354 /* Assert that to_intel_bo(NULL) == NULL */ 2355 BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base)); 2356 2357 return container_of(gem, struct drm_i915_gem_object, base); 2358 } 2359 2360 /** 2361 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle 2362 * @filp: DRM file private date 2363 * @handle: userspace handle 2364 * 2365 * Returns: 2366 * 2367 * A pointer to the object named by the handle if such exists on @filp, NULL 2368 * otherwise. This object is only valid whilst under the RCU read lock, and 2369 * note carefully the object may be in the process of being destroyed. 
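 *
 * A minimal usage sketch, mirroring i915_gem_object_lookup() below (take a
 * reference with kref_get_unless_zero() before leaving the RCU section if
 * the object must outlive it):
 *
 *	rcu_read_lock();
 *	obj = i915_gem_object_lookup_rcu(file, handle);
 *	if (obj && !kref_get_unless_zero(&obj->base.refcount))
 *		obj = NULL;
 *	rcu_read_unlock();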
2370 */ 2371 static inline struct drm_i915_gem_object * 2372 i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle) 2373 { 2374 #ifdef CONFIG_LOCKDEP 2375 WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map)); 2376 #endif 2377 return idr_find(&file->object_idr, handle); 2378 } 2379 2380 static inline struct drm_i915_gem_object * 2381 i915_gem_object_lookup(struct drm_file *file, u32 handle) 2382 { 2383 struct drm_i915_gem_object *obj; 2384 2385 rcu_read_lock(); 2386 obj = i915_gem_object_lookup_rcu(file, handle); 2387 if (obj && !kref_get_unless_zero(&obj->base.refcount)) 2388 obj = NULL; 2389 rcu_read_unlock(); 2390 2391 return obj; 2392 } 2393 2394 __deprecated 2395 extern struct drm_gem_object * 2396 drm_gem_object_lookup(struct drm_file *file, u32 handle); 2397 2398 __attribute__((nonnull)) 2399 static inline struct drm_i915_gem_object * 2400 i915_gem_object_get(struct drm_i915_gem_object *obj) 2401 { 2402 drm_gem_object_reference(&obj->base); 2403 return obj; 2404 } 2405 2406 __deprecated 2407 extern void drm_gem_object_reference(struct drm_gem_object *); 2408 2409 __attribute__((nonnull)) 2410 static inline void 2411 i915_gem_object_put(struct drm_i915_gem_object *obj) 2412 { 2413 __drm_gem_object_unreference(&obj->base); 2414 } 2415 2416 __deprecated 2417 extern void drm_gem_object_unreference(struct drm_gem_object *); 2418 2419 __deprecated 2420 extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); 2421 2422 static inline bool 2423 i915_gem_object_is_dead(const struct drm_i915_gem_object *obj) 2424 { 2425 return atomic_read(&obj->base.refcount.refcount) == 0; 2426 } 2427 2428 static inline bool 2429 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) 2430 { 2431 return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; 2432 } 2433 2434 static inline bool 2435 i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) 2436 { 2437 return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE; 2438 } 2439 2440 static inline bool 2441 i915_gem_object_is_active(const struct drm_i915_gem_object *obj) 2442 { 2443 return obj->active_count; 2444 } 2445 2446 static inline bool 2447 i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj) 2448 { 2449 return test_bit(I915_BO_ACTIVE_REF, &obj->flags); 2450 } 2451 2452 static inline void 2453 i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj) 2454 { 2455 lockdep_assert_held(&obj->base.dev->struct_mutex); 2456 __set_bit(I915_BO_ACTIVE_REF, &obj->flags); 2457 } 2458 2459 static inline void 2460 i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj) 2461 { 2462 lockdep_assert_held(&obj->base.dev->struct_mutex); 2463 __clear_bit(I915_BO_ACTIVE_REF, &obj->flags); 2464 } 2465 2466 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj); 2467 2468 static inline unsigned int 2469 i915_gem_object_get_tiling(struct drm_i915_gem_object *obj) 2470 { 2471 return obj->tiling_and_stride & TILING_MASK; 2472 } 2473 2474 static inline bool 2475 i915_gem_object_is_tiled(struct drm_i915_gem_object *obj) 2476 { 2477 return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; 2478 } 2479 2480 static inline unsigned int 2481 i915_gem_object_get_stride(struct drm_i915_gem_object *obj) 2482 { 2483 return obj->tiling_and_stride & STRIDE_MASK; 2484 } 2485 2486 static inline struct intel_engine_cs * 2487 i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) 2488 { 2489 struct intel_engine_cs *engine = NULL; 2490 struct dma_fence *fence; 
2491 2492 rcu_read_lock(); 2493 fence = reservation_object_get_excl_rcu(obj->resv); 2494 rcu_read_unlock(); 2495 2496 if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) 2497 engine = to_request(fence)->engine; 2498 dma_fence_put(fence); 2499 2500 return engine; 2501 } 2502 2503 static inline struct i915_vma *i915_vma_get(struct i915_vma *vma) 2504 { 2505 i915_gem_object_get(vma->obj); 2506 return vma; 2507 } 2508 2509 static inline void i915_vma_put(struct i915_vma *vma) 2510 { 2511 i915_gem_object_put(vma->obj); 2512 } 2513 2514 /* 2515 * Optimised SGL iterator for GEM objects 2516 */ 2517 static __always_inline struct sgt_iter { 2518 struct scatterlist *sgp; 2519 union { 2520 unsigned long pfn; 2521 dma_addr_t dma; 2522 }; 2523 unsigned int curr; 2524 unsigned int max; 2525 } __sgt_iter(struct scatterlist *sgl, bool dma) { 2526 struct sgt_iter s = { .sgp = sgl }; 2527 2528 if (s.sgp) { 2529 s.max = s.curr = s.sgp->offset; 2530 s.max += s.sgp->length; 2531 if (dma) 2532 s.dma = sg_dma_address(s.sgp); 2533 else 2534 s.pfn = page_to_pfn(sg_page(s.sgp)); 2535 } 2536 2537 return s; 2538 } 2539 2540 static inline struct scatterlist *____sg_next(struct scatterlist *sg) 2541 { 2542 ++sg; 2543 if (unlikely(sg_is_chain(sg))) 2544 sg = sg_chain_ptr(sg); 2545 return sg; 2546 } 2547 2548 /** 2549 * __sg_next - return the next scatterlist entry in a list 2550 * @sg: The current sg entry 2551 * 2552 * Description: 2553 * If the entry is the last, return NULL; otherwise, step to the next 2554 * element in the array (@sg@+1). If that's a chain pointer, follow it; 2555 * otherwise just return the pointer to the current element. 2556 **/ 2557 static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2558 { 2559 #ifdef CONFIG_DEBUG_SG 2560 BUG_ON(sg->sg_magic != SG_MAGIC); 2561 #endif 2562 return sg_is_last(sg) ? NULL : ____sg_next(sg); 2563 } 2564 2565 /** 2566 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table 2567 * @__dmap: DMA address (output) 2568 * @__iter: 'struct sgt_iter' (iterator state, internal) 2569 * @__sgt: sg_table to iterate over (input) 2570 */ 2571 #define for_each_sgt_dma(__dmap, __iter, __sgt) \ 2572 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ 2573 ((__dmap) = (__iter).dma + (__iter).curr); \ 2574 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2575 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) 2576 2577 /** 2578 * for_each_sgt_page - iterate over the pages of the given sg_table 2579 * @__pp: page pointer (output) 2580 * @__iter: 'struct sgt_iter' (iterator state, internal) 2581 * @__sgt: sg_table to iterate over (input) 2582 */ 2583 #define for_each_sgt_page(__pp, __iter, __sgt) \ 2584 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ 2585 ((__pp) = (__iter).pfn == 0 ? NULL : \ 2586 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ 2587 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2588 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) 2589 2590 /* 2591 * A command that requires special handling by the command parser. 2592 */ 2593 struct drm_i915_cmd_descriptor { 2594 /* 2595 * Flags describing how the command parser processes the command. 
2596 * 2597 * CMD_DESC_FIXED: The command has a fixed length if this is set, 2598 * a length mask if not set 2599 * CMD_DESC_SKIP: The command is allowed but does not follow the 2600 * standard length encoding for the opcode range in 2601 * which it falls 2602 * CMD_DESC_REJECT: The command is never allowed 2603 * CMD_DESC_REGISTER: The command should be checked against the 2604 * register whitelist for the appropriate ring 2605 * CMD_DESC_MASTER: The command is allowed if the submitting process 2606 * is the DRM master 2607 */ 2608 u32 flags; 2609 #define CMD_DESC_FIXED (1<<0) 2610 #define CMD_DESC_SKIP (1<<1) 2611 #define CMD_DESC_REJECT (1<<2) 2612 #define CMD_DESC_REGISTER (1<<3) 2613 #define CMD_DESC_BITMASK (1<<4) 2614 #define CMD_DESC_MASTER (1<<5) 2615 2616 /* 2617 * The command's unique identification bits and the bitmask to get them. 2618 * This isn't strictly the opcode field as defined in the spec and may 2619 * also include type, subtype, and/or subop fields. 2620 */ 2621 struct { 2622 u32 value; 2623 u32 mask; 2624 } cmd; 2625 2626 /* 2627 * The command's length. The command is either fixed length (i.e. does 2628 * not include a length field) or has a length field mask. The flag 2629 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has 2630 * a length mask. All command entries in a command table must include 2631 * length information. 2632 */ 2633 union { 2634 u32 fixed; 2635 u32 mask; 2636 } length; 2637 2638 /* 2639 * Describes where to find a register address in the command to check 2640 * against the ring's register whitelist. Only valid if flags has the 2641 * CMD_DESC_REGISTER bit set. 2642 * 2643 * A non-zero step value implies that the command may access multiple 2644 * registers in sequence (e.g. LRI), in that case step gives the 2645 * distance in dwords between individual offset fields. 2646 */ 2647 struct { 2648 u32 offset; 2649 u32 mask; 2650 u32 step; 2651 } reg; 2652 2653 #define MAX_CMD_DESC_BITMASKS 3 2654 /* 2655 * Describes command checks where a particular dword is masked and 2656 * compared against an expected value. If the command does not match 2657 * the expected value, the parser rejects it. Only valid if flags has 2658 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero 2659 * are valid. 2660 * 2661 * If the check specifies a non-zero condition_mask then the parser 2662 * only performs the check when the bits specified by condition_mask 2663 * are non-zero. 2664 */ 2665 struct { 2666 u32 offset; 2667 u32 mask; 2668 u32 expected; 2669 u32 condition_offset; 2670 u32 condition_mask; 2671 } bits[MAX_CMD_DESC_BITMASKS]; 2672 }; 2673 2674 /* 2675 * A table of commands requiring special handling by the command parser. 2676 * 2677 * Each engine has an array of tables. Each table consists of an array of 2678 * command descriptors, which must be sorted with command opcodes in 2679 * ascending order. 2680 */ 2681 struct drm_i915_cmd_table { 2682 const struct drm_i915_cmd_descriptor *table; 2683 int count; 2684 }; 2685 2686 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. 
*/ 2687 #define __I915__(p) ({ \ 2688 struct drm_i915_private *__p; \ 2689 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ 2690 __p = (struct drm_i915_private *)p; \ 2691 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ 2692 __p = to_i915((struct drm_device *)p); \ 2693 else \ 2694 BUILD_BUG(); \ 2695 __p; \ 2696 }) 2697 #define INTEL_INFO(p) (&__I915__(p)->info) 2698 2699 #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) 2700 #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) 2701 2702 #define REVID_FOREVER 0xff 2703 #define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision) 2704 2705 #define GEN_FOREVER (0) 2706 /* 2707 * Returns true if Gen is in inclusive range [Start, End]. 2708 * 2709 * Use GEN_FOREVER for unbound start and or end. 2710 */ 2711 #define IS_GEN(dev_priv, s, e) ({ \ 2712 unsigned int __s = (s), __e = (e); \ 2713 BUILD_BUG_ON(!__builtin_constant_p(s)); \ 2714 BUILD_BUG_ON(!__builtin_constant_p(e)); \ 2715 if ((__s) != GEN_FOREVER) \ 2716 __s = (s) - 1; \ 2717 if ((__e) == GEN_FOREVER) \ 2718 __e = BITS_PER_LONG - 1; \ 2719 else \ 2720 __e = (e) - 1; \ 2721 !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \ 2722 }) 2723 2724 /* 2725 * Return true if revision is in range [since,until] inclusive. 2726 * 2727 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 2728 */ 2729 #define IS_REVID(p, since, until) \ 2730 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2731 2732 #define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577) 2733 #define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562) 2734 #define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x) 2735 #define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572) 2736 #define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g) 2737 #define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592) 2738 #define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772) 2739 #define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm) 2740 #define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater) 2741 #define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline) 2742 #define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42) 2743 #define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x) 2744 #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) 2745 #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) 2746 #define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview) 2747 #define IS_G33(dev_priv) ((dev_priv)->info.is_g33) 2748 #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) 2749 #define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge) 2750 #define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \ 2751 INTEL_DEVID(dev_priv) == 0x0152 || \ 2752 INTEL_DEVID(dev_priv) == 0x015a) 2753 #define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview) 2754 #define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview) 2755 #define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell) 2756 #define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell) 2757 #define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake) 2758 #define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton) 2759 #define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake) 2760 #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) 2761 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 2762 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 2763 #define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \ 2764 ((INTEL_DEVID(dev_priv) & 
0xf) == 0x6 || \ 2765 (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \ 2766 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)) 2767 /* ULX machines are also considered ULT. */ 2768 #define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ 2769 (INTEL_DEVID(dev_priv) & 0xf) == 0xe) 2770 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 2771 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2772 #define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ 2773 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) 2774 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 2775 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2776 /* ULX machines are also considered ULT. */ 2777 #define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ 2778 INTEL_DEVID(dev_priv) == 0x0A1E) 2779 #define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \ 2780 INTEL_DEVID(dev_priv) == 0x1913 || \ 2781 INTEL_DEVID(dev_priv) == 0x1916 || \ 2782 INTEL_DEVID(dev_priv) == 0x1921 || \ 2783 INTEL_DEVID(dev_priv) == 0x1926) 2784 #define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \ 2785 INTEL_DEVID(dev_priv) == 0x1915 || \ 2786 INTEL_DEVID(dev_priv) == 0x191E) 2787 #define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \ 2788 INTEL_DEVID(dev_priv) == 0x5913 || \ 2789 INTEL_DEVID(dev_priv) == 0x5916 || \ 2790 INTEL_DEVID(dev_priv) == 0x5921 || \ 2791 INTEL_DEVID(dev_priv) == 0x5926) 2792 #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2793 INTEL_DEVID(dev_priv) == 0x5915 || \ 2794 INTEL_DEVID(dev_priv) == 0x591E) 2795 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2796 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2797 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2798 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030) 2799 2800 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2801 2802 #define SKL_REVID_A0 0x0 2803 #define SKL_REVID_B0 0x1 2804 #define SKL_REVID_C0 0x2 2805 #define SKL_REVID_D0 0x3 2806 #define SKL_REVID_E0 0x4 2807 #define SKL_REVID_F0 0x5 2808 #define SKL_REVID_G0 0x6 2809 #define SKL_REVID_H0 0x7 2810 2811 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2812 2813 #define BXT_REVID_A0 0x0 2814 #define BXT_REVID_A1 0x1 2815 #define BXT_REVID_B0 0x3 2816 #define BXT_REVID_C0 0x9 2817 2818 #define IS_BXT_REVID(dev_priv, since, until) \ 2819 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) 2820 2821 #define KBL_REVID_A0 0x0 2822 #define KBL_REVID_B0 0x1 2823 #define KBL_REVID_C0 0x2 2824 #define KBL_REVID_D0 0x3 2825 #define KBL_REVID_E0 0x4 2826 2827 #define IS_KBL_REVID(dev_priv, since, until) \ 2828 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2829 2830 /* 2831 * The genX designation typically refers to the render engine, so render 2832 * capability related checks should use IS_GEN, while display and other checks 2833 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2834 * chips, etc.). 
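 *
 * For example (illustrative only): a render workaround common to all gen9
 * parts would be guarded by IS_GEN9(dev_priv), a Skylake-specific quirk by
 * IS_SKYLAKE(dev_priv), and an ILK+ display path by HAS_PCH_SPLIT(dev_priv).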
2835 */ 2836 #define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1))) 2837 #define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2))) 2838 #define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3))) 2839 #define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4))) 2840 #define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5))) 2841 #define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6))) 2842 #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) 2843 #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) 2844 2845 #define ENGINE_MASK(id) BIT(id) 2846 #define RENDER_RING ENGINE_MASK(RCS) 2847 #define BSD_RING ENGINE_MASK(VCS) 2848 #define BLT_RING ENGINE_MASK(BCS) 2849 #define VEBOX_RING ENGINE_MASK(VECS) 2850 #define BSD2_RING ENGINE_MASK(VCS2) 2851 #define ALL_ENGINES (~0) 2852 2853 #define HAS_ENGINE(dev_priv, id) \ 2854 (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id))) 2855 2856 #define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) 2857 #define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) 2858 #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) 2859 #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) 2860 2861 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2862 #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) 2863 #define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED)) 2864 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ 2865 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) 2866 #define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical) 2867 2868 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts) 2869 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts) 2870 #define USES_PPGTT(dev) (i915.enable_ppgtt) 2871 #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2) 2872 #define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3) 2873 2874 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2875 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2876 2877 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2878 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv)) 2879 2880 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2881 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 2882 (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \ 2883 IS_SKL_GT3(dev_priv) || \ 2884 IS_SKL_GT4(dev_priv)) 2885 2886 /* 2887 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2888 * even when in MSI mode. This results in spurious interrupt warnings if the 2889 * legacy irq no. is shared with another device. The kernel then disables that 2890 * interrupt source and so prevents the other device from working properly. 2891 */ 2892 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2893 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq) 2894 2895 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2896 * rows, which changed the alignment requirements and fence programming. 
2897 */ 2898 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ 2899 !(IS_I915G(dev_priv) || \ 2900 IS_I915GM(dev_priv))) 2901 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 2902 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 2903 2904 #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) 2905 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2906 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2907 2908 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 2909 2910 #define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst) 2911 2912 #define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) 2913 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2914 #define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr) 2915 #define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) 2916 #define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p) 2917 2918 #define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr) 2919 2920 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) 2921 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) 2922 2923 /* 2924 * For now, anything with a GuC requires uCode loading, and then supports 2925 * command submission once loaded. But these are logically independent 2926 * properties, so we have separate macros to test them. 2927 */ 2928 #define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc) 2929 #define HAS_GUC_UCODE(dev) (HAS_GUC(dev)) 2930 #define HAS_GUC_SCHED(dev) (HAS_GUC(dev)) 2931 2932 #define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer) 2933 2934 #define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu) 2935 2936 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2937 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2938 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2939 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2940 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2941 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2942 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2943 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2944 #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 2945 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2946 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2947 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2948 2949 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) 2950 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) 2951 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) 2952 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) 2953 #define HAS_PCH_LPT_LP(dev_priv) \ 2954 ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2955 #define HAS_PCH_LPT_H(dev_priv) \ 2956 ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) 2957 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) 2958 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) 2959 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) 2960 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) 2961 2962 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display) 2963 2964 #define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv)) 2965 2966 /* DPF == dynamic parity feature */ 2967 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf) 2968 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? 
\ 2969 2 : HAS_L3_DPF(dev_priv)) 2970 2971 #define GT_FREQUENCY_MULTIPLIER 50 2972 #define GEN9_FREQ_SCALER 3 2973 2974 #include "i915_trace.h" 2975 2976 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) 2977 { 2978 #ifdef CONFIG_INTEL_IOMMU 2979 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped) 2980 return true; 2981 #endif 2982 return false; 2983 } 2984 2985 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); 2986 extern int i915_resume_switcheroo(struct drm_device *dev); 2987 2988 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 2989 int enable_ppgtt); 2990 2991 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value); 2992 2993 /* i915_drv.c */ 2994 void __printf(3, 4) 2995 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 2996 const char *fmt, ...); 2997 2998 #define i915_report_error(dev_priv, fmt, ...) \ 2999 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 3000 3001 #ifdef CONFIG_COMPAT 3002 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 3003 unsigned long arg); 3004 #else 3005 #define i915_compat_ioctl NULL 3006 #endif 3007 extern const struct dev_pm_ops i915_pm_ops; 3008 3009 extern int i915_driver_load(struct pci_dev *pdev, 3010 const struct pci_device_id *ent); 3011 extern void i915_driver_unload(struct drm_device *dev); 3012 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); 3013 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); 3014 extern void i915_reset(struct drm_i915_private *dev_priv); 3015 extern int intel_guc_reset(struct drm_i915_private *dev_priv); 3016 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 3017 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); 3018 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 3019 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 3020 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 3021 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 3022 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 3023 3024 /* intel_hotplug.c */ 3025 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, 3026 u32 pin_mask, u32 long_mask); 3027 void intel_hpd_init(struct drm_i915_private *dev_priv); 3028 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 3029 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 3030 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 3031 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 3032 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 3033 3034 /* i915_irq.c */ 3035 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) 3036 { 3037 unsigned long delay; 3038 3039 if (unlikely(!i915.enable_hangcheck)) 3040 return; 3041 3042 /* Don't continually defer the hangcheck so that it is always run at 3043 * least once after work has been scheduled on any ring. Otherwise, 3044 * we will ignore a hung ring if a second ring is kept busy. 
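	 *
	 * The delay is passed through round_jiffies_up_relative() below so
	 * that the expiry lands on a whole-second boundary, letting the
	 * hangcheck timer coalesce with other deferred work and avoid
	 * needless wakeups.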
3045 */ 3046 3047 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); 3048 queue_delayed_work(system_long_wq, 3049 &dev_priv->gpu_error.hangcheck_work, delay); 3050 } 3051 3052 __printf(3, 4) 3053 void i915_handle_error(struct drm_i915_private *dev_priv, 3054 u32 engine_mask, 3055 const char *fmt, ...); 3056 3057 extern void intel_irq_init(struct drm_i915_private *dev_priv); 3058 int intel_irq_install(struct drm_i915_private *dev_priv); 3059 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 3060 3061 extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); 3062 extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, 3063 bool restore_forcewake); 3064 extern void intel_uncore_init(struct drm_i915_private *dev_priv); 3065 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 3066 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 3067 extern void intel_uncore_fini(struct drm_i915_private *dev_priv); 3068 extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, 3069 bool restore); 3070 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 3071 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 3072 enum forcewake_domains domains); 3073 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 3074 enum forcewake_domains domains); 3075 /* Like above but the caller must manage the uncore.lock itself. 3076 * Must be used with I915_READ_FW and friends. 3077 */ 3078 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 3079 enum forcewake_domains domains); 3080 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 3081 enum forcewake_domains domains); 3082 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 3083 3084 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 3085 3086 int intel_wait_for_register(struct drm_i915_private *dev_priv, 3087 i915_reg_t reg, 3088 const u32 mask, 3089 const u32 value, 3090 const unsigned long timeout_ms); 3091 int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, 3092 i915_reg_t reg, 3093 const u32 mask, 3094 const u32 value, 3095 const unsigned long timeout_ms); 3096 3097 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) 3098 { 3099 return dev_priv->gvt; 3100 } 3101 3102 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) 3103 { 3104 return dev_priv->vgpu.active; 3105 } 3106 3107 void 3108 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 3109 u32 status_mask); 3110 3111 void 3112 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 3113 u32 status_mask); 3114 3115 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 3116 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 3117 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 3118 uint32_t mask, 3119 uint32_t bits); 3120 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 3121 uint32_t interrupt_mask, 3122 uint32_t enabled_irq_mask); 3123 static inline void 3124 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 3125 { 3126 ilk_update_display_irq(dev_priv, bits, bits); 3127 } 3128 static inline void 3129 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 3130 { 3131 ilk_update_display_irq(dev_priv, bits, 0); 3132 } 3133 void 
bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 3134 enum pipe pipe, 3135 uint32_t interrupt_mask, 3136 uint32_t enabled_irq_mask); 3137 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 3138 enum pipe pipe, uint32_t bits) 3139 { 3140 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 3141 } 3142 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 3143 enum pipe pipe, uint32_t bits) 3144 { 3145 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 3146 } 3147 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 3148 uint32_t interrupt_mask, 3149 uint32_t enabled_irq_mask); 3150 static inline void 3151 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 3152 { 3153 ibx_display_interrupt_update(dev_priv, bits, bits); 3154 } 3155 static inline void 3156 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 3157 { 3158 ibx_display_interrupt_update(dev_priv, bits, 0); 3159 } 3160 3161 /* i915_gem.c */ 3162 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 3163 struct drm_file *file_priv); 3164 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 3165 struct drm_file *file_priv); 3166 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 3167 struct drm_file *file_priv); 3168 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 3169 struct drm_file *file_priv); 3170 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 3171 struct drm_file *file_priv); 3172 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 3173 struct drm_file *file_priv); 3174 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 3175 struct drm_file *file_priv); 3176 int i915_gem_execbuffer(struct drm_device *dev, void *data, 3177 struct drm_file *file_priv); 3178 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 3179 struct drm_file *file_priv); 3180 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 3181 struct drm_file *file_priv); 3182 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3183 struct drm_file *file); 3184 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3185 struct drm_file *file); 3186 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 3187 struct drm_file *file_priv); 3188 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 3189 struct drm_file *file_priv); 3190 int i915_gem_set_tiling(struct drm_device *dev, void *data, 3191 struct drm_file *file_priv); 3192 int i915_gem_get_tiling(struct drm_device *dev, void *data, 3193 struct drm_file *file_priv); 3194 void i915_gem_init_userptr(struct drm_i915_private *dev_priv); 3195 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 3196 struct drm_file *file); 3197 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 3198 struct drm_file *file_priv); 3199 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 3200 struct drm_file *file_priv); 3201 int i915_gem_load_init(struct drm_device *dev); 3202 void i915_gem_load_cleanup(struct drm_device *dev); 3203 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 3204 int i915_gem_freeze(struct drm_i915_private *dev_priv); 3205 int i915_gem_freeze_late(struct drm_i915_private *dev_priv); 3206 3207 void *i915_gem_object_alloc(struct drm_device *dev); 3208 void i915_gem_object_free(struct drm_i915_gem_object *obj); 3209 void i915_gem_object_init(struct drm_i915_gem_object *obj, 3210 const struct drm_i915_gem_object_ops 
*ops); 3211 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, 3212 u64 size); 3213 struct drm_i915_gem_object *i915_gem_object_create_from_data( 3214 struct drm_device *dev, const void *data, size_t size); 3215 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); 3216 void i915_gem_free_object(struct drm_gem_object *obj); 3217 3218 struct i915_vma * __must_check 3219 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 3220 const struct i915_ggtt_view *view, 3221 u64 size, 3222 u64 alignment, 3223 u64 flags); 3224 3225 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 3226 u32 flags); 3227 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); 3228 int __must_check i915_vma_unbind(struct i915_vma *vma); 3229 void i915_vma_close(struct i915_vma *vma); 3230 void i915_vma_destroy(struct i915_vma *vma); 3231 3232 int i915_gem_object_unbind(struct drm_i915_gem_object *obj); 3233 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 3234 3235 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); 3236 3237 static inline int __sg_page_count(const struct scatterlist *sg) 3238 { 3239 return sg->length >> PAGE_SHIFT; 3240 } 3241 3242 struct scatterlist * 3243 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 3244 unsigned int n, unsigned int *offset); 3245 3246 struct page * 3247 i915_gem_object_get_page(struct drm_i915_gem_object *obj, 3248 unsigned int n); 3249 3250 struct page * 3251 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 3252 unsigned int n); 3253 3254 dma_addr_t 3255 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 3256 unsigned long n); 3257 3258 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 3259 struct sg_table *pages); 3260 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 3261 3262 static inline int __must_check 3263 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 3264 { 3265 might_lock(&obj->mm.lock); 3266 3267 if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) 3268 return 0; 3269 3270 return __i915_gem_object_get_pages(obj); 3271 } 3272 3273 static inline void 3274 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 3275 { 3276 GEM_BUG_ON(!obj->mm.pages); 3277 3278 atomic_inc(&obj->mm.pages_pin_count); 3279 } 3280 3281 static inline bool 3282 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) 3283 { 3284 return atomic_read(&obj->mm.pages_pin_count); 3285 } 3286 3287 static inline void 3288 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 3289 { 3290 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 3291 GEM_BUG_ON(!obj->mm.pages); 3292 3293 atomic_dec(&obj->mm.pages_pin_count); 3294 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); 3295 } 3296 3297 static inline void 3298 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 3299 { 3300 __i915_gem_object_unpin_pages(obj); 3301 } 3302 3303 enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */ 3304 I915_MM_NORMAL = 0, 3305 I915_MM_SHRINKER 3306 }; 3307 3308 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 3309 enum i915_mm_subclass subclass); 3310 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); 3311 3312 enum i915_map_type { 3313 I915_MAP_WB = 0, 3314 I915_MAP_WC, 3315 }; 3316 3317 /** 3318 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 3319 * @obj - the object to map into kernel address space 3320 * 
@type - the type of mapping, used to select pgprot_t 3321 * 3322 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 3323 * pages and then returns a contiguous mapping of the backing storage into 3324 * the kernel address space. Based on the @type of mapping, the PTE will be 3325 * set to either WriteBack or WriteCombine (via pgprot_t). 3326 * 3327 * The caller is responsible for calling i915_gem_object_unpin_map() when the 3328 * mapping is no longer required. 3329 * 3330 * Returns the pointer through which to access the mapped object, or an 3331 * ERR_PTR() on error. 3332 */ 3333 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 3334 enum i915_map_type type); 3335 3336 /** 3337 * i915_gem_object_unpin_map - releases an earlier mapping 3338 * @obj - the object to unmap 3339 * 3340 * After pinning the object and mapping its pages, once you are finished 3341 * with your access, call i915_gem_object_unpin_map() to release the pin 3342 * upon the mapping. Once the pin count reaches zero, that mapping may be 3343 * removed. 3344 */ 3345 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3346 { 3347 i915_gem_object_unpin_pages(obj); 3348 } 3349 3350 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 3351 unsigned int *needs_clflush); 3352 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 3353 unsigned int *needs_clflush); 3354 #define CLFLUSH_BEFORE 0x1 3355 #define CLFLUSH_AFTER 0x2 3356 #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) 3357 3358 static inline void 3359 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) 3360 { 3361 i915_gem_object_unpin_pages(obj); 3362 } 3363 3364 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3365 void i915_vma_move_to_active(struct i915_vma *vma, 3366 struct drm_i915_gem_request *req, 3367 unsigned int flags); 3368 int i915_gem_dumb_create(struct drm_file *file_priv, 3369 struct drm_device *dev, 3370 struct drm_mode_create_dumb *args); 3371 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3372 uint32_t handle, uint64_t *offset); 3373 int i915_gem_mmap_gtt_version(void); 3374 3375 void i915_gem_track_fb(struct drm_i915_gem_object *old, 3376 struct drm_i915_gem_object *new, 3377 unsigned frontbuffer_bits); 3378 3379 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 3380 3381 struct drm_i915_gem_request * 3382 i915_gem_find_active_request(struct intel_engine_cs *engine); 3383 3384 void i915_gem_retire_requests(struct drm_i915_private *dev_priv); 3385 3386 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 3387 { 3388 return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags)); 3389 } 3390 3391 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 3392 { 3393 return unlikely(test_bit(I915_WEDGED, &error->flags)); 3394 } 3395 3396 static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error) 3397 { 3398 return i915_reset_in_progress(error) | i915_terminally_wedged(error); 3399 } 3400 3401 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3402 { 3403 return READ_ONCE(error->reset_count); 3404 } 3405 3406 void i915_gem_reset(struct drm_i915_private *dev_priv); 3407 void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3408 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3409 int __must_check i915_gem_init(struct drm_device *dev); 3410 int 
__must_check i915_gem_init_hw(struct drm_device *dev); 3411 void i915_gem_init_swizzling(struct drm_device *dev); 3412 void i915_gem_cleanup_engines(struct drm_device *dev); 3413 int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, 3414 unsigned int flags); 3415 int __must_check i915_gem_suspend(struct drm_device *dev); 3416 void i915_gem_resume(struct drm_device *dev); 3417 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 3418 int i915_gem_object_wait(struct drm_i915_gem_object *obj, 3419 unsigned int flags, 3420 long timeout, 3421 struct intel_rps_client *rps); 3422 int __must_check 3423 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 3424 bool write); 3425 int __must_check 3426 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3427 struct i915_vma * __must_check 3428 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3429 u32 alignment, 3430 const struct i915_ggtt_view *view); 3431 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); 3432 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3433 int align); 3434 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 3435 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3436 3437 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size, 3438 int tiling_mode); 3439 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, 3440 int tiling_mode, bool fenced); 3441 3442 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3443 enum i915_cache_level cache_level); 3444 3445 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3446 struct dma_buf *dma_buf); 3447 3448 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3449 struct drm_gem_object *gem_obj, int flags); 3450 3451 struct i915_vma * 3452 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3453 struct i915_address_space *vm, 3454 const struct i915_ggtt_view *view); 3455 3456 struct i915_vma * 3457 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3458 struct i915_address_space *vm, 3459 const struct i915_ggtt_view *view); 3460 3461 static inline struct i915_hw_ppgtt * 3462 i915_vm_to_ppgtt(struct i915_address_space *vm) 3463 { 3464 return container_of(vm, struct i915_hw_ppgtt, base); 3465 } 3466 3467 static inline struct i915_vma * 3468 i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj, 3469 const struct i915_ggtt_view *view) 3470 { 3471 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); 3472 } 3473 3474 static inline unsigned long 3475 i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, 3476 const struct i915_ggtt_view *view) 3477 { 3478 return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view)); 3479 } 3480 3481 /* i915_gem_fence.c */ 3482 int __must_check i915_vma_get_fence(struct i915_vma *vma); 3483 int __must_check i915_vma_put_fence(struct i915_vma *vma); 3484 3485 /** 3486 * i915_vma_pin_fence - pin fencing state 3487 * @vma: vma to pin fencing for 3488 * 3489 * This pins the fencing state (whether tiled or untiled) to make sure the 3490 * vma (and its object) is ready to be used as a scanout target. Fencing 3491 * status must be synchronize first by calling i915_vma_get_fence(): 3492 * 3493 * The resulting fence pin reference must be released again with 3494 * i915_vma_unpin_fence(). 3495 * 3496 * Returns: 3497 * 3498 * True if the vma has a fence, false otherwise. 
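 *
 * A minimal sketch of the expected call sequence (hypothetical caller shown
 * for illustration):
 *
 *	err = i915_vma_get_fence(vma);
 *	if (err)
 *		return err;
 *	if (i915_vma_pin_fence(vma)) {
 *		... make use of the fenced access ...
 *		i915_vma_unpin_fence(vma);
 *	}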
3499 */ 3500 static inline bool 3501 i915_vma_pin_fence(struct i915_vma *vma) 3502 { 3503 lockdep_assert_held(&vma->vm->dev->struct_mutex); 3504 if (vma->fence) { 3505 vma->fence->pin_count++; 3506 return true; 3507 } else 3508 return false; 3509 } 3510 3511 /** 3512 * i915_vma_unpin_fence - unpin fencing state 3513 * @vma: vma to unpin fencing for 3514 * 3515 * This releases the fence pin reference acquired through 3516 * i915_vma_pin_fence. It will handle both objects with and without an 3517 * attached fence correctly, callers do not need to distinguish this. 3518 */ 3519 static inline void 3520 i915_vma_unpin_fence(struct i915_vma *vma) 3521 { 3522 lockdep_assert_held(&vma->vm->dev->struct_mutex); 3523 if (vma->fence) { 3524 GEM_BUG_ON(vma->fence->pin_count <= 0); 3525 vma->fence->pin_count--; 3526 } 3527 } 3528 3529 void i915_gem_restore_fences(struct drm_device *dev); 3530 3531 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 3532 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, 3533 struct sg_table *pages); 3534 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, 3535 struct sg_table *pages); 3536 3537 /* i915_gem_context.c */ 3538 int __must_check i915_gem_context_init(struct drm_device *dev); 3539 void i915_gem_context_lost(struct drm_i915_private *dev_priv); 3540 void i915_gem_context_fini(struct drm_device *dev); 3541 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3542 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3543 int i915_switch_context(struct drm_i915_gem_request *req); 3544 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); 3545 struct i915_vma * 3546 i915_gem_context_pin_legacy(struct i915_gem_context *ctx, 3547 unsigned int flags); 3548 void i915_gem_context_free(struct kref *ctx_ref); 3549 struct drm_i915_gem_object * 3550 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3551 struct i915_gem_context * 3552 i915_gem_context_create_gvt(struct drm_device *dev); 3553 3554 static inline struct i915_gem_context * 3555 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) 3556 { 3557 struct i915_gem_context *ctx; 3558 3559 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); 3560 3561 ctx = idr_find(&file_priv->context_idr, id); 3562 if (!ctx) 3563 return ERR_PTR(-ENOENT); 3564 3565 return ctx; 3566 } 3567 3568 static inline struct i915_gem_context * 3569 i915_gem_context_get(struct i915_gem_context *ctx) 3570 { 3571 kref_get(&ctx->ref); 3572 return ctx; 3573 } 3574 3575 static inline void i915_gem_context_put(struct i915_gem_context *ctx) 3576 { 3577 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 3578 kref_put(&ctx->ref, i915_gem_context_free); 3579 } 3580 3581 static inline struct intel_timeline * 3582 i915_gem_context_lookup_timeline(struct i915_gem_context *ctx, 3583 struct intel_engine_cs *engine) 3584 { 3585 struct i915_address_space *vm; 3586 3587 vm = ctx->ppgtt ? 
&ctx->ppgtt->base : &ctx->i915->ggtt.base; 3588 return &vm->timeline.engine[engine->id]; 3589 } 3590 3591 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) 3592 { 3593 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3594 } 3595 3596 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 3597 struct drm_file *file); 3598 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 3599 struct drm_file *file); 3600 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 3601 struct drm_file *file_priv); 3602 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3603 struct drm_file *file_priv); 3604 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, 3605 struct drm_file *file); 3606 3607 /* i915_gem_evict.c */ 3608 int __must_check i915_gem_evict_something(struct i915_address_space *vm, 3609 u64 min_size, u64 alignment, 3610 unsigned cache_level, 3611 u64 start, u64 end, 3612 unsigned flags); 3613 int __must_check i915_gem_evict_for_vma(struct i915_vma *target); 3614 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3615 3616 /* belongs in i915_gem_gtt.h */ 3617 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) 3618 { 3619 wmb(); 3620 if (INTEL_GEN(dev_priv) < 6) 3621 intel_gtt_chipset_flush(); 3622 } 3623 3624 /* i915_gem_stolen.c */ 3625 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3626 struct drm_mm_node *node, u64 size, 3627 unsigned alignment); 3628 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3629 struct drm_mm_node *node, u64 size, 3630 unsigned alignment, u64 start, 3631 u64 end); 3632 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3633 struct drm_mm_node *node); 3634 int i915_gem_init_stolen(struct drm_device *dev); 3635 void i915_gem_cleanup_stolen(struct drm_device *dev); 3636 struct drm_i915_gem_object * 3637 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 3638 struct drm_i915_gem_object * 3639 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 3640 u32 stolen_offset, 3641 u32 gtt_offset, 3642 u32 size); 3643 3644 /* i915_gem_internal.c */ 3645 struct drm_i915_gem_object * 3646 i915_gem_object_create_internal(struct drm_i915_private *dev_priv, 3647 unsigned int size); 3648 3649 /* i915_gem_shrinker.c */ 3650 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3651 unsigned long target, 3652 unsigned flags); 3653 #define I915_SHRINK_PURGEABLE 0x1 3654 #define I915_SHRINK_UNBOUND 0x2 3655 #define I915_SHRINK_BOUND 0x4 3656 #define I915_SHRINK_ACTIVE 0x8 3657 #define I915_SHRINK_VMAPS 0x10 3658 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3659 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3660 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); 3661 3662 3663 /* i915_gem_tiling.c */ 3664 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3665 { 3666 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3667 3668 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3669 i915_gem_object_is_tiled(obj); 3670 } 3671 3672 /* i915_debugfs.c */ 3673 #ifdef CONFIG_DEBUG_FS 3674 int i915_debugfs_register(struct drm_i915_private *dev_priv); 3675 void i915_debugfs_unregister(struct drm_i915_private *dev_priv); 3676 int i915_debugfs_connector_add(struct drm_connector *connector); 3677 void 
/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif

/* i915_gpu_error.c */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    u32 engine_mask,
					    const char *error_msg)
{
}

static inline void i915_destroy_error_state(struct drm_device *dev)
{
}

#endif

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *batch_obj,
			    struct drm_i915_gem_object *shadow_batch_obj,
			    u32 batch_start_offset,
			    u32 batch_len,
			    bool is_master);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);
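
/*
 * Illustrative sketch only (not part of the driver): fetching a GMBUS
 * adapter via the helpers above and probing a device on it with the core
 * i2c API. The pin and slave address are placeholder parameters, not values
 * taken from the VBT; the helper name is hypothetical.
 */
#if 0	/* usage sketch, not built */
static bool example_gmbus_probe(struct drm_i915_private *dev_priv,
				unsigned int pin, u8 slave_addr)
{
	struct i2c_adapter *adapter;
	u8 dummy;
	struct i2c_msg msg = {
		.addr = slave_addr,
		.flags = I2C_M_RD,
		.len = 1,
		.buf = &dummy,
	};

	if (!intel_gmbus_is_valid_pin(dev_priv, pin))
		return false;

	adapter = intel_gmbus_get_adapter(dev_priv, pin);

	/* A single successful read transfer means something ACKed. */
	return i2c_transfer(adapter, &msg, 1) == 1;
}
#endif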
/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
				  enum port port);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
extern void intel_opregion_register(struct drm_i915_private *dev_priv);
extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
					 pci_power_t state);
extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
#else
static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
{
	return 0;
}
static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
{
	return -ENODEV;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}

void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump(struct drm_i915_private *dev_priv);
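
/*
 * Illustrative sketch only (not part of the driver): the intended use of
 * mkwrite_device_info(), which casts away the const so that early init code
 * can patch runtime capabilities before anything else reads them. The
 * num_pipes field is assumed here purely for illustration; the helper name
 * is hypothetical.
 */
#if 0	/* usage sketch, not built */
static void example_disable_display(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);

	/* dev_priv->info is treated as const everywhere else; only init
	 * paths that run before the driver is registered may write it.
	 */
	info->num_pipes = 0;	/* field name assumed for illustration */
}
#endif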
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
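
/*
 * Illustrative sketch only (not part of the driver): a guarded read through
 * the pcode mailbox helpers declared above. The mailbox id is left as a
 * parameter rather than naming a real GEN6_PCODE_* constant; the helper name
 * is hypothetical.
 */
#if 0	/* usage sketch, not built */
static int example_read_pcode(struct drm_i915_private *dev_priv,
			      u32 mbox, u32 *val)
{
	int ret;

	/* The helper performs the request/ack handshake with pcode and
	 * returns a negative errno if the mailbox never responds.
	 */
	ret = sandybridge_pcode_read(dev_priv, mbox, val);
	if (ret)
		DRM_DEBUG_DRIVER("pcode read of mbox %u failed: %d\n",
				 mbox, ret);

	return ret;
}
#endif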
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void bxt_port_to_phy_channel(enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
					     uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				     uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void vlv_phy_reset_lanes(struct intel_encoder *encoder);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
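
/*
 * Illustrative sketch only (not part of the driver): a read-modify-write
 * through the tracked accessors above. The register and bitmask are left as
 * parameters so no real register name is implied; note that the macros
 * expand against a local variable named dev_priv. The helper name is
 * hypothetical.
 */
#if 0	/* usage sketch, not built */
static void example_set_bits(struct drm_i915_private *dev_priv,
			     i915_reg_t reg, u32 bits)
{
	u32 val;

	/* I915_READ/I915_WRITE go through the uncore vfuncs, so forcewake
	 * handling and tracing are taken care of for us.
	 */
	val = I915_READ(reg);
	val |= bits;
	I915_WRITE(reg, val);
}
#endif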
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
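
/*
 * Illustrative sketch only (not part of the driver): reading a 64-bit value
 * that is split across two 32-bit registers with I915_READ64_2x32(). The
 * macro re-reads the upper dword until it is stable, so a carry between the
 * two halves cannot be observed. Register names are parameters here, not
 * real registers; the helper name is hypothetical.
 */
#if 0	/* usage sketch, not built */
static u64 example_read_split_counter(struct drm_i915_private *dev_priv,
				      i915_reg_t lower_reg,
				      i915_reg_t upper_reg)
{
	/* Equivalent to lower | (upper << 32), but safe against the low
	 * dword wrapping between the two 32-bit reads.
	 */
	return I915_READ64_2x32(lower_reg, upper_reg);
}
#endif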
#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
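
/*
 * Illustrative sketch only (not part of the driver): the locking pattern the
 * comment above describes, spelled out around I915_READ_FW(). This assumes
 * the register actually needs forcewake (otherwise the forcewake calls can
 * be trimmed via intel_uncore_forcewake_for_reg()) and that grabbing all
 * domains with FORCEWAKE_ALL is acceptable. The helper name is hypothetical.
 */
#if 0	/* usage sketch, not built */
static u32 example_read_fw(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	u32 val;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

	/* Untraced, unchecked access: we hold both the lock and forcewake. */
	val = I915_READ_FW(reg);

	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return val;
}
#endif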
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return VLV_VGACNTRL;
	else if (INTEL_GEN(dev_priv) >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}

static inline bool
__i915_request_irq_complete(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (__i915_gem_request_completed(req))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come.)
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
		struct task_struct *tsk;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial. The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger a
		 * barrier on the next pass, and the read may not see the
		 * seqno update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have inspected
		 * the seqno before we believe it coherent since they see
		 * irq_posted == false but we are still running).
		 */
		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk && tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whoever made the change. We only have
			 * to worry about when we steal the irq-posted for
			 * ourselves.
			 */
			wake_up_process(tsk);
		rcu_read_unlock();

		if (__i915_gem_request_completed(req))
			return true;
	}

	return false;
}

void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

#define ptr_mask_bits(ptr) ({						\
	unsigned long __v = (unsigned long)(ptr);			\
	(typeof(ptr))(__v & PAGE_MASK);					\
})

#define ptr_unpack_bits(ptr, bits) ({					\
	unsigned long __v = (unsigned long)(ptr);			\
	(bits) = __v & ~PAGE_MASK;					\
	(typeof(ptr))(__v & PAGE_MASK);					\
})

#define ptr_pack_bits(ptr, bits)					\
	((typeof(ptr))((unsigned long)(ptr) | (bits)))

#define fetch_and_zero(ptr) ({						\
	typeof(*ptr) __T = *(ptr);					\
	*(ptr) = (typeof(*ptr))0;					\
	__T;								\
})

#endif