1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ 32 33 #include <uapi/drm/i915_drm.h> 34 #include <uapi/drm/drm_fourcc.h> 35 36 #include <drm/drmP.h> 37 #include "i915_params.h" 38 #include "i915_reg.h" 39 #include "intel_bios.h" 40 #include "intel_ringbuffer.h" 41 #include "intel_lrc.h" 42 #include "i915_gem_gtt.h" 43 #include "i915_gem_render_state.h" 44 #include <linux/io-mapping.h> 45 #include <linux/i2c.h> 46 #include <linux/i2c-algo-bit.h> 47 #include <drm/intel-gtt.h> 48 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ 49 #include <drm/drm_gem.h> 50 #include <linux/backlight.h> 51 #include <linux/hashtable.h> 52 #include <linux/intel-iommu.h> 53 #include <linux/kref.h> 54 #include <linux/pm_qos.h> 55 #include "intel_guc.h" 56 #include "intel_dpll_mgr.h" 57 58 /* General customization: 59 */ 60 61 #define DRIVER_NAME "i915" 62 #define DRIVER_DESC "Intel Graphics" 63 #define DRIVER_DATE "20160411" 64 65 #undef WARN_ON 66 /* Many gcc seem to no see through this and fall over :( */ 67 #if 0 68 #define WARN_ON(x) ({ \ 69 bool __i915_warn_cond = (x); \ 70 if (__builtin_constant_p(__i915_warn_cond)) \ 71 BUILD_BUG_ON(__i915_warn_cond); \ 72 WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) 73 #else 74 #define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")") 75 #endif 76 77 #undef WARN_ON_ONCE 78 #define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") 79 80 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ 81 (long) (x), __func__); 82 83 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and 84 * WARN_ON()) for hw state sanity checks to check for unexpected conditions 85 * which may not necessarily be a user visible problem. This will either 86 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to 87 * enable distros and users to tailor their preferred amount of i915 abrt 88 * spam. 89 */ 90 #define I915_STATE_WARN(condition, format...) ({ \ 91 int __ret_warn_on = !!(condition); \ 92 if (unlikely(__ret_warn_on)) \ 93 if (!WARN(i915.verbose_state_checks, format)) \ 94 DRM_ERROR(format); \ 95 unlikely(__ret_warn_on); \ 96 }) 97 98 #define I915_STATE_WARN_ON(x) \ 99 I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")") 100 101 bool __i915_inject_load_failure(const char *func, int line); 102 #define i915_inject_load_failure() \ 103 __i915_inject_load_failure(__func__, __LINE__) 104 105 static inline const char *yesno(bool v) 106 { 107 return v ? "yes" : "no"; 108 } 109 110 static inline const char *onoff(bool v) 111 { 112 return v ? "on" : "off"; 113 } 114 115 enum pipe { 116 INVALID_PIPE = -1, 117 PIPE_A = 0, 118 PIPE_B, 119 PIPE_C, 120 _PIPE_EDP, 121 I915_MAX_PIPES = _PIPE_EDP 122 }; 123 #define pipe_name(p) ((p) + 'A') 124 125 enum transcoder { 126 TRANSCODER_A = 0, 127 TRANSCODER_B, 128 TRANSCODER_C, 129 TRANSCODER_EDP, 130 TRANSCODER_DSI_A, 131 TRANSCODER_DSI_C, 132 I915_MAX_TRANSCODERS 133 }; 134 135 static inline const char *transcoder_name(enum transcoder transcoder) 136 { 137 switch (transcoder) { 138 case TRANSCODER_A: 139 return "A"; 140 case TRANSCODER_B: 141 return "B"; 142 case TRANSCODER_C: 143 return "C"; 144 case TRANSCODER_EDP: 145 return "EDP"; 146 case TRANSCODER_DSI_A: 147 return "DSI A"; 148 case TRANSCODER_DSI_C: 149 return "DSI C"; 150 default: 151 return "<invalid>"; 152 } 153 } 154 155 static inline bool transcoder_is_dsi(enum transcoder transcoder) 156 { 157 return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C; 158 } 159 160 /* 161 * I915_MAX_PLANES in the enum below is the maximum (across all platforms) 162 * number of planes per CRTC. Not all platforms really have this many planes, 163 * which means some arrays of size I915_MAX_PLANES may have unused entries 164 * between the topmost sprite plane and the cursor plane. 165 */ 166 enum plane { 167 PLANE_A = 0, 168 PLANE_B, 169 PLANE_C, 170 PLANE_CURSOR, 171 I915_MAX_PLANES, 172 }; 173 #define plane_name(p) ((p) + 'A') 174 175 #define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A') 176 177 enum port { 178 PORT_A = 0, 179 PORT_B, 180 PORT_C, 181 PORT_D, 182 PORT_E, 183 I915_MAX_PORTS 184 }; 185 #define port_name(p) ((p) + 'A') 186 187 #define I915_NUM_PHYS_VLV 2 188 189 enum dpio_channel { 190 DPIO_CH0, 191 DPIO_CH1 192 }; 193 194 enum dpio_phy { 195 DPIO_PHY0, 196 DPIO_PHY1 197 }; 198 199 enum intel_display_power_domain { 200 POWER_DOMAIN_PIPE_A, 201 POWER_DOMAIN_PIPE_B, 202 POWER_DOMAIN_PIPE_C, 203 POWER_DOMAIN_PIPE_A_PANEL_FITTER, 204 POWER_DOMAIN_PIPE_B_PANEL_FITTER, 205 POWER_DOMAIN_PIPE_C_PANEL_FITTER, 206 POWER_DOMAIN_TRANSCODER_A, 207 POWER_DOMAIN_TRANSCODER_B, 208 POWER_DOMAIN_TRANSCODER_C, 209 POWER_DOMAIN_TRANSCODER_EDP, 210 POWER_DOMAIN_TRANSCODER_DSI_A, 211 POWER_DOMAIN_TRANSCODER_DSI_C, 212 POWER_DOMAIN_PORT_DDI_A_LANES, 213 POWER_DOMAIN_PORT_DDI_B_LANES, 214 POWER_DOMAIN_PORT_DDI_C_LANES, 215 POWER_DOMAIN_PORT_DDI_D_LANES, 216 POWER_DOMAIN_PORT_DDI_E_LANES, 217 POWER_DOMAIN_PORT_DSI, 218 POWER_DOMAIN_PORT_CRT, 219 POWER_DOMAIN_PORT_OTHER, 220 POWER_DOMAIN_VGA, 221 POWER_DOMAIN_AUDIO, 222 POWER_DOMAIN_PLLS, 223 POWER_DOMAIN_AUX_A, 224 POWER_DOMAIN_AUX_B, 225 POWER_DOMAIN_AUX_C, 226 POWER_DOMAIN_AUX_D, 227 POWER_DOMAIN_GMBUS, 228 POWER_DOMAIN_MODESET, 229 POWER_DOMAIN_INIT, 230 231 POWER_DOMAIN_NUM, 232 }; 233 234 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 235 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ 236 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) 237 #define POWER_DOMAIN_TRANSCODER(tran) \ 238 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ 239 (tran) + POWER_DOMAIN_TRANSCODER_A) 240 241 enum hpd_pin { 242 HPD_NONE = 0, 243 HPD_TV = HPD_NONE, /* TV is known to be unreliable */ 244 HPD_CRT, 245 HPD_SDVO_B, 246 HPD_SDVO_C, 247 HPD_PORT_A, 248 HPD_PORT_B, 249 HPD_PORT_C, 250 HPD_PORT_D, 251 HPD_PORT_E, 252 HPD_NUM_PINS 253 }; 254 255 #define for_each_hpd_pin(__pin) \ 256 for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) 257 258 struct i915_hotplug { 259 struct work_struct hotplug_work; 260 261 struct { 262 unsigned long last_jiffies; 263 int count; 264 enum { 265 HPD_ENABLED = 0, 266 HPD_DISABLED = 1, 267 HPD_MARK_DISABLED = 2 268 } state; 269 } stats[HPD_NUM_PINS]; 270 u32 event_bits; 271 struct delayed_work reenable_work; 272 273 struct intel_digital_port *irq_port[I915_MAX_PORTS]; 274 u32 long_port_mask; 275 u32 short_port_mask; 276 struct work_struct dig_port_work; 277 278 /* 279 * if we get a HPD irq from DP and a HPD irq from non-DP 280 * the non-DP HPD could block the workqueue on a mode config 281 * mutex getting, that userspace may have taken. However 282 * userspace is waiting on the DP workqueue to run which is 283 * blocked behind the non-DP one. 284 */ 285 struct workqueue_struct *dp_wq; 286 }; 287 288 #define I915_GEM_GPU_DOMAINS \ 289 (I915_GEM_DOMAIN_RENDER | \ 290 I915_GEM_DOMAIN_SAMPLER | \ 291 I915_GEM_DOMAIN_COMMAND | \ 292 I915_GEM_DOMAIN_INSTRUCTION | \ 293 I915_GEM_DOMAIN_VERTEX) 294 295 #define for_each_pipe(__dev_priv, __p) \ 296 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) 297 #define for_each_pipe_masked(__dev_priv, __p, __mask) \ 298 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \ 299 for_each_if ((__mask) & (1 << (__p))) 300 #define for_each_plane(__dev_priv, __pipe, __p) \ 301 for ((__p) = 0; \ 302 (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ 303 (__p)++) 304 #define for_each_sprite(__dev_priv, __p, __s) \ 305 for ((__s) = 0; \ 306 (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ 307 (__s)++) 308 309 #define for_each_port_masked(__port, __ports_mask) \ 310 for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ 311 for_each_if ((__ports_mask) & (1 << (__port))) 312 313 #define for_each_crtc(dev, crtc) \ 314 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 315 316 #define for_each_intel_plane(dev, intel_plane) \ 317 list_for_each_entry(intel_plane, \ 318 &dev->mode_config.plane_list, \ 319 base.head) 320 321 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ 322 list_for_each_entry(intel_plane, \ 323 &(dev)->mode_config.plane_list, \ 324 base.head) \ 325 for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe) 326 327 #define for_each_intel_crtc(dev, intel_crtc) \ 328 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) 329 330 #define for_each_intel_encoder(dev, intel_encoder) \ 331 list_for_each_entry(intel_encoder, \ 332 &(dev)->mode_config.encoder_list, \ 333 base.head) 334 335 #define for_each_intel_connector(dev, intel_connector) \ 336 list_for_each_entry(intel_connector, \ 337 &dev->mode_config.connector_list, \ 338 base.head) 339 340 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 341 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 342 for_each_if ((intel_encoder)->base.crtc == (__crtc)) 343 344 #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ 345 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ 346 for_each_if ((intel_connector)->base.encoder == (__encoder)) 347 348 #define for_each_power_domain(domain, mask) \ 349 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ 350 for_each_if ((1 << (domain)) & (mask)) 351 352 struct drm_i915_private; 353 struct i915_mm_struct; 354 struct i915_mmu_object; 355 356 struct drm_i915_file_private { 357 struct drm_i915_private *dev_priv; 358 struct drm_file *file; 359 360 struct { 361 spinlock_t lock; 362 struct list_head request_list; 363 /* 20ms is a fairly arbitrary limit (greater than the average frame time) 364 * chosen to prevent the CPU getting more than a frame ahead of the GPU 365 * (when using lax throttling for the frontbuffer). We also use it to 366 * offer free GPU waitboosts for severely congested workloads. 367 */ 368 #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20) 369 } mm; 370 struct idr context_idr; 371 372 struct intel_rps_client { 373 struct list_head link; 374 unsigned boosts; 375 } rps; 376 377 unsigned int bsd_ring; 378 }; 379 380 /* Used by dp and fdi links */ 381 struct intel_link_m_n { 382 uint32_t tu; 383 uint32_t gmch_m; 384 uint32_t gmch_n; 385 uint32_t link_m; 386 uint32_t link_n; 387 }; 388 389 void intel_link_compute_m_n(int bpp, int nlanes, 390 int pixel_clock, int link_clock, 391 struct intel_link_m_n *m_n); 392 393 /* Interface history: 394 * 395 * 1.1: Original. 396 * 1.2: Add Power Management 397 * 1.3: Add vblank support 398 * 1.4: Fix cmdbuffer path, add heap destroy 399 * 1.5: Add vblank pipe configuration 400 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 401 * - Support vertical blank on secondary display pipe 402 */ 403 #define DRIVER_MAJOR 1 404 #define DRIVER_MINOR 6 405 #define DRIVER_PATCHLEVEL 0 406 407 #define WATCH_LISTS 0 408 409 struct opregion_header; 410 struct opregion_acpi; 411 struct opregion_swsci; 412 struct opregion_asle; 413 414 struct intel_opregion { 415 struct opregion_header *header; 416 struct opregion_acpi *acpi; 417 struct opregion_swsci *swsci; 418 u32 swsci_gbda_sub_functions; 419 u32 swsci_sbcb_sub_functions; 420 struct opregion_asle *asle; 421 void *rvda; 422 const void *vbt; 423 u32 vbt_size; 424 u32 *lid_state; 425 struct work_struct asle_work; 426 }; 427 #define OPREGION_SIZE (8*1024) 428 429 struct intel_overlay; 430 struct intel_overlay_error_state; 431 432 #define I915_FENCE_REG_NONE -1 433 #define I915_MAX_NUM_FENCES 32 434 /* 32 fences + sign bit for FENCE_REG_NONE */ 435 #define I915_MAX_NUM_FENCE_BITS 6 436 437 struct drm_i915_fence_reg { 438 struct list_head lru_list; 439 struct drm_i915_gem_object *obj; 440 int pin_count; 441 }; 442 443 struct sdvo_device_mapping { 444 u8 initialized; 445 u8 dvo_port; 446 u8 slave_addr; 447 u8 dvo_wiring; 448 u8 i2c_pin; 449 u8 ddc_pin; 450 }; 451 452 struct intel_display_error_state; 453 454 struct drm_i915_error_state { 455 struct kref ref; 456 struct timeval time; 457 458 char error_msg[128]; 459 int iommu; 460 u32 reset_count; 461 u32 suspend_count; 462 463 /* Generic register state */ 464 u32 eir; 465 u32 pgtbl_er; 466 u32 ier; 467 u32 gtier[4]; 468 u32 ccid; 469 u32 derrmr; 470 u32 forcewake; 471 u32 error; /* gen6+ */ 472 u32 err_int; /* gen7 */ 473 u32 fault_data0; /* gen8, gen9 */ 474 u32 fault_data1; /* gen8, gen9 */ 475 u32 done_reg; 476 u32 gac_eco; 477 u32 gam_ecochk; 478 u32 gab_ctl; 479 u32 gfx_mode; 480 u32 extra_instdone[I915_NUM_INSTDONE_REG]; 481 u64 fence[I915_MAX_NUM_FENCES]; 482 struct intel_overlay_error_state *overlay; 483 struct intel_display_error_state *display; 484 struct drm_i915_error_object *semaphore_obj; 485 486 struct drm_i915_error_ring { 487 bool valid; 488 /* Software tracked state */ 489 bool waiting; 490 int hangcheck_score; 491 enum intel_ring_hangcheck_action hangcheck_action; 492 int num_requests; 493 494 /* our own tracking of ring head and tail */ 495 u32 cpu_ring_head; 496 u32 cpu_ring_tail; 497 498 u32 last_seqno; 499 u32 semaphore_seqno[I915_NUM_ENGINES - 1]; 500 501 /* Register state */ 502 u32 start; 503 u32 tail; 504 u32 head; 505 u32 ctl; 506 u32 hws; 507 u32 ipeir; 508 u32 ipehr; 509 u32 instdone; 510 u32 bbstate; 511 u32 instpm; 512 u32 instps; 513 u32 seqno; 514 u64 bbaddr; 515 u64 acthd; 516 u32 fault_reg; 517 u64 faddr; 518 u32 rc_psmi; /* sleep state */ 519 u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; 520 521 struct drm_i915_error_object { 522 int page_count; 523 u64 gtt_offset; 524 u32 *pages[0]; 525 } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; 526 527 struct drm_i915_error_object *wa_ctx; 528 529 struct drm_i915_error_request { 530 long jiffies; 531 u32 seqno; 532 u32 tail; 533 } *requests; 534 535 struct { 536 u32 gfx_mode; 537 union { 538 u64 pdp[4]; 539 u32 pp_dir_base; 540 }; 541 } vm_info; 542 543 pid_t pid; 544 char comm[TASK_COMM_LEN]; 545 } ring[I915_NUM_ENGINES]; 546 547 struct drm_i915_error_buffer { 548 u32 size; 549 u32 name; 550 u32 rseqno[I915_NUM_ENGINES], wseqno; 551 u64 gtt_offset; 552 u32 read_domains; 553 u32 write_domain; 554 s32 fence_reg:I915_MAX_NUM_FENCE_BITS; 555 s32 pinned:2; 556 u32 tiling:2; 557 u32 dirty:1; 558 u32 purgeable:1; 559 u32 userptr:1; 560 s32 ring:4; 561 u32 cache_level:3; 562 } **active_bo, **pinned_bo; 563 564 u32 *active_bo_count, *pinned_bo_count; 565 u32 vm_count; 566 }; 567 568 struct intel_connector; 569 struct intel_encoder; 570 struct intel_crtc_state; 571 struct intel_initial_plane_config; 572 struct intel_crtc; 573 struct intel_limit; 574 struct dpll; 575 576 struct drm_i915_display_funcs { 577 int (*get_display_clock_speed)(struct drm_device *dev); 578 int (*get_fifo_size)(struct drm_device *dev, int plane); 579 int (*compute_pipe_wm)(struct intel_crtc_state *cstate); 580 int (*compute_intermediate_wm)(struct drm_device *dev, 581 struct intel_crtc *intel_crtc, 582 struct intel_crtc_state *newstate); 583 void (*initial_watermarks)(struct intel_crtc_state *cstate); 584 void (*optimize_watermarks)(struct intel_crtc_state *cstate); 585 void (*update_wm)(struct drm_crtc *crtc); 586 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 587 void (*modeset_commit_cdclk)(struct drm_atomic_state *state); 588 /* Returns the active state of the crtc, and if the crtc is active, 589 * fills out the pipe-config with the hw state. */ 590 bool (*get_pipe_config)(struct intel_crtc *, 591 struct intel_crtc_state *); 592 void (*get_initial_plane_config)(struct intel_crtc *, 593 struct intel_initial_plane_config *); 594 int (*crtc_compute_clock)(struct intel_crtc *crtc, 595 struct intel_crtc_state *crtc_state); 596 void (*crtc_enable)(struct drm_crtc *crtc); 597 void (*crtc_disable)(struct drm_crtc *crtc); 598 void (*audio_codec_enable)(struct drm_connector *connector, 599 struct intel_encoder *encoder, 600 const struct drm_display_mode *adjusted_mode); 601 void (*audio_codec_disable)(struct intel_encoder *encoder); 602 void (*fdi_link_train)(struct drm_crtc *crtc); 603 void (*init_clock_gating)(struct drm_device *dev); 604 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 605 struct drm_framebuffer *fb, 606 struct drm_i915_gem_object *obj, 607 struct drm_i915_gem_request *req, 608 uint32_t flags); 609 void (*hpd_irq_setup)(struct drm_device *dev); 610 /* clock updates for mode set */ 611 /* cursor updates */ 612 /* render clock increase/decrease */ 613 /* display clock increase/decrease */ 614 /* pll clock increase/decrease */ 615 616 void (*load_csc_matrix)(struct drm_crtc_state *crtc_state); 617 void (*load_luts)(struct drm_crtc_state *crtc_state); 618 }; 619 620 enum forcewake_domain_id { 621 FW_DOMAIN_ID_RENDER = 0, 622 FW_DOMAIN_ID_BLITTER, 623 FW_DOMAIN_ID_MEDIA, 624 625 FW_DOMAIN_ID_COUNT 626 }; 627 628 enum forcewake_domains { 629 FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER), 630 FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER), 631 FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA), 632 FORCEWAKE_ALL = (FORCEWAKE_RENDER | 633 FORCEWAKE_BLITTER | 634 FORCEWAKE_MEDIA) 635 }; 636 637 struct intel_uncore_funcs { 638 void (*force_wake_get)(struct drm_i915_private *dev_priv, 639 enum forcewake_domains domains); 640 void (*force_wake_put)(struct drm_i915_private *dev_priv, 641 enum forcewake_domains domains); 642 643 uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 644 uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 645 uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 646 uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 647 648 void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r, 649 uint8_t val, bool trace); 650 void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r, 651 uint16_t val, bool trace); 652 void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r, 653 uint32_t val, bool trace); 654 void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r, 655 uint64_t val, bool trace); 656 }; 657 658 struct intel_uncore { 659 spinlock_t lock; /** lock is also taken in irq contexts. */ 660 661 struct intel_uncore_funcs funcs; 662 663 unsigned fifo_count; 664 enum forcewake_domains fw_domains; 665 666 struct intel_uncore_forcewake_domain { 667 struct drm_i915_private *i915; 668 enum forcewake_domain_id id; 669 unsigned wake_count; 670 struct timer_list timer; 671 i915_reg_t reg_set; 672 u32 val_set; 673 u32 val_clear; 674 i915_reg_t reg_ack; 675 i915_reg_t reg_post; 676 u32 val_reset; 677 } fw_domain[FW_DOMAIN_ID_COUNT]; 678 679 int unclaimed_mmio_check; 680 }; 681 682 /* Iterate over initialised fw domains */ 683 #define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \ 684 for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ 685 (i__) < FW_DOMAIN_ID_COUNT; \ 686 (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \ 687 for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__))) 688 689 #define for_each_fw_domain(domain__, dev_priv__, i__) \ 690 for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) 691 692 #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) 693 #define CSR_VERSION_MAJOR(version) ((version) >> 16) 694 #define CSR_VERSION_MINOR(version) ((version) & 0xffff) 695 696 struct intel_csr { 697 struct work_struct work; 698 const char *fw_path; 699 uint32_t *dmc_payload; 700 uint32_t dmc_fw_size; 701 uint32_t version; 702 uint32_t mmio_count; 703 i915_reg_t mmioaddr[8]; 704 uint32_t mmiodata[8]; 705 uint32_t dc_state; 706 uint32_t allowed_dc_mask; 707 }; 708 709 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 710 func(is_mobile) sep \ 711 func(is_i85x) sep \ 712 func(is_i915g) sep \ 713 func(is_i945gm) sep \ 714 func(is_g33) sep \ 715 func(need_gfx_hws) sep \ 716 func(is_g4x) sep \ 717 func(is_pineview) sep \ 718 func(is_broadwater) sep \ 719 func(is_crestline) sep \ 720 func(is_ivybridge) sep \ 721 func(is_valleyview) sep \ 722 func(is_cherryview) sep \ 723 func(is_haswell) sep \ 724 func(is_skylake) sep \ 725 func(is_broxton) sep \ 726 func(is_kabylake) sep \ 727 func(is_preliminary) sep \ 728 func(has_fbc) sep \ 729 func(has_pipe_cxsr) sep \ 730 func(has_hotplug) sep \ 731 func(cursor_needs_physical) sep \ 732 func(has_overlay) sep \ 733 func(overlay_needs_physical) sep \ 734 func(supports_tv) sep \ 735 func(has_llc) sep \ 736 func(has_snoop) sep \ 737 func(has_ddi) sep \ 738 func(has_fpga_dbg) 739 740 #define DEFINE_FLAG(name) u8 name:1 741 #define SEP_SEMICOLON ; 742 743 struct intel_device_info { 744 u32 display_mmio_offset; 745 u16 device_id; 746 u8 num_pipes:3; 747 u8 num_sprites[I915_MAX_PIPES]; 748 u8 gen; 749 u8 ring_mask; /* Rings supported by the HW */ 750 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 751 /* Register offsets for the various display pipes and transcoders */ 752 int pipe_offsets[I915_MAX_TRANSCODERS]; 753 int trans_offsets[I915_MAX_TRANSCODERS]; 754 int palette_offsets[I915_MAX_PIPES]; 755 int cursor_offsets[I915_MAX_PIPES]; 756 757 /* Slice/subslice/EU info */ 758 u8 slice_total; 759 u8 subslice_total; 760 u8 subslice_per_slice; 761 u8 eu_total; 762 u8 eu_per_subslice; 763 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ 764 u8 subslice_7eu[3]; 765 u8 has_slice_pg:1; 766 u8 has_subslice_pg:1; 767 u8 has_eu_pg:1; 768 769 struct color_luts { 770 u16 degamma_lut_size; 771 u16 gamma_lut_size; 772 } color; 773 }; 774 775 #undef DEFINE_FLAG 776 #undef SEP_SEMICOLON 777 778 enum i915_cache_level { 779 I915_CACHE_NONE = 0, 780 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ 781 I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc 782 caches, eg sampler/render caches, and the 783 large Last-Level-Cache. LLC is coherent with 784 the CPU, but L3 is only visible to the GPU. */ 785 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ 786 }; 787 788 struct i915_ctx_hang_stats { 789 /* This context had batch pending when hang was declared */ 790 unsigned batch_pending; 791 792 /* This context had batch active when hang was declared */ 793 unsigned batch_active; 794 795 /* Time when this context was last blamed for a GPU reset */ 796 unsigned long guilty_ts; 797 798 /* If the contexts causes a second GPU hang within this time, 799 * it is permanently banned from submitting any more work. 800 */ 801 unsigned long ban_period_seconds; 802 803 /* This context is banned to submit more work */ 804 bool banned; 805 }; 806 807 /* This must match up with the value previously used for execbuf2.rsvd1. */ 808 #define DEFAULT_CONTEXT_HANDLE 0 809 810 #define CONTEXT_NO_ZEROMAP (1<<0) 811 /** 812 * struct intel_context - as the name implies, represents a context. 813 * @ref: reference count. 814 * @user_handle: userspace tracking identity for this context. 815 * @remap_slice: l3 row remapping information. 816 * @flags: context specific flags: 817 * CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0. 818 * @file_priv: filp associated with this context (NULL for global default 819 * context). 820 * @hang_stats: information about the role of this context in possible GPU 821 * hangs. 822 * @ppgtt: virtual memory space used by this context. 823 * @legacy_hw_ctx: render context backing object and whether it is correctly 824 * initialized (legacy ring submission mechanism only). 825 * @link: link in the global list of contexts. 826 * 827 * Contexts are memory images used by the hardware to store copies of their 828 * internal state. 829 */ 830 struct intel_context { 831 struct kref ref; 832 int user_handle; 833 uint8_t remap_slice; 834 struct drm_i915_private *i915; 835 int flags; 836 struct drm_i915_file_private *file_priv; 837 struct i915_ctx_hang_stats hang_stats; 838 struct i915_hw_ppgtt *ppgtt; 839 840 /* Legacy ring buffer submission */ 841 struct { 842 struct drm_i915_gem_object *rcs_state; 843 bool initialized; 844 } legacy_hw_ctx; 845 846 /* Execlists */ 847 struct { 848 struct drm_i915_gem_object *state; 849 struct intel_ringbuffer *ringbuf; 850 int pin_count; 851 struct i915_vma *lrc_vma; 852 u64 lrc_desc; 853 uint32_t *lrc_reg_state; 854 } engine[I915_NUM_ENGINES]; 855 856 struct list_head link; 857 }; 858 859 enum fb_op_origin { 860 ORIGIN_GTT, 861 ORIGIN_CPU, 862 ORIGIN_CS, 863 ORIGIN_FLIP, 864 ORIGIN_DIRTYFB, 865 }; 866 867 struct intel_fbc { 868 /* This is always the inner lock when overlapping with struct_mutex and 869 * it's the outer lock when overlapping with stolen_lock. */ 870 struct mutex lock; 871 unsigned threshold; 872 unsigned int possible_framebuffer_bits; 873 unsigned int busy_bits; 874 unsigned int visible_pipes_mask; 875 struct intel_crtc *crtc; 876 877 struct drm_mm_node compressed_fb; 878 struct drm_mm_node *compressed_llb; 879 880 bool false_color; 881 882 bool enabled; 883 bool active; 884 885 struct intel_fbc_state_cache { 886 struct { 887 unsigned int mode_flags; 888 uint32_t hsw_bdw_pixel_rate; 889 } crtc; 890 891 struct { 892 unsigned int rotation; 893 int src_w; 894 int src_h; 895 bool visible; 896 } plane; 897 898 struct { 899 u64 ilk_ggtt_offset; 900 uint32_t pixel_format; 901 unsigned int stride; 902 int fence_reg; 903 unsigned int tiling_mode; 904 } fb; 905 } state_cache; 906 907 struct intel_fbc_reg_params { 908 struct { 909 enum pipe pipe; 910 enum plane plane; 911 unsigned int fence_y_offset; 912 } crtc; 913 914 struct { 915 u64 ggtt_offset; 916 uint32_t pixel_format; 917 unsigned int stride; 918 int fence_reg; 919 } fb; 920 921 int cfb_size; 922 } params; 923 924 struct intel_fbc_work { 925 bool scheduled; 926 u32 scheduled_vblank; 927 struct work_struct work; 928 } work; 929 930 const char *no_fbc_reason; 931 }; 932 933 /** 934 * HIGH_RR is the highest eDP panel refresh rate read from EDID 935 * LOW_RR is the lowest eDP panel refresh rate found from EDID 936 * parsing for same resolution. 937 */ 938 enum drrs_refresh_rate_type { 939 DRRS_HIGH_RR, 940 DRRS_LOW_RR, 941 DRRS_MAX_RR, /* RR count */ 942 }; 943 944 enum drrs_support_type { 945 DRRS_NOT_SUPPORTED = 0, 946 STATIC_DRRS_SUPPORT = 1, 947 SEAMLESS_DRRS_SUPPORT = 2 948 }; 949 950 struct intel_dp; 951 struct i915_drrs { 952 struct mutex mutex; 953 struct delayed_work work; 954 struct intel_dp *dp; 955 unsigned busy_frontbuffer_bits; 956 enum drrs_refresh_rate_type refresh_rate_type; 957 enum drrs_support_type type; 958 }; 959 960 struct i915_psr { 961 struct mutex lock; 962 bool sink_support; 963 bool source_ok; 964 struct intel_dp *enabled; 965 bool active; 966 struct delayed_work work; 967 unsigned busy_frontbuffer_bits; 968 bool psr2_support; 969 bool aux_frame_sync; 970 bool link_standby; 971 }; 972 973 enum intel_pch { 974 PCH_NONE = 0, /* No PCH present */ 975 PCH_IBX, /* Ibexpeak PCH */ 976 PCH_CPT, /* Cougarpoint PCH */ 977 PCH_LPT, /* Lynxpoint PCH */ 978 PCH_SPT, /* Sunrisepoint PCH */ 979 PCH_NOP, 980 }; 981 982 enum intel_sbi_destination { 983 SBI_ICLK, 984 SBI_MPHY, 985 }; 986 987 #define QUIRK_PIPEA_FORCE (1<<0) 988 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 989 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 990 #define QUIRK_BACKLIGHT_PRESENT (1<<3) 991 #define QUIRK_PIPEB_FORCE (1<<4) 992 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 993 994 struct intel_fbdev; 995 struct intel_fbc_work; 996 997 struct intel_gmbus { 998 struct i2c_adapter adapter; 999 u32 force_bit; 1000 u32 reg0; 1001 i915_reg_t gpio_reg; 1002 struct i2c_algo_bit_data bit_algo; 1003 struct drm_i915_private *dev_priv; 1004 }; 1005 1006 struct i915_suspend_saved_registers { 1007 u32 saveDSPARB; 1008 u32 saveLVDS; 1009 u32 savePP_ON_DELAYS; 1010 u32 savePP_OFF_DELAYS; 1011 u32 savePP_ON; 1012 u32 savePP_OFF; 1013 u32 savePP_CONTROL; 1014 u32 savePP_DIVISOR; 1015 u32 saveFBC_CONTROL; 1016 u32 saveCACHE_MODE_0; 1017 u32 saveMI_ARB_STATE; 1018 u32 saveSWF0[16]; 1019 u32 saveSWF1[16]; 1020 u32 saveSWF3[3]; 1021 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 1022 u32 savePCH_PORT_HOTPLUG; 1023 u16 saveGCDGMBUS; 1024 }; 1025 1026 struct vlv_s0ix_state { 1027 /* GAM */ 1028 u32 wr_watermark; 1029 u32 gfx_prio_ctrl; 1030 u32 arb_mode; 1031 u32 gfx_pend_tlb0; 1032 u32 gfx_pend_tlb1; 1033 u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM]; 1034 u32 media_max_req_count; 1035 u32 gfx_max_req_count; 1036 u32 render_hwsp; 1037 u32 ecochk; 1038 u32 bsd_hwsp; 1039 u32 blt_hwsp; 1040 u32 tlb_rd_addr; 1041 1042 /* MBC */ 1043 u32 g3dctl; 1044 u32 gsckgctl; 1045 u32 mbctl; 1046 1047 /* GCP */ 1048 u32 ucgctl1; 1049 u32 ucgctl3; 1050 u32 rcgctl1; 1051 u32 rcgctl2; 1052 u32 rstctl; 1053 u32 misccpctl; 1054 1055 /* GPM */ 1056 u32 gfxpause; 1057 u32 rpdeuhwtc; 1058 u32 rpdeuc; 1059 u32 ecobus; 1060 u32 pwrdwnupctl; 1061 u32 rp_down_timeout; 1062 u32 rp_deucsw; 1063 u32 rcubmabdtmr; 1064 u32 rcedata; 1065 u32 spare2gh; 1066 1067 /* Display 1 CZ domain */ 1068 u32 gt_imr; 1069 u32 gt_ier; 1070 u32 pm_imr; 1071 u32 pm_ier; 1072 u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM]; 1073 1074 /* GT SA CZ domain */ 1075 u32 tilectl; 1076 u32 gt_fifoctl; 1077 u32 gtlc_wake_ctrl; 1078 u32 gtlc_survive; 1079 u32 pmwgicz; 1080 1081 /* Display 2 CZ domain */ 1082 u32 gu_ctl0; 1083 u32 gu_ctl1; 1084 u32 pcbr; 1085 u32 clock_gate_dis2; 1086 }; 1087 1088 struct intel_rps_ei { 1089 u32 cz_clock; 1090 u32 render_c0; 1091 u32 media_c0; 1092 }; 1093 1094 struct intel_gen6_power_mgmt { 1095 /* 1096 * work, interrupts_enabled and pm_iir are protected by 1097 * dev_priv->irq_lock 1098 */ 1099 struct work_struct work; 1100 bool interrupts_enabled; 1101 u32 pm_iir; 1102 1103 /* Frequencies are stored in potentially platform dependent multiples. 1104 * In other words, *_freq needs to be multiplied by X to be interesting. 1105 * Soft limits are those which are used for the dynamic reclocking done 1106 * by the driver (raise frequencies under heavy loads, and lower for 1107 * lighter loads). Hard limits are those imposed by the hardware. 1108 * 1109 * A distinction is made for overclocking, which is never enabled by 1110 * default, and is considered to be above the hard limit if it's 1111 * possible at all. 1112 */ 1113 u8 cur_freq; /* Current frequency (cached, may not == HW) */ 1114 u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ 1115 u8 max_freq_softlimit; /* Max frequency permitted by the driver */ 1116 u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ 1117 u8 min_freq; /* AKA RPn. Minimum frequency */ 1118 u8 idle_freq; /* Frequency to request when we are idle */ 1119 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ 1120 u8 rp1_freq; /* "less than" RP0 power/freqency */ 1121 u8 rp0_freq; /* Non-overclocked max frequency. */ 1122 u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ 1123 1124 u8 up_threshold; /* Current %busy required to uplock */ 1125 u8 down_threshold; /* Current %busy required to downclock */ 1126 1127 int last_adj; 1128 enum { LOW_POWER, BETWEEN, HIGH_POWER } power; 1129 1130 spinlock_t client_lock; 1131 struct list_head clients; 1132 bool client_boost; 1133 1134 bool enabled; 1135 struct delayed_work delayed_resume_work; 1136 unsigned boosts; 1137 1138 struct intel_rps_client semaphores, mmioflips; 1139 1140 /* manual wa residency calculations */ 1141 struct intel_rps_ei up_ei, down_ei; 1142 1143 /* 1144 * Protects RPS/RC6 register access and PCU communication. 1145 * Must be taken after struct_mutex if nested. Note that 1146 * this lock may be held for long periods of time when 1147 * talking to hw - so only take it when talking to hw! 1148 */ 1149 struct mutex hw_lock; 1150 }; 1151 1152 /* defined intel_pm.c */ 1153 extern spinlock_t mchdev_lock; 1154 1155 struct intel_ilk_power_mgmt { 1156 u8 cur_delay; 1157 u8 min_delay; 1158 u8 max_delay; 1159 u8 fmax; 1160 u8 fstart; 1161 1162 u64 last_count1; 1163 unsigned long last_time1; 1164 unsigned long chipset_power; 1165 u64 last_count2; 1166 u64 last_time2; 1167 unsigned long gfx_power; 1168 u8 corr; 1169 1170 int c_m; 1171 int r_t; 1172 }; 1173 1174 struct drm_i915_private; 1175 struct i915_power_well; 1176 1177 struct i915_power_well_ops { 1178 /* 1179 * Synchronize the well's hw state to match the current sw state, for 1180 * example enable/disable it based on the current refcount. Called 1181 * during driver init and resume time, possibly after first calling 1182 * the enable/disable handlers. 1183 */ 1184 void (*sync_hw)(struct drm_i915_private *dev_priv, 1185 struct i915_power_well *power_well); 1186 /* 1187 * Enable the well and resources that depend on it (for example 1188 * interrupts located on the well). Called after the 0->1 refcount 1189 * transition. 1190 */ 1191 void (*enable)(struct drm_i915_private *dev_priv, 1192 struct i915_power_well *power_well); 1193 /* 1194 * Disable the well and resources that depend on it. Called after 1195 * the 1->0 refcount transition. 1196 */ 1197 void (*disable)(struct drm_i915_private *dev_priv, 1198 struct i915_power_well *power_well); 1199 /* Returns the hw enabled state. */ 1200 bool (*is_enabled)(struct drm_i915_private *dev_priv, 1201 struct i915_power_well *power_well); 1202 }; 1203 1204 /* Power well structure for haswell */ 1205 struct i915_power_well { 1206 const char *name; 1207 bool always_on; 1208 /* power well enable/disable usage count */ 1209 int count; 1210 /* cached hw enabled state */ 1211 bool hw_enabled; 1212 unsigned long domains; 1213 unsigned long data; 1214 const struct i915_power_well_ops *ops; 1215 }; 1216 1217 struct i915_power_domains { 1218 /* 1219 * Power wells needed for initialization at driver init and suspend 1220 * time are on. They are kept on until after the first modeset. 1221 */ 1222 bool init_power_on; 1223 bool initializing; 1224 int power_well_count; 1225 1226 struct mutex lock; 1227 int domain_use_count[POWER_DOMAIN_NUM]; 1228 struct i915_power_well *power_wells; 1229 }; 1230 1231 #define MAX_L3_SLICES 2 1232 struct intel_l3_parity { 1233 u32 *remap_info[MAX_L3_SLICES]; 1234 struct work_struct error_work; 1235 int which_slice; 1236 }; 1237 1238 struct i915_gem_mm { 1239 /** Memory allocator for GTT stolen memory */ 1240 struct drm_mm stolen; 1241 /** Protects the usage of the GTT stolen memory allocator. This is 1242 * always the inner lock when overlapping with struct_mutex. */ 1243 struct mutex stolen_lock; 1244 1245 /** List of all objects in gtt_space. Used to restore gtt 1246 * mappings on resume */ 1247 struct list_head bound_list; 1248 /** 1249 * List of objects which are not bound to the GTT (thus 1250 * are idle and not used by the GPU) but still have 1251 * (presumably uncached) pages still attached. 1252 */ 1253 struct list_head unbound_list; 1254 1255 /** Usable portion of the GTT for GEM */ 1256 unsigned long stolen_base; /* limited to low memory (32-bit) */ 1257 1258 /** PPGTT used for aliasing the PPGTT with the GTT */ 1259 struct i915_hw_ppgtt *aliasing_ppgtt; 1260 1261 struct notifier_block oom_notifier; 1262 struct notifier_block vmap_notifier; 1263 struct shrinker shrinker; 1264 bool shrinker_no_lock_stealing; 1265 1266 /** LRU list of objects with fence regs on them. */ 1267 struct list_head fence_list; 1268 1269 /** 1270 * We leave the user IRQ off as much as possible, 1271 * but this means that requests will finish and never 1272 * be retired once the system goes idle. Set a timer to 1273 * fire periodically while the ring is running. When it 1274 * fires, go retire requests. 1275 */ 1276 struct delayed_work retire_work; 1277 1278 /** 1279 * When we detect an idle GPU, we want to turn on 1280 * powersaving features. So once we see that there 1281 * are no more requests outstanding and no more 1282 * arrive within a small period of time, we fire 1283 * off the idle_work. 1284 */ 1285 struct delayed_work idle_work; 1286 1287 /** 1288 * Are we in a non-interruptible section of code like 1289 * modesetting? 1290 */ 1291 bool interruptible; 1292 1293 /** 1294 * Is the GPU currently considered idle, or busy executing userspace 1295 * requests? Whilst idle, we attempt to power down the hardware and 1296 * display clocks. In order to reduce the effect on performance, there 1297 * is a slight delay before we do so. 1298 */ 1299 bool busy; 1300 1301 /* the indicator for dispatch video commands on two BSD rings */ 1302 unsigned int bsd_ring_dispatch_index; 1303 1304 /** Bit 6 swizzling required for X tiling */ 1305 uint32_t bit_6_swizzle_x; 1306 /** Bit 6 swizzling required for Y tiling */ 1307 uint32_t bit_6_swizzle_y; 1308 1309 /* accounting, useful for userland debugging */ 1310 spinlock_t object_stat_lock; 1311 size_t object_memory; 1312 u32 object_count; 1313 }; 1314 1315 struct drm_i915_error_state_buf { 1316 struct drm_i915_private *i915; 1317 unsigned bytes; 1318 unsigned size; 1319 int err; 1320 u8 *buf; 1321 loff_t start; 1322 loff_t pos; 1323 }; 1324 1325 struct i915_error_state_file_priv { 1326 struct drm_device *dev; 1327 struct drm_i915_error_state *error; 1328 }; 1329 1330 struct i915_gpu_error { 1331 /* For hangcheck timer */ 1332 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 1333 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 1334 /* Hang gpu twice in this window and your context gets banned */ 1335 #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) 1336 1337 struct workqueue_struct *hangcheck_wq; 1338 struct delayed_work hangcheck_work; 1339 1340 /* For reset and error_state handling. */ 1341 spinlock_t lock; 1342 /* Protected by the above dev->gpu_error.lock. */ 1343 struct drm_i915_error_state *first_error; 1344 1345 unsigned long missed_irq_rings; 1346 1347 /** 1348 * State variable controlling the reset flow and count 1349 * 1350 * This is a counter which gets incremented when reset is triggered, 1351 * and again when reset has been handled. So odd values (lowest bit set) 1352 * means that reset is in progress and even values that 1353 * (reset_counter >> 1):th reset was successfully completed. 1354 * 1355 * If reset is not completed succesfully, the I915_WEDGE bit is 1356 * set meaning that hardware is terminally sour and there is no 1357 * recovery. All waiters on the reset_queue will be woken when 1358 * that happens. 1359 * 1360 * This counter is used by the wait_seqno code to notice that reset 1361 * event happened and it needs to restart the entire ioctl (since most 1362 * likely the seqno it waited for won't ever signal anytime soon). 1363 * 1364 * This is important for lock-free wait paths, where no contended lock 1365 * naturally enforces the correct ordering between the bail-out of the 1366 * waiter and the gpu reset work code. 1367 */ 1368 atomic_t reset_counter; 1369 1370 #define I915_RESET_IN_PROGRESS_FLAG 1 1371 #define I915_WEDGED (1 << 31) 1372 1373 /** 1374 * Waitqueue to signal when the reset has completed. Used by clients 1375 * that wait for dev_priv->mm.wedged to settle. 1376 */ 1377 wait_queue_head_t reset_queue; 1378 1379 /* Userspace knobs for gpu hang simulation; 1380 * combines both a ring mask, and extra flags 1381 */ 1382 u32 stop_rings; 1383 #define I915_STOP_RING_ALLOW_BAN (1 << 31) 1384 #define I915_STOP_RING_ALLOW_WARN (1 << 30) 1385 1386 /* For missed irq/seqno simulation. */ 1387 unsigned int test_irq_rings; 1388 1389 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ 1390 bool reload_in_reset; 1391 }; 1392 1393 enum modeset_restore { 1394 MODESET_ON_LID_OPEN, 1395 MODESET_DONE, 1396 MODESET_SUSPENDED, 1397 }; 1398 1399 #define DP_AUX_A 0x40 1400 #define DP_AUX_B 0x10 1401 #define DP_AUX_C 0x20 1402 #define DP_AUX_D 0x30 1403 1404 #define DDC_PIN_B 0x05 1405 #define DDC_PIN_C 0x04 1406 #define DDC_PIN_D 0x06 1407 1408 struct ddi_vbt_port_info { 1409 /* 1410 * This is an index in the HDMI/DVI DDI buffer translation table. 1411 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't 1412 * populate this field. 1413 */ 1414 #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff 1415 uint8_t hdmi_level_shift; 1416 1417 uint8_t supports_dvi:1; 1418 uint8_t supports_hdmi:1; 1419 uint8_t supports_dp:1; 1420 1421 uint8_t alternate_aux_channel; 1422 uint8_t alternate_ddc_pin; 1423 1424 uint8_t dp_boost_level; 1425 uint8_t hdmi_boost_level; 1426 }; 1427 1428 enum psr_lines_to_wait { 1429 PSR_0_LINES_TO_WAIT = 0, 1430 PSR_1_LINE_TO_WAIT, 1431 PSR_4_LINES_TO_WAIT, 1432 PSR_8_LINES_TO_WAIT 1433 }; 1434 1435 struct intel_vbt_data { 1436 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1437 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1438 1439 /* Feature bits */ 1440 unsigned int int_tv_support:1; 1441 unsigned int lvds_dither:1; 1442 unsigned int lvds_vbt:1; 1443 unsigned int int_crt_support:1; 1444 unsigned int lvds_use_ssc:1; 1445 unsigned int display_clock_mode:1; 1446 unsigned int fdi_rx_polarity_inverted:1; 1447 int lvds_ssc_freq; 1448 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 1449 1450 enum drrs_support_type drrs_type; 1451 1452 struct { 1453 int rate; 1454 int lanes; 1455 int preemphasis; 1456 int vswing; 1457 bool low_vswing; 1458 bool initialized; 1459 bool support; 1460 int bpp; 1461 struct edp_power_seq pps; 1462 } edp; 1463 1464 struct { 1465 bool full_link; 1466 bool require_aux_wakeup; 1467 int idle_frames; 1468 enum psr_lines_to_wait lines_to_wait; 1469 int tp1_wakeup_time; 1470 int tp2_tp3_wakeup_time; 1471 } psr; 1472 1473 struct { 1474 u16 pwm_freq_hz; 1475 bool present; 1476 bool active_low_pwm; 1477 u8 min_brightness; /* min_brightness/255 of max */ 1478 } backlight; 1479 1480 /* MIPI DSI */ 1481 struct { 1482 u16 panel_id; 1483 struct mipi_config *config; 1484 struct mipi_pps_data *pps; 1485 u8 seq_version; 1486 u32 size; 1487 u8 *data; 1488 const u8 *sequence[MIPI_SEQ_MAX]; 1489 } dsi; 1490 1491 int crt_ddc_pin; 1492 1493 int child_dev_num; 1494 union child_device_config *child_dev; 1495 1496 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; 1497 struct sdvo_device_mapping sdvo_mappings[2]; 1498 }; 1499 1500 enum intel_ddb_partitioning { 1501 INTEL_DDB_PART_1_2, 1502 INTEL_DDB_PART_5_6, /* IVB+ */ 1503 }; 1504 1505 struct intel_wm_level { 1506 bool enable; 1507 uint32_t pri_val; 1508 uint32_t spr_val; 1509 uint32_t cur_val; 1510 uint32_t fbc_val; 1511 }; 1512 1513 struct ilk_wm_values { 1514 uint32_t wm_pipe[3]; 1515 uint32_t wm_lp[3]; 1516 uint32_t wm_lp_spr[3]; 1517 uint32_t wm_linetime[3]; 1518 bool enable_fbc_wm; 1519 enum intel_ddb_partitioning partitioning; 1520 }; 1521 1522 struct vlv_pipe_wm { 1523 uint16_t primary; 1524 uint16_t sprite[2]; 1525 uint8_t cursor; 1526 }; 1527 1528 struct vlv_sr_wm { 1529 uint16_t plane; 1530 uint8_t cursor; 1531 }; 1532 1533 struct vlv_wm_values { 1534 struct vlv_pipe_wm pipe[3]; 1535 struct vlv_sr_wm sr; 1536 struct { 1537 uint8_t cursor; 1538 uint8_t sprite[2]; 1539 uint8_t primary; 1540 } ddl[3]; 1541 uint8_t level; 1542 bool cxsr; 1543 }; 1544 1545 struct skl_ddb_entry { 1546 uint16_t start, end; /* in number of blocks, 'end' is exclusive */ 1547 }; 1548 1549 static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) 1550 { 1551 return entry->end - entry->start; 1552 } 1553 1554 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, 1555 const struct skl_ddb_entry *e2) 1556 { 1557 if (e1->start == e2->start && e1->end == e2->end) 1558 return true; 1559 1560 return false; 1561 } 1562 1563 struct skl_ddb_allocation { 1564 struct skl_ddb_entry pipe[I915_MAX_PIPES]; 1565 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */ 1566 struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; 1567 }; 1568 1569 struct skl_wm_values { 1570 bool dirty[I915_MAX_PIPES]; 1571 struct skl_ddb_allocation ddb; 1572 uint32_t wm_linetime[I915_MAX_PIPES]; 1573 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; 1574 uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES]; 1575 }; 1576 1577 struct skl_wm_level { 1578 bool plane_en[I915_MAX_PLANES]; 1579 uint16_t plane_res_b[I915_MAX_PLANES]; 1580 uint8_t plane_res_l[I915_MAX_PLANES]; 1581 }; 1582 1583 /* 1584 * This struct helps tracking the state needed for runtime PM, which puts the 1585 * device in PCI D3 state. Notice that when this happens, nothing on the 1586 * graphics device works, even register access, so we don't get interrupts nor 1587 * anything else. 1588 * 1589 * Every piece of our code that needs to actually touch the hardware needs to 1590 * either call intel_runtime_pm_get or call intel_display_power_get with the 1591 * appropriate power domain. 1592 * 1593 * Our driver uses the autosuspend delay feature, which means we'll only really 1594 * suspend if we stay with zero refcount for a certain amount of time. The 1595 * default value is currently very conservative (see intel_runtime_pm_enable), but 1596 * it can be changed with the standard runtime PM files from sysfs. 1597 * 1598 * The irqs_disabled variable becomes true exactly after we disable the IRQs and 1599 * goes back to false exactly before we reenable the IRQs. We use this variable 1600 * to check if someone is trying to enable/disable IRQs while they're supposed 1601 * to be disabled. This shouldn't happen and we'll print some error messages in 1602 * case it happens. 1603 * 1604 * For more, read the Documentation/power/runtime_pm.txt. 1605 */ 1606 struct i915_runtime_pm { 1607 atomic_t wakeref_count; 1608 atomic_t atomic_seq; 1609 bool suspended; 1610 bool irqs_enabled; 1611 }; 1612 1613 enum intel_pipe_crc_source { 1614 INTEL_PIPE_CRC_SOURCE_NONE, 1615 INTEL_PIPE_CRC_SOURCE_PLANE1, 1616 INTEL_PIPE_CRC_SOURCE_PLANE2, 1617 INTEL_PIPE_CRC_SOURCE_PF, 1618 INTEL_PIPE_CRC_SOURCE_PIPE, 1619 /* TV/DP on pre-gen5/vlv can't use the pipe source. */ 1620 INTEL_PIPE_CRC_SOURCE_TV, 1621 INTEL_PIPE_CRC_SOURCE_DP_B, 1622 INTEL_PIPE_CRC_SOURCE_DP_C, 1623 INTEL_PIPE_CRC_SOURCE_DP_D, 1624 INTEL_PIPE_CRC_SOURCE_AUTO, 1625 INTEL_PIPE_CRC_SOURCE_MAX, 1626 }; 1627 1628 struct intel_pipe_crc_entry { 1629 uint32_t frame; 1630 uint32_t crc[5]; 1631 }; 1632 1633 #define INTEL_PIPE_CRC_ENTRIES_NR 128 1634 struct intel_pipe_crc { 1635 spinlock_t lock; 1636 bool opened; /* exclusive access to the result file */ 1637 struct intel_pipe_crc_entry *entries; 1638 enum intel_pipe_crc_source source; 1639 int head, tail; 1640 wait_queue_head_t wq; 1641 }; 1642 1643 struct i915_frontbuffer_tracking { 1644 struct mutex lock; 1645 1646 /* 1647 * Tracking bits for delayed frontbuffer flushing du to gpu activity or 1648 * scheduled flips. 1649 */ 1650 unsigned busy_bits; 1651 unsigned flip_bits; 1652 }; 1653 1654 struct i915_wa_reg { 1655 i915_reg_t addr; 1656 u32 value; 1657 /* bitmask representing WA bits */ 1658 u32 mask; 1659 }; 1660 1661 /* 1662 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only 1663 * allowing it for RCS as we don't foresee any requirement of having 1664 * a whitelist for other engines. When it is really required for 1665 * other engines then the limit need to be increased. 1666 */ 1667 #define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS) 1668 1669 struct i915_workarounds { 1670 struct i915_wa_reg reg[I915_MAX_WA_REGS]; 1671 u32 count; 1672 u32 hw_whitelist_count[I915_NUM_ENGINES]; 1673 }; 1674 1675 struct i915_virtual_gpu { 1676 bool active; 1677 }; 1678 1679 struct i915_execbuffer_params { 1680 struct drm_device *dev; 1681 struct drm_file *file; 1682 uint32_t dispatch_flags; 1683 uint32_t args_batch_start_offset; 1684 uint64_t batch_obj_vm_offset; 1685 struct intel_engine_cs *engine; 1686 struct drm_i915_gem_object *batch_obj; 1687 struct intel_context *ctx; 1688 struct drm_i915_gem_request *request; 1689 }; 1690 1691 /* used in computing the new watermarks state */ 1692 struct intel_wm_config { 1693 unsigned int num_pipes_active; 1694 bool sprites_enabled; 1695 bool sprites_scaled; 1696 }; 1697 1698 struct drm_i915_private { 1699 struct drm_device *dev; 1700 struct kmem_cache *objects; 1701 struct kmem_cache *vmas; 1702 struct kmem_cache *requests; 1703 1704 const struct intel_device_info info; 1705 1706 int relative_constants_mode; 1707 1708 void __iomem *regs; 1709 1710 struct intel_uncore uncore; 1711 1712 struct i915_virtual_gpu vgpu; 1713 1714 struct intel_guc guc; 1715 1716 struct intel_csr csr; 1717 1718 struct intel_gmbus gmbus[GMBUS_NUM_PINS]; 1719 1720 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 1721 * controller on different i2c buses. */ 1722 struct mutex gmbus_mutex; 1723 1724 /** 1725 * Base address of the gmbus and gpio block. 1726 */ 1727 uint32_t gpio_mmio_base; 1728 1729 /* MMIO base address for MIPI regs */ 1730 uint32_t mipi_mmio_base; 1731 1732 uint32_t psr_mmio_base; 1733 1734 wait_queue_head_t gmbus_wait_queue; 1735 1736 struct pci_dev *bridge_dev; 1737 struct intel_engine_cs engine[I915_NUM_ENGINES]; 1738 struct drm_i915_gem_object *semaphore_obj; 1739 uint32_t last_seqno, next_seqno; 1740 1741 struct drm_dma_handle *status_page_dmah; 1742 struct resource mch_res; 1743 1744 /* protects the irq masks */ 1745 spinlock_t irq_lock; 1746 1747 /* protects the mmio flip data */ 1748 spinlock_t mmio_flip_lock; 1749 1750 bool display_irqs_enabled; 1751 1752 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 1753 struct pm_qos_request pm_qos; 1754 1755 /* Sideband mailbox protection */ 1756 struct mutex sb_lock; 1757 1758 /** Cached value of IMR to avoid reads in updating the bitfield */ 1759 union { 1760 u32 irq_mask; 1761 u32 de_irq_mask[I915_MAX_PIPES]; 1762 }; 1763 u32 gt_irq_mask; 1764 u32 pm_irq_mask; 1765 u32 pm_rps_events; 1766 u32 pipestat_irq_mask[I915_MAX_PIPES]; 1767 1768 struct i915_hotplug hotplug; 1769 struct intel_fbc fbc; 1770 struct i915_drrs drrs; 1771 struct intel_opregion opregion; 1772 struct intel_vbt_data vbt; 1773 1774 bool preserve_bios_swizzle; 1775 1776 /* overlay */ 1777 struct intel_overlay *overlay; 1778 1779 /* backlight registers and fields in struct intel_panel */ 1780 struct mutex backlight_lock; 1781 1782 /* LVDS info */ 1783 bool no_aux_handshake; 1784 1785 /* protects panel power sequencer state */ 1786 struct mutex pps_mutex; 1787 1788 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1789 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1790 1791 unsigned int fsb_freq, mem_freq, is_ddr3; 1792 unsigned int skl_boot_cdclk; 1793 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; 1794 unsigned int max_dotclk_freq; 1795 unsigned int rawclk_freq; 1796 unsigned int hpll_freq; 1797 unsigned int czclk_freq; 1798 1799 /** 1800 * wq - Driver workqueue for GEM. 1801 * 1802 * NOTE: Work items scheduled here are not allowed to grab any modeset 1803 * locks, for otherwise the flushing done in the pageflip code will 1804 * result in deadlocks. 1805 */ 1806 struct workqueue_struct *wq; 1807 1808 /* Display functions */ 1809 struct drm_i915_display_funcs display; 1810 1811 /* PCH chipset type */ 1812 enum intel_pch pch_type; 1813 unsigned short pch_id; 1814 1815 unsigned long quirks; 1816 1817 enum modeset_restore modeset_restore; 1818 struct mutex modeset_restore_lock; 1819 struct drm_atomic_state *modeset_restore_state; 1820 1821 struct list_head vm_list; /* Global list of all address spaces */ 1822 struct i915_ggtt ggtt; /* VM representing the global address space */ 1823 1824 struct i915_gem_mm mm; 1825 DECLARE_HASHTABLE(mm_structs, 7); 1826 struct mutex mm_lock; 1827 1828 /* Kernel Modesetting */ 1829 1830 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1831 struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; 1832 wait_queue_head_t pending_flip_queue; 1833 1834 #ifdef CONFIG_DEBUG_FS 1835 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; 1836 #endif 1837 1838 /* dpll and cdclk state is protected by connection_mutex */ 1839 int num_shared_dpll; 1840 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1841 const struct intel_dpll_mgr *dpll_mgr; 1842 1843 /* 1844 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. 1845 * Must be global rather than per dpll, because on some platforms 1846 * plls share registers. 1847 */ 1848 struct mutex dpll_lock; 1849 1850 unsigned int active_crtcs; 1851 unsigned int min_pixclk[I915_MAX_PIPES]; 1852 1853 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1854 1855 struct i915_workarounds workarounds; 1856 1857 struct i915_frontbuffer_tracking fb_tracking; 1858 1859 u16 orig_clock; 1860 1861 bool mchbar_need_disable; 1862 1863 struct intel_l3_parity l3_parity; 1864 1865 /* Cannot be determined by PCIID. You must always read a register. */ 1866 size_t ellc_size; 1867 1868 /* gen6+ rps state */ 1869 struct intel_gen6_power_mgmt rps; 1870 1871 /* ilk-only ips/rps state. Everything in here is protected by the global 1872 * mchdev_lock in intel_pm.c */ 1873 struct intel_ilk_power_mgmt ips; 1874 1875 struct i915_power_domains power_domains; 1876 1877 struct i915_psr psr; 1878 1879 struct i915_gpu_error gpu_error; 1880 1881 struct drm_i915_gem_object *vlv_pctx; 1882 1883 #ifdef CONFIG_DRM_FBDEV_EMULATION 1884 /* list of fbdev register on this device */ 1885 struct intel_fbdev *fbdev; 1886 struct work_struct fbdev_suspend_work; 1887 #endif 1888 1889 struct drm_property *broadcast_rgb_property; 1890 struct drm_property *force_audio_property; 1891 1892 /* hda/i915 audio component */ 1893 struct i915_audio_component *audio_component; 1894 bool audio_component_registered; 1895 /** 1896 * av_mutex - mutex for audio/video sync 1897 * 1898 */ 1899 struct mutex av_mutex; 1900 1901 uint32_t hw_context_size; 1902 struct list_head context_list; 1903 1904 u32 fdi_rx_config; 1905 1906 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ 1907 u32 chv_phy_control; 1908 /* 1909 * Shadows for CHV DPLL_MD regs to keep the state 1910 * checker somewhat working in the presence hardware 1911 * crappiness (can't read out DPLL_MD for pipes B & C). 1912 */ 1913 u32 chv_dpll_md[I915_MAX_PIPES]; 1914 1915 u32 suspend_count; 1916 bool suspended_to_idle; 1917 struct i915_suspend_saved_registers regfile; 1918 struct vlv_s0ix_state vlv_s0ix_state; 1919 1920 struct { 1921 /* 1922 * Raw watermark latency values: 1923 * in 0.1us units for WM0, 1924 * in 0.5us units for WM1+. 1925 */ 1926 /* primary */ 1927 uint16_t pri_latency[5]; 1928 /* sprite */ 1929 uint16_t spr_latency[5]; 1930 /* cursor */ 1931 uint16_t cur_latency[5]; 1932 /* 1933 * Raw watermark memory latency values 1934 * for SKL for all 8 levels 1935 * in 1us units. 1936 */ 1937 uint16_t skl_latency[8]; 1938 1939 /* Committed wm config */ 1940 struct intel_wm_config config; 1941 1942 /* 1943 * The skl_wm_values structure is a bit too big for stack 1944 * allocation, so we keep the staging struct where we store 1945 * intermediate results here instead. 1946 */ 1947 struct skl_wm_values skl_results; 1948 1949 /* current hardware state */ 1950 union { 1951 struct ilk_wm_values hw; 1952 struct skl_wm_values skl_hw; 1953 struct vlv_wm_values vlv; 1954 }; 1955 1956 uint8_t max_level; 1957 1958 /* 1959 * Should be held around atomic WM register writing; also 1960 * protects * intel_crtc->wm.active and 1961 * cstate->wm.need_postvbl_update. 1962 */ 1963 struct mutex wm_mutex; 1964 } wm; 1965 1966 struct i915_runtime_pm pm; 1967 1968 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1969 struct { 1970 int (*execbuf_submit)(struct i915_execbuffer_params *params, 1971 struct drm_i915_gem_execbuffer2 *args, 1972 struct list_head *vmas); 1973 int (*init_engines)(struct drm_device *dev); 1974 void (*cleanup_engine)(struct intel_engine_cs *engine); 1975 void (*stop_engine)(struct intel_engine_cs *engine); 1976 } gt; 1977 1978 struct intel_context *kernel_context; 1979 1980 /* perform PHY state sanity checks? */ 1981 bool chv_phy_assert[2]; 1982 1983 struct intel_encoder *dig_port_map[I915_MAX_PORTS]; 1984 1985 /* 1986 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 1987 * will be rejected. Instead look for a better place. 1988 */ 1989 }; 1990 1991 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 1992 { 1993 return dev->dev_private; 1994 } 1995 1996 static inline struct drm_i915_private *dev_to_i915(struct device *dev) 1997 { 1998 return to_i915(dev_get_drvdata(dev)); 1999 } 2000 2001 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 2002 { 2003 return container_of(guc, struct drm_i915_private, guc); 2004 } 2005 2006 /* Simple iterator over all initialised engines */ 2007 #define for_each_engine(engine__, dev_priv__) \ 2008 for ((engine__) = &(dev_priv__)->engine[0]; \ 2009 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ 2010 (engine__)++) \ 2011 for_each_if (intel_engine_initialized(engine__)) 2012 2013 /* Iterator with engine_id */ 2014 #define for_each_engine_id(engine__, dev_priv__, id__) \ 2015 for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \ 2016 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ 2017 (engine__)++) \ 2018 for_each_if (((id__) = (engine__)->id, \ 2019 intel_engine_initialized(engine__))) 2020 2021 /* Iterator over subset of engines selected by mask */ 2022 #define for_each_engine_masked(engine__, dev_priv__, mask__) \ 2023 for ((engine__) = &(dev_priv__)->engine[0]; \ 2024 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ 2025 (engine__)++) \ 2026 for_each_if (((mask__) & intel_engine_flag(engine__)) && \ 2027 intel_engine_initialized(engine__)) 2028 2029 enum hdmi_force_audio { 2030 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 2031 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2032 HDMI_AUDIO_AUTO, /* trust EDID */ 2033 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2034 }; 2035 2036 #define I915_GTT_OFFSET_NONE ((u32)-1) 2037 2038 struct drm_i915_gem_object_ops { 2039 unsigned int flags; 2040 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1 2041 2042 /* Interface between the GEM object and its backing storage. 2043 * get_pages() is called once prior to the use of the associated set 2044 * of pages before to binding them into the GTT, and put_pages() is 2045 * called after we no longer need them. As we expect there to be 2046 * associated cost with migrating pages between the backing storage 2047 * and making them available for the GPU (e.g. clflush), we may hold 2048 * onto the pages after they are no longer referenced by the GPU 2049 * in case they may be used again shortly (for example migrating the 2050 * pages to a different memory domain within the GTT). put_pages() 2051 * will therefore most likely be called when the object itself is 2052 * being released or under memory pressure (where we attempt to 2053 * reap pages for the shrinker). 2054 */ 2055 int (*get_pages)(struct drm_i915_gem_object *); 2056 void (*put_pages)(struct drm_i915_gem_object *); 2057 2058 int (*dmabuf_export)(struct drm_i915_gem_object *); 2059 void (*release)(struct drm_i915_gem_object *); 2060 }; 2061 2062 /* 2063 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2064 * considered to be the frontbuffer for the given plane interface-wise. This 2065 * doesn't mean that the hw necessarily already scans it out, but that any 2066 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2067 * 2068 * We have one bit per pipe and per scanout plane type. 2069 */ 2070 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 2071 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2072 #define INTEL_FRONTBUFFER_BITS \ 2073 (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) 2074 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2075 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2076 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2077 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2078 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ 2079 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2080 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2081 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2082 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2083 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2084 2085 struct drm_i915_gem_object { 2086 struct drm_gem_object base; 2087 2088 const struct drm_i915_gem_object_ops *ops; 2089 2090 /** List of VMAs backed by this object */ 2091 struct list_head vma_list; 2092 2093 /** Stolen memory for this object, instead of being backed by shmem. */ 2094 struct drm_mm_node *stolen; 2095 struct list_head global_list; 2096 2097 struct list_head engine_list[I915_NUM_ENGINES]; 2098 /** Used in execbuf to temporarily hold a ref */ 2099 struct list_head obj_exec_link; 2100 2101 struct list_head batch_pool_link; 2102 2103 /** 2104 * This is set if the object is on the active lists (has pending 2105 * rendering and so a non-zero seqno), and is not set if it i s on 2106 * inactive (ready to be unbound) list. 2107 */ 2108 unsigned int active:I915_NUM_ENGINES; 2109 2110 /** 2111 * This is set if the object has been written to since last bound 2112 * to the GTT 2113 */ 2114 unsigned int dirty:1; 2115 2116 /** 2117 * Fence register bits (if any) for this object. Will be set 2118 * as needed when mapped into the GTT. 2119 * Protected by dev->struct_mutex. 2120 */ 2121 signed int fence_reg:I915_MAX_NUM_FENCE_BITS; 2122 2123 /** 2124 * Advice: are the backing pages purgeable? 2125 */ 2126 unsigned int madv:2; 2127 2128 /** 2129 * Current tiling mode for the object. 2130 */ 2131 unsigned int tiling_mode:2; 2132 /** 2133 * Whether the tiling parameters for the currently associated fence 2134 * register have changed. Note that for the purposes of tracking 2135 * tiling changes we also treat the unfenced register, the register 2136 * slot that the object occupies whilst it executes a fenced 2137 * command (such as BLT on gen2/3), as a "fence". 2138 */ 2139 unsigned int fence_dirty:1; 2140 2141 /** 2142 * Is the object at the current location in the gtt mappable and 2143 * fenceable? Used to avoid costly recalculations. 2144 */ 2145 unsigned int map_and_fenceable:1; 2146 2147 /** 2148 * Whether the current gtt mapping needs to be mappable (and isn't just 2149 * mappable by accident). Track pin and fault separate for a more 2150 * accurate mappable working set. 2151 */ 2152 unsigned int fault_mappable:1; 2153 2154 /* 2155 * Is the object to be mapped as read-only to the GPU 2156 * Only honoured if hardware has relevant pte bit 2157 */ 2158 unsigned long gt_ro:1; 2159 unsigned int cache_level:3; 2160 unsigned int cache_dirty:1; 2161 2162 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; 2163 2164 unsigned int pin_display; 2165 2166 struct sg_table *pages; 2167 int pages_pin_count; 2168 struct get_page { 2169 struct scatterlist *sg; 2170 int last; 2171 } get_page; 2172 void *mapping; 2173 2174 /** Breadcrumb of last rendering to the buffer. 2175 * There can only be one writer, but we allow for multiple readers. 2176 * If there is a writer that necessarily implies that all other 2177 * read requests are complete - but we may only be lazily clearing 2178 * the read requests. A read request is naturally the most recent 2179 * request on a ring, so we may have two different write and read 2180 * requests on one ring where the write request is older than the 2181 * read request. This allows for the CPU to read from an active 2182 * buffer by only waiting for the write to complete. 2183 * */ 2184 struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES]; 2185 struct drm_i915_gem_request *last_write_req; 2186 /** Breadcrumb of last fenced GPU access to the buffer. */ 2187 struct drm_i915_gem_request *last_fenced_req; 2188 2189 /** Current tiling stride for the object, if it's tiled. */ 2190 uint32_t stride; 2191 2192 /** References from framebuffers, locks out tiling changes. */ 2193 unsigned long framebuffer_references; 2194 2195 /** Record of address bit 17 of each page at last unbind. */ 2196 unsigned long *bit_17; 2197 2198 union { 2199 /** for phy allocated objects */ 2200 struct drm_dma_handle *phys_handle; 2201 2202 struct i915_gem_userptr { 2203 uintptr_t ptr; 2204 unsigned read_only :1; 2205 unsigned workers :4; 2206 #define I915_GEM_USERPTR_MAX_WORKERS 15 2207 2208 struct i915_mm_struct *mm; 2209 struct i915_mmu_object *mmu_object; 2210 struct work_struct *work; 2211 } userptr; 2212 }; 2213 }; 2214 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2215 2216 void i915_gem_track_fb(struct drm_i915_gem_object *old, 2217 struct drm_i915_gem_object *new, 2218 unsigned frontbuffer_bits); 2219 2220 /** 2221 * Request queue structure. 2222 * 2223 * The request queue allows us to note sequence numbers that have been emitted 2224 * and may be associated with active buffers to be retired. 2225 * 2226 * By keeping this list, we can avoid having to do questionable sequence 2227 * number comparisons on buffer last_read|write_seqno. It also allows an 2228 * emission time to be associated with the request for tracking how far ahead 2229 * of the GPU the submission is. 2230 * 2231 * The requests are reference counted, so upon creation they should have an 2232 * initial reference taken using kref_init 2233 */ 2234 struct drm_i915_gem_request { 2235 struct kref ref; 2236 2237 /** On Which ring this request was generated */ 2238 struct drm_i915_private *i915; 2239 struct intel_engine_cs *engine; 2240 2241 /** GEM sequence number associated with the previous request, 2242 * when the HWS breadcrumb is equal to this the GPU is processing 2243 * this request. 2244 */ 2245 u32 previous_seqno; 2246 2247 /** GEM sequence number associated with this request, 2248 * when the HWS breadcrumb is equal or greater than this the GPU 2249 * has finished processing this request. 2250 */ 2251 u32 seqno; 2252 2253 /** Position in the ringbuffer of the start of the request */ 2254 u32 head; 2255 2256 /** 2257 * Position in the ringbuffer of the start of the postfix. 2258 * This is required to calculate the maximum available ringbuffer 2259 * space without overwriting the postfix. 2260 */ 2261 u32 postfix; 2262 2263 /** Position in the ringbuffer of the end of the whole request */ 2264 u32 tail; 2265 2266 /** 2267 * Context and ring buffer related to this request 2268 * Contexts are refcounted, so when this request is associated with a 2269 * context, we must increment the context's refcount, to guarantee that 2270 * it persists while any request is linked to it. Requests themselves 2271 * are also refcounted, so the request will only be freed when the last 2272 * reference to it is dismissed, and the code in 2273 * i915_gem_request_free() will then decrement the refcount on the 2274 * context. 2275 */ 2276 struct intel_context *ctx; 2277 struct intel_ringbuffer *ringbuf; 2278 2279 /** Batch buffer related to this request if any (used for 2280 error state dump only) */ 2281 struct drm_i915_gem_object *batch_obj; 2282 2283 /** Time at which this request was emitted, in jiffies. */ 2284 unsigned long emitted_jiffies; 2285 2286 /** global list entry for this request */ 2287 struct list_head list; 2288 2289 struct drm_i915_file_private *file_priv; 2290 /** file_priv list entry for this request */ 2291 struct list_head client_list; 2292 2293 /** process identifier submitting this request */ 2294 struct pid *pid; 2295 2296 /** 2297 * The ELSP only accepts two elements at a time, so we queue 2298 * context/tail pairs on a given queue (ring->execlist_queue) until the 2299 * hardware is available. The queue serves a double purpose: we also use 2300 * it to keep track of the up to 2 contexts currently in the hardware 2301 * (usually one in execution and the other queued up by the GPU): We 2302 * only remove elements from the head of the queue when the hardware 2303 * informs us that an element has been completed. 2304 * 2305 * All accesses to the queue are mediated by a spinlock 2306 * (ring->execlist_lock). 2307 */ 2308 2309 /** Execlist link in the submission queue.*/ 2310 struct list_head execlist_link; 2311 2312 /** Execlists no. of times this request has been sent to the ELSP */ 2313 int elsp_submitted; 2314 2315 }; 2316 2317 struct drm_i915_gem_request * __must_check 2318 i915_gem_request_alloc(struct intel_engine_cs *engine, 2319 struct intel_context *ctx); 2320 void i915_gem_request_cancel(struct drm_i915_gem_request *req); 2321 void i915_gem_request_free(struct kref *req_ref); 2322 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, 2323 struct drm_file *file); 2324 2325 static inline uint32_t 2326 i915_gem_request_get_seqno(struct drm_i915_gem_request *req) 2327 { 2328 return req ? req->seqno : 0; 2329 } 2330 2331 static inline struct intel_engine_cs * 2332 i915_gem_request_get_engine(struct drm_i915_gem_request *req) 2333 { 2334 return req ? req->engine : NULL; 2335 } 2336 2337 static inline struct drm_i915_gem_request * 2338 i915_gem_request_reference(struct drm_i915_gem_request *req) 2339 { 2340 if (req) 2341 kref_get(&req->ref); 2342 return req; 2343 } 2344 2345 static inline void 2346 i915_gem_request_unreference(struct drm_i915_gem_request *req) 2347 { 2348 WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex)); 2349 kref_put(&req->ref, i915_gem_request_free); 2350 } 2351 2352 static inline void 2353 i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req) 2354 { 2355 struct drm_device *dev; 2356 2357 if (!req) 2358 return; 2359 2360 dev = req->engine->dev; 2361 if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex)) 2362 mutex_unlock(&dev->struct_mutex); 2363 } 2364 2365 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, 2366 struct drm_i915_gem_request *src) 2367 { 2368 if (src) 2369 i915_gem_request_reference(src); 2370 2371 if (*pdst) 2372 i915_gem_request_unreference(*pdst); 2373 2374 *pdst = src; 2375 } 2376 2377 /* 2378 * XXX: i915_gem_request_completed should be here but currently needs the 2379 * definition of i915_seqno_passed() which is below. It will be moved in 2380 * a later patch when the call to i915_seqno_passed() is obsoleted... 2381 */ 2382 2383 /* 2384 * A command that requires special handling by the command parser. 2385 */ 2386 struct drm_i915_cmd_descriptor { 2387 /* 2388 * Flags describing how the command parser processes the command. 2389 * 2390 * CMD_DESC_FIXED: The command has a fixed length if this is set, 2391 * a length mask if not set 2392 * CMD_DESC_SKIP: The command is allowed but does not follow the 2393 * standard length encoding for the opcode range in 2394 * which it falls 2395 * CMD_DESC_REJECT: The command is never allowed 2396 * CMD_DESC_REGISTER: The command should be checked against the 2397 * register whitelist for the appropriate ring 2398 * CMD_DESC_MASTER: The command is allowed if the submitting process 2399 * is the DRM master 2400 */ 2401 u32 flags; 2402 #define CMD_DESC_FIXED (1<<0) 2403 #define CMD_DESC_SKIP (1<<1) 2404 #define CMD_DESC_REJECT (1<<2) 2405 #define CMD_DESC_REGISTER (1<<3) 2406 #define CMD_DESC_BITMASK (1<<4) 2407 #define CMD_DESC_MASTER (1<<5) 2408 2409 /* 2410 * The command's unique identification bits and the bitmask to get them. 2411 * This isn't strictly the opcode field as defined in the spec and may 2412 * also include type, subtype, and/or subop fields. 2413 */ 2414 struct { 2415 u32 value; 2416 u32 mask; 2417 } cmd; 2418 2419 /* 2420 * The command's length. The command is either fixed length (i.e. does 2421 * not include a length field) or has a length field mask. The flag 2422 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has 2423 * a length mask. All command entries in a command table must include 2424 * length information. 2425 */ 2426 union { 2427 u32 fixed; 2428 u32 mask; 2429 } length; 2430 2431 /* 2432 * Describes where to find a register address in the command to check 2433 * against the ring's register whitelist. Only valid if flags has the 2434 * CMD_DESC_REGISTER bit set. 2435 * 2436 * A non-zero step value implies that the command may access multiple 2437 * registers in sequence (e.g. LRI), in that case step gives the 2438 * distance in dwords between individual offset fields. 2439 */ 2440 struct { 2441 u32 offset; 2442 u32 mask; 2443 u32 step; 2444 } reg; 2445 2446 #define MAX_CMD_DESC_BITMASKS 3 2447 /* 2448 * Describes command checks where a particular dword is masked and 2449 * compared against an expected value. If the command does not match 2450 * the expected value, the parser rejects it. Only valid if flags has 2451 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero 2452 * are valid. 2453 * 2454 * If the check specifies a non-zero condition_mask then the parser 2455 * only performs the check when the bits specified by condition_mask 2456 * are non-zero. 2457 */ 2458 struct { 2459 u32 offset; 2460 u32 mask; 2461 u32 expected; 2462 u32 condition_offset; 2463 u32 condition_mask; 2464 } bits[MAX_CMD_DESC_BITMASKS]; 2465 }; 2466 2467 /* 2468 * A table of commands requiring special handling by the command parser. 2469 * 2470 * Each ring has an array of tables. Each table consists of an array of command 2471 * descriptors, which must be sorted with command opcodes in ascending order. 2472 */ 2473 struct drm_i915_cmd_table { 2474 const struct drm_i915_cmd_descriptor *table; 2475 int count; 2476 }; 2477 2478 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */ 2479 #define __I915__(p) ({ \ 2480 struct drm_i915_private *__p; \ 2481 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ 2482 __p = (struct drm_i915_private *)p; \ 2483 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ 2484 __p = to_i915((struct drm_device *)p); \ 2485 else \ 2486 BUILD_BUG(); \ 2487 __p; \ 2488 }) 2489 #define INTEL_INFO(p) (&__I915__(p)->info) 2490 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2491 #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) 2492 2493 #define REVID_FOREVER 0xff 2494 /* 2495 * Return true if revision is in range [since,until] inclusive. 2496 * 2497 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 2498 */ 2499 #define IS_REVID(p, since, until) \ 2500 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2501 2502 #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) 2503 #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) 2504 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2505 #define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572) 2506 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 2507 #define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592) 2508 #define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772) 2509 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 2510 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 2511 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 2512 #define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42) 2513 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 2514 #define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001) 2515 #define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011) 2516 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 2517 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 2518 #define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046) 2519 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 2520 #define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ 2521 INTEL_DEVID(dev) == 0x0152 || \ 2522 INTEL_DEVID(dev) == 0x015a) 2523 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2524 #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) 2525 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2526 #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) 2527 #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2528 #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) 2529 #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) 2530 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2531 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2532 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2533 #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2534 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2535 (INTEL_DEVID(dev) & 0xf) == 0xb || \ 2536 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2537 /* ULX machines are also considered ULT. */ 2538 #define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \ 2539 (INTEL_DEVID(dev) & 0xf) == 0xe) 2540 #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ 2541 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2542 #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2543 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) 2544 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2545 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2546 /* ULX machines are also considered ULT. */ 2547 #define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ 2548 INTEL_DEVID(dev) == 0x0A1E) 2549 #define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \ 2550 INTEL_DEVID(dev) == 0x1913 || \ 2551 INTEL_DEVID(dev) == 0x1916 || \ 2552 INTEL_DEVID(dev) == 0x1921 || \ 2553 INTEL_DEVID(dev) == 0x1926) 2554 #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ 2555 INTEL_DEVID(dev) == 0x1915 || \ 2556 INTEL_DEVID(dev) == 0x191E) 2557 #define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \ 2558 INTEL_DEVID(dev) == 0x5913 || \ 2559 INTEL_DEVID(dev) == 0x5916 || \ 2560 INTEL_DEVID(dev) == 0x5921 || \ 2561 INTEL_DEVID(dev) == 0x5926) 2562 #define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \ 2563 INTEL_DEVID(dev) == 0x5915 || \ 2564 INTEL_DEVID(dev) == 0x591E) 2565 #define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ 2566 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2567 #define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ 2568 (INTEL_DEVID(dev) & 0x00F0) == 0x0030) 2569 2570 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2571 2572 #define SKL_REVID_A0 0x0 2573 #define SKL_REVID_B0 0x1 2574 #define SKL_REVID_C0 0x2 2575 #define SKL_REVID_D0 0x3 2576 #define SKL_REVID_E0 0x4 2577 #define SKL_REVID_F0 0x5 2578 2579 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2580 2581 #define BXT_REVID_A0 0x0 2582 #define BXT_REVID_A1 0x1 2583 #define BXT_REVID_B0 0x3 2584 #define BXT_REVID_C0 0x9 2585 2586 #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until)) 2587 2588 /* 2589 * The genX designation typically refers to the render engine, so render 2590 * capability related checks should use IS_GEN, while display and other checks 2591 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2592 * chips, etc.). 2593 */ 2594 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 2595 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 2596 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 2597 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 2598 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2599 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2600 #define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2601 #define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) 2602 2603 #define RENDER_RING (1<<RCS) 2604 #define BSD_RING (1<<VCS) 2605 #define BLT_RING (1<<BCS) 2606 #define VEBOX_RING (1<<VECS) 2607 #define BSD2_RING (1<<VCS2) 2608 #define ALL_ENGINES (~0) 2609 2610 #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) 2611 #define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) 2612 #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) 2613 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2614 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2615 #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) 2616 #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2617 __I915__(dev)->ellc_size) 2618 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2619 2620 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2621 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) 2622 #define USES_PPGTT(dev) (i915.enable_ppgtt) 2623 #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2) 2624 #define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3) 2625 2626 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2627 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2628 2629 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2630 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 2631 2632 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2633 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ 2634 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \ 2635 IS_SKL_REVID(dev, 0, SKL_REVID_F0))) 2636 /* 2637 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2638 * even when in MSI mode. This results in spurious interrupt warnings if the 2639 * legacy irq no. is shared with another device. The kernel then disables that 2640 * interrupt source and so prevents the other device from working properly. 2641 */ 2642 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2643 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2644 2645 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2646 * rows, which changed the alignment requirements and fence programming. 2647 */ 2648 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 2649 IS_I915GM(dev))) 2650 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 2651 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 2652 2653 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 2654 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2655 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2656 2657 #define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) 2658 2659 #define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2660 INTEL_INFO(dev)->gen >= 9) 2661 2662 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 2663 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2664 #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2665 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ 2666 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 2667 #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ 2668 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ 2669 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ 2670 IS_KABYLAKE(dev)) 2671 #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2672 #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2673 2674 #define HAS_CSR(dev) (IS_GEN9(dev)) 2675 2676 #define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2677 #define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2678 2679 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ 2680 INTEL_INFO(dev)->gen >= 8) 2681 2682 #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ 2683 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \ 2684 !IS_BROXTON(dev)) 2685 2686 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2687 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2688 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2689 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2690 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2691 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2692 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2693 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2694 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2695 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2696 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2697 2698 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2699 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) 2700 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2701 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2702 #define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) 2703 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 2704 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 2705 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 2706 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 2707 2708 #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \ 2709 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 2710 2711 /* DPF == dynamic parity feature */ 2712 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 2713 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) 2714 2715 #define GT_FREQUENCY_MULTIPLIER 50 2716 #define GEN9_FREQ_SCALER 3 2717 2718 #include "i915_trace.h" 2719 2720 extern const struct drm_ioctl_desc i915_ioctls[]; 2721 extern int i915_max_ioctl; 2722 2723 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); 2724 extern int i915_resume_switcheroo(struct drm_device *dev); 2725 2726 /* i915_dma.c */ 2727 void __printf(3, 4) 2728 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 2729 const char *fmt, ...); 2730 2731 #define i915_report_error(dev_priv, fmt, ...) \ 2732 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2733 2734 extern int i915_driver_load(struct drm_device *, unsigned long flags); 2735 extern int i915_driver_unload(struct drm_device *); 2736 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); 2737 extern void i915_driver_lastclose(struct drm_device * dev); 2738 extern void i915_driver_preclose(struct drm_device *dev, 2739 struct drm_file *file); 2740 extern void i915_driver_postclose(struct drm_device *dev, 2741 struct drm_file *file); 2742 #ifdef CONFIG_COMPAT 2743 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2744 unsigned long arg); 2745 #endif 2746 extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); 2747 extern bool intel_has_gpu_reset(struct drm_device *dev); 2748 extern int i915_reset(struct drm_device *dev); 2749 extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2750 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2751 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2752 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2753 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2754 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2755 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2756 2757 /* intel_hotplug.c */ 2758 void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); 2759 void intel_hpd_init(struct drm_i915_private *dev_priv); 2760 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2761 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2762 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2763 2764 /* i915_irq.c */ 2765 void i915_queue_hangcheck(struct drm_device *dev); 2766 __printf(3, 4) 2767 void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2768 const char *fmt, ...); 2769 2770 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2771 int intel_irq_install(struct drm_i915_private *dev_priv); 2772 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2773 2774 extern void intel_uncore_sanitize(struct drm_device *dev); 2775 extern void intel_uncore_early_sanitize(struct drm_device *dev, 2776 bool restore_forcewake); 2777 extern void intel_uncore_init(struct drm_device *dev); 2778 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2779 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 2780 extern void intel_uncore_fini(struct drm_device *dev); 2781 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 2782 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2783 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2784 enum forcewake_domains domains); 2785 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2786 enum forcewake_domains domains); 2787 /* Like above but the caller must manage the uncore.lock itself. 2788 * Must be used with I915_READ_FW and friends. 2789 */ 2790 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 2791 enum forcewake_domains domains); 2792 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 2793 enum forcewake_domains domains); 2794 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2795 static inline bool intel_vgpu_active(struct drm_device *dev) 2796 { 2797 return to_i915(dev)->vgpu.active; 2798 } 2799 2800 void 2801 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2802 u32 status_mask); 2803 2804 void 2805 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2806 u32 status_mask); 2807 2808 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2809 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2810 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 2811 uint32_t mask, 2812 uint32_t bits); 2813 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 2814 uint32_t interrupt_mask, 2815 uint32_t enabled_irq_mask); 2816 static inline void 2817 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2818 { 2819 ilk_update_display_irq(dev_priv, bits, bits); 2820 } 2821 static inline void 2822 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2823 { 2824 ilk_update_display_irq(dev_priv, bits, 0); 2825 } 2826 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 2827 enum pipe pipe, 2828 uint32_t interrupt_mask, 2829 uint32_t enabled_irq_mask); 2830 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 2831 enum pipe pipe, uint32_t bits) 2832 { 2833 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 2834 } 2835 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 2836 enum pipe pipe, uint32_t bits) 2837 { 2838 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 2839 } 2840 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2841 uint32_t interrupt_mask, 2842 uint32_t enabled_irq_mask); 2843 static inline void 2844 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2845 { 2846 ibx_display_interrupt_update(dev_priv, bits, bits); 2847 } 2848 static inline void 2849 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2850 { 2851 ibx_display_interrupt_update(dev_priv, bits, 0); 2852 } 2853 2854 2855 /* i915_gem.c */ 2856 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2857 struct drm_file *file_priv); 2858 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2859 struct drm_file *file_priv); 2860 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 2861 struct drm_file *file_priv); 2862 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 2863 struct drm_file *file_priv); 2864 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2865 struct drm_file *file_priv); 2866 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 2867 struct drm_file *file_priv); 2868 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2869 struct drm_file *file_priv); 2870 void i915_gem_execbuffer_move_to_active(struct list_head *vmas, 2871 struct drm_i915_gem_request *req); 2872 void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params); 2873 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, 2874 struct drm_i915_gem_execbuffer2 *args, 2875 struct list_head *vmas); 2876 int i915_gem_execbuffer(struct drm_device *dev, void *data, 2877 struct drm_file *file_priv); 2878 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2879 struct drm_file *file_priv); 2880 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2881 struct drm_file *file_priv); 2882 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 2883 struct drm_file *file); 2884 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 2885 struct drm_file *file); 2886 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2887 struct drm_file *file_priv); 2888 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2889 struct drm_file *file_priv); 2890 int i915_gem_set_tiling(struct drm_device *dev, void *data, 2891 struct drm_file *file_priv); 2892 int i915_gem_get_tiling(struct drm_device *dev, void *data, 2893 struct drm_file *file_priv); 2894 int i915_gem_init_userptr(struct drm_device *dev); 2895 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 2896 struct drm_file *file); 2897 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 2898 struct drm_file *file_priv); 2899 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2900 struct drm_file *file_priv); 2901 void i915_gem_load_init(struct drm_device *dev); 2902 void i915_gem_load_cleanup(struct drm_device *dev); 2903 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 2904 void *i915_gem_object_alloc(struct drm_device *dev); 2905 void i915_gem_object_free(struct drm_i915_gem_object *obj); 2906 void i915_gem_object_init(struct drm_i915_gem_object *obj, 2907 const struct drm_i915_gem_object_ops *ops); 2908 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 2909 size_t size); 2910 struct drm_i915_gem_object *i915_gem_object_create_from_data( 2911 struct drm_device *dev, const void *data, size_t size); 2912 void i915_gem_free_object(struct drm_gem_object *obj); 2913 void i915_gem_vma_destroy(struct i915_vma *vma); 2914 2915 /* Flags used by pin/bind&friends. */ 2916 #define PIN_MAPPABLE (1<<0) 2917 #define PIN_NONBLOCK (1<<1) 2918 #define PIN_GLOBAL (1<<2) 2919 #define PIN_OFFSET_BIAS (1<<3) 2920 #define PIN_USER (1<<4) 2921 #define PIN_UPDATE (1<<5) 2922 #define PIN_ZONE_4G (1<<6) 2923 #define PIN_HIGH (1<<7) 2924 #define PIN_OFFSET_FIXED (1<<8) 2925 #define PIN_OFFSET_MASK (~4095) 2926 int __must_check 2927 i915_gem_object_pin(struct drm_i915_gem_object *obj, 2928 struct i915_address_space *vm, 2929 uint32_t alignment, 2930 uint64_t flags); 2931 int __must_check 2932 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 2933 const struct i915_ggtt_view *view, 2934 uint32_t alignment, 2935 uint64_t flags); 2936 2937 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2938 u32 flags); 2939 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); 2940 int __must_check i915_vma_unbind(struct i915_vma *vma); 2941 /* 2942 * BEWARE: Do not use the function below unless you can _absolutely_ 2943 * _guarantee_ VMA in question is _not in use_ anywhere. 2944 */ 2945 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma); 2946 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2947 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2948 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2949 2950 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 2951 int *needs_clflush); 2952 2953 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2954 2955 static inline int __sg_page_count(struct scatterlist *sg) 2956 { 2957 return sg->length >> PAGE_SHIFT; 2958 } 2959 2960 struct page * 2961 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); 2962 2963 static inline struct page * 2964 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 2965 { 2966 if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT)) 2967 return NULL; 2968 2969 if (n < obj->get_page.last) { 2970 obj->get_page.sg = obj->pages->sgl; 2971 obj->get_page.last = 0; 2972 } 2973 2974 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { 2975 obj->get_page.last += __sg_page_count(obj->get_page.sg++); 2976 if (unlikely(sg_is_chain(obj->get_page.sg))) 2977 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); 2978 } 2979 2980 return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last); 2981 } 2982 2983 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 2984 { 2985 BUG_ON(obj->pages == NULL); 2986 obj->pages_pin_count++; 2987 } 2988 2989 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2990 { 2991 BUG_ON(obj->pages_pin_count == 0); 2992 obj->pages_pin_count--; 2993 } 2994 2995 /** 2996 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 2997 * @obj - the object to map into kernel address space 2998 * 2999 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 3000 * pages and then returns a contiguous mapping of the backing storage into 3001 * the kernel address space. 3002 * 3003 * The caller must hold the struct_mutex. 3004 * 3005 * Returns the pointer through which to access the backing storage. 3006 */ 3007 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj); 3008 3009 /** 3010 * i915_gem_object_unpin_map - releases an earlier mapping 3011 * @obj - the object to unmap 3012 * 3013 * After pinning the object and mapping its pages, once you are finished 3014 * with your access, call i915_gem_object_unpin_map() to release the pin 3015 * upon the mapping. Once the pin count reaches zero, that mapping may be 3016 * removed. 3017 * 3018 * The caller must hold the struct_mutex. 3019 */ 3020 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3021 { 3022 lockdep_assert_held(&obj->base.dev->struct_mutex); 3023 i915_gem_object_unpin_pages(obj); 3024 } 3025 3026 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3027 int i915_gem_object_sync(struct drm_i915_gem_object *obj, 3028 struct intel_engine_cs *to, 3029 struct drm_i915_gem_request **to_req); 3030 void i915_vma_move_to_active(struct i915_vma *vma, 3031 struct drm_i915_gem_request *req); 3032 int i915_gem_dumb_create(struct drm_file *file_priv, 3033 struct drm_device *dev, 3034 struct drm_mode_create_dumb *args); 3035 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3036 uint32_t handle, uint64_t *offset); 3037 /** 3038 * Returns true if seq1 is later than seq2. 3039 */ 3040 static inline bool 3041 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 3042 { 3043 return (int32_t)(seq1 - seq2) >= 0; 3044 } 3045 3046 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, 3047 bool lazy_coherency) 3048 { 3049 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3050 req->engine->irq_seqno_barrier(req->engine); 3051 return i915_seqno_passed(req->engine->get_seqno(req->engine), 3052 req->previous_seqno); 3053 } 3054 3055 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 3056 bool lazy_coherency) 3057 { 3058 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3059 req->engine->irq_seqno_barrier(req->engine); 3060 return i915_seqno_passed(req->engine->get_seqno(req->engine), 3061 req->seqno); 3062 } 3063 3064 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 3065 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 3066 3067 struct drm_i915_gem_request * 3068 i915_gem_find_active_request(struct intel_engine_cs *engine); 3069 3070 bool i915_gem_retire_requests(struct drm_device *dev); 3071 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); 3072 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, 3073 bool interruptible); 3074 3075 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 3076 { 3077 return unlikely(atomic_read(&error->reset_counter) 3078 & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); 3079 } 3080 3081 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 3082 { 3083 return atomic_read(&error->reset_counter) & I915_WEDGED; 3084 } 3085 3086 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3087 { 3088 return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; 3089 } 3090 3091 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) 3092 { 3093 return dev_priv->gpu_error.stop_rings == 0 || 3094 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN; 3095 } 3096 3097 static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv) 3098 { 3099 return dev_priv->gpu_error.stop_rings == 0 || 3100 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN; 3101 } 3102 3103 void i915_gem_reset(struct drm_device *dev); 3104 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3105 int __must_check i915_gem_init(struct drm_device *dev); 3106 int i915_gem_init_engines(struct drm_device *dev); 3107 int __must_check i915_gem_init_hw(struct drm_device *dev); 3108 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); 3109 void i915_gem_init_swizzling(struct drm_device *dev); 3110 void i915_gem_cleanup_engines(struct drm_device *dev); 3111 int __must_check i915_gpu_idle(struct drm_device *dev); 3112 int __must_check i915_gem_suspend(struct drm_device *dev); 3113 void __i915_add_request(struct drm_i915_gem_request *req, 3114 struct drm_i915_gem_object *batch_obj, 3115 bool flush_caches); 3116 #define i915_add_request(req) \ 3117 __i915_add_request(req, NULL, true) 3118 #define i915_add_request_no_flush(req) \ 3119 __i915_add_request(req, NULL, false) 3120 int __i915_wait_request(struct drm_i915_gem_request *req, 3121 unsigned reset_counter, 3122 bool interruptible, 3123 s64 *timeout, 3124 struct intel_rps_client *rps); 3125 int __must_check i915_wait_request(struct drm_i915_gem_request *req); 3126 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 3127 int __must_check 3128 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 3129 bool readonly); 3130 int __must_check 3131 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 3132 bool write); 3133 int __must_check 3134 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3135 int __must_check 3136 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3137 u32 alignment, 3138 const struct i915_ggtt_view *view); 3139 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, 3140 const struct i915_ggtt_view *view); 3141 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3142 int align); 3143 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 3144 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3145 3146 uint32_t 3147 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); 3148 uint32_t 3149 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, 3150 int tiling_mode, bool fenced); 3151 3152 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3153 enum i915_cache_level cache_level); 3154 3155 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3156 struct dma_buf *dma_buf); 3157 3158 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3159 struct drm_gem_object *gem_obj, int flags); 3160 3161 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 3162 const struct i915_ggtt_view *view); 3163 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, 3164 struct i915_address_space *vm); 3165 static inline u64 3166 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) 3167 { 3168 return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); 3169 } 3170 3171 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); 3172 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 3173 const struct i915_ggtt_view *view); 3174 bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 3175 struct i915_address_space *vm); 3176 3177 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 3178 struct i915_address_space *vm); 3179 struct i915_vma * 3180 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3181 struct i915_address_space *vm); 3182 struct i915_vma * 3183 i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, 3184 const struct i915_ggtt_view *view); 3185 3186 struct i915_vma * 3187 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3188 struct i915_address_space *vm); 3189 struct i915_vma * 3190 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, 3191 const struct i915_ggtt_view *view); 3192 3193 static inline struct i915_vma * 3194 i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) 3195 { 3196 return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); 3197 } 3198 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); 3199 3200 /* Some GGTT VM helpers */ 3201 static inline struct i915_hw_ppgtt * 3202 i915_vm_to_ppgtt(struct i915_address_space *vm) 3203 { 3204 return container_of(vm, struct i915_hw_ppgtt, base); 3205 } 3206 3207 3208 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) 3209 { 3210 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); 3211 } 3212 3213 static inline unsigned long 3214 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 3215 { 3216 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3217 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3218 3219 return i915_gem_obj_size(obj, &ggtt->base); 3220 } 3221 3222 static inline int __must_check 3223 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 3224 uint32_t alignment, 3225 unsigned flags) 3226 { 3227 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3228 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3229 3230 return i915_gem_object_pin(obj, &ggtt->base, 3231 alignment, flags | PIN_GLOBAL); 3232 } 3233 3234 static inline int 3235 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) 3236 { 3237 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); 3238 } 3239 3240 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 3241 const struct i915_ggtt_view *view); 3242 static inline void 3243 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) 3244 { 3245 i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); 3246 } 3247 3248 /* i915_gem_fence.c */ 3249 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 3250 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 3251 3252 bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); 3253 void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); 3254 3255 void i915_gem_restore_fences(struct drm_device *dev); 3256 3257 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 3258 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 3259 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 3260 3261 /* i915_gem_context.c */ 3262 int __must_check i915_gem_context_init(struct drm_device *dev); 3263 void i915_gem_context_fini(struct drm_device *dev); 3264 void i915_gem_context_reset(struct drm_device *dev); 3265 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3266 int i915_gem_context_enable(struct drm_i915_gem_request *req); 3267 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3268 int i915_switch_context(struct drm_i915_gem_request *req); 3269 struct intel_context * 3270 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 3271 void i915_gem_context_free(struct kref *ctx_ref); 3272 struct drm_i915_gem_object * 3273 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3274 static inline void i915_gem_context_reference(struct intel_context *ctx) 3275 { 3276 kref_get(&ctx->ref); 3277 } 3278 3279 static inline void i915_gem_context_unreference(struct intel_context *ctx) 3280 { 3281 kref_put(&ctx->ref, i915_gem_context_free); 3282 } 3283 3284 static inline bool i915_gem_context_is_default(const struct intel_context *c) 3285 { 3286 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3287 } 3288 3289 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 3290 struct drm_file *file); 3291 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 3292 struct drm_file *file); 3293 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 3294 struct drm_file *file_priv); 3295 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3296 struct drm_file *file_priv); 3297 3298 /* i915_gem_evict.c */ 3299 int __must_check i915_gem_evict_something(struct drm_device *dev, 3300 struct i915_address_space *vm, 3301 int min_size, 3302 unsigned alignment, 3303 unsigned cache_level, 3304 unsigned long start, 3305 unsigned long end, 3306 unsigned flags); 3307 int __must_check i915_gem_evict_for_vma(struct i915_vma *target); 3308 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3309 3310 /* belongs in i915_gem_gtt.h */ 3311 static inline void i915_gem_chipset_flush(struct drm_device *dev) 3312 { 3313 if (INTEL_INFO(dev)->gen < 6) 3314 intel_gtt_chipset_flush(); 3315 } 3316 3317 /* i915_gem_stolen.c */ 3318 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3319 struct drm_mm_node *node, u64 size, 3320 unsigned alignment); 3321 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3322 struct drm_mm_node *node, u64 size, 3323 unsigned alignment, u64 start, 3324 u64 end); 3325 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3326 struct drm_mm_node *node); 3327 int i915_gem_init_stolen(struct drm_device *dev); 3328 void i915_gem_cleanup_stolen(struct drm_device *dev); 3329 struct drm_i915_gem_object * 3330 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 3331 struct drm_i915_gem_object * 3332 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 3333 u32 stolen_offset, 3334 u32 gtt_offset, 3335 u32 size); 3336 3337 /* i915_gem_shrinker.c */ 3338 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3339 unsigned long target, 3340 unsigned flags); 3341 #define I915_SHRINK_PURGEABLE 0x1 3342 #define I915_SHRINK_UNBOUND 0x2 3343 #define I915_SHRINK_BOUND 0x4 3344 #define I915_SHRINK_ACTIVE 0x8 3345 #define I915_SHRINK_VMAPS 0x10 3346 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3347 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3348 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); 3349 3350 3351 /* i915_gem_tiling.c */ 3352 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3353 { 3354 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3355 3356 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3357 obj->tiling_mode != I915_TILING_NONE; 3358 } 3359 3360 /* i915_gem_debug.c */ 3361 #if WATCH_LISTS 3362 int i915_verify_lists(struct drm_device *dev); 3363 #else 3364 #define i915_verify_lists(dev) 0 3365 #endif 3366 3367 /* i915_debugfs.c */ 3368 int i915_debugfs_init(struct drm_minor *minor); 3369 void i915_debugfs_cleanup(struct drm_minor *minor); 3370 #ifdef CONFIG_DEBUG_FS 3371 int i915_debugfs_connector_add(struct drm_connector *connector); 3372 void intel_display_crc_init(struct drm_device *dev); 3373 #else 3374 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3375 { return 0; } 3376 static inline void intel_display_crc_init(struct drm_device *dev) {} 3377 #endif 3378 3379 /* i915_gpu_error.c */ 3380 __printf(2, 3) 3381 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3382 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3383 const struct i915_error_state_file_priv *error); 3384 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3385 struct drm_i915_private *i915, 3386 size_t count, loff_t pos); 3387 static inline void i915_error_state_buf_release( 3388 struct drm_i915_error_state_buf *eb) 3389 { 3390 kfree(eb->buf); 3391 } 3392 void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, 3393 const char *error_msg); 3394 void i915_error_state_get(struct drm_device *dev, 3395 struct i915_error_state_file_priv *error_priv); 3396 void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3397 void i915_destroy_error_state(struct drm_device *dev); 3398 3399 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 3400 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3401 3402 /* i915_cmd_parser.c */ 3403 int i915_cmd_parser_get_version(void); 3404 int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); 3405 void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); 3406 bool i915_needs_cmd_parser(struct intel_engine_cs *engine); 3407 int i915_parse_cmds(struct intel_engine_cs *engine, 3408 struct drm_i915_gem_object *batch_obj, 3409 struct drm_i915_gem_object *shadow_batch_obj, 3410 u32 batch_start_offset, 3411 u32 batch_len, 3412 bool is_master); 3413 3414 /* i915_suspend.c */ 3415 extern int i915_save_state(struct drm_device *dev); 3416 extern int i915_restore_state(struct drm_device *dev); 3417 3418 /* i915_sysfs.c */ 3419 void i915_setup_sysfs(struct drm_device *dev_priv); 3420 void i915_teardown_sysfs(struct drm_device *dev_priv); 3421 3422 /* intel_i2c.c */ 3423 extern int intel_setup_gmbus(struct drm_device *dev); 3424 extern void intel_teardown_gmbus(struct drm_device *dev); 3425 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, 3426 unsigned int pin); 3427 3428 extern struct i2c_adapter * 3429 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); 3430 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3431 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3432 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3433 { 3434 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 3435 } 3436 extern void intel_i2c_reset(struct drm_device *dev); 3437 3438 /* intel_bios.c */ 3439 int intel_bios_init(struct drm_i915_private *dev_priv); 3440 bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3441 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3442 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3443 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3444 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 3445 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, 3446 enum port port); 3447 3448 /* intel_opregion.c */ 3449 #ifdef CONFIG_ACPI 3450 extern int intel_opregion_setup(struct drm_device *dev); 3451 extern void intel_opregion_init(struct drm_device *dev); 3452 extern void intel_opregion_fini(struct drm_device *dev); 3453 extern void intel_opregion_asle_intr(struct drm_device *dev); 3454 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 3455 bool enable); 3456 extern int intel_opregion_notify_adapter(struct drm_device *dev, 3457 pci_power_t state); 3458 #else 3459 static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } 3460 static inline void intel_opregion_init(struct drm_device *dev) { return; } 3461 static inline void intel_opregion_fini(struct drm_device *dev) { return; } 3462 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 3463 static inline int 3464 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) 3465 { 3466 return 0; 3467 } 3468 static inline int 3469 intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) 3470 { 3471 return 0; 3472 } 3473 #endif 3474 3475 /* intel_acpi.c */ 3476 #ifdef CONFIG_ACPI 3477 extern void intel_register_dsm_handler(void); 3478 extern void intel_unregister_dsm_handler(void); 3479 #else 3480 static inline void intel_register_dsm_handler(void) { return; } 3481 static inline void intel_unregister_dsm_handler(void) { return; } 3482 #endif /* CONFIG_ACPI */ 3483 3484 /* modesetting */ 3485 extern void intel_modeset_init_hw(struct drm_device *dev); 3486 extern void intel_modeset_init(struct drm_device *dev); 3487 extern void intel_modeset_gem_init(struct drm_device *dev); 3488 extern void intel_modeset_cleanup(struct drm_device *dev); 3489 extern void intel_connector_unregister(struct intel_connector *); 3490 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 3491 extern void intel_display_resume(struct drm_device *dev); 3492 extern void i915_redisable_vga(struct drm_device *dev); 3493 extern void i915_redisable_vga_power_on(struct drm_device *dev); 3494 extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 3495 extern void intel_init_pch_refclk(struct drm_device *dev); 3496 extern void intel_set_rps(struct drm_device *dev, u8 val); 3497 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3498 bool enable); 3499 extern void intel_detect_pch(struct drm_device *dev); 3500 extern int intel_enable_rc6(const struct drm_device *dev); 3501 3502 extern bool i915_semaphore_is_enabled(struct drm_device *dev); 3503 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3504 struct drm_file *file); 3505 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, 3506 struct drm_file *file); 3507 3508 /* overlay */ 3509 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 3510 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3511 struct intel_overlay_error_state *error); 3512 3513 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 3514 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3515 struct drm_device *dev, 3516 struct intel_display_error_state *error); 3517 3518 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); 3519 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); 3520 3521 /* intel_sideband.c */ 3522 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); 3523 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); 3524 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); 3525 u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg); 3526 void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val); 3527 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); 3528 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3529 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); 3530 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3531 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); 3532 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3533 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg); 3534 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val); 3535 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 3536 enum intel_sbi_destination destination); 3537 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 3538 enum intel_sbi_destination destination); 3539 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); 3540 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3541 3542 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3543 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3544 3545 #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) 3546 #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) 3547 3548 #define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) 3549 #define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) 3550 #define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) 3551 #define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) 3552 3553 #define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) 3554 #define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) 3555 #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) 3556 #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) 3557 3558 /* Be very careful with read/write 64-bit values. On 32-bit machines, they 3559 * will be implemented using 2 32-bit writes in an arbitrary order with 3560 * an arbitrary delay between them. This can cause the hardware to 3561 * act upon the intermediate value, possibly leading to corruption and 3562 * machine death. You have been warned. 3563 */ 3564 #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) 3565 #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3566 3567 #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3568 u32 upper, lower, old_upper, loop = 0; \ 3569 upper = I915_READ(upper_reg); \ 3570 do { \ 3571 old_upper = upper; \ 3572 lower = I915_READ(lower_reg); \ 3573 upper = I915_READ(upper_reg); \ 3574 } while (upper != old_upper && loop++ < 2); \ 3575 (u64)upper << 32 | lower; }) 3576 3577 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3578 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3579 3580 #define __raw_read(x, s) \ 3581 static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \ 3582 i915_reg_t reg) \ 3583 { \ 3584 return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ 3585 } 3586 3587 #define __raw_write(x, s) \ 3588 static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \ 3589 i915_reg_t reg, uint##x##_t val) \ 3590 { \ 3591 write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \ 3592 } 3593 __raw_read(8, b) 3594 __raw_read(16, w) 3595 __raw_read(32, l) 3596 __raw_read(64, q) 3597 3598 __raw_write(8, b) 3599 __raw_write(16, w) 3600 __raw_write(32, l) 3601 __raw_write(64, q) 3602 3603 #undef __raw_read 3604 #undef __raw_write 3605 3606 /* These are untraced mmio-accessors that are only valid to be used inside 3607 * criticial sections inside IRQ handlers where forcewake is explicitly 3608 * controlled. 3609 * Think twice, and think again, before using these. 3610 * Note: Should only be used between intel_uncore_forcewake_irqlock() and 3611 * intel_uncore_forcewake_irqunlock(). 3612 */ 3613 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__)) 3614 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__)) 3615 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) 3616 3617 /* "Broadcast RGB" property */ 3618 #define INTEL_BROADCAST_RGB_AUTO 0 3619 #define INTEL_BROADCAST_RGB_FULL 1 3620 #define INTEL_BROADCAST_RGB_LIMITED 2 3621 3622 static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev) 3623 { 3624 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 3625 return VLV_VGACNTRL; 3626 else if (INTEL_INFO(dev)->gen >= 5) 3627 return CPU_VGACNTRL; 3628 else 3629 return VGACNTRL; 3630 } 3631 3632 static inline void __user *to_user_ptr(u64 address) 3633 { 3634 return (void __user *)(uintptr_t)address; 3635 } 3636 3637 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) 3638 { 3639 unsigned long j = msecs_to_jiffies(m); 3640 3641 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3642 } 3643 3644 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) 3645 { 3646 return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); 3647 } 3648 3649 static inline unsigned long 3650 timespec_to_jiffies_timeout(const struct timespec *value) 3651 { 3652 unsigned long j = timespec_to_jiffies(value); 3653 3654 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3655 } 3656 3657 /* 3658 * If you need to wait X milliseconds between events A and B, but event B 3659 * doesn't happen exactly after event A, you record the timestamp (jiffies) of 3660 * when event A happened, then just before event B you call this function and 3661 * pass the timestamp as the first argument, and X as the second argument. 3662 */ 3663 static inline void 3664 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) 3665 { 3666 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; 3667 3668 /* 3669 * Don't re-read the value of "jiffies" every time since it may change 3670 * behind our back and break the math. 3671 */ 3672 tmp_jiffies = jiffies; 3673 target_jiffies = timestamp_jiffies + 3674 msecs_to_jiffies_timeout(to_wait_ms); 3675 3676 if (time_after(target_jiffies, tmp_jiffies)) { 3677 remaining_jiffies = target_jiffies - tmp_jiffies; 3678 while (remaining_jiffies) 3679 remaining_jiffies = 3680 schedule_timeout_uninterruptible(remaining_jiffies); 3681 } 3682 } 3683 3684 static inline void i915_trace_irq_get(struct intel_engine_cs *engine, 3685 struct drm_i915_gem_request *req) 3686 { 3687 if (engine->trace_irq_req == NULL && engine->irq_get(engine)) 3688 i915_gem_request_assign(&engine->trace_irq_req, req); 3689 } 3690 3691 #endif 3692