/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "i915_gem_gtt.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."
#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_PORT_DDI_A_2_LANES,
	POWER_DOMAIN_PORT_DDI_A_4_LANES,
	POWER_DOMAIN_PORT_DDI_B_2_LANES,
	POWER_DOMAIN_PORT_DDI_B_4_LANES,
	POWER_DOMAIN_PORT_DDI_C_2_LANES,
	POWER_DOMAIN_PORT_DDI_C_4_LANES,
	POWER_DOMAIN_PORT_DDI_D_2_LANES,
	POWER_DOMAIN_PORT_DDI_D_4_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)
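/*
 * Illustrative note (not part of the driver): the name/domain lookup
 * macros above rely on the enum values being laid out contiguously, so
 * plain arithmetic performs the mapping, e.g.:
 *
 *	pipe_name(PIPE_B)                       -> 'B'
 *	POWER_DOMAIN_PIPE(PIPE_C)               -> POWER_DOMAIN_PIPE_C
 *	POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) -> POWER_DOMAIN_TRANSCODER_EDP
 */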
enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		if ((intel_connector)->base.encoder == (__encoder))

struct drm_i915_private;
struct i915_mmu_object;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
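/*
 * Illustrative usage sketch (not part of the driver; the mode and link
 * numbers below are hypothetical): computing the DP M/N values for a
 * 24bpp, 4-lane link, e.g. a 148500kHz pixel clock on a 270000kHz (HBR)
 * link, matching the declaration above.
 */
static inline void __example_link_m_n(struct intel_link_m_n *m_n)
{
	intel_link_compute_m_n(24, 4, 148500, 270000, m_n);
}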
struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0
#define WATCH_GTT	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE	(8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error;	/* gen6+ */
	u32 err_int;	/* gen7 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];

		/* Register state */
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];

		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
};
struct intel_connector;
struct intel_crtc_config;
struct intel_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 * match the P divider from @match_clock
	 * used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	void (*get_plane_config)(struct intel_crtc *,
				 struct intel_plane_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct intel_engine_cs *ring,
			  uint32_t flags);
	void (*update_primary_plane)(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};
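/*
 * Illustrative usage sketch (hypothetical caller and arguments, not part
 * of this header): platform code is dispatched through the vtable above,
 * e.g. a PLL search where passing match_clock == NULL skips the LVDS
 * downclock P-divider match:
 *
 *	struct dpll clock;
 *
 *	if (!dev_priv->display.find_dpll(limit, crtc, target_khz,
 *					 refclk_khz, NULL, &clock))
 *		return -EINVAL;
 */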
struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       int fw_engine);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       int fw_engine);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;

	unsigned fw_rendercount;
	unsigned fw_mediacount;

	struct timer_list force_wake_timer;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int dpll_offsets[I915_MAX_PIPES];
	int dpll_md_offsets[I915_MAX_PIPES];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON
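/*
 * Illustrative note (not part of the driver): DEV_INFO_FOR_EACH_FLAG is
 * an X-macro, so the invocation inside struct intel_device_info expands
 * each flag name into a single-bit bitfield:
 *
 *	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON)
 *		-> u8 is_mobile:1 ; u8 is_i85x:1 ; ... ; u8 has_fpga_dbg:1
 *
 * Other callers can reuse the same flag list with a different func/sep
 * pair, e.g. to stringify every flag name for debug output.
 */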
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct intel_context {
	struct kref ref;
	int id;
	bool is_initialized;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct intel_engine_cs *last_ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_address_space *vm;

	struct list_head link;
};

struct i915_fbc {
	unsigned long size;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

struct i915_drrs {
	struct intel_connector *connector;
};

struct i915_psr {
	bool sink_support;
	bool source_ok;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_HIST_CTL_B;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveFBC_CONTROL;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 clock_gate_dis2;
};
struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};
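/*
 * Illustrative sketch (hypothetical callback names, not part of the
 * driver): a platform typically defines one ops instance per power well
 * type and shares it across all the wells it drives, e.g.:
 *
 *	static const struct i915_power_well_ops example_always_on_ops = {
 *		.sync_hw = example_noop_sync_hw,
 *		.enable = example_noop_enable,
 *		.disable = example_noop_disable,
 *		.is_enabled = example_always_on_is_enabled,
 *	};
 */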
struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the global GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct shrinker shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests? Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/* the indicator for dispatch video commands on two BSD rings */
	int bsd_ring_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};
struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit
	 * set) mean that a reset is in progress, and even values mean that
	 * the (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that the hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and that it needs to restart the entire ioctl (since
	 * most likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* Userspace knobs for gpu hang simulation;
	 * combines both a ring mask, and extra flags
	 */
	u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN	(1 << 31)
#define I915_STOP_RING_ALLOW_WARN	(1 << 30)

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
};
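/*
 * Illustrative sketch (hypothetical helpers, not the driver's API): how
 * the reset_counter encoding documented above is meant to be read. The
 * real checks live in the GEM code.
 */
static inline bool __example_reset_in_progress(struct i915_gpu_error *error)
{
	unsigned int counter = atomic_read(&error->reset_counter);

	/* Lowest bit set: a reset has been triggered but not yet handled. */
	return counter & I915_RESET_IN_PROGRESS_FLAG;
}

static inline bool __example_terminally_wedged(struct i915_gpu_error *error)
{
	/* I915_WEDGED set: the reset failed and there is no recovery. */
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}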
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int has_mipi:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, not even register access, so we get neither
 * interrupts nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_init_runtime_pm),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs
 * and goes back to false exactly before we reenable the IRQs. We use this
 * variable to check if someone is trying to enable/disable IRQs while they're
 * supposed to be disabled. This shouldn't happen, so we print an error message
 * if it does.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	bool suspended;
	bool irqs_disabled;
};
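/*
 * Illustrative usage sketch (not part of this header): per the rules
 * above, hardware access is bracketed by a runtime PM reference, e.g.:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... read/write registers ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * or by taking the relevant display power domain via
 * intel_display_power_get() and dropping it again when done.
 */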
enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct timer_list hotplug_reenable_timer;

	struct i915_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	spinlock_t backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int vlv_cdclk_freq;
	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, as otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
#if defined(CONFIG_MMU_NOTIFIER)
	DECLARE_HASHTABLE(mmu_notifiers, 7);
#endif

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
	/* the fbdev registered on this device */
	struct intel_fbdev *fbdev;
#endif

	/*
	 * The console may be contended at resume, but we don't
	 * want to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];

		/* current hardware state */
		struct ilk_wm_values hw;
	} wm;

	struct i915_runtime_pm pm;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
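/*
 * Illustrative usage sketch (hypothetical loop body): the iterator above
 * skips any ring slot that was never initialised on this platform.
 *
 *	struct intel_engine_cs *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG("%s is initialised\n", ring->name);
 */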
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages and before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
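/*
 * Illustrative note (an assumption about the backends, not stated here):
 * each backing-storage type supplies its own ops table; a backend's
 * get_pages() populates obj->pages and put_pages() tears it down, with
 * release() doing any final backend-specific cleanup (e.g. for the
 * userptr objects described by the union further below).
 */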
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list;
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:3;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	struct intel_engine_cs *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	unsigned long user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	drm_dma_handle_t *phys_handle;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;
			unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15

			struct mm_struct *mm;
			struct i915_mmu_object *mn;
			struct work_struct *work;
		} userptr;
	};
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_engine_cs *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Context related to this request */
	struct intel_context *ctx;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;
	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
		struct delayed_work idle_work;
	} mm;
	struct idr context_idr;

	atomic_t rps_wait_boost;
	struct intel_engine_cs *bsd_ring;
};

/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
	 *
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 a length mask if not set
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                which it falls
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 * CMD_DESC_MASTER: The command is allowed if the submitting process
	 *                  is the DRM master
	 */
	u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)
#define CMD_DESC_MASTER   (1<<5)

	/*
	 * The command's unique identification bits and the bitmask to get them.
	 * This isn't strictly the opcode field as defined in the spec and may
	 * also include type, subtype, and/or subop fields.
	 */
	struct {
		u32 value;
		u32 mask;
	} cmd;

	/*
	 * The command's length. The command is either fixed length (i.e. does
	 * not include a length field) or has a length field mask. The flag
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
	 * a length mask. All command entries in a command table must include
	 * length information.
	 */
	union {
		u32 fixed;
		u32 mask;
	} length;

	/*
	 * Describes where to find a register address in the command to check
	 * against the ring's register whitelist. Only valid if flags has the
	 * CMD_DESC_REGISTER bit set.
	 */
	struct {
		u32 offset;
		u32 mask;
	} reg;

#define MAX_CMD_DESC_BITMASKS 3
	/*
	 * Describes command checks where a particular dword is masked and
	 * compared against an expected value. If the command does not match
	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
	 *
	 * If the check specifies a non-zero condition_mask then the parser
	 * only performs the check when the bits specified by condition_mask
	 * are non-zero.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
		u32 condition_offset;
		u32 condition_mask;
	} bits[MAX_CMD_DESC_BITMASKS];
};

/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each ring has an array of tables. Each table consists of an array of command
 * descriptors, which must be sorted with command opcodes in ascending order.
 */
struct drm_i915_cmd_table {
	const struct drm_i915_cmd_descriptor *table;
	int count;
};
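/*
 * Illustrative sketch (hypothetical opcode and field values, not a real
 * command table): a one-entry table using the descriptor layout above,
 * describing a fixed-length command whose dword 1 holds a register
 * offset that must pass the ring's whitelist check.
 */
static const struct drm_i915_cmd_descriptor __example_cmd_descs[] = {
	{
		.flags = CMD_DESC_FIXED | CMD_DESC_REGISTER,
		.cmd = { .value = 0x11000000, .mask = 0xff000000 },
		.length = { .fixed = 3 },
		.reg = { .offset = 1, .mask = 0x007ffffc },
	},
};

static const struct drm_i915_cmd_table __example_cmd_table = {
	.table = __example_cmd_descs,
	.count = ARRAY_SIZE(__example_cmd_descs),
};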
#define INTEL_INFO(dev)	(&to_i915(dev)->info)

#define IS_I830(dev)		((dev)->pdev->device == 0x3577)
#define IS_845G(dev)		((dev)->pdev->device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pdev->device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pdev->device == 0x2592)
#define IS_I945G(dev)		((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pdev->device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev)	((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		((dev)->pdev->device == 0x0156 || \
				 (dev)->pdev->device == 0x0152 || \
				 (dev)->pdev->device == 0x015a)
#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
				 (dev)->pdev->device == 0x0106 || \
				 (dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
				 (((dev)->pdev->device & 0xf) == 0x2 || \
				  ((dev)->pdev->device & 0xf) == 0x6 || \
				  ((dev)->pdev->device & 0xf) == 0xe))
#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev)		((dev)->pdev->device == 0x0A0E || \
				 (dev)->pdev->device == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
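/*
 * Usage sketch of the convention above (both helpers are hypothetical):
 *
 *	if (IS_GEN6(dev))
 *		apply_gen6_render_workaround(dev);
 *	if (HAS_PCH_SPLIT(dev))
 *		setup_pch_display_refclk(dev);
 */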
#define RENDER_RING		(1<<RCS)
#define BSD_RING		(1<<VCS)
#define BLT_RING		(1<<BCS)
#define VEBOX_RING		(1<<VECS)
#define BSD2_RING		(1<<VCS2)
#define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
				 to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && \
				 (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 \
				 && !IS_GEN8(dev))
#define USES_PPGTT(dev)		intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev)	intel_enable_ppgtt(dev, true)

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
/*
 * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
 * even when in MSI mode. This results in spurious interrupt warnings if the
 * legacy irq no. is shared with another device. The kernel then disables that
 * interrupt source and so prevents the other device from working properly.
 */
#define HAS_AUX_IRQ(dev)	(INTEL_INFO(dev)->gen >= 5)
#define HAS_GMBUS_IRQ(dev)	(INTEL_INFO(dev)->gen >= 5)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev)		(INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev)		(INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))

#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00

#define INTEL_PCH_TYPE(dev)	(to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev)	(INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev)	(INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev)	(INTEL_PCH_TYPE(dev) != PCH_NONE)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev)		(IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev)	(IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50
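/*
 * The multiplier converts RPS frequency ratio values to MHz; e.g. a
 * (hypothetical) ratio value of 12 corresponds to
 * 12 * GT_FREQUENCY_MULTIPLIER = 600 MHz.
 */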
#include "i915_trace.h"

extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;

extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

/* i915_params.c */
struct i915_params {
	int modeset;
	int panel_ignore_lid;
	unsigned int powersave;
	int semaphores;
	unsigned int lvds_downclock;
	int lvds_channel_mode;
	int panel_use_ssc;
	int vbt_sdvo_panel_type;
	int enable_rc6;
	int enable_fbc;
	int enable_ppgtt;
	int enable_psr;
	unsigned int preliminary_hw_support;
	int disable_power_well;
	int enable_ips;
	int invert_brightness;
	int enable_cmd_parser;
	/* leave bools at the end to not create holes */
	bool enable_hangcheck;
	bool fastboot;
	bool prefault_disable;
	bool reset;
	bool disable_display;
	bool disable_vtd_wa;
};
extern struct i915_params i915 __read_mostly;
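/*
 * These fields back the i915 module parameters defined in i915_params.c,
 * so they can be tuned at module load or on the kernel command line,
 * e.g. (values shown are illustrative, not recommendations):
 *
 *	i915.enable_ppgtt=1 i915.enable_fbc=0
 */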
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);

extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...);

void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
		      int new_delay);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);

extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask);

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask);

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);

/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_device *dev);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);

#define PIN_MAPPABLE	0x1
#define PIN_NONBLOCK	0x2
#define PIN_GLOBAL	0x4
#define PIN_OFFSET_BIAS	0x8
#define PIN_OFFSET_MASK (~4095)
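/*
 * Usage sketch for the PIN_* flags with the GGTT pin helper declared
 * further down (obj is a struct drm_i915_gem_object *, and 4096 is just
 * an example alignment):
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE | PIN_NONBLOCK);
 *	if (ret)
 *		return ret;
 *	...
 *	i915_gem_object_ggtt_unpin(obj);
 */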
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
				     uint64_t flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
/* Return the backing-store page at index n, or NULL if there is none. */
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
		return sg_page_iter_page(&sg_iter);

	return NULL;
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_engine_cs *to);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_engine_cs *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
/**
 * Returns true if seq1 is later than or equal to seq2, using wrap-safe
 * signed arithmetic.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
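/*
 * Worked example of the wrap-safe comparison above: just after the 32-bit
 * seqno wraps, i915_seqno_passed(1, 0xfffffffe) is true because
 * (int32_t)(1 - 0xfffffffe) == 3 >= 0, while the reverse call computes
 * (int32_t)0xfffffffd < 0 and so returns false.
 */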
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}
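/*
 * Worked example for i915_reset_count(), assuming the counter is bumped
 * once when a reset starts and once more when it completes: after one
 * completed reset the counter is 2 and (2 + 1) / 2 == 1; while a second
 * reset is still in progress the counter is 3 and (3 + 1) / 2 == 2.
 */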
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
}

static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
}

void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
		       u32 *seqno);
#define i915_add_request(ring, seqno) \
	__i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
				int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			return true;
	return false;
}

/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      unsigned flags)
{
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   flags | PIN_GLOBAL);
}

static inline int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}

void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
/* i915_gem_context.c */
#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct intel_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct intel_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}

static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
	return c->id == DEFAULT_CONTEXT_ID;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);

/* i915_gem_render_state.c */
int i915_gem_render_state_init(struct intel_engine_cs *ring);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  unsigned long start,
					  unsigned long end,
					  unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void intel_display_crc_init(struct drm_device *dev);
#else
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev, bool wedge,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    u32 batch_start_offset,
		    bool is_master);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
struct intel_encoder;
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

/* On SNB platform, before reading ring registers forcewake bit
 * must be set to prevent GT core from power down and stale values being
 * returned.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
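/*
 * Usage sketch for the forcewake pair above (reg and val are
 * placeholders; FORCEWAKE_ALL is defined further down):
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	val = I915_READ(reg);
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 */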
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define FORCEWAKE_RENDER	(1 << 0)
#define FORCEWAKE_MEDIA		(1 << 1)
#define FORCEWAKE_ALL		(FORCEWAKE_RENDER | FORCEWAKE_MEDIA)


#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. You have been warned.
 */
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

/*
 * Read a 64-bit value as two 32-bit halves, re-reading the upper half to
 * detect (and retry across) a carry into it from the lower half mid-read.
 */
#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
		u32 upper = I915_READ(upper_reg);			\
		u32 lower = I915_READ(lower_reg);			\
		u32 tmp = I915_READ(upper_reg);				\
		if (upper != tmp) {					\
			upper = tmp;					\
			lower = I915_READ(lower_reg);			\
			WARN_ON(I915_READ(upper_reg) != upper);		\
		}							\
		(u64)upper << 32 | lower; })

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
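/*
 * Usage sketch, with a made-up 500 ms minimum delay between switching a
 * panel off (event A) and powering it back on (event B); panel_power_on()
 * is a hypothetical helper:
 *
 *	panel_off_jiffies = jiffies;
 *	...
 *	wait_remaining_ms_from_jiffies(panel_off_jiffies, 500);
 *	panel_power_on();
 */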
#endif