1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ 32 33 #include "i915_reg.h" 34 #include "intel_bios.h" 35 #include "intel_ringbuffer.h" 36 #include <linux/io-mapping.h> 37 #include <linux/i2c.h> 38 #include <linux/i2c-algo-bit.h> 39 #include <drm/intel-gtt.h> 40 #include <linux/backlight.h> 41 #include <linux/intel-iommu.h> 42 #include <linux/kref.h> 43 44 /* General customization: 45 */ 46 47 #define DRIVER_AUTHOR "Tungsten Graphics, Inc." 48 49 #define DRIVER_NAME "i915" 50 #define DRIVER_DESC "Intel Graphics" 51 #define DRIVER_DATE "20080730" 52 53 enum pipe { 54 PIPE_A = 0, 55 PIPE_B, 56 PIPE_C, 57 I915_MAX_PIPES 58 }; 59 #define pipe_name(p) ((p) + 'A') 60 61 enum transcoder { 62 TRANSCODER_A = 0, 63 TRANSCODER_B, 64 TRANSCODER_C, 65 TRANSCODER_EDP = 0xF, 66 }; 67 #define transcoder_name(t) ((t) + 'A') 68 69 enum plane { 70 PLANE_A = 0, 71 PLANE_B, 72 PLANE_C, 73 }; 74 #define plane_name(p) ((p) + 'A') 75 76 enum port { 77 PORT_A = 0, 78 PORT_B, 79 PORT_C, 80 PORT_D, 81 PORT_E, 82 I915_MAX_PORTS 83 }; 84 #define port_name(p) ((p) + 'A') 85 86 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 87 88 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 89 90 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 91 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 92 if ((intel_encoder)->base.crtc == (__crtc)) 93 94 struct intel_pch_pll { 95 int refcount; /* count of number of CRTCs sharing this PLL */ 96 int active; /* count of number of active CRTCs (i.e. DPMS on) */ 97 bool on; /* is the PLL actually active? Disabled during modeset */ 98 int pll_reg; 99 int fp0_reg; 100 int fp1_reg; 101 }; 102 #define I915_NUM_PLLS 2 103 104 struct intel_ddi_plls { 105 int spll_refcount; 106 int wrpll1_refcount; 107 int wrpll2_refcount; 108 }; 109 110 /* Interface history: 111 * 112 * 1.1: Original. 
113 * 1.2: Add Power Management 114 * 1.3: Add vblank support 115 * 1.4: Fix cmdbuffer path, add heap destroy 116 * 1.5: Add vblank pipe configuration 117 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 118 * - Support vertical blank on secondary display pipe 119 */ 120 #define DRIVER_MAJOR 1 121 #define DRIVER_MINOR 6 122 #define DRIVER_PATCHLEVEL 0 123 124 #define WATCH_COHERENCY 0 125 #define WATCH_LISTS 0 126 #define WATCH_GTT 0 127 128 #define I915_GEM_PHYS_CURSOR_0 1 129 #define I915_GEM_PHYS_CURSOR_1 2 130 #define I915_GEM_PHYS_OVERLAY_REGS 3 131 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) 132 133 struct drm_i915_gem_phys_object { 134 int id; 135 struct page **page_list; 136 drm_dma_handle_t *handle; 137 struct drm_i915_gem_object *cur_obj; 138 }; 139 140 struct opregion_header; 141 struct opregion_acpi; 142 struct opregion_swsci; 143 struct opregion_asle; 144 struct drm_i915_private; 145 146 struct intel_opregion { 147 struct opregion_header __iomem *header; 148 struct opregion_acpi __iomem *acpi; 149 struct opregion_swsci __iomem *swsci; 150 struct opregion_asle __iomem *asle; 151 void __iomem *vbt; 152 u32 __iomem *lid_state; 153 }; 154 #define OPREGION_SIZE (8*1024) 155 156 struct intel_overlay; 157 struct intel_overlay_error_state; 158 159 struct drm_i915_master_private { 160 drm_local_map_t *sarea; 161 struct _drm_i915_sarea *sarea_priv; 162 }; 163 #define I915_FENCE_REG_NONE -1 164 #define I915_MAX_NUM_FENCES 16 165 /* 16 fences + sign bit for FENCE_REG_NONE */ 166 #define I915_MAX_NUM_FENCE_BITS 5 167 168 struct drm_i915_fence_reg { 169 struct list_head lru_list; 170 struct drm_i915_gem_object *obj; 171 int pin_count; 172 }; 173 174 struct sdvo_device_mapping { 175 u8 initialized; 176 u8 dvo_port; 177 u8 slave_addr; 178 u8 dvo_wiring; 179 u8 i2c_pin; 180 u8 ddc_pin; 181 }; 182 183 struct intel_display_error_state; 184 185 struct drm_i915_error_state { 186 struct kref ref; 187 u32 eir; 188 u32 pgtbl_er; 189 u32 ier; 190 u32 ccid; 191 u32 derrmr; 192 u32 forcewake; 193 bool waiting[I915_NUM_RINGS]; 194 u32 pipestat[I915_MAX_PIPES]; 195 u32 tail[I915_NUM_RINGS]; 196 u32 head[I915_NUM_RINGS]; 197 u32 ctl[I915_NUM_RINGS]; 198 u32 ipeir[I915_NUM_RINGS]; 199 u32 ipehr[I915_NUM_RINGS]; 200 u32 instdone[I915_NUM_RINGS]; 201 u32 acthd[I915_NUM_RINGS]; 202 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; 203 u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1]; 204 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ 205 /* our own tracking of ring head and tail */ 206 u32 cpu_ring_head[I915_NUM_RINGS]; 207 u32 cpu_ring_tail[I915_NUM_RINGS]; 208 u32 error; /* gen6+ */ 209 u32 err_int; /* gen7 */ 210 u32 instpm[I915_NUM_RINGS]; 211 u32 instps[I915_NUM_RINGS]; 212 u32 extra_instdone[I915_NUM_INSTDONE_REG]; 213 u32 seqno[I915_NUM_RINGS]; 214 u64 bbaddr; 215 u32 fault_reg[I915_NUM_RINGS]; 216 u32 done_reg; 217 u32 faddr[I915_NUM_RINGS]; 218 u64 fence[I915_MAX_NUM_FENCES]; 219 struct timeval time; 220 struct drm_i915_error_ring { 221 struct drm_i915_error_object { 222 int page_count; 223 u32 gtt_offset; 224 u32 *pages[0]; 225 } *ringbuffer, *batchbuffer; 226 struct drm_i915_error_request { 227 long jiffies; 228 u32 seqno; 229 u32 tail; 230 } *requests; 231 int num_requests; 232 } ring[I915_NUM_RINGS]; 233 struct drm_i915_error_buffer { 234 u32 size; 235 u32 name; 236 u32 rseqno, wseqno; 237 u32 gtt_offset; 238 u32 read_domains; 239 u32 write_domain; 240 s32 fence_reg:I915_MAX_NUM_FENCE_BITS; 241 s32 pinned:2; 242 u32 tiling:2; 243 u32 dirty:1; 244 u32 
purgeable:1; 245 s32 ring:4; 246 u32 cache_level:2; 247 } *active_bo, *pinned_bo; 248 u32 active_bo_count, pinned_bo_count; 249 struct intel_overlay_error_state *overlay; 250 struct intel_display_error_state *display; 251 }; 252 253 struct drm_i915_display_funcs { 254 bool (*fbc_enabled)(struct drm_device *dev); 255 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); 256 void (*disable_fbc)(struct drm_device *dev); 257 int (*get_display_clock_speed)(struct drm_device *dev); 258 int (*get_fifo_size)(struct drm_device *dev, int plane); 259 void (*update_wm)(struct drm_device *dev); 260 void (*update_sprite_wm)(struct drm_device *dev, int pipe, 261 uint32_t sprite_width, int pixel_size); 262 void (*update_linetime_wm)(struct drm_device *dev, int pipe, 263 struct drm_display_mode *mode); 264 void (*modeset_global_resources)(struct drm_device *dev); 265 int (*crtc_mode_set)(struct drm_crtc *crtc, 266 struct drm_display_mode *mode, 267 struct drm_display_mode *adjusted_mode, 268 int x, int y, 269 struct drm_framebuffer *old_fb); 270 void (*crtc_enable)(struct drm_crtc *crtc); 271 void (*crtc_disable)(struct drm_crtc *crtc); 272 void (*off)(struct drm_crtc *crtc); 273 void (*write_eld)(struct drm_connector *connector, 274 struct drm_crtc *crtc); 275 void (*fdi_link_train)(struct drm_crtc *crtc); 276 void (*init_clock_gating)(struct drm_device *dev); 277 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 278 struct drm_framebuffer *fb, 279 struct drm_i915_gem_object *obj); 280 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 281 int x, int y); 282 /* clock updates for mode set */ 283 /* cursor updates */ 284 /* render clock increase/decrease */ 285 /* display clock increase/decrease */ 286 /* pll clock increase/decrease */ 287 }; 288 289 struct drm_i915_gt_funcs { 290 void (*force_wake_get)(struct drm_i915_private *dev_priv); 291 void (*force_wake_put)(struct drm_i915_private *dev_priv); 292 }; 293 294 #define DEV_INFO_FLAGS \ 295 DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ 296 DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \ 297 DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \ 298 DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \ 299 DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \ 300 DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \ 301 DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \ 302 DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \ 303 DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \ 304 DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \ 305 DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \ 306 DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \ 307 DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \ 308 DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \ 309 DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \ 310 DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \ 311 DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \ 312 DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \ 313 DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \ 314 DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \ 315 DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \ 316 DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \ 317 DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \ 318 DEV_INFO_FLAG(has_llc) 319 320 struct intel_device_info { 321 u8 gen; 322 u8 is_mobile:1; 323 u8 is_i85x:1; 324 u8 is_i915g:1; 325 u8 is_i945gm:1; 326 u8 is_g33:1; 327 u8 need_gfx_hws:1; 328 u8 is_g4x:1; 329 u8 is_pineview:1; 330 u8 is_broadwater:1; 331 u8 is_crestline:1; 332 u8 is_ivybridge:1; 333 u8 is_valleyview:1; 334 u8 has_force_wake:1; 335 u8 is_haswell:1; 336 u8 has_fbc:1; 337 u8 has_pipe_cxsr:1; 338 u8 has_hotplug:1; 339 u8 cursor_needs_physical:1; 340 u8 
has_overlay:1; 341 u8 overlay_needs_physical:1; 342 u8 supports_tv:1; 343 u8 has_bsd_ring:1; 344 u8 has_blt_ring:1; 345 u8 has_llc:1; 346 }; 347 348 #define I915_PPGTT_PD_ENTRIES 512 349 #define I915_PPGTT_PT_ENTRIES 1024 350 struct i915_hw_ppgtt { 351 struct drm_device *dev; 352 unsigned num_pd_entries; 353 struct page **pt_pages; 354 uint32_t pd_offset; 355 dma_addr_t *pt_dma_addr; 356 dma_addr_t scratch_page_dma_addr; 357 }; 358 359 360 /* This must match up with the value previously used for execbuf2.rsvd1. */ 361 #define DEFAULT_CONTEXT_ID 0 362 struct i915_hw_context { 363 int id; 364 bool is_initialized; 365 struct drm_i915_file_private *file_priv; 366 struct intel_ring_buffer *ring; 367 struct drm_i915_gem_object *obj; 368 }; 369 370 enum no_fbc_reason { 371 FBC_NO_OUTPUT, /* no outputs enabled to compress */ 372 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ 373 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ 374 FBC_MODE_TOO_LARGE, /* mode too large for compression */ 375 FBC_BAD_PLANE, /* fbc not supported on plane */ 376 FBC_NOT_TILED, /* buffer not tiled */ 377 FBC_MULTIPLE_PIPES, /* more than one pipe active */ 378 FBC_MODULE_PARAM, 379 }; 380 381 enum intel_pch { 382 PCH_NONE = 0, /* No PCH present */ 383 PCH_IBX, /* Ibexpeak PCH */ 384 PCH_CPT, /* Cougarpoint PCH */ 385 PCH_LPT, /* Lynxpoint PCH */ 386 }; 387 388 enum intel_sbi_destination { 389 SBI_ICLK, 390 SBI_MPHY, 391 }; 392 393 #define QUIRK_PIPEA_FORCE (1<<0) 394 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 395 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 396 397 struct intel_fbdev; 398 struct intel_fbc_work; 399 400 struct intel_gmbus { 401 struct i2c_adapter adapter; 402 u32 force_bit; 403 u32 reg0; 404 u32 gpio_reg; 405 struct i2c_algo_bit_data bit_algo; 406 struct drm_i915_private *dev_priv; 407 }; 408 409 struct i915_suspend_saved_registers { 410 u8 saveLBB; 411 u32 saveDSPACNTR; 412 u32 saveDSPBCNTR; 413 u32 saveDSPARB; 414 u32 savePIPEACONF; 415 u32 savePIPEBCONF; 416 u32 savePIPEASRC; 417 u32 savePIPEBSRC; 418 u32 saveFPA0; 419 u32 saveFPA1; 420 u32 saveDPLL_A; 421 u32 saveDPLL_A_MD; 422 u32 saveHTOTAL_A; 423 u32 saveHBLANK_A; 424 u32 saveHSYNC_A; 425 u32 saveVTOTAL_A; 426 u32 saveVBLANK_A; 427 u32 saveVSYNC_A; 428 u32 saveBCLRPAT_A; 429 u32 saveTRANSACONF; 430 u32 saveTRANS_HTOTAL_A; 431 u32 saveTRANS_HBLANK_A; 432 u32 saveTRANS_HSYNC_A; 433 u32 saveTRANS_VTOTAL_A; 434 u32 saveTRANS_VBLANK_A; 435 u32 saveTRANS_VSYNC_A; 436 u32 savePIPEASTAT; 437 u32 saveDSPASTRIDE; 438 u32 saveDSPASIZE; 439 u32 saveDSPAPOS; 440 u32 saveDSPAADDR; 441 u32 saveDSPASURF; 442 u32 saveDSPATILEOFF; 443 u32 savePFIT_PGM_RATIOS; 444 u32 saveBLC_HIST_CTL; 445 u32 saveBLC_PWM_CTL; 446 u32 saveBLC_PWM_CTL2; 447 u32 saveBLC_CPU_PWM_CTL; 448 u32 saveBLC_CPU_PWM_CTL2; 449 u32 saveFPB0; 450 u32 saveFPB1; 451 u32 saveDPLL_B; 452 u32 saveDPLL_B_MD; 453 u32 saveHTOTAL_B; 454 u32 saveHBLANK_B; 455 u32 saveHSYNC_B; 456 u32 saveVTOTAL_B; 457 u32 saveVBLANK_B; 458 u32 saveVSYNC_B; 459 u32 saveBCLRPAT_B; 460 u32 saveTRANSBCONF; 461 u32 saveTRANS_HTOTAL_B; 462 u32 saveTRANS_HBLANK_B; 463 u32 saveTRANS_HSYNC_B; 464 u32 saveTRANS_VTOTAL_B; 465 u32 saveTRANS_VBLANK_B; 466 u32 saveTRANS_VSYNC_B; 467 u32 savePIPEBSTAT; 468 u32 saveDSPBSTRIDE; 469 u32 saveDSPBSIZE; 470 u32 saveDSPBPOS; 471 u32 saveDSPBADDR; 472 u32 saveDSPBSURF; 473 u32 saveDSPBTILEOFF; 474 u32 saveVGA0; 475 u32 saveVGA1; 476 u32 saveVGA_PD; 477 u32 saveVGACNTRL; 478 u32 saveADPA; 479 u32 saveLVDS; 480 u32 savePP_ON_DELAYS; 481 u32 savePP_OFF_DELAYS; 482 u32 
saveDVOA; 483 u32 saveDVOB; 484 u32 saveDVOC; 485 u32 savePP_ON; 486 u32 savePP_OFF; 487 u32 savePP_CONTROL; 488 u32 savePP_DIVISOR; 489 u32 savePFIT_CONTROL; 490 u32 save_palette_a[256]; 491 u32 save_palette_b[256]; 492 u32 saveDPFC_CB_BASE; 493 u32 saveFBC_CFB_BASE; 494 u32 saveFBC_LL_BASE; 495 u32 saveFBC_CONTROL; 496 u32 saveFBC_CONTROL2; 497 u32 saveIER; 498 u32 saveIIR; 499 u32 saveIMR; 500 u32 saveDEIER; 501 u32 saveDEIMR; 502 u32 saveGTIER; 503 u32 saveGTIMR; 504 u32 saveFDI_RXA_IMR; 505 u32 saveFDI_RXB_IMR; 506 u32 saveCACHE_MODE_0; 507 u32 saveMI_ARB_STATE; 508 u32 saveSWF0[16]; 509 u32 saveSWF1[16]; 510 u32 saveSWF2[3]; 511 u8 saveMSR; 512 u8 saveSR[8]; 513 u8 saveGR[25]; 514 u8 saveAR_INDEX; 515 u8 saveAR[21]; 516 u8 saveDACMASK; 517 u8 saveCR[37]; 518 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 519 u32 saveCURACNTR; 520 u32 saveCURAPOS; 521 u32 saveCURABASE; 522 u32 saveCURBCNTR; 523 u32 saveCURBPOS; 524 u32 saveCURBBASE; 525 u32 saveCURSIZE; 526 u32 saveDP_B; 527 u32 saveDP_C; 528 u32 saveDP_D; 529 u32 savePIPEA_GMCH_DATA_M; 530 u32 savePIPEB_GMCH_DATA_M; 531 u32 savePIPEA_GMCH_DATA_N; 532 u32 savePIPEB_GMCH_DATA_N; 533 u32 savePIPEA_DP_LINK_M; 534 u32 savePIPEB_DP_LINK_M; 535 u32 savePIPEA_DP_LINK_N; 536 u32 savePIPEB_DP_LINK_N; 537 u32 saveFDI_RXA_CTL; 538 u32 saveFDI_TXA_CTL; 539 u32 saveFDI_RXB_CTL; 540 u32 saveFDI_TXB_CTL; 541 u32 savePFA_CTL_1; 542 u32 savePFB_CTL_1; 543 u32 savePFA_WIN_SZ; 544 u32 savePFB_WIN_SZ; 545 u32 savePFA_WIN_POS; 546 u32 savePFB_WIN_POS; 547 u32 savePCH_DREF_CONTROL; 548 u32 saveDISP_ARB_CTL; 549 u32 savePIPEA_DATA_M1; 550 u32 savePIPEA_DATA_N1; 551 u32 savePIPEA_LINK_M1; 552 u32 savePIPEA_LINK_N1; 553 u32 savePIPEB_DATA_M1; 554 u32 savePIPEB_DATA_N1; 555 u32 savePIPEB_LINK_M1; 556 u32 savePIPEB_LINK_N1; 557 u32 saveMCHBAR_RENDER_STANDBY; 558 u32 savePCH_PORT_HOTPLUG; 559 }; 560 561 struct intel_gen6_power_mgmt { 562 struct work_struct work; 563 u32 pm_iir; 564 /* lock - irqsave spinlock that protects the work_struct and 565 * pm_iir. */ 566 spinlock_t lock; 567 568 /* The below variables and all the rps hw state are protected by 569 * dev->struct_mutex. */ 570 u8 cur_delay; 571 u8 min_delay; 572 u8 max_delay; 573 574 struct delayed_work delayed_resume_work; 575 576 /* 577 * Protects RPS/RC6 register access and PCU communication. 578 * Must be taken after struct_mutex if nested. 579 */ 580 struct mutex hw_lock; 581 }; 582 583 struct intel_ilk_power_mgmt { 584 u8 cur_delay; 585 u8 min_delay; 586 u8 max_delay; 587 u8 fmax; 588 u8 fstart; 589 590 u64 last_count1; 591 unsigned long last_time1; 592 unsigned long chipset_power; 593 u64 last_count2; 594 struct timespec last_time2; 595 unsigned long gfx_power; 596 u8 corr; 597 598 int c_m; 599 int r_t; 600 601 struct drm_i915_gem_object *pwrctx; 602 struct drm_i915_gem_object *renderctx; 603 }; 604 605 struct i915_dri1_state { 606 unsigned allow_batchbuffer : 1; 607 u32 __iomem *gfx_hws_cpu_addr; 608 609 unsigned int cpp; 610 int back_offset; 611 int front_offset; 612 int current_page; 613 int page_flipping; 614 615 uint32_t counter; 616 }; 617 618 struct intel_l3_parity { 619 u32 *remap_info; 620 struct work_struct error_work; 621 }; 622 623 typedef struct drm_i915_private { 624 struct drm_device *dev; 625 626 const struct intel_device_info *info; 627 628 int relative_constants_mode; 629 630 void __iomem *regs; 631 632 struct drm_i915_gt_funcs gt; 633 /** gt_fifo_count and the subsequent register write are synchronized 634 * with dev->struct_mutex.
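 *
 * Illustrative sketch only (not a definitive contract): a raw GT register
 * access that bypasses the regular I915_READ()/I915_WRITE() wrappers would
 * be expected to bracket the access with the forcewake hooks in the gt
 * funcs above, e.g.:
 *
 *	dev_priv->gt.force_wake_get(dev_priv);
 *	val = I915_READ_NOTRACE(reg);
 *	dev_priv->gt.force_wake_put(dev_priv);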
*/ 635 unsigned gt_fifo_count; 636 /** forcewake_count is protected by gt_lock */ 637 unsigned forcewake_count; 638 /** gt_lock is also taken in irq contexts. */ 639 struct spinlock gt_lock; 640 641 struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; 642 643 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 644 * controller on different i2c buses. */ 645 struct mutex gmbus_mutex; 646 647 /** 648 * Base address of the gmbus and gpio block. 649 */ 650 uint32_t gpio_mmio_base; 651 652 struct pci_dev *bridge_dev; 653 struct intel_ring_buffer ring[I915_NUM_RINGS]; 654 uint32_t next_seqno; 655 656 drm_dma_handle_t *status_page_dmah; 657 struct resource mch_res; 658 659 atomic_t irq_received; 660 661 /* protects the irq masks */ 662 spinlock_t irq_lock; 663 664 /* DPIO indirect register protection */ 665 spinlock_t dpio_lock; 666 667 /** Cached value of IMR to avoid reads in updating the bitfield */ 668 u32 pipestat[2]; 669 u32 irq_mask; 670 u32 gt_irq_mask; 671 u32 pch_irq_mask; 672 673 u32 hotplug_supported_mask; 674 struct work_struct hotplug_work; 675 676 int num_pipe; 677 int num_pch_pll; 678 679 /* For hangcheck timer */ 680 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 681 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 682 struct timer_list hangcheck_timer; 683 int hangcheck_count; 684 uint32_t last_acthd[I915_NUM_RINGS]; 685 uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; 686 687 unsigned int stop_rings; 688 689 unsigned long cfb_size; 690 unsigned int cfb_fb; 691 enum plane cfb_plane; 692 int cfb_y; 693 struct intel_fbc_work *fbc_work; 694 695 struct intel_opregion opregion; 696 697 /* overlay */ 698 struct intel_overlay *overlay; 699 bool sprite_scaling_enabled; 700 701 /* LVDS info */ 702 int backlight_level; /* restore backlight to this value */ 703 bool backlight_enabled; 704 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 705 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 706 707 /* Feature bits from the VBIOS */ 708 unsigned int int_tv_support:1; 709 unsigned int lvds_dither:1; 710 unsigned int lvds_vbt:1; 711 unsigned int int_crt_support:1; 712 unsigned int lvds_use_ssc:1; 713 unsigned int display_clock_mode:1; 714 int lvds_ssc_freq; 715 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 716 unsigned int lvds_val; /* used for checking LVDS channel mode */ 717 struct { 718 int rate; 719 int lanes; 720 int preemphasis; 721 int vswing; 722 723 bool initialized; 724 bool support; 725 int bpp; 726 struct edp_power_seq pps; 727 } edp; 728 bool no_aux_handshake; 729 730 int crt_ddc_pin; 731 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 732 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 733 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 734 735 unsigned int fsb_freq, mem_freq, is_ddr3; 736 737 spinlock_t error_lock; 738 /* Protected by dev->error_lock. 
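 *
 * A minimal sketch of the intended access pattern for a reader that wants
 * to hold on to the most recent capture (illustrative only):
 *
 *	spin_lock_irqsave(&dev_priv->error_lock, flags);
 *	error = dev_priv->first_error;
 *	if (error)
 *		kref_get(&error->ref);
 *	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 *	...
 *	kref_put(&error->ref, i915_error_state_free);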
*/ 739 struct drm_i915_error_state *first_error; 740 struct work_struct error_work; 741 struct completion error_completion; 742 struct workqueue_struct *wq; 743 744 /* Display functions */ 745 struct drm_i915_display_funcs display; 746 747 /* PCH chipset type */ 748 enum intel_pch pch_type; 749 unsigned short pch_id; 750 751 unsigned long quirks; 752 753 /* Register state */ 754 bool modeset_on_lid; 755 756 struct { 757 /** Bridge to intel-gtt-ko */ 758 struct intel_gtt *gtt; 759 /** Memory allocator for GTT stolen memory */ 760 struct drm_mm stolen; 761 /** Memory allocator for GTT */ 762 struct drm_mm gtt_space; 763 /** List of all objects in gtt_space. Used to restore gtt 764 * mappings on resume */ 765 struct list_head bound_list; 766 /** 767 * List of objects which are not bound to the GTT (thus 768 * are idle and not used by the GPU) but still have 769 * (presumably uncached) pages still attached. 770 */ 771 struct list_head unbound_list; 772 773 /** Usable portion of the GTT for GEM */ 774 unsigned long gtt_start; 775 unsigned long gtt_mappable_end; 776 unsigned long gtt_end; 777 778 struct io_mapping *gtt_mapping; 779 phys_addr_t gtt_base_addr; 780 int gtt_mtrr; 781 782 /** PPGTT used for aliasing the PPGTT with the GTT */ 783 struct i915_hw_ppgtt *aliasing_ppgtt; 784 785 struct shrinker inactive_shrinker; 786 bool shrinker_no_lock_stealing; 787 788 /** 789 * List of objects currently involved in rendering. 790 * 791 * Includes buffers having the contents of their GPU caches 792 * flushed, not necessarily primitives. last_rendering_seqno 793 * represents when the rendering involved will be completed. 794 * 795 * A reference is held on the buffer while on this list. 796 */ 797 struct list_head active_list; 798 799 /** 800 * LRU list of objects which are not in the ringbuffer and 801 * are ready to unbind, but are still in the GTT. 802 * 803 * last_rendering_seqno is 0 while an object is in this list. 804 * 805 * A reference is not held on the buffer while on this list, 806 * as merely being GTT-bound shouldn't prevent its being 807 * freed, and we'll pull it off the list in the free path. 808 */ 809 struct list_head inactive_list; 810 811 /** LRU list of objects with fence regs on them. */ 812 struct list_head fence_list; 813 814 /** 815 * We leave the user IRQ off as much as possible, 816 * but this means that requests will finish and never 817 * be retired once the system goes idle. Set a timer to 818 * fire periodically while the ring is running. When it 819 * fires, go retire requests. 820 */ 821 struct delayed_work retire_work; 822 823 /** 824 * Are we in a non-interruptible section of code like 825 * modesetting? 826 */ 827 bool interruptible; 828 829 /** 830 * Flag if the X Server, and thus DRM, is not currently in 831 * control of the device. 832 * 833 * This is set between LeaveVT and EnterVT. It needs to be 834 * replaced with a semaphore. It also needs to be 835 * transitioned away from for kernel modesetting. 836 */ 837 int suspended; 838 839 /** 840 * Flag if the hardware appears to be wedged. 841 * 842 * This is set when attempts to idle the device timeout. 
843 * It prevents command submission from occurring and makes 844 * every pending request fail. 845 */ 846 atomic_t wedged; 847 848 /** Bit 6 swizzling required for X tiling */ 849 uint32_t bit_6_swizzle_x; 850 /** Bit 6 swizzling required for Y tiling */ 851 uint32_t bit_6_swizzle_y; 852 853 /* storage for physical objects */ 854 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 855 856 /* accounting, useful for userland debugging */ 857 size_t gtt_total; 858 size_t mappable_gtt_total; 859 size_t object_memory; 860 u32 object_count; 861 } mm; 862 863 /* Kernel Modesetting */ 864 865 struct sdvo_device_mapping sdvo_mappings[2]; 866 /* indicate whether the LVDS_BORDER should be enabled or not */ 867 unsigned int lvds_border_bits; 868 /* Panel fitter placement and size for Ironlake+ */ 869 u32 pch_pf_pos, pch_pf_size; 870 871 struct drm_crtc *plane_to_crtc_mapping[3]; 872 struct drm_crtc *pipe_to_crtc_mapping[3]; 873 wait_queue_head_t pending_flip_queue; 874 875 struct intel_pch_pll pch_plls[I915_NUM_PLLS]; 876 struct intel_ddi_plls ddi_plls; 877 878 /* Reclocking support */ 879 bool render_reclock_avail; 880 bool lvds_downclock_avail; 881 /* indicates the reduced downclock for LVDS */ 882 int lvds_downclock; 883 u16 orig_clock; 884 int child_dev_num; 885 struct child_device_config *child_dev; 886 887 bool mchbar_need_disable; 888 889 struct intel_l3_parity l3_parity; 890 891 /* gen6+ rps state */ 892 struct intel_gen6_power_mgmt rps; 893 894 /* ilk-only ips/rps state. Everything in here is protected by the global 895 * mchdev_lock in intel_pm.c */ 896 struct intel_ilk_power_mgmt ips; 897 898 enum no_fbc_reason no_fbc_reason; 899 900 struct drm_mm_node *compressed_fb; 901 struct drm_mm_node *compressed_llb; 902 903 unsigned long last_gpu_reset; 904 905 /* fbdev registered on this device */ 906 struct intel_fbdev *fbdev; 907 908 /* 909 * The console may be contended at resume, but we don't 910 * want to block on it. 911 */ 912 struct work_struct console_resume_work; 913 914 struct backlight_device *backlight; 915 916 struct drm_property *broadcast_rgb_property; 917 struct drm_property *force_audio_property; 918 919 bool hw_contexts_disabled; 920 uint32_t hw_context_size; 921 922 bool fdi_rx_polarity_reversed; 923 924 struct i915_suspend_saved_registers regfile; 925 926 /* Old dri1 support infrastructure, beware the dragons ya fools entering 927 * here! */ 928 struct i915_dri1_state dri1; 929 } drm_i915_private_t; 930 931 /* Iterate over initialised rings */ 932 #define for_each_ring(ring__, dev_priv__, i__) \ 933 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ 934 if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))) 935 936 enum hdmi_force_audio { 937 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 938 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 939 HDMI_AUDIO_AUTO, /* trust EDID */ 940 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 941 }; 942 943 enum i915_cache_level { 944 I915_CACHE_NONE = 0, 945 I915_CACHE_LLC, 946 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ 947 }; 948 949 struct drm_i915_gem_object_ops { 950 /* Interface between the GEM object and its backing storage. 951 * get_pages() is called once prior to the use of the associated set 952 * of pages before binding them into the GTT, and put_pages() is 953 * called after we no longer need them. As we expect there to be an 954 * associated cost with migrating pages between the backing storage 955 * and making them available for the GPU (e.g.
clflush), we may hold 956 * onto the pages after they are no longer referenced by the GPU 957 * in case they may be used again shortly (for example migrating the 958 * pages to a different memory domain within the GTT). put_pages() 959 * will therefore most likely be called when the object itself is 960 * being released or under memory pressure (where we attempt to 961 * reap pages for the shrinker). 962 */ 963 int (*get_pages)(struct drm_i915_gem_object *); 964 void (*put_pages)(struct drm_i915_gem_object *); 965 }; 966 967 struct drm_i915_gem_object { 968 struct drm_gem_object base; 969 970 const struct drm_i915_gem_object_ops *ops; 971 972 /** Current space allocated to this object in the GTT, if any. */ 973 struct drm_mm_node *gtt_space; 974 struct list_head gtt_list; 975 976 /** This object's place on the active/inactive lists */ 977 struct list_head ring_list; 978 struct list_head mm_list; 979 /** This object's place in the batchbuffer or on the eviction list */ 980 struct list_head exec_list; 981 982 /** 983 * This is set if the object is on the active lists (has pending 984 * rendering and so a non-zero seqno), and is not set if it is on 985 * the inactive (ready to be unbound) list. 986 */ 987 unsigned int active:1; 988 989 /** 990 * This is set if the object has been written to since last bound 991 * to the GTT. 992 */ 993 unsigned int dirty:1; 994 995 /** 996 * Fence register bits (if any) for this object. Will be set 997 * as needed when mapped into the GTT. 998 * Protected by dev->struct_mutex. 999 */ 1000 signed int fence_reg:I915_MAX_NUM_FENCE_BITS; 1001 1002 /** 1003 * Advice: are the backing pages purgeable? 1004 */ 1005 unsigned int madv:2; 1006 1007 /** 1008 * Current tiling mode for the object. 1009 */ 1010 unsigned int tiling_mode:2; 1011 /** 1012 * Whether the tiling parameters for the currently associated fence 1013 * register have changed. Note that for the purposes of tracking 1014 * tiling changes we also treat the unfenced register, the register 1015 * slot that the object occupies whilst it executes a fenced 1016 * command (such as BLT on gen2/3), as a "fence". 1017 */ 1018 unsigned int fence_dirty:1; 1019 1020 /** How many users have pinned this object in GTT space. The following 1021 * users can each hold at most one reference: pwrite/pread, pin_ioctl 1022 * (via user_pin_count), execbuffer (objects are not allowed multiple 1023 * times for the same batchbuffer), and the framebuffer code. When 1024 * switching/pageflipping, the framebuffer code has at most two buffers 1025 * pinned per crtc. 1026 * 1027 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 1028 * bits with absolutely no headroom. So use 4 bits. */ 1029 unsigned int pin_count:4; 1030 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf 1031 1032 /** 1033 * Is the object at the current location in the gtt mappable and 1034 * fenceable? Used to avoid costly recalculations. 1035 */ 1036 unsigned int map_and_fenceable:1; 1037 1038 /** 1039 * Whether the current gtt mapping needs to be mappable (and isn't just 1040 * mappable by accident). Track pin and fault separately for a more 1041 * accurate mappable working set.
1042 */ 1043 unsigned int fault_mappable:1; 1044 unsigned int pin_mappable:1; 1045 1046 /* 1047 * Is the GPU currently using a fence to access this buffer? 1048 */ 1049 unsigned int pending_fenced_gpu_access:1; 1050 unsigned int fenced_gpu_access:1; 1051 1052 unsigned int cache_level:2; 1053 1054 unsigned int has_aliasing_ppgtt_mapping:1; 1055 unsigned int has_global_gtt_mapping:1; 1056 unsigned int has_dma_mapping:1; 1057 1058 struct sg_table *pages; 1059 int pages_pin_count; 1060 1061 /* prime dma-buf support */ 1062 void *dma_buf_vmapping; 1063 int vmapping_count; 1064 1065 /** 1066 * Used for performing relocations during execbuffer insertion. 1067 */ 1068 struct hlist_node exec_node; 1069 unsigned long exec_handle; 1070 struct drm_i915_gem_exec_object2 *exec_entry; 1071 1072 /** 1073 * Current offset of the object in GTT space. 1074 * 1075 * This is the same as gtt_space->start 1076 */ 1077 uint32_t gtt_offset; 1078 1079 struct intel_ring_buffer *ring; 1080 1081 /** Breadcrumb of last rendering to the buffer. */ 1082 uint32_t last_read_seqno; 1083 uint32_t last_write_seqno; 1084 /** Breadcrumb of last fenced GPU access to the buffer. */ 1085 uint32_t last_fenced_seqno; 1086 1087 /** Current tiling stride for the object, if it's tiled. */ 1088 uint32_t stride; 1089 1090 /** Record of address bit 17 of each page at last unbind. */ 1091 unsigned long *bit_17; 1092 1093 /** User space pin count and filp owning the pin */ 1094 uint32_t user_pin_count; 1095 struct drm_file *pin_filp; 1096 1097 /** for phy allocated objects */ 1098 struct drm_i915_gem_phys_object *phys_obj; 1099 1100 /** 1101 * Number of crtcs where this object is currently the fb, but 1102 * will be page flipped away on the next vblank. When it 1103 * reaches 0, dev_priv->pending_flip_queue will be woken up. 1104 */ 1105 atomic_t pending_flip; 1106 }; 1107 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) 1108 1109 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1110 1111 /** 1112 * Request queue structure. 1113 * 1114 * The request queue allows us to note sequence numbers that have been emitted 1115 * and may be associated with active buffers to be retired. 1116 * 1117 * By keeping this list, we can avoid having to do questionable 1118 * sequence-number comparisons on buffer last_rendering_seqnos, and associate 1119 * an emission time with seqnos for tracking how far ahead of the GPU we are. 1120 */ 1121 struct drm_i915_gem_request { 1122 /** On which ring this request was generated */ 1123 struct intel_ring_buffer *ring; 1124 1125 /** GEM sequence number associated with this request. */ 1126 uint32_t seqno; 1127 1128 /** Position in the ringbuffer of the end of the request */ 1129 u32 tail; 1130 1131 /** Time at which this request was emitted, in jiffies.
*/ 1132 unsigned long emitted_jiffies; 1133 1134 /** global list entry for this request */ 1135 struct list_head list; 1136 1137 struct drm_i915_file_private *file_priv; 1138 /** file_priv list entry for this request */ 1139 struct list_head client_list; 1140 }; 1141 1142 struct drm_i915_file_private { 1143 struct { 1144 struct spinlock lock; 1145 struct list_head request_list; 1146 } mm; 1147 struct idr context_idr; 1148 }; 1149 1150 #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 1151 1152 #define IS_I830(dev) ((dev)->pci_device == 0x3577) 1153 #define IS_845G(dev) ((dev)->pci_device == 0x2562) 1154 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 1155 #define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1156 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1157 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1158 #define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1159 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 1160 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 1161 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 1162 #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 1163 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 1164 #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) 1165 #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) 1166 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 1167 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 1168 #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 1169 #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1170 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 1171 #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ 1172 (dev)->pci_device == 0x0152 || \ 1173 (dev)->pci_device == 0x015a) 1174 #define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ 1175 (dev)->pci_device == 0x0106 || \ 1176 (dev)->pci_device == 0x010A) 1177 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 1178 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 1179 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1180 #define IS_ULT(dev) (IS_HASWELL(dev) && \ 1181 ((dev)->pci_device & 0xFF00) == 0x0A00) 1182 1183 /* 1184 * The genX designation typically refers to the render engine, so render 1185 * capability related checks should use IS_GEN, while display and other checks 1186 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 1187 * chips, etc.). 1188 */ 1189 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 1190 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 1191 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 1192 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 1193 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 1194 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 1195 1196 #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 1197 #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 1198 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 1199 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1200 1201 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 1202 #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) 1203 1204 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 1205 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 1206 1207 /* Early gen2 have a totally busted CS tlb and require pinned batches. 
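 *
 * A hedged sketch of how a batch-submission path might special-case these
 * parts with the macro below (the local flag name is illustrative, not a
 * driver symbol):
 *
 *	if (HAS_BROKEN_CS_TLB(dev))
 *		needs_pinned_batch = true;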
*/ 1208 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 1209 1210 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1211 * rows, which changed the alignment requirements and fence programming. 1212 */ 1213 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 1214 IS_I915GM(dev))) 1215 #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) 1216 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1217 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1218 #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 1219 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 1220 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 1221 /* dsparb controlled by hw only */ 1222 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1223 1224 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 1225 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1226 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1227 1228 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1229 1230 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 1231 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1232 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 1233 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 1234 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 1235 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 1236 1237 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1238 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 1239 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1240 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1241 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 1242 1243 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) 1244 1245 #define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1246 1247 #define GT_FREQUENCY_MULTIPLIER 50 1248 1249 #include "i915_trace.h" 1250 1251 /** 1252 * RC6 is a special power stage which allows the GPU to enter a very 1253 * low-voltage mode when idle, using down to 0V while at this stage. This 1254 * stage is entered automatically when the GPU is idle and RC6 support is 1255 * enabled, and as soon as a new workload arises the GPU wakes up automatically as well. 1256 * 1257 * There are different RC6 modes available in Intel GPUs, which differ in the 1258 * latency required to enter and leave RC6 and in the voltage consumed by the 1259 * GPU in different states. 1260 * 1261 * The combination of the following flags defines which states the GPU is allowed 1262 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and 1263 * RC6pp is the deepest RC6. Their support by hardware varies according to the 1264 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one 1265 * which brings the most power savings; deeper states save more power, but 1266 * require higher latency to switch to and wake up.
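 *
 * As an illustration (not a statement of driver policy), a configuration
 * that allows the normal and deep states but not the deepest one would be
 * described with the flags defined below as:
 *
 *	rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;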
1267 */ 1268 #define INTEL_RC6_ENABLE (1<<0) 1269 #define INTEL_RC6p_ENABLE (1<<1) 1270 #define INTEL_RC6pp_ENABLE (1<<2) 1271 1272 extern struct drm_ioctl_desc i915_ioctls[]; 1273 extern int i915_max_ioctl; 1274 extern unsigned int i915_fbpercrtc __always_unused; 1275 extern int i915_panel_ignore_lid __read_mostly; 1276 extern unsigned int i915_powersave __read_mostly; 1277 extern int i915_semaphores __read_mostly; 1278 extern unsigned int i915_lvds_downclock __read_mostly; 1279 extern int i915_lvds_channel_mode __read_mostly; 1280 extern int i915_panel_use_ssc __read_mostly; 1281 extern int i915_vbt_sdvo_panel_type __read_mostly; 1282 extern int i915_enable_rc6 __read_mostly; 1283 extern int i915_enable_fbc __read_mostly; 1284 extern bool i915_enable_hangcheck __read_mostly; 1285 extern int i915_enable_ppgtt __read_mostly; 1286 extern unsigned int i915_preliminary_hw_support __read_mostly; 1287 1288 extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1289 extern int i915_resume(struct drm_device *dev); 1290 extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 1291 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); 1292 1293 /* i915_dma.c */ 1294 void i915_update_dri1_breadcrumb(struct drm_device *dev); 1295 extern void i915_kernel_lost_context(struct drm_device * dev); 1296 extern int i915_driver_load(struct drm_device *, unsigned long flags); 1297 extern int i915_driver_unload(struct drm_device *); 1298 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); 1299 extern void i915_driver_lastclose(struct drm_device * dev); 1300 extern void i915_driver_preclose(struct drm_device *dev, 1301 struct drm_file *file_priv); 1302 extern void i915_driver_postclose(struct drm_device *dev, 1303 struct drm_file *file_priv); 1304 extern int i915_driver_device_is_agp(struct drm_device * dev); 1305 #ifdef CONFIG_COMPAT 1306 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 1307 unsigned long arg); 1308 #endif 1309 extern int i915_emit_box(struct drm_device *dev, 1310 struct drm_clip_rect *box, 1311 int DR1, int DR4); 1312 extern int intel_gpu_reset(struct drm_device *dev); 1313 extern int i915_reset(struct drm_device *dev); 1314 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 1315 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 1316 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 1317 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 1318 1319 extern void intel_console_resume(struct work_struct *work); 1320 1321 /* i915_irq.c */ 1322 void i915_hangcheck_elapsed(unsigned long data); 1323 void i915_handle_error(struct drm_device *dev, bool wedged); 1324 1325 extern void intel_irq_init(struct drm_device *dev); 1326 extern void intel_gt_init(struct drm_device *dev); 1327 extern void intel_gt_reset(struct drm_device *dev); 1328 1329 void i915_error_state_free(struct kref *error_ref); 1330 1331 void 1332 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1333 1334 void 1335 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1336 1337 void intel_enable_asle(struct drm_device *dev); 1338 1339 #ifdef CONFIG_DEBUG_FS 1340 extern void i915_destroy_error_state(struct drm_device *dev); 1341 #else 1342 #define i915_destroy_error_state(x) 1343 #endif 1344 1345 1346 /* i915_gem.c */ 1347 int i915_gem_init_ioctl(struct drm_device *dev, void *data, 1348 struct drm_file 
*file_priv); 1349 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 1350 struct drm_file *file_priv); 1351 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 1352 struct drm_file *file_priv); 1353 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1354 struct drm_file *file_priv); 1355 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1356 struct drm_file *file_priv); 1357 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 1358 struct drm_file *file_priv); 1359 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1360 struct drm_file *file_priv); 1361 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1362 struct drm_file *file_priv); 1363 int i915_gem_execbuffer(struct drm_device *dev, void *data, 1364 struct drm_file *file_priv); 1365 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 1366 struct drm_file *file_priv); 1367 int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 1368 struct drm_file *file_priv); 1369 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 1370 struct drm_file *file_priv); 1371 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 1372 struct drm_file *file_priv); 1373 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 1374 struct drm_file *file); 1375 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 1376 struct drm_file *file); 1377 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 1378 struct drm_file *file_priv); 1379 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 1380 struct drm_file *file_priv); 1381 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 1382 struct drm_file *file_priv); 1383 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 1384 struct drm_file *file_priv); 1385 int i915_gem_set_tiling(struct drm_device *dev, void *data, 1386 struct drm_file *file_priv); 1387 int i915_gem_get_tiling(struct drm_device *dev, void *data, 1388 struct drm_file *file_priv); 1389 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 1390 struct drm_file *file_priv); 1391 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 1392 struct drm_file *file_priv); 1393 void i915_gem_load(struct drm_device *dev); 1394 int i915_gem_init_object(struct drm_gem_object *obj); 1395 void i915_gem_object_init(struct drm_i915_gem_object *obj, 1396 const struct drm_i915_gem_object_ops *ops); 1397 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1398 size_t size); 1399 void i915_gem_free_object(struct drm_gem_object *obj); 1400 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1401 uint32_t alignment, 1402 bool map_and_fenceable, 1403 bool nonblocking); 1404 void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 1405 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); 1406 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 1407 void i915_gem_lastclose(struct drm_device *dev); 1408 1409 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 1410 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 1411 { 1412 struct scatterlist *sg = obj->pages->sgl; 1413 int nents = obj->pages->nents; 1414 while (nents > SG_MAX_SINGLE_ALLOC) { 1415 if (n < SG_MAX_SINGLE_ALLOC - 1) 1416 break; 1417 1418 sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1); 1419 n -= SG_MAX_SINGLE_ALLOC - 1; 1420 nents -= SG_MAX_SINGLE_ALLOC - 1; 1421 } 1422 return 
sg_page(sg+n); 1423 } 1424 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 1425 { 1426 BUG_ON(obj->pages == NULL); 1427 obj->pages_pin_count++; 1428 } 1429 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 1430 { 1431 BUG_ON(obj->pages_pin_count == 0); 1432 obj->pages_pin_count--; 1433 } 1434 1435 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1436 int i915_gem_object_sync(struct drm_i915_gem_object *obj, 1437 struct intel_ring_buffer *to); 1438 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1439 struct intel_ring_buffer *ring); 1440 1441 int i915_gem_dumb_create(struct drm_file *file_priv, 1442 struct drm_device *dev, 1443 struct drm_mode_create_dumb *args); 1444 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 1445 uint32_t handle, uint64_t *offset); 1446 int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, 1447 uint32_t handle); 1448 /** 1449 * Returns true if seq1 is later than seq2. 1450 */ 1451 static inline bool 1452 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 1453 { 1454 return (int32_t)(seq1 - seq2) >= 0; 1455 } 1456 1457 extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 1458 1459 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 1460 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 1461 1462 static inline bool 1463 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) 1464 { 1465 if (obj->fence_reg != I915_FENCE_REG_NONE) { 1466 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1467 dev_priv->fence_regs[obj->fence_reg].pin_count++; 1468 return true; 1469 } else 1470 return false; 1471 } 1472 1473 static inline void 1474 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) 1475 { 1476 if (obj->fence_reg != I915_FENCE_REG_NONE) { 1477 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1478 dev_priv->fence_regs[obj->fence_reg].pin_count--; 1479 } 1480 } 1481 1482 void i915_gem_retire_requests(struct drm_device *dev); 1483 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); 1484 int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv, 1485 bool interruptible); 1486 1487 void i915_gem_reset(struct drm_device *dev); 1488 void i915_gem_clflush_object(struct drm_i915_gem_object *obj); 1489 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, 1490 uint32_t read_domains, 1491 uint32_t write_domain); 1492 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 1493 int __must_check i915_gem_init(struct drm_device *dev); 1494 int __must_check i915_gem_init_hw(struct drm_device *dev); 1495 void i915_gem_l3_remap(struct drm_device *dev); 1496 void i915_gem_init_swizzling(struct drm_device *dev); 1497 void i915_gem_init_ppgtt(struct drm_device *dev); 1498 void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1499 int __must_check i915_gpu_idle(struct drm_device *dev); 1500 int __must_check i915_gem_idle(struct drm_device *dev); 1501 int i915_add_request(struct intel_ring_buffer *ring, 1502 struct drm_file *file, 1503 u32 *seqno); 1504 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, 1505 uint32_t seqno); 1506 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1507 int __must_check 1508 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 1509 bool write); 1510 int __must_check 1511 
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 1512 int __must_check 1513 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 1514 u32 alignment, 1515 struct intel_ring_buffer *pipelined); 1516 int i915_gem_attach_phys_object(struct drm_device *dev, 1517 struct drm_i915_gem_object *obj, 1518 int id, 1519 int align); 1520 void i915_gem_detach_phys_object(struct drm_device *dev, 1521 struct drm_i915_gem_object *obj); 1522 void i915_gem_free_all_phys_object(struct drm_device *dev); 1523 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 1524 1525 uint32_t 1526 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, 1527 uint32_t size, 1528 int tiling_mode); 1529 1530 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 1531 enum i915_cache_level cache_level); 1532 1533 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 1534 struct dma_buf *dma_buf); 1535 1536 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 1537 struct drm_gem_object *gem_obj, int flags); 1538 1539 /* i915_gem_context.c */ 1540 void i915_gem_context_init(struct drm_device *dev); 1541 void i915_gem_context_fini(struct drm_device *dev); 1542 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 1543 int i915_switch_context(struct intel_ring_buffer *ring, 1544 struct drm_file *file, int to_id); 1545 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 1546 struct drm_file *file); 1547 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 1548 struct drm_file *file); 1549 1550 /* i915_gem_gtt.c */ 1551 int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); 1552 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); 1553 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 1554 struct drm_i915_gem_object *obj, 1555 enum i915_cache_level cache_level); 1556 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 1557 struct drm_i915_gem_object *obj); 1558 1559 void i915_gem_restore_gtt_mappings(struct drm_device *dev); 1560 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); 1561 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 1562 enum i915_cache_level cache_level); 1563 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); 1564 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); 1565 void i915_gem_init_global_gtt(struct drm_device *dev, 1566 unsigned long start, 1567 unsigned long mappable_end, 1568 unsigned long end); 1569 int i915_gem_gtt_init(struct drm_device *dev); 1570 void i915_gem_gtt_fini(struct drm_device *dev); 1571 static inline void i915_gem_chipset_flush(struct drm_device *dev) 1572 { 1573 if (INTEL_INFO(dev)->gen < 6) 1574 intel_gtt_chipset_flush(); 1575 } 1576 1577 1578 /* i915_gem_evict.c */ 1579 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 1580 unsigned alignment, 1581 unsigned cache_level, 1582 bool mappable, 1583 bool nonblock); 1584 int i915_gem_evict_everything(struct drm_device *dev); 1585 1586 /* i915_gem_stolen.c */ 1587 int i915_gem_init_stolen(struct drm_device *dev); 1588 void i915_gem_cleanup_stolen(struct drm_device *dev); 1589 1590 /* i915_gem_tiling.c */ 1591 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 1592 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 1593 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 1594 1595 /* 
i915_gem_debug.c */ 1596 void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, 1597 const char *where, uint32_t mark); 1598 #if WATCH_LISTS 1599 int i915_verify_lists(struct drm_device *dev); 1600 #else 1601 #define i915_verify_lists(dev) 0 1602 #endif 1603 void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, 1604 int handle); 1607 1608 /* i915_debugfs.c */ 1609 int i915_debugfs_init(struct drm_minor *minor); 1610 void i915_debugfs_cleanup(struct drm_minor *minor); 1611 1612 /* i915_suspend.c */ 1613 extern int i915_save_state(struct drm_device *dev); 1614 extern int i915_restore_state(struct drm_device *dev); 1615 1620 /* i915_sysfs.c */ 1621 void i915_setup_sysfs(struct drm_device *dev_priv); 1622 void i915_teardown_sysfs(struct drm_device *dev_priv); 1623 1624 /* intel_i2c.c */ 1625 extern int intel_setup_gmbus(struct drm_device *dev); 1626 extern void intel_teardown_gmbus(struct drm_device *dev); 1627 extern inline bool intel_gmbus_is_port_valid(unsigned port) 1628 { 1629 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); 1630 } 1631 1632 extern struct i2c_adapter *intel_gmbus_get_adapter( 1633 struct drm_i915_private *dev_priv, unsigned port); 1634 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 1635 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 1636 extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 1637 { 1638 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 1639 } 1640 extern void intel_i2c_reset(struct drm_device *dev); 1641 1642 /* intel_opregion.c */ 1643 extern int intel_opregion_setup(struct drm_device *dev); 1644 #ifdef CONFIG_ACPI 1645 extern void intel_opregion_init(struct drm_device *dev); 1646 extern void intel_opregion_fini(struct drm_device *dev); 1647 extern void intel_opregion_asle_intr(struct drm_device *dev); 1648 extern void intel_opregion_gse_intr(struct drm_device *dev); 1649 extern void intel_opregion_enable_asle(struct drm_device *dev); 1650 #else 1651 static inline void intel_opregion_init(struct drm_device *dev) { return; } 1652 static inline void intel_opregion_fini(struct drm_device *dev) { return; } 1653 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 1654 static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } 1655 static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } 1656 #endif 1657 1658 /* intel_acpi.c */ 1659 #ifdef CONFIG_ACPI 1660 extern void intel_register_dsm_handler(void); 1661 extern void intel_unregister_dsm_handler(void); 1662 #else 1663 static inline void intel_register_dsm_handler(void) { return; } 1664 static inline void intel_unregister_dsm_handler(void) { return; } 1665 #endif /* CONFIG_ACPI */ 1666 1667 /* modesetting */ 1668 extern void intel_modeset_init_hw(struct drm_device *dev); 1669 extern void intel_modeset_init(struct drm_device *dev); 1670 extern void intel_modeset_gem_init(struct drm_device *dev); 1671 extern void intel_modeset_cleanup(struct drm_device *dev); 1672 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1673 extern void intel_modeset_setup_hw_state(struct drm_device *dev, 1674 bool force_restore); 1675 extern
bool intel_fbc_enabled(struct drm_device *dev); 1676 extern void intel_disable_fbc(struct drm_device *dev); 1677 extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1678 extern void intel_init_pch_refclk(struct drm_device *dev); 1679 extern void gen6_set_rps(struct drm_device *dev, u8 val); 1680 extern void intel_detect_pch(struct drm_device *dev); 1681 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1682 extern int intel_enable_rc6(const struct drm_device *dev); 1683 1684 extern bool i915_semaphore_is_enabled(struct drm_device *dev); 1685 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 1686 struct drm_file *file); 1687 1688 /* overlay */ 1689 #ifdef CONFIG_DEBUG_FS 1690 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 1691 extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); 1692 1693 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 1694 extern void intel_display_print_error_state(struct seq_file *m, 1695 struct drm_device *dev, 1696 struct intel_display_error_state *error); 1697 #endif 1698 1699 /* On SNB platform, before reading ring registers forcewake bit 1700 * must be set to prevent GT core from power down and stale values being 1701 * returned. 1702 */ 1703 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1704 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1705 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); 1706 1707 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); 1708 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); 1709 1710 #define __i915_read(x, y) \ 1711 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 1712 1713 __i915_read(8, b) 1714 __i915_read(16, w) 1715 __i915_read(32, l) 1716 __i915_read(64, q) 1717 #undef __i915_read 1718 1719 #define __i915_write(x, y) \ 1720 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); 1721 1722 __i915_write(8, b) 1723 __i915_write(16, w) 1724 __i915_write(32, l) 1725 __i915_write(64, q) 1726 #undef __i915_write 1727 1728 #define I915_READ8(reg) i915_read8(dev_priv, (reg)) 1729 #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) 1730 1731 #define I915_READ16(reg) i915_read16(dev_priv, (reg)) 1732 #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) 1733 #define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) 1734 #define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) 1735 1736 #define I915_READ(reg) i915_read32(dev_priv, (reg)) 1737 #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) 1738 #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) 1739 #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) 1740 1741 #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) 1742 #define I915_READ64(reg) i915_read64(dev_priv, (reg)) 1743 1744 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 1745 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 1746 1747 1748 #endif 1749
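
/*
 * Worked example (editorial illustration, not part of the header) for
 * i915_seqno_passed() above: seqnos are compared in modular 32-bit
 * arithmetic so that wrap-around of the counter is handled, provided the
 * two values are less than 2^31 apart.
 *
 *	i915_seqno_passed(2, 0xfffffffe)
 *		== ((int32_t)(2 - 0xfffffffe) >= 0)
 *		== ((int32_t)4 >= 0) == true	(2 is "later"; the counter wrapped)
 *
 *	i915_seqno_passed(0xfffffffe, 2)
 *		== ((int32_t)(0xfffffffe - 2) >= 0)
 *		== (-4 >= 0) == false
 */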