/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20150731"

#undef WARN_ON
/* Many gcc versions seem not to see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
 * to enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, format); \
		else \
			DRM_ERROR(format); \
	} \
	unlikely(__ret_warn_on); \
})

#define I915_STATE_WARN_ON(condition) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, "WARN_ON(" #condition ")\n"); \
		else \
			DRM_ERROR("WARN_ON(" #condition ")\n"); \
	} \
	unlikely(__ret_warn_on); \
})

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

/*
 * This is the maximum (across all platforms) number of planes (primary +
 * sprites) that can be active at the same time on one pipe.
 *
 * This value doesn't count the cursor plane.
 */
#define I915_MAX_PLANES	4

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_PORT_DDI_A_2_LANES,
	POWER_DOMAIN_PORT_DDI_A_4_LANES,
	POWER_DOMAIN_PORT_DDI_B_2_LANES,
	POWER_DOMAIN_PORT_DDI_B_4_LANES,
	POWER_DOMAIN_PORT_DDI_C_2_LANES,
	POWER_DOMAIN_PORT_DDI_C_4_LANES,
	POWER_DOMAIN_PORT_DDI_D_2_LANES,
	POWER_DOMAIN_PORT_DDI_D_4_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)
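
/*
 * Illustrative only (not part of the original header): a minimal sketch of
 * how the enum-to-domain mapping macros above are meant to be used, assuming
 * a hypothetical helper that wants the power domain and letter for a pipe:
 *
 *	enum pipe pipe = PIPE_B;
 *	enum intel_display_power_domain domain = POWER_DOMAIN_PIPE(pipe);
 *
 *	// POWER_DOMAIN_PIPE(PIPE_B) evaluates to POWER_DOMAIN_PIPE_B, and
 *	// pipe_name(PIPE_B) evaluates to the character 'B'.
 *	DRM_DEBUG_KMS("pipe %c -> power domain %d\n", pipe_name(pipe), domain);
 */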

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,		/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP handler could block the workqueue while waiting for the
	 * mode config mutex, which userspace may already hold. However
	 * userspace is waiting on the DP workqueue to run, which is
	 * blocked behind the non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &dev->mode_config.plane_list, \
			    base.head)

#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		if ((intel_plane)->pipe == (intel_crtc)->pipe)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_intel_connector(dev, intel_connector) \
	list_for_each_entry(intel_connector, \
			    &dev->mode_config.connector_list, \
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

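/*
 * Illustrative only (not part of the original header): the iterator macros
 * above are plain for/if wrappers, e.g. a hypothetical loop over every pipe
 * and every plane on that pipe would look like:
 *
 *	enum pipe pipe;
 *	int plane;
 *
 *	for_each_pipe(dev_priv, pipe)
 *		for_each_plane(dev_priv, pipe, plane)
 *			DRM_DEBUG_KMS("pipe %c plane %d\n",
 *				      pipe_name(pipe), plane);
 */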
struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	struct intel_engine_cs *bsd_ring;
};

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A = 0,
	DPLL_ID_PCH_PLL_B = 1,
	/* hsw/bdw */
	DPLL_ID_WRPLL1 = 0,
	DPLL_ID_WRPLL2 = 1,
	/* skl */
	DPLL_ID_SKL_DPLL1 = 0,
	DPLL_ID_SKL_DPLL2 = 1,
	DPLL_ID_SKL_DPLL3 = 2,
};
#define I915_NUM_PLLS 3

struct intel_dpll_hw_state {
	/* i9xx, pch plls */
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;

	/* hsw, bdw */
	uint32_t wrpll;

	/* skl */
	/*
	 * DPLL_CTRL1 has 6 bits for each DPLL. We store the bits for this
	 * DPLL in the lower part of ctrl1 and they get shifted into
	 * position when writing the register. This allows us to easily
	 * compare the state to share the DPLL.
	 */
	uint32_t ctrl1;
	/* HDMI only, 0 when used for DP */
	uint32_t cfgcr1, cfgcr2;

	/* bxt */
	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
		 pcsdw12;
};

struct intel_shared_dpll_config {
	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
	struct intel_dpll_hw_state hw_state;
};

struct intel_shared_dpll {
	struct intel_shared_dpll_config config;

	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	/* The mode_set hook is optional and should be used together with the
	 * intel_prepare_shared_dpll function. */
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
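
/*
 * Illustrative only (not part of the original header): a minimal sketch of
 * filling in link M/N values for a DP link, using made-up numbers (24 bpp,
 * 4 lanes, 148500 kHz pixel clock, 270000 kHz link clock):
 *
 *	struct intel_link_m_n m_n;
 *
 *	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
 *	// m_n.gmch_m/gmch_n and m_n.link_m/link_n are then programmed into
 *	// the pipe's data and link M/N registers by the modeset code.
 */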

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	int iommu;
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore_obj;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];

		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_RINGS], wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
	u32 vm_count;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct intel_crtc_state *crtc_state,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, uint32_t sprite_height,
				 int pixel_size, bool enable, bool scaled);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   struct drm_display_mode *mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_request *req,
			  uint32_t flags);
	void (*update_primary_plane)(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA	= (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	enum forcewake_domains fw_domains;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		unsigned wake_count;
		struct timer_list timer;
		u32 reg_set;
		u32 val_set;
		u32 val_clear;
		u32 reg_ack;
		u32 reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (i__) < FW_DOMAIN_ID_COUNT; \
	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
		if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))

#define for_each_fw_domain(domain__, dev_priv__, i__) \
	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)

enum csr_state {
	FW_UNINITIALIZED = 0,
	FW_LOADED,
	FW_FAILED
};

struct intel_csr {
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t mmio_count;
	uint32_t mmioaddr[8];
	uint32_t mmiodata[8];
	enum csr_state state;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_skylake) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
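
/*
 * Illustrative only (not part of the original header): DEV_INFO_FOR_EACH_FLAG
 * is an X-macro. With DEFINE_FLAG/SEP_SEMICOLON as arguments, the invocation
 * inside struct intel_device_info below expands roughly to:
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_fpga_dbg:1;
 *
 * so each feature flag becomes a single-bit bitfield, and the same list can
 * be reused elsewhere with a different func/sep pair (e.g. for debug output).
 */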
struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes:3;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	u8 slice_total;
	u8 subslice_total;
	u8 subslice_per_slice;
	u8 eu_total;
	u8 eu_per_subslice;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0

#define CONTEXT_NO_ZEROMAP (1<<0)
/**
 * struct intel_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *                initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct intel_context {
	struct kref ref;
	int user_handle;
	uint8_t remap_slice;
	struct drm_i915_private *i915;
	int flags;
	struct drm_i915_file_private *file_priv;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_hw_ppgtt *ppgtt;

	/* Legacy ring buffer submission */
	struct {
		struct drm_i915_gem_object *rcs_state;
		bool initialized;
	} legacy_hw_ctx;

	/* Execlists */
	bool rcs_initialized;
	struct {
		struct drm_i915_gem_object *state;
		struct intel_ringbuffer *ringbuf;
		int pin_count;
	} engine[I915_NUM_RINGS];

	struct list_head link;
};

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct i915_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned long uncompressed_size;
	unsigned threshold;
	unsigned int fb_id;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	struct intel_crtc *crtc;
	int y;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	/* Tracks whether the HW is actually enabled, not whether the feature is
	 * possible. */
	bool enabled;

	struct intel_fbc_work {
		struct delayed_work work;
		struct intel_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
		FBC_ROTATION, /* rotation is not supported */
		FBC_IN_DBG_MASTER, /* kernel debugger is active */
	} no_fbc_reason;

	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
	void (*enable_fbc)(struct intel_crtc *crtc);
	void (*disable_fbc)(struct drm_i915_private *dev_priv);
};

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,        /* Sunrisepoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u32 cz_freq;

	u8 up_threshold; /* Current %busy required to upclock */
	u8 down_threshold; /* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	spinlock_t client_lock;
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work delayed_resume_work;
	unsigned boosts;

	struct intel_rps_client semaphores, mmioflips;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct mutex hw_lock;
};
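
/*
 * Illustrative only (not part of the original header): on gen6+ the platform
 * multiple mentioned above is typically 50 MHz, so a hypothetical debug print
 * of the current frequency (assuming the intel_gpu_freq() conversion helper
 * available elsewhere in the driver) would look like:
 *
 *	DRM_DEBUG_DRIVER("current GPU freq: %d MHz\n",
 *			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
 */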

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct shrinker shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests? Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/* the indicator for dispatch video commands on two BSD rings */
	int bsd_ring_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct workqueue_struct *hangcheck_wq;
	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit set)
	 * mean that reset is in progress and even values mean that the
	 * (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* Userspace knobs for gpu hang simulation;
	 * combines both a ring mask, and extra flags
	 */
	u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
#define I915_STOP_RING_ALLOW_WARN      (1 << 30)

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	bool reload_in_reset;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;

	uint8_t alternate_aux_channel;

	uint8_t dp_boost_level;
	uint8_t hdmi_boost_level;
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int has_mipi:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
	} backlight;

	/* MIPI DSI */
	struct {
		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct vlv_pipe_wm {
	uint16_t primary;
	uint16_t sprite[2];
	uint8_t cursor;
};

struct vlv_sr_wm {
	uint16_t plane;
	uint8_t cursor;
};

struct vlv_wm_values {
	struct vlv_pipe_wm pipe[3];
	struct vlv_sr_wm sr;
	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
	uint8_t level;
	bool cxsr;
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}
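
/*
 * Illustrative only (not part of the original header): 'end' being exclusive
 * means an entry covering blocks 16..31 is written as {.start = 16, .end = 32},
 * for which skl_ddb_entry_size() returns 16, e.g.:
 *
 *	struct skl_ddb_entry entry = { .start = 16, .end = 32 };
 *
 *	WARN_ON(skl_ddb_entry_size(&entry) != 16);
 */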

struct skl_ddb_allocation {
	struct skl_ddb_entry pipe[I915_MAX_PIPES];
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* y-plane */
	struct skl_ddb_entry cursor[I915_MAX_PIPES];
};

struct skl_wm_values {
	bool dirty[I915_MAX_PIPES];
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
	uint32_t cursor[I915_MAX_PIPES][8];
	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
	uint32_t cursor_trans[I915_MAX_PIPES];
};

struct skl_wm_level {
	bool plane_en[I915_MAX_PLANES];
	bool cursor_en;
	uint16_t plane_res_b[I915_MAX_PLANES];
	uint8_t plane_res_l[I915_MAX_PLANES];
	uint16_t cursor_res_b;
	uint8_t cursor_res_l;
};

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	bool suspended;
	bool irqs_enabled;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};
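
/*
 * Illustrative only (not part of the original header): entries is used as a
 * ring buffer indexed by head/tail; since INTEL_PIPE_CRC_ENTRIES_NR is a
 * power of two, a producer sketch (assuming pipe_crc->lock is held and the
 * frame/crc0 values come from the caller) looks like:
 *
 *	struct intel_pipe_crc_entry *entry = &pipe_crc->entries[pipe_crc->head];
 *
 *	entry->frame = frame;
 *	entry->crc[0] = crc0;
 *	pipe_crc->head = (pipe_crc->head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
 *	wake_up_interruptible(&pipe_crc->wq);
 */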

struct i915_frontbuffer_tracking {
	struct mutex lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	u32 addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

#define I915_MAX_WA_REGS 16

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
};

struct i915_virtual_gpu {
	bool active;
};

struct i915_execbuffer_params {
	struct drm_device               *dev;
	struct drm_file                 *file;
	uint32_t                        dispatch_flags;
	uint32_t                        args_batch_start_offset;
	uint32_t                        batch_obj_vm_offset;
	struct intel_engine_cs          *ring;
	struct drm_i915_gem_object      *batch_obj;
	struct intel_context            *ctx;
	struct drm_i915_gem_request     *request;
};

struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *requests;

	const struct intel_device_info info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_csr csr;

	/* Display CSR-related protection */
	struct mutex csr_lock;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs ring[I915_NUM_RINGS];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;

	struct drm_dma_handle *status_page_dmah;
	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* protects the mmio flip data */
	spinlock_t mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct mutex sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct i915_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_boot_cdclk;
	unsigned int cdclk_freq, max_cdclk_freq;
	unsigned int hpll_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct mutex mm_lock;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	/* Reclocking support */
	bool render_reclock_avail;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;
#endif

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	bool audio_component_registered;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	u32 chv_phy_control;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		uint16_t skl_latency[8];

		/*
		 * The skl_wm_values structure is a bit too big for stack
		 * allocation, so we keep the staging struct where we store
		 * intermediate results here instead.
		 */
		struct skl_wm_values skl_results;

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
		};
	} wm;

	struct i915_runtime_pm pm;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct {
		int (*execbuf_submit)(struct i915_execbuffer_params *params,
				      struct drm_i915_gem_execbuffer2 *args,
				      struct list_head *vmas);
		int (*init_rings)(struct drm_device *dev);
		void (*cleanup_ring)(struct intel_engine_cs *ring);
		void (*stop_ring)(struct intel_engine_cs *ring);
	} gt;

	bool edp_low_vswing;

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

static inline struct drm_i915_private *dev_to_i915(struct device *dev)
{
	return to_i915(dev_get_drvdata(dev));
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
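
/*
 * Illustrative only (not part of the original header): a minimal sketch of
 * walking the initialised rings, e.g. from a hypothetical debug helper:
 *
 *	struct intel_engine_cs *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG_DRIVER("ring %d: %s\n", i, ring->name);
 */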
1997 */ 1998 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 4 1999 #define INTEL_FRONTBUFFER_BITS \ 2000 (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) 2001 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2002 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2003 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2004 (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2005 #define INTEL_FRONTBUFFER_SPRITE(pipe) \ 2006 (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2007 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2008 (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2009 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2010 (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2011 2012 struct drm_i915_gem_object { 2013 struct drm_gem_object base; 2014 2015 const struct drm_i915_gem_object_ops *ops; 2016 2017 /** List of VMAs backed by this object */ 2018 struct list_head vma_list; 2019 2020 /** Stolen memory for this object, instead of being backed by shmem. */ 2021 struct drm_mm_node *stolen; 2022 struct list_head global_list; 2023 2024 struct list_head ring_list[I915_NUM_RINGS]; 2025 /** Used in execbuf to temporarily hold a ref */ 2026 struct list_head obj_exec_link; 2027 2028 struct list_head batch_pool_link; 2029 2030 /** 2031 * This is set if the object is on the active lists (has pending 2032 * rendering and so a non-zero seqno), and is not set if it is on 2033 * the inactive (ready to be unbound) list. 2034 */ 2035 unsigned int active:I915_NUM_RINGS; 2036 2037 /** 2038 * This is set if the object has been written to since last bound 2039 * to the GTT 2040 */ 2041 unsigned int dirty:1; 2042 2043 /** 2044 * Fence register bits (if any) for this object. Will be set 2045 * as needed when mapped into the GTT. 2046 * Protected by dev->struct_mutex. 2047 */ 2048 signed int fence_reg:I915_MAX_NUM_FENCE_BITS; 2049 2050 /** 2051 * Advice: are the backing pages purgeable? 2052 */ 2053 unsigned int madv:2; 2054 2055 /** 2056 * Current tiling mode for the object. 2057 */ 2058 unsigned int tiling_mode:2; 2059 /** 2060 * Whether the tiling parameters for the currently associated fence 2061 * register have changed. Note that for the purposes of tracking 2062 * tiling changes we also treat the unfenced register, the register 2063 * slot that the object occupies whilst it executes a fenced 2064 * command (such as BLT on gen2/3), as a "fence". 2065 */ 2066 unsigned int fence_dirty:1; 2067 2068 /** 2069 * Is the object at the current location in the gtt mappable and 2070 * fenceable? Used to avoid costly recalculations. 2071 */ 2072 unsigned int map_and_fenceable:1; 2073 2074 /** 2075 * Whether the current gtt mapping needs to be mappable (and isn't just 2076 * mappable by accident). Track pin and fault separately for a more 2077 * accurate mappable working set. 2078 */ 2079 unsigned int fault_mappable:1; 2080 2081 /* 2082 * Is the object to be mapped as read-only to the GPU? 2083 * Only honoured if hardware has relevant pte bit 2084 */ 2085 unsigned long gt_ro:1; 2086 unsigned int cache_level:3; 2087 unsigned int cache_dirty:1; 2088 2089 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; 2090 2091 unsigned int pin_display; 2092 2093 struct sg_table *pages; 2094 int pages_pin_count; 2095 struct get_page { 2096 struct scatterlist *sg; 2097 int last; 2098 } get_page; 2099 2100 /* prime dma-buf support */ 2101 void *dma_buf_vmapping; 2102 int vmapping_count; 2103 2104 /** Breadcrumb of last rendering to the buffer. 2105 * There can only be one writer, but we allow for multiple readers.
2106 * If there is a writer that necessarily implies that all other 2107 * read requests are complete - but we may only be lazily clearing 2108 * the read requests. A read request is naturally the most recent 2109 * request on a ring, so we may have two different write and read 2110 * requests on one ring where the write request is older than the 2111 * read request. This allows for the CPU to read from an active 2112 * buffer by only waiting for the write to complete. 2113 * */ 2114 struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS]; 2115 struct drm_i915_gem_request *last_write_req; 2116 /** Breadcrumb of last fenced GPU access to the buffer. */ 2117 struct drm_i915_gem_request *last_fenced_req; 2118 2119 /** Current tiling stride for the object, if it's tiled. */ 2120 uint32_t stride; 2121 2122 /** References from framebuffers, locks out tiling changes. */ 2123 unsigned long framebuffer_references; 2124 2125 /** Record of address bit 17 of each page at last unbind. */ 2126 unsigned long *bit_17; 2127 2128 union { 2129 /** for phy allocated objects */ 2130 struct drm_dma_handle *phys_handle; 2131 2132 struct i915_gem_userptr { 2133 uintptr_t ptr; 2134 unsigned read_only :1; 2135 unsigned workers :4; 2136 #define I915_GEM_USERPTR_MAX_WORKERS 15 2137 2138 struct i915_mm_struct *mm; 2139 struct i915_mmu_object *mmu_object; 2140 struct work_struct *work; 2141 } userptr; 2142 }; 2143 }; 2144 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2145 2146 void i915_gem_track_fb(struct drm_i915_gem_object *old, 2147 struct drm_i915_gem_object *new, 2148 unsigned frontbuffer_bits); 2149 2150 /** 2151 * Request queue structure. 2152 * 2153 * The request queue allows us to note sequence numbers that have been emitted 2154 * and may be associated with active buffers to be retired. 2155 * 2156 * By keeping this list, we can avoid having to do questionable sequence 2157 * number comparisons on buffer last_read|write_seqno. It also allows an 2158 * emission time to be associated with the request for tracking how far ahead 2159 * of the GPU the submission is. 2160 * 2161 * The requests are reference counted, so upon creation they should have an 2162 * initial reference taken using kref_init 2163 */ 2164 struct drm_i915_gem_request { 2165 struct kref ref; 2166 2167 /** On Which ring this request was generated */ 2168 struct drm_i915_private *i915; 2169 struct intel_engine_cs *ring; 2170 2171 /** GEM sequence number associated with this request. */ 2172 uint32_t seqno; 2173 2174 /** Position in the ringbuffer of the start of the request */ 2175 u32 head; 2176 2177 /** 2178 * Position in the ringbuffer of the start of the postfix. 2179 * This is required to calculate the maximum available ringbuffer 2180 * space without overwriting the postfix. 2181 */ 2182 u32 postfix; 2183 2184 /** Position in the ringbuffer of the end of the whole request */ 2185 u32 tail; 2186 2187 /** 2188 * Context and ring buffer related to this request 2189 * Contexts are refcounted, so when this request is associated with a 2190 * context, we must increment the context's refcount, to guarantee that 2191 * it persists while any request is linked to it. Requests themselves 2192 * are also refcounted, so the request will only be freed when the last 2193 * reference to it is dismissed, and the code in 2194 * i915_gem_request_free() will then decrement the refcount on the 2195 * context. 
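 *
 * A rough sketch of that lifetime (illustrative only; the helpers are
 * declared later in this file and the real call sites differ in detail):
 *
 *	ret = i915_gem_request_alloc(ring, ctx, &req);	- the request now
 *							  holds a reference
 *							  on ctx
 *	... emit commands for the request ...
 *	i915_gem_request_unreference(req);		- the final unreference
 *							  reaches i915_gem_request_free(),
 *							  which drops the ctx reference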
2196 */ 2197 struct intel_context *ctx; 2198 struct intel_ringbuffer *ringbuf; 2199 2200 /** Batch buffer related to this request if any (used for 2201 error state dump only) */ 2202 struct drm_i915_gem_object *batch_obj; 2203 2204 /** Time at which this request was emitted, in jiffies. */ 2205 unsigned long emitted_jiffies; 2206 2207 /** global list entry for this request */ 2208 struct list_head list; 2209 2210 struct drm_i915_file_private *file_priv; 2211 /** file_priv list entry for this request */ 2212 struct list_head client_list; 2213 2214 /** process identifier submitting this request */ 2215 struct pid *pid; 2216 2217 /** 2218 * The ELSP only accepts two elements at a time, so we queue 2219 * context/tail pairs on a given queue (ring->execlist_queue) until the 2220 * hardware is available. The queue serves a double purpose: we also use 2221 * it to keep track of the up to 2 contexts currently in the hardware 2222 * (usually one in execution and the other queued up by the GPU): We 2223 * only remove elements from the head of the queue when the hardware 2224 * informs us that an element has been completed. 2225 * 2226 * All accesses to the queue are mediated by a spinlock 2227 * (ring->execlist_lock). 2228 */ 2229 2230 /** Execlist link in the submission queue.*/ 2231 struct list_head execlist_link; 2232 2233 /** Execlists no. of times this request has been sent to the ELSP */ 2234 int elsp_submitted; 2235 2236 }; 2237 2238 int i915_gem_request_alloc(struct intel_engine_cs *ring, 2239 struct intel_context *ctx, 2240 struct drm_i915_gem_request **req_out); 2241 void i915_gem_request_cancel(struct drm_i915_gem_request *req); 2242 void i915_gem_request_free(struct kref *req_ref); 2243 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, 2244 struct drm_file *file); 2245 2246 static inline uint32_t 2247 i915_gem_request_get_seqno(struct drm_i915_gem_request *req) 2248 { 2249 return req ? req->seqno : 0; 2250 } 2251 2252 static inline struct intel_engine_cs * 2253 i915_gem_request_get_ring(struct drm_i915_gem_request *req) 2254 { 2255 return req ? req->ring : NULL; 2256 } 2257 2258 static inline struct drm_i915_gem_request * 2259 i915_gem_request_reference(struct drm_i915_gem_request *req) 2260 { 2261 if (req) 2262 kref_get(&req->ref); 2263 return req; 2264 } 2265 2266 static inline void 2267 i915_gem_request_unreference(struct drm_i915_gem_request *req) 2268 { 2269 WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex)); 2270 kref_put(&req->ref, i915_gem_request_free); 2271 } 2272 2273 static inline void 2274 i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req) 2275 { 2276 struct drm_device *dev; 2277 2278 if (!req) 2279 return; 2280 2281 dev = req->ring->dev; 2282 if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex)) 2283 mutex_unlock(&dev->struct_mutex); 2284 } 2285 2286 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, 2287 struct drm_i915_gem_request *src) 2288 { 2289 if (src) 2290 i915_gem_request_reference(src); 2291 2292 if (*pdst) 2293 i915_gem_request_unreference(*pdst); 2294 2295 *pdst = src; 2296 } 2297 2298 /* 2299 * XXX: i915_gem_request_completed should be here but currently needs the 2300 * definition of i915_seqno_passed() which is below. It will be moved in 2301 * a later patch when the call to i915_seqno_passed() is obsoleted... 2302 */ 2303 2304 /* 2305 * A command that requires special handling by the command parser. 
2306 */ 2307 struct drm_i915_cmd_descriptor { 2308 /* 2309 * Flags describing how the command parser processes the command. 2310 * 2311 * CMD_DESC_FIXED: The command has a fixed length if this is set, 2312 * a length mask if not set 2313 * CMD_DESC_SKIP: The command is allowed but does not follow the 2314 * standard length encoding for the opcode range in 2315 * which it falls 2316 * CMD_DESC_REJECT: The command is never allowed 2317 * CMD_DESC_REGISTER: The command should be checked against the 2318 * register whitelist for the appropriate ring 2319 * CMD_DESC_MASTER: The command is allowed if the submitting process 2320 * is the DRM master 2321 */ 2322 u32 flags; 2323 #define CMD_DESC_FIXED (1<<0) 2324 #define CMD_DESC_SKIP (1<<1) 2325 #define CMD_DESC_REJECT (1<<2) 2326 #define CMD_DESC_REGISTER (1<<3) 2327 #define CMD_DESC_BITMASK (1<<4) 2328 #define CMD_DESC_MASTER (1<<5) 2329 2330 /* 2331 * The command's unique identification bits and the bitmask to get them. 2332 * This isn't strictly the opcode field as defined in the spec and may 2333 * also include type, subtype, and/or subop fields. 2334 */ 2335 struct { 2336 u32 value; 2337 u32 mask; 2338 } cmd; 2339 2340 /* 2341 * The command's length. The command is either fixed length (i.e. does 2342 * not include a length field) or has a length field mask. The flag 2343 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has 2344 * a length mask. All command entries in a command table must include 2345 * length information. 2346 */ 2347 union { 2348 u32 fixed; 2349 u32 mask; 2350 } length; 2351 2352 /* 2353 * Describes where to find a register address in the command to check 2354 * against the ring's register whitelist. Only valid if flags has the 2355 * CMD_DESC_REGISTER bit set. 2356 * 2357 * A non-zero step value implies that the command may access multiple 2358 * registers in sequence (e.g. LRI), in that case step gives the 2359 * distance in dwords between individual offset fields. 2360 */ 2361 struct { 2362 u32 offset; 2363 u32 mask; 2364 u32 step; 2365 } reg; 2366 2367 #define MAX_CMD_DESC_BITMASKS 3 2368 /* 2369 * Describes command checks where a particular dword is masked and 2370 * compared against an expected value. If the command does not match 2371 * the expected value, the parser rejects it. Only valid if flags has 2372 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero 2373 * are valid. 2374 * 2375 * If the check specifies a non-zero condition_mask then the parser 2376 * only performs the check when the bits specified by condition_mask 2377 * are non-zero. 2378 */ 2379 struct { 2380 u32 offset; 2381 u32 mask; 2382 u32 expected; 2383 u32 condition_offset; 2384 u32 condition_mask; 2385 } bits[MAX_CMD_DESC_BITMASKS]; 2386 }; 2387 2388 /* 2389 * A table of commands requiring special handling by the command parser. 2390 * 2391 * Each ring has an array of tables. Each table consists of an array of command 2392 * descriptors, which must be sorted with command opcodes in ascending order. 2393 */ 2394 struct drm_i915_cmd_table { 2395 const struct drm_i915_cmd_descriptor *table; 2396 int count; 2397 }; 2398 2399 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. 
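 * In practice this means INTEL_INFO() and the IS_*()/HAS_*() macros below
 * accept either pointer type; for instance (illustrative only):
 *
 *	struct drm_device *dev;
 *	struct drm_i915_private *dev_priv;
 *
 *	INTEL_INFO(dev)->gen		- resolved via to_i915(dev)
 *	INTEL_INFO(dev_priv)->gen	- resolved via the direct cast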
*/ 2400 #define __I915__(p) ({ \ 2401 struct drm_i915_private *__p; \ 2402 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ 2403 __p = (struct drm_i915_private *)p; \ 2404 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ 2405 __p = to_i915((struct drm_device *)p); \ 2406 else \ 2407 BUILD_BUG(); \ 2408 __p; \ 2409 }) 2410 #define INTEL_INFO(p) (&__I915__(p)->info) 2411 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2412 #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) 2413 2414 #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) 2415 #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) 2416 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2417 #define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572) 2418 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 2419 #define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592) 2420 #define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772) 2421 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 2422 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 2423 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 2424 #define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42) 2425 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 2426 #define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001) 2427 #define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011) 2428 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 2429 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 2430 #define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046) 2431 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 2432 #define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ 2433 INTEL_DEVID(dev) == 0x0152 || \ 2434 INTEL_DEVID(dev) == 0x015a) 2435 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2436 #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2437 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2438 #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2439 #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2440 #define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev)) 2441 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2442 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2443 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2444 #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2445 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2446 (INTEL_DEVID(dev) & 0xf) == 0xb || \ 2447 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2448 /* ULX machines are also considered ULT. */ 2449 #define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \ 2450 (INTEL_DEVID(dev) & 0xf) == 0xe) 2451 #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ 2452 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2453 #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2454 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) 2455 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2456 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2457 /* ULX machines are also considered ULT. 
*/ 2458 #define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ 2459 INTEL_DEVID(dev) == 0x0A1E) 2460 #define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \ 2461 INTEL_DEVID(dev) == 0x1913 || \ 2462 INTEL_DEVID(dev) == 0x1916 || \ 2463 INTEL_DEVID(dev) == 0x1921 || \ 2464 INTEL_DEVID(dev) == 0x1926) 2465 #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ 2466 INTEL_DEVID(dev) == 0x1915 || \ 2467 INTEL_DEVID(dev) == 0x191E) 2468 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2469 2470 #define SKL_REVID_A0 (0x0) 2471 #define SKL_REVID_B0 (0x1) 2472 #define SKL_REVID_C0 (0x2) 2473 #define SKL_REVID_D0 (0x3) 2474 #define SKL_REVID_E0 (0x4) 2475 #define SKL_REVID_F0 (0x5) 2476 2477 #define BXT_REVID_A0 (0x0) 2478 #define BXT_REVID_B0 (0x3) 2479 #define BXT_REVID_C0 (0x6) 2480 2481 /* 2482 * The genX designation typically refers to the render engine, so render 2483 * capability related checks should use IS_GEN, while display and other checks 2484 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2485 * chips, etc.). 2486 */ 2487 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 2488 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 2489 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 2490 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 2491 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2492 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2493 #define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2494 #define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) 2495 2496 #define RENDER_RING (1<<RCS) 2497 #define BSD_RING (1<<VCS) 2498 #define BLT_RING (1<<BCS) 2499 #define VEBOX_RING (1<<VECS) 2500 #define BSD2_RING (1<<VCS2) 2501 #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) 2502 #define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) 2503 #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) 2504 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2505 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2506 #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2507 __I915__(dev)->ellc_size) 2508 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2509 2510 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2511 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) 2512 #define USES_PPGTT(dev) (i915.enable_ppgtt) 2513 #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2) 2514 2515 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2516 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2517 2518 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2519 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 2520 /* 2521 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2522 * even when in MSI mode. This results in spurious interrupt warnings if the 2523 * legacy irq no. is shared with another device. The kernel then disables that 2524 * interrupt source and so prevents the other device from working properly. 2525 */ 2526 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2527 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2528 2529 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2530 * rows, which changed the alignment requirements and fence programming. 
2531 */ 2532 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 2533 IS_I915GM(dev))) 2534 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 2535 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 2536 2537 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 2538 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2539 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2540 2541 #define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) 2542 2543 #define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2544 INTEL_INFO(dev)->gen >= 9) 2545 2546 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 2547 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2548 #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2549 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ 2550 IS_SKYLAKE(dev)) 2551 #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ 2552 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ 2553 IS_SKYLAKE(dev)) 2554 #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2555 #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2556 2557 #define HAS_CSR(dev) (IS_SKYLAKE(dev)) 2558 2559 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ 2560 INTEL_INFO(dev)->gen >= 8) 2561 2562 #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ 2563 !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) 2564 2565 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2566 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2567 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2568 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2569 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2570 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2571 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2572 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2573 2574 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2575 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) 2576 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2577 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 2578 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 2579 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 2580 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 2581 2582 #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) 2583 2584 /* DPF == dynamic parity feature */ 2585 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 2586 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 
2 : HAS_L3_DPF(dev)) 2587 2588 #define GT_FREQUENCY_MULTIPLIER 50 2589 #define GEN9_FREQ_SCALER 3 2590 2591 #include "i915_trace.h" 2592 2593 extern const struct drm_ioctl_desc i915_ioctls[]; 2594 extern int i915_max_ioctl; 2595 2596 extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state); 2597 extern int i915_resume_legacy(struct drm_device *dev); 2598 2599 /* i915_params.c */ 2600 struct i915_params { 2601 int modeset; 2602 int panel_ignore_lid; 2603 int semaphores; 2604 int lvds_channel_mode; 2605 int panel_use_ssc; 2606 int vbt_sdvo_panel_type; 2607 int enable_rc6; 2608 int enable_fbc; 2609 int enable_ppgtt; 2610 int enable_execlists; 2611 int enable_psr; 2612 unsigned int preliminary_hw_support; 2613 int disable_power_well; 2614 int enable_ips; 2615 int invert_brightness; 2616 int enable_cmd_parser; 2617 /* leave bools at the end to not create holes */ 2618 bool enable_hangcheck; 2619 bool fastboot; 2620 bool prefault_disable; 2621 bool load_detect_test; 2622 bool reset; 2623 bool disable_display; 2624 bool disable_vtd_wa; 2625 bool enable_guc_submission; 2626 int guc_log_level; 2627 int use_mmio_flip; 2628 int mmio_debug; 2629 bool verbose_state_checks; 2630 int edp_vswing; 2631 }; 2632 extern struct i915_params i915 __read_mostly; 2633 2634 /* i915_dma.c */ 2635 extern int i915_driver_load(struct drm_device *, unsigned long flags); 2636 extern int i915_driver_unload(struct drm_device *); 2637 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); 2638 extern void i915_driver_lastclose(struct drm_device * dev); 2639 extern void i915_driver_preclose(struct drm_device *dev, 2640 struct drm_file *file); 2641 extern void i915_driver_postclose(struct drm_device *dev, 2642 struct drm_file *file); 2643 #ifdef CONFIG_COMPAT 2644 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2645 unsigned long arg); 2646 #endif 2647 extern int intel_gpu_reset(struct drm_device *dev); 2648 extern bool intel_has_gpu_reset(struct drm_device *dev); 2649 extern int i915_reset(struct drm_device *dev); 2650 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2651 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2652 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2653 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2654 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2655 void i915_firmware_load_error_print(const char *fw_path, int err); 2656 2657 /* intel_hotplug.c */ 2658 void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); 2659 void intel_hpd_init(struct drm_i915_private *dev_priv); 2660 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2661 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2662 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2663 2664 /* i915_irq.c */ 2665 void i915_queue_hangcheck(struct drm_device *dev); 2666 __printf(3, 4) 2667 void i915_handle_error(struct drm_device *dev, bool wedged, 2668 const char *fmt, ...); 2669 2670 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2671 int intel_irq_install(struct drm_i915_private *dev_priv); 2672 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2673 2674 extern void intel_uncore_sanitize(struct drm_device *dev); 2675 extern void intel_uncore_early_sanitize(struct drm_device *dev, 2676 bool restore_forcewake); 2677 extern void intel_uncore_init(struct drm_device *dev); 2678 extern void 
intel_uncore_check_errors(struct drm_device *dev); 2679 extern void intel_uncore_fini(struct drm_device *dev); 2680 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 2681 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2682 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2683 enum forcewake_domains domains); 2684 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2685 enum forcewake_domains domains); 2686 /* Like above but the caller must manage the uncore.lock itself. 2687 * Must be used with I915_READ_FW and friends. 2688 */ 2689 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 2690 enum forcewake_domains domains); 2691 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 2692 enum forcewake_domains domains); 2693 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2694 static inline bool intel_vgpu_active(struct drm_device *dev) 2695 { 2696 return to_i915(dev)->vgpu.active; 2697 } 2698 2699 void 2700 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2701 u32 status_mask); 2702 2703 void 2704 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2705 u32 status_mask); 2706 2707 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2708 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2709 void 2710 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); 2711 void 2712 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask); 2713 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2714 uint32_t interrupt_mask, 2715 uint32_t enabled_irq_mask); 2716 #define ibx_enable_display_interrupt(dev_priv, bits) \ 2717 ibx_display_interrupt_update((dev_priv), (bits), (bits)) 2718 #define ibx_disable_display_interrupt(dev_priv, bits) \ 2719 ibx_display_interrupt_update((dev_priv), (bits), 0) 2720 2721 /* i915_gem.c */ 2722 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2723 struct drm_file *file_priv); 2724 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2725 struct drm_file *file_priv); 2726 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 2727 struct drm_file *file_priv); 2728 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 2729 struct drm_file *file_priv); 2730 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2731 struct drm_file *file_priv); 2732 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 2733 struct drm_file *file_priv); 2734 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2735 struct drm_file *file_priv); 2736 void i915_gem_execbuffer_move_to_active(struct list_head *vmas, 2737 struct drm_i915_gem_request *req); 2738 void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params); 2739 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, 2740 struct drm_i915_gem_execbuffer2 *args, 2741 struct list_head *vmas); 2742 int i915_gem_execbuffer(struct drm_device *dev, void *data, 2743 struct drm_file *file_priv); 2744 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2745 struct drm_file *file_priv); 2746 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2747 struct drm_file *file_priv); 2748 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 2749 struct drm_file *file); 2750 int i915_gem_set_caching_ioctl(struct drm_device 
*dev, void *data, 2751 struct drm_file *file); 2752 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2753 struct drm_file *file_priv); 2754 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2755 struct drm_file *file_priv); 2756 int i915_gem_set_tiling(struct drm_device *dev, void *data, 2757 struct drm_file *file_priv); 2758 int i915_gem_get_tiling(struct drm_device *dev, void *data, 2759 struct drm_file *file_priv); 2760 int i915_gem_init_userptr(struct drm_device *dev); 2761 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 2762 struct drm_file *file); 2763 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 2764 struct drm_file *file_priv); 2765 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2766 struct drm_file *file_priv); 2767 void i915_gem_load(struct drm_device *dev); 2768 void *i915_gem_object_alloc(struct drm_device *dev); 2769 void i915_gem_object_free(struct drm_i915_gem_object *obj); 2770 void i915_gem_object_init(struct drm_i915_gem_object *obj, 2771 const struct drm_i915_gem_object_ops *ops); 2772 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 2773 size_t size); 2774 struct drm_i915_gem_object *i915_gem_object_create_from_data( 2775 struct drm_device *dev, const void *data, size_t size); 2776 void i915_init_vm(struct drm_i915_private *dev_priv, 2777 struct i915_address_space *vm); 2778 void i915_gem_free_object(struct drm_gem_object *obj); 2779 void i915_gem_vma_destroy(struct i915_vma *vma); 2780 2781 /* Flags used by pin/bind&friends. */ 2782 #define PIN_MAPPABLE (1<<0) 2783 #define PIN_NONBLOCK (1<<1) 2784 #define PIN_GLOBAL (1<<2) 2785 #define PIN_OFFSET_BIAS (1<<3) 2786 #define PIN_USER (1<<4) 2787 #define PIN_UPDATE (1<<5) 2788 #define PIN_OFFSET_MASK (~4095) 2789 int __must_check 2790 i915_gem_object_pin(struct drm_i915_gem_object *obj, 2791 struct i915_address_space *vm, 2792 uint32_t alignment, 2793 uint64_t flags); 2794 int __must_check 2795 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 2796 const struct i915_ggtt_view *view, 2797 uint32_t alignment, 2798 uint64_t flags); 2799 2800 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2801 u32 flags); 2802 int __must_check i915_vma_unbind(struct i915_vma *vma); 2803 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2804 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2805 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2806 2807 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 2808 int *needs_clflush); 2809 2810 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2811 2812 static inline int __sg_page_count(struct scatterlist *sg) 2813 { 2814 return sg->length >> PAGE_SHIFT; 2815 } 2816 2817 static inline struct page * 2818 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 2819 { 2820 if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT)) 2821 return NULL; 2822 2823 if (n < obj->get_page.last) { 2824 obj->get_page.sg = obj->pages->sgl; 2825 obj->get_page.last = 0; 2826 } 2827 2828 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { 2829 obj->get_page.last += __sg_page_count(obj->get_page.sg++); 2830 if (unlikely(sg_is_chain(obj->get_page.sg))) 2831 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); 2832 } 2833 2834 return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last); 2835 } 2836 2837 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object 
*obj) 2838 { 2839 BUG_ON(obj->pages == NULL); 2840 obj->pages_pin_count++; 2841 } 2842 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2843 { 2844 BUG_ON(obj->pages_pin_count == 0); 2845 obj->pages_pin_count--; 2846 } 2847 2848 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 2849 int i915_gem_object_sync(struct drm_i915_gem_object *obj, 2850 struct intel_engine_cs *to, 2851 struct drm_i915_gem_request **to_req); 2852 void i915_vma_move_to_active(struct i915_vma *vma, 2853 struct drm_i915_gem_request *req); 2854 int i915_gem_dumb_create(struct drm_file *file_priv, 2855 struct drm_device *dev, 2856 struct drm_mode_create_dumb *args); 2857 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 2858 uint32_t handle, uint64_t *offset); 2859 /** 2860 * Returns true if seq1 is later than seq2. 2861 */ 2862 static inline bool 2863 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 2864 { 2865 return (int32_t)(seq1 - seq2) >= 0; 2866 } 2867 2868 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 2869 bool lazy_coherency) 2870 { 2871 u32 seqno; 2872 2873 BUG_ON(req == NULL); 2874 2875 seqno = req->ring->get_seqno(req->ring, lazy_coherency); 2876 2877 return i915_seqno_passed(seqno, req->seqno); 2878 } 2879 2880 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 2881 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 2882 2883 struct drm_i915_gem_request * 2884 i915_gem_find_active_request(struct intel_engine_cs *ring); 2885 2886 bool i915_gem_retire_requests(struct drm_device *dev); 2887 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); 2888 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, 2889 bool interruptible); 2890 2891 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 2892 { 2893 return unlikely(atomic_read(&error->reset_counter) 2894 & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); 2895 } 2896 2897 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 2898 { 2899 return atomic_read(&error->reset_counter) & I915_WEDGED; 2900 } 2901 2902 static inline u32 i915_reset_count(struct i915_gpu_error *error) 2903 { 2904 return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; 2905 } 2906 2907 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) 2908 { 2909 return dev_priv->gpu_error.stop_rings == 0 || 2910 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN; 2911 } 2912 2913 static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv) 2914 { 2915 return dev_priv->gpu_error.stop_rings == 0 || 2916 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN; 2917 } 2918 2919 void i915_gem_reset(struct drm_device *dev); 2920 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 2921 int __must_check i915_gem_init(struct drm_device *dev); 2922 int i915_gem_init_rings(struct drm_device *dev); 2923 int __must_check i915_gem_init_hw(struct drm_device *dev); 2924 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); 2925 void i915_gem_init_swizzling(struct drm_device *dev); 2926 void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 2927 int __must_check i915_gpu_idle(struct drm_device *dev); 2928 int __must_check i915_gem_suspend(struct drm_device *dev); 2929 void __i915_add_request(struct drm_i915_gem_request *req, 2930 struct drm_i915_gem_object *batch_obj, 2931 bool flush_caches); 2932 
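/*
 * A minimal sketch of how these are typically used together (illustrative
 * only; the real execbuffer path adds reservation, flushing and error
 * handling on top of this):
 *
 *	struct drm_i915_gem_request *req;
 *	int ret;
 *
 *	ret = i915_gem_request_alloc(ring, ctx, &req);
 *	if (ret)
 *		return ret;
 *	... emit commands into the ring ...
 *	i915_add_request(req);	- expands to __i915_add_request(req, NULL, true)
 */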
#define i915_add_request(req) \ 2933 __i915_add_request(req, NULL, true) 2934 #define i915_add_request_no_flush(req) \ 2935 __i915_add_request(req, NULL, false) 2936 int __i915_wait_request(struct drm_i915_gem_request *req, 2937 unsigned reset_counter, 2938 bool interruptible, 2939 s64 *timeout, 2940 struct intel_rps_client *rps); 2941 int __must_check i915_wait_request(struct drm_i915_gem_request *req); 2942 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 2943 int __must_check 2944 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 2945 bool readonly); 2946 int __must_check 2947 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 2948 bool write); 2949 int __must_check 2950 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 2951 int __must_check 2952 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 2953 u32 alignment, 2954 struct intel_engine_cs *pipelined, 2955 struct drm_i915_gem_request **pipelined_request, 2956 const struct i915_ggtt_view *view); 2957 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, 2958 const struct i915_ggtt_view *view); 2959 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 2960 int align); 2961 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 2962 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 2963 2964 uint32_t 2965 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); 2966 uint32_t 2967 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, 2968 int tiling_mode, bool fenced); 2969 2970 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 2971 enum i915_cache_level cache_level); 2972 2973 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 2974 struct dma_buf *dma_buf); 2975 2976 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 2977 struct drm_gem_object *gem_obj, int flags); 2978 2979 unsigned long 2980 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 2981 const struct i915_ggtt_view *view); 2982 unsigned long 2983 i915_gem_obj_offset(struct drm_i915_gem_object *o, 2984 struct i915_address_space *vm); 2985 static inline unsigned long 2986 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) 2987 { 2988 return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); 2989 } 2990 2991 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); 2992 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 2993 const struct i915_ggtt_view *view); 2994 bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 2995 struct i915_address_space *vm); 2996 2997 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 2998 struct i915_address_space *vm); 2999 struct i915_vma * 3000 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3001 struct i915_address_space *vm); 3002 struct i915_vma * 3003 i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, 3004 const struct i915_ggtt_view *view); 3005 3006 struct i915_vma * 3007 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3008 struct i915_address_space *vm); 3009 struct i915_vma * 3010 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, 3011 const struct i915_ggtt_view *view); 3012 3013 static inline struct i915_vma * 3014 i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) 3015 { 3016 return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); 3017 } 3018 bool i915_gem_obj_is_pinned(struct 
drm_i915_gem_object *obj); 3019 3020 /* Some GGTT VM helpers */ 3021 #define i915_obj_to_ggtt(obj) \ 3022 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) 3023 static inline bool i915_is_ggtt(struct i915_address_space *vm) 3024 { 3025 struct i915_address_space *ggtt = 3026 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base; 3027 return vm == ggtt; 3028 } 3029 3030 static inline struct i915_hw_ppgtt * 3031 i915_vm_to_ppgtt(struct i915_address_space *vm) 3032 { 3033 WARN_ON(i915_is_ggtt(vm)); 3034 3035 return container_of(vm, struct i915_hw_ppgtt, base); 3036 } 3037 3038 3039 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) 3040 { 3041 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); 3042 } 3043 3044 static inline unsigned long 3045 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 3046 { 3047 return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj)); 3048 } 3049 3050 static inline int __must_check 3051 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 3052 uint32_t alignment, 3053 unsigned flags) 3054 { 3055 return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj), 3056 alignment, flags | PIN_GLOBAL); 3057 } 3058 3059 static inline int 3060 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) 3061 { 3062 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); 3063 } 3064 3065 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 3066 const struct i915_ggtt_view *view); 3067 static inline void 3068 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) 3069 { 3070 i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); 3071 } 3072 3073 /* i915_gem_fence.c */ 3074 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 3075 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 3076 3077 bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); 3078 void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); 3079 3080 void i915_gem_restore_fences(struct drm_device *dev); 3081 3082 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 3083 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 3084 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 3085 3086 /* i915_gem_context.c */ 3087 int __must_check i915_gem_context_init(struct drm_device *dev); 3088 void i915_gem_context_fini(struct drm_device *dev); 3089 void i915_gem_context_reset(struct drm_device *dev); 3090 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3091 int i915_gem_context_enable(struct drm_i915_gem_request *req); 3092 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3093 int i915_switch_context(struct drm_i915_gem_request *req); 3094 struct intel_context * 3095 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 3096 void i915_gem_context_free(struct kref *ctx_ref); 3097 struct drm_i915_gem_object * 3098 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3099 static inline void i915_gem_context_reference(struct intel_context *ctx) 3100 { 3101 kref_get(&ctx->ref); 3102 } 3103 3104 static inline void i915_gem_context_unreference(struct intel_context *ctx) 3105 { 3106 kref_put(&ctx->ref, i915_gem_context_free); 3107 } 3108 3109 static inline bool i915_gem_context_is_default(const struct intel_context *c) 3110 { 3111 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3112 } 3113 3114 int i915_gem_context_create_ioctl(struct drm_device 
*dev, void *data, 3115 struct drm_file *file); 3116 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 3117 struct drm_file *file); 3118 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 3119 struct drm_file *file_priv); 3120 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3121 struct drm_file *file_priv); 3122 3123 /* i915_gem_evict.c */ 3124 int __must_check i915_gem_evict_something(struct drm_device *dev, 3125 struct i915_address_space *vm, 3126 int min_size, 3127 unsigned alignment, 3128 unsigned cache_level, 3129 unsigned long start, 3130 unsigned long end, 3131 unsigned flags); 3132 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3133 int i915_gem_evict_everything(struct drm_device *dev); 3134 3135 /* belongs in i915_gem_gtt.h */ 3136 static inline void i915_gem_chipset_flush(struct drm_device *dev) 3137 { 3138 if (INTEL_INFO(dev)->gen < 6) 3139 intel_gtt_chipset_flush(); 3140 } 3141 3142 /* i915_gem_stolen.c */ 3143 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3144 struct drm_mm_node *node, u64 size, 3145 unsigned alignment); 3146 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3147 struct drm_mm_node *node); 3148 int i915_gem_init_stolen(struct drm_device *dev); 3149 void i915_gem_cleanup_stolen(struct drm_device *dev); 3150 struct drm_i915_gem_object * 3151 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 3152 struct drm_i915_gem_object * 3153 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 3154 u32 stolen_offset, 3155 u32 gtt_offset, 3156 u32 size); 3157 3158 /* i915_gem_shrinker.c */ 3159 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3160 long target, 3161 unsigned flags); 3162 #define I915_SHRINK_PURGEABLE 0x1 3163 #define I915_SHRINK_UNBOUND 0x2 3164 #define I915_SHRINK_BOUND 0x4 3165 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3166 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3167 3168 3169 /* i915_gem_tiling.c */ 3170 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3171 { 3172 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3173 3174 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3175 obj->tiling_mode != I915_TILING_NONE; 3176 } 3177 3178 /* i915_gem_debug.c */ 3179 #if WATCH_LISTS 3180 int i915_verify_lists(struct drm_device *dev); 3181 #else 3182 #define i915_verify_lists(dev) 0 3183 #endif 3184 3185 /* i915_debugfs.c */ 3186 int i915_debugfs_init(struct drm_minor *minor); 3187 void i915_debugfs_cleanup(struct drm_minor *minor); 3188 #ifdef CONFIG_DEBUG_FS 3189 int i915_debugfs_connector_add(struct drm_connector *connector); 3190 void intel_display_crc_init(struct drm_device *dev); 3191 #else 3192 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3193 { return 0; } 3194 static inline void intel_display_crc_init(struct drm_device *dev) {} 3195 #endif 3196 3197 /* i915_gpu_error.c */ 3198 __printf(2, 3) 3199 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3200 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3201 const struct i915_error_state_file_priv *error); 3202 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3203 struct drm_i915_private *i915, 3204 size_t count, loff_t pos); 3205 static inline void i915_error_state_buf_release( 3206 struct drm_i915_error_state_buf *eb) 
3207 { 3208 kfree(eb->buf); 3209 } 3210 void i915_capture_error_state(struct drm_device *dev, bool wedge, 3211 const char *error_msg); 3212 void i915_error_state_get(struct drm_device *dev, 3213 struct i915_error_state_file_priv *error_priv); 3214 void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3215 void i915_destroy_error_state(struct drm_device *dev); 3216 3217 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 3218 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3219 3220 /* i915_cmd_parser.c */ 3221 int i915_cmd_parser_get_version(void); 3222 int i915_cmd_parser_init_ring(struct intel_engine_cs *ring); 3223 void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring); 3224 bool i915_needs_cmd_parser(struct intel_engine_cs *ring); 3225 int i915_parse_cmds(struct intel_engine_cs *ring, 3226 struct drm_i915_gem_object *batch_obj, 3227 struct drm_i915_gem_object *shadow_batch_obj, 3228 u32 batch_start_offset, 3229 u32 batch_len, 3230 bool is_master); 3231 3232 /* i915_suspend.c */ 3233 extern int i915_save_state(struct drm_device *dev); 3234 extern int i915_restore_state(struct drm_device *dev); 3235 3236 /* i915_sysfs.c */ 3237 void i915_setup_sysfs(struct drm_device *dev_priv); 3238 void i915_teardown_sysfs(struct drm_device *dev_priv); 3239 3240 /* intel_i2c.c */ 3241 extern int intel_setup_gmbus(struct drm_device *dev); 3242 extern void intel_teardown_gmbus(struct drm_device *dev); 3243 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, 3244 unsigned int pin); 3245 3246 extern struct i2c_adapter * 3247 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); 3248 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3249 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3250 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3251 { 3252 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 3253 } 3254 extern void intel_i2c_reset(struct drm_device *dev); 3255 3256 /* intel_opregion.c */ 3257 #ifdef CONFIG_ACPI 3258 extern int intel_opregion_setup(struct drm_device *dev); 3259 extern void intel_opregion_init(struct drm_device *dev); 3260 extern void intel_opregion_fini(struct drm_device *dev); 3261 extern void intel_opregion_asle_intr(struct drm_device *dev); 3262 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 3263 bool enable); 3264 extern int intel_opregion_notify_adapter(struct drm_device *dev, 3265 pci_power_t state); 3266 #else 3267 static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } 3268 static inline void intel_opregion_init(struct drm_device *dev) { return; } 3269 static inline void intel_opregion_fini(struct drm_device *dev) { return; } 3270 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 3271 static inline int 3272 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) 3273 { 3274 return 0; 3275 } 3276 static inline int 3277 intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) 3278 { 3279 return 0; 3280 } 3281 #endif 3282 3283 /* intel_acpi.c */ 3284 #ifdef CONFIG_ACPI 3285 extern void intel_register_dsm_handler(void); 3286 extern void intel_unregister_dsm_handler(void); 3287 #else 3288 static inline void intel_register_dsm_handler(void) { return; } 3289 static inline void intel_unregister_dsm_handler(void) { return; } 3290 #endif /* 
CONFIG_ACPI */ 3291 3292 /* modesetting */ 3293 extern void intel_modeset_init_hw(struct drm_device *dev); 3294 extern void intel_modeset_init(struct drm_device *dev); 3295 extern void intel_modeset_gem_init(struct drm_device *dev); 3296 extern void intel_modeset_cleanup(struct drm_device *dev); 3297 extern void intel_connector_unregister(struct intel_connector *); 3298 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 3299 extern void intel_display_resume(struct drm_device *dev); 3300 extern void i915_redisable_vga(struct drm_device *dev); 3301 extern void i915_redisable_vga_power_on(struct drm_device *dev); 3302 extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 3303 extern void intel_init_pch_refclk(struct drm_device *dev); 3304 extern void intel_set_rps(struct drm_device *dev, u8 val); 3305 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3306 bool enable); 3307 extern void intel_detect_pch(struct drm_device *dev); 3308 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 3309 extern int intel_enable_rc6(const struct drm_device *dev); 3310 3311 extern bool i915_semaphore_is_enabled(struct drm_device *dev); 3312 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3313 struct drm_file *file); 3314 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, 3315 struct drm_file *file); 3316 3317 /* overlay */ 3318 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 3319 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3320 struct intel_overlay_error_state *error); 3321 3322 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 3323 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3324 struct drm_device *dev, 3325 struct intel_display_error_state *error); 3326 3327 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); 3328 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); 3329 3330 /* intel_sideband.c */ 3331 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); 3332 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); 3333 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); 3334 u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); 3335 void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3336 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); 3337 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3338 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); 3339 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3340 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); 3341 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3342 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); 3343 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3344 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg); 3345 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val); 3346 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 3347 enum intel_sbi_destination destination); 3348 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 3349 enum intel_sbi_destination destination); 3350 u32 vlv_flisdsi_read(struct 
drm_i915_private *dev_priv, u32 reg); 3351 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3352 3353 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3354 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3355 3356 #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) 3357 #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) 3358 3359 #define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) 3360 #define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) 3361 #define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) 3362 #define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) 3363 3364 #define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) 3365 #define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) 3366 #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) 3367 #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) 3368 3369 /* Be very careful with read/write 64-bit values. On 32-bit machines, they 3370 * will be implemented using 2 32-bit writes in an arbitrary order with 3371 * an arbitrary delay between them. This can cause the hardware to 3372 * act upon the intermediate value, possibly leading to corruption and 3373 * machine death. You have been warned. 3374 */ 3375 #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) 3376 #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3377 3378 #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3379 u32 upper, lower, tmp; \ 3380 tmp = I915_READ(upper_reg); \ 3381 do { \ 3382 upper = tmp; \ 3383 lower = I915_READ(lower_reg); \ 3384 tmp = I915_READ(upper_reg); \ 3385 } while (upper != tmp); \ 3386 (u64)upper << 32 | lower; }) 3387 3388 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3389 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3390 3391 /* These are untraced mmio-accessors that are only valid to be used inside 3392 * critical sections inside IRQ handlers where forcewake is explicitly 3393 * controlled. 3394 * Think twice, and think again, before using these. 3395 * Note: Should only be used between intel_uncore_forcewake_irqlock() and 3396 * intel_uncore_forcewake_irqunlock().
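 *
 * A rough sketch of the intended pattern (illustrative only; FORCEWAKE_ALL
 * and dev_priv->uncore.lock are assumed from their declarations elsewhere
 * in this driver):
 *
 *	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	val = I915_READ_FW(reg);
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);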
3397 */ 3398 #define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__)) 3399 #define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__)) 3400 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) 3401 3402 /* "Broadcast RGB" property */ 3403 #define INTEL_BROADCAST_RGB_AUTO 0 3404 #define INTEL_BROADCAST_RGB_FULL 1 3405 #define INTEL_BROADCAST_RGB_LIMITED 2 3406 3407 static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) 3408 { 3409 if (IS_VALLEYVIEW(dev)) 3410 return VLV_VGACNTRL; 3411 else if (INTEL_INFO(dev)->gen >= 5) 3412 return CPU_VGACNTRL; 3413 else 3414 return VGACNTRL; 3415 } 3416 3417 static inline void __user *to_user_ptr(u64 address) 3418 { 3419 return (void __user *)(uintptr_t)address; 3420 } 3421 3422 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) 3423 { 3424 unsigned long j = msecs_to_jiffies(m); 3425 3426 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3427 } 3428 3429 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) 3430 { 3431 return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); 3432 } 3433 3434 static inline unsigned long 3435 timespec_to_jiffies_timeout(const struct timespec *value) 3436 { 3437 unsigned long j = timespec_to_jiffies(value); 3438 3439 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3440 } 3441 3442 /* 3443 * If you need to wait X milliseconds between events A and B, but event B 3444 * doesn't happen exactly after event A, you record the timestamp (jiffies) of 3445 * when event A happened, then just before event B you call this function and 3446 * pass the timestamp as the first argument, and X as the second argument. 3447 */ 3448 static inline void 3449 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) 3450 { 3451 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; 3452 3453 /* 3454 * Don't re-read the value of "jiffies" every time since it may change 3455 * behind our back and break the math. 3456 */ 3457 tmp_jiffies = jiffies; 3458 target_jiffies = timestamp_jiffies + 3459 msecs_to_jiffies_timeout(to_wait_ms); 3460 3461 if (time_after(target_jiffies, tmp_jiffies)) { 3462 remaining_jiffies = target_jiffies - tmp_jiffies; 3463 while (remaining_jiffies) 3464 remaining_jiffies = 3465 schedule_timeout_uninterruptible(remaining_jiffies); 3466 } 3467 } 3468 3469 static inline void i915_trace_irq_get(struct intel_engine_cs *ring, 3470 struct drm_i915_gem_request *req) 3471 { 3472 if (ring->trace_irq_req == NULL && ring->irq_get(ring)) 3473 i915_gem_request_assign(&ring->trace_irq_req, req); 3474 } 3475 3476 #endif 3477