/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>

#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>

#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "intel_bios.h"
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
#include "intel_workarounds.h"
#include "intel_uc.h"

#include "i915_gem.h"
#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "i915_timeline.h"
#include "i915_vma.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20190328"
#define DRIVER_TIMESTAMP	1553776914

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam, to
 * enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({	\
	int __ret_warn_on = !!(condition);		\
	if (unlikely(__ret_warn_on))			\
		if (!WARN(i915_modparams.verbose_state_checks, format)) \
			DRM_ERROR(format);		\
	unlikely(__ret_warn_on);			\
})

#define I915_STATE_WARN_ON(x) \
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

bool i915_error_injected(void);

#else

#define i915_inject_load_failure() false
#define i915_error_injected() false

#endif

#define i915_load_error(i915, fmt, ...) \
	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)
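
/*
 * Illustrative sketch (not taken from the driver) of how the helpers above
 * are meant to be used; "encoder_enabled" and the probe step shown here are
 * hypothetical:
 *
 *	I915_STATE_WARN_ON(!encoder_enabled);	// WARN() or DRM_ERROR()
 *
 *	if (i915_inject_load_failure())		// fault injection point
 *		return -ENODEV;			// pretend this probe step failed
 */
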
typedef depot_stack_handle_t intel_wakeref_t;

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_PORT_F,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;
	/* Whether or not to count short HPD IRQs in HPD storms */
	u8 hpd_short_storm_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD handler could block the workqueue trying to take a
	 * mode config mutex that userspace may already hold, while
	 * userspace in turn waits on the DP workqueue to run, which is
	 * blocked behind the non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;

	struct idr context_idr;
	struct mutex context_idr_lock; /* guards context_idr */

	struct idr vm_idr;
	struct mutex vm_idr_lock; /* guards vm_idr */

	unsigned int bsd_engine;

/*
 * Every context ban increments the per-client ban score, and hangs in
 * short succession increment it as well. If the ban threshold is
 * reached, the client is considered banned and submitting more work
 * will fail. This is a stopgap measure to limit a badly behaving
 * client's access to the gpu. Note that unbannable contexts never
 * increment the client ban score.
 */
#define I915_CLIENT_SCORE_HANG_FAST	1
#define I915_CLIENT_FAST_HANG_JIFFIES	(60 * HZ)
#define I915_CLIENT_SCORE_CONTEXT_BAN	3
#define I915_CLIENT_SCORE_BANNED	9
	/** ban_score: Accumulated score of all ctx bans and fast hangs. */
	atomic_t ban_score;
	unsigned long hang_timestamp;
};
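
/*
 * Worked example of the scoring above (illustrative, not driver code):
 * three context bans contribute 3 * I915_CLIENT_SCORE_CONTEXT_BAN = 9,
 * which reaches I915_CLIENT_SCORE_BANNED, so the client is banned. A fast
 * hang (within I915_CLIENT_FAST_HANG_JIFFIES of the previous one) only
 * adds I915_CLIENT_SCORE_HANG_FAST = 1.
 */
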
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct intel_cdclk_state;

struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_state *cdclk_state);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_state *cdclk_state);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct intel_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state.
	 */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state);
	void (*audio_codec_enable)(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state);
	void (*audio_codec_disable)(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state);
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	/*
	 * Program double buffered color management registers during
	 * vblank evasion. The registers should then latch during the
	 * next vblank start, alongside any other double buffered registers
	 * involved with the same commit.
	 */
	void (*color_commit)(const struct intel_crtc_state *crtc_state);
	/*
	 * Load LUTs (and other single buffered color management
	 * registers). Will (hopefully) be called during the vblank
	 * following the latching of any double buffered registers
	 * involved with the same commit.
	 */
	void (*load_luts)(const struct intel_crtc_state *crtc_state);
};

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
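
/*
 * Worked example for the helpers above (illustrative only): a DMC firmware
 * version 1.7 packs as CSR_VERSION(1, 7) == 0x00010007;
 * CSR_VERSION_MAJOR() and CSR_VERSION_MINOR() recover 1 and 7 again.
 */
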
struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	u32 required_version;
	u32 max_fw_size; /* bytes */
	u32 *dmc_payload;
	u32 dmc_fw_size; /* dwords */
	u32 version;
	u32 mmio_count;
	i915_reg_t mmioaddr[8];
	u32 mmiodata[8];
	u32 dc_state;
	u32 allowed_dc_mask;
	intel_wakeref_t wakeref;
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock.
	 */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * Due to the atomic rules we can't access some structures without the
	 * appropriate locking, so we cache information here in order to avoid
	 * these problems.
	 */
	struct intel_fbc_state_cache {
		struct i915_vma *vma;
		unsigned long flags;

		struct {
			unsigned int mode_flags;
			u32 hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
			/*
			 * Display surface base address adjustment for
			 * pageflips. Note that on gen4+ this only adjusts up
			 * to a tile, offsets within a tile are handled in
			 * the hw itself (with the TILEOFF register).
			 */
			int adjusted_x;
			int adjusted_y;

			int y;

			u16 pixel_blend_mode;
		} plane;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;
	} state_cache;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_reg_params {
		struct i915_vma *vma;
		unsigned long flags;

		struct {
			enum pipe pipe;
			enum i9xx_plane_id i9xx_plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;

		int cfb_size;
		unsigned int gen9_wa_cfb_stride;
	} params;

	const char *no_fbc_reason;
};

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;

#define I915_PSR_DEBUG_MODE_MASK	0x0f
#define I915_PSR_DEBUG_DEFAULT		0x00
#define I915_PSR_DEBUG_DISABLE		0x01
#define I915_PSR_DEBUG_ENABLE		0x02
#define I915_PSR_DEBUG_FORCE_PSR1	0x03
#define I915_PSR_DEBUG_IRQ		0x10

	u32 debug;
	bool sink_support;
	bool enabled;
	struct intel_dp *dp;
	enum pipe pipe;
	bool active;
	struct work_struct work;
	unsigned busy_frontbuffer_bits;
	bool sink_psr2_support;
	bool link_standby;
	bool colorimetry_support;
	bool psr2_enabled;
	u8 sink_sync_latency;
	ktime_t last_entry_attempt;
	ktime_t last_exit;
	bool sink_not_reliable;
	bool irq_aux_error;
	u16 su_x_granularity;
};
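
/*
 * Illustrative sketch (not driver code) of how the debug field above is
 * laid out: the low nibble selects the debug mode, bit 4 requests IRQ
 * logging.
 *
 *	u32 debug = I915_PSR_DEBUG_FORCE_PSR1 | I915_PSR_DEBUG_IRQ;
 *
 *	(debug & I915_PSR_DEBUG_MODE_MASK) == I915_PSR_DEBUG_FORCE_PSR1
 */
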
/*
 * Sorted by south display engine compatibility.
 * If the new PCH comes with a south display engine that is not
 * inherited from the latest item, please do not add it to the
 * end. Instead, add it right after its "parent" PCH.
 */
enum intel_pch {
	PCH_NOP = -1,	/* PCH without south display */
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint/Pantherpoint PCH */
	PCH_LPT,	/* Lynxpoint/Wildcatpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kaby Lake PCH */
	PCH_CNP,	/* Cannon/Comet Lake PCH */
	PCH_ICP,	/* Ice Lake PCH */
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u64 saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	ktime_t ktime;
	u32 render_c0;
	u32 media_c0;
};

struct intel_rps {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked */
	u32 pm_intrmsk_mbz;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 boost_freq;		/* Frequency to request when wait boosting */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	int last_adj;

	struct {
		struct mutex mutex;

		enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
		unsigned int interactive;

		u8 up_threshold;   /* Current %busy required to upclock */
		u8 down_threshold; /* Current %busy required to downclock */
	} power;

	bool enabled;
	atomic_t num_waiters;
	atomic_t boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei ei;
};
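
/*
 * Illustrative note on the frequency encoding above (not driver code):
 * the u8 *_freq values are hardware ratio steps, not MHz. For example, on
 * a platform whose step size is 50 MHz, cur_freq == 18 would correspond
 * to 18 * 50 = 900 MHz; the driver converts with a helper such as
 * intel_gpu_freq() rather than using the raw value directly (the helper
 * name and step size are given here only as an assumption for
 * illustration).
 */
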
struct intel_rc6 {
	bool enabled;
	u64 prev_hw_residency[4];
	u64 cur_residency[4];
};

struct intel_llc_pstate {
	bool enabled;
};

struct intel_gen6_power_mgmt {
	struct intel_rps rps;
	struct intel_rc6 rc6;
	struct intel_llc_pstate llc_pstate;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};

/* Power well structure for haswell */
struct i915_power_well_desc {
	const char *name;
	bool always_on;
	u64 domains;
	/* unique identifier for this power well */
	enum i915_power_well_id id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	union {
		struct {
			/*
			 * request/status flag index in the PUNIT power well
			 * control/status registers.
			 */
			u8 idx;
		} vlv;
		struct {
			enum dpio_phy phy;
		} bxt;
		struct {
			const struct i915_power_well_regs *regs;
			/*
			 * request/status flag index in the power well
			 * control/status registers.
			 */
			u8 idx;
			/* Mask of pipes whose IRQ logic is backed by the pw */
			u8 irq_pipe_mask;
			/* The pw is backing the VGA functionality */
			bool has_vga:1;
			bool has_fuses:1;
			/*
			 * The pw is for an ICL+ TypeC PHY port in
			 * Thunderbolt mode.
			 */
			bool is_tc_tbt:1;
		} hsw;
	};
	const struct i915_power_well_ops *ops;
};

struct i915_power_well {
	const struct i915_power_well_desc *desc;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool initializing;
	bool display_core_suspended;
	int power_well_count;

	intel_wakeref_t wakeref;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU). These objects may or may
	 * not actually have any pages attached.
	 */
	struct list_head unbound_list;

	/** List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	spinlock_t free_lock;
	/**
	 * Count of objects pending destruction. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * Small stash of WC pages
	 */
	struct pagestash wc_stash;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * Workqueue to fault in userptr pages, flushed by the execbuf
	 * when required but otherwise left to userspace to try again
	 * on EAGAIN.
	 */
	struct workqueue_struct *userptr_wq;

	u64 unordered_timeline;

	/* the indicator for dispatch video commands on two BSD rings */
	atomic_t bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	u64 object_memory;
	u32 object_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */

#define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */

#define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */

struct ddi_vbt_port_info {
	int max_tmds_clock;

	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	u8 hdmi_level_shift;

	u8 present:1;
	u8 supports_dvi:1;
	u8 supports_hdmi:1;
	u8 supports_dp:1;
	u8 supports_edp:1;
	u8 supports_typec_usb:1;
	u8 supports_tbt:1;

	u8 alternate_aux_channel;
	u8 alternate_ddc_pin;

	u8 dp_boost_level;
	u8 hdmi_boost_level;
	int dp_max_link_rate;		/* 0 for not limited by VBT */
};
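
/*
 * Illustrative sketch (not driver code): consumers of the VBT info above
 * are expected to treat HDMI_LEVEL_SHIFT_UNKNOWN as "no override"; "info"
 * and "level" are hypothetical locals.
 *
 *	if (info->hdmi_level_shift != HDMI_LEVEL_SHIFT_UNKNOWN)
 *		level = info->hdmi_level_shift;
 */
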
enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int int_lvds_support:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	enum drm_panel_orientation orientation;

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	struct {
		bool enable;
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time_us;
		int tp2_tp3_wakeup_time_us;
		int psr2_tp2_tp3_wakeup_time_us;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		u8 controller;		/* brightness controller number */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u16 bl_ports;
		u16 cabc_ports;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
		enum drm_panel_orientation orientation;
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	u32 pri_val;
	u32 spr_val;
	u32 cur_val;
	u32 fbc_val;
};

struct ilk_wm_values {
	u32 wm_pipe[3];
	u32 wm_lp[3];
	u32 wm_lp_spr[3];
	u32 wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct g4x_pipe_wm {
	u16 plane[I915_MAX_PLANES];
	u16 fbc;
};

struct g4x_sr_wm {
	u16 plane;
	u16 cursor;
	u16 fbc;
};

struct vlv_wm_ddl_values {
	u8 plane[I915_MAX_PLANES];
};

struct vlv_wm_values {
	struct g4x_pipe_wm pipe[3];
	struct g4x_sr_wm sr;
	struct vlv_wm_ddl_values ddl[3];
	u8 level;
	bool cxsr;
};

struct g4x_wm_values {
	struct g4x_pipe_wm pipe[2];
	struct g4x_sr_wm sr;
	struct g4x_sr_wm hpll;
	bool cxsr;
	bool hpll_en;
	bool fbc_en;
};

struct skl_ddb_entry {
	u16 start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}
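
/*
 * Worked example for the helpers above (illustrative only): with
 * start == 0 and end == 16 the entry spans blocks 0..15, so
 * skl_ddb_entry_size() returns 16 because 'end' is exclusive.
 */
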
struct skl_ddb_allocation {
	u8 enabled_slices; /* GEN11 has 2 configurable slices */
};

struct skl_ddb_values {
	unsigned dirty_pipes;
	struct skl_ddb_allocation ddb;
};

struct skl_wm_level {
	u16 min_ddb_alloc;
	u16 plane_res_b;
	u8 plane_res_l;
	bool plane_en;
	bool ignore_lines;
};

/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	bool is_planar;
	u32 width;
	u8 cpp;
	u32 plane_pixel_rate;
	u32 y_min_scanlines;
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	u32 linetime_us;
	u32 dbuf_block_size;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	atomic_t wakeref_count;
	bool suspended;
	bool irqs_enabled;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove the corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct intel_runtime_pm_debug {
		spinlock_t lock;

		depot_stack_handle_t last_acquire;
		depot_stack_handle_t last_release;

		depot_stack_handle_t *owners;
		unsigned long count;
	} debug;
#endif
};
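
/*
 * Minimal sketch of the pattern described above (illustrative only; the
 * get/put helpers are declared elsewhere in the driver, and the cookie
 * handling shown here assumes the wakeref-tracking flavour of the API):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(dev_priv);
 *	... touch registers while the device is guaranteed awake ...
 *	intel_runtime_pm_put(dev_priv, wakeref);
 */
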
enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PLANE3,
	INTEL_PIPE_CRC_SOURCE_PLANE4,
	INTEL_PIPE_CRC_SOURCE_PLANE5,
	INTEL_PIPE_CRC_SOURCE_PLANE6,
	INTEL_PIPE_CRC_SOURCE_PLANE7,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	int skipped;
	enum intel_pipe_crc_source source;
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_virtual_gpu {
	bool active;
	u32 caps;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

struct i915_oa_format {
	u32 format;
	int size;
};

struct i915_oa_reg {
	i915_reg_t addr;
	u32 value;
};

struct i915_oa_config {
	char uuid[UUID_STRING_LEN + 1];
	int id;

	const struct i915_oa_reg *mux_regs;
	u32 mux_regs_len;
	const struct i915_oa_reg *b_counter_regs;
	u32 b_counter_regs_len;
	const struct i915_oa_reg *flex_regs;
	u32 flex_regs_len;

	struct attribute_group sysfs_metric;
	struct attribute *attrs[2];
	struct device_attribute sysfs_metric_id;

	atomic_t ref_count;
};

struct i915_perf_stream;

/**
 * struct i915_perf_stream_ops - the OPs to support a specific stream type
 */
struct i915_perf_stream_ops {
	/**
	 * @enable: Enables the collection of HW samples, either in response to
	 * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
	 * without `I915_PERF_FLAG_DISABLED`.
	 */
	void (*enable)(struct i915_perf_stream *stream);

	/**
	 * @disable: Disables the collection of HW samples, either in response
	 * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
	 * the stream.
	 */
	void (*disable)(struct i915_perf_stream *stream);

	/**
	 * @poll_wait: Call poll_wait, passing a wait queue that will be woken
	 * once there is something ready to read() for the stream
	 */
	void (*poll_wait)(struct i915_perf_stream *stream,
			  struct file *file,
			  poll_table *wait);

	/**
	 * @wait_unlocked: For handling a blocking read, wait until there is
	 * something ready to read() for the stream. E.g. wait on the same
	 * wait queue that would be passed to poll_wait().
	 */
	int (*wait_unlocked)(struct i915_perf_stream *stream);

	/**
	 * @read: Copy buffered metrics as records to userspace
	 * **buf**: the userspace, destination buffer
	 * **count**: the number of bytes to copy, requested by userspace
	 * **offset**: zero at the start of the read, updated as the read
	 * proceeds, it represents how many bytes have been copied so far and
	 * the buffer offset for copying the next record.
	 *
	 * Copy as many buffered i915 perf samples and records for this stream
	 * to userspace as will fit in the given buffer.
	 *
	 * Only write complete records; returning -%ENOSPC if there isn't room
	 * for a complete record.
	 *
	 * Return any error condition that results in a short read such as
	 * -%ENOSPC or -%EFAULT, even though these may be squashed before
	 * returning to userspace.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/**
	 * @destroy: Cleanup any stream specific resources.
	 *
	 * The stream will always be disabled before this is called.
	 */
	void (*destroy)(struct i915_perf_stream *stream);
};

/**
 * struct i915_perf_stream - state for a single open stream FD
 */
struct i915_perf_stream {
	/**
	 * @dev_priv: i915 drm device
	 */
	struct drm_i915_private *dev_priv;

	/**
	 * @link: Links the stream into ``&drm_i915_private->streams``
	 */
	struct list_head link;

	/**
	 * @wakeref: As we keep the device awake while the perf stream is
	 * active, we track our runtime pm reference for later release.
	 */
	intel_wakeref_t wakeref;

	/**
	 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
	 * properties given when opening a stream, representing the contents
	 * of a single sample as read() by userspace.
	 */
	u32 sample_flags;

	/**
	 * @sample_size: Considering the configured contents of a sample
	 * combined with the required header size, this is the total size
	 * of a single sample record.
	 */
	int sample_size;

	/**
	 * @ctx: %NULL if measuring system-wide across all contexts or a
	 * specific context that is being monitored.
	 */
	struct i915_gem_context *ctx;

	/**
	 * @enabled: Whether the stream is currently enabled, considering
	 * whether the stream was opened in a disabled state and based
	 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
	 */
	bool enabled;

	/**
	 * @ops: The callbacks providing the implementation of this specific
	 * type of configured stream.
	 */
	const struct i915_perf_stream_ops *ops;

	/**
	 * @oa_config: The OA configuration used by the stream.
	 */
	struct i915_oa_config *oa_config;
};

/**
 * struct i915_oa_ops - Gen specific implementation of an OA unit stream
 */
struct i915_oa_ops {
	/**
	 * @is_valid_b_counter_reg: Validates register's address for
	 * programming boolean counters for a particular platform.
	 */
	bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
				       u32 addr);

	/**
	 * @is_valid_mux_reg: Validates register's address for programming mux
	 * for a particular platform.
	 */
	bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);

	/**
	 * @is_valid_flex_reg: Validates register's address for programming
	 * flex EU filtering for a particular platform.
	 */
	bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);

	/**
	 * @enable_metric_set: Selects and applies any MUX configuration to set
	 * up the Boolean and Custom (B/C) counters that are part of the
	 * counter reports being sampled. May apply system constraints such as
	 * disabling EU clock gating as required.
	 */
	int (*enable_metric_set)(struct i915_perf_stream *stream);

	/**
	 * @disable_metric_set: Remove system constraints associated with using
	 * the OA unit.
	 */
	void (*disable_metric_set)(struct drm_i915_private *dev_priv);

	/**
	 * @oa_enable: Enable periodic sampling
	 */
	void (*oa_enable)(struct i915_perf_stream *stream);

	/**
	 * @oa_disable: Disable periodic sampling
	 */
	void (*oa_disable)(struct i915_perf_stream *stream);

	/**
	 * @read: Copy data from the circular OA buffer into a given userspace
	 * buffer.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/**
	 * @oa_hw_tail_read: read the OA tail pointer register
	 *
	 * In particular this enables us to share all the fiddly code for
	 * handling the OA unit tail pointer race that affects multiple
	 * generations.
	 */
	u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
};

struct intel_cdclk_state {
	unsigned int cdclk, vco, ref, bypass;
	u8 voltage_level;
};

struct drm_i915_private {
	struct drm_device drm;

	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
	 */
	struct resource dsm_reserved;

	/*
	 * Stolen memory is segmented in hardware with different portions
	 * offlimits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_wopcm wopcm;

	struct intel_huc huc;
	struct intel_guc guc;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses.
	 */
	struct mutex gmbus_mutex;

	/**
	 * Base address of where the gmbus and gpio blocks are located (either
	 * on PCH or on SoC for platforms without PCH).
	 */
	u32 gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	u32 mipi_mmio_base;

	u32 psr_mmio_base;

	u32 pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	/* Context used internally to idle the GPU and setup initial state */
	struct i915_gem_context *kernel_context;
	/* Context only to be used for injecting preemption commands */
	struct i915_gem_context *preempt_context;
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];

	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct mutex sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_imr;
	u32 pm_ier;
	u32 pm_rps_events;
	u32 pm_guc_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_cdclk_freq;

	unsigned int max_dotclk_freq;
	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int fdi_pll_freq;
	unsigned int czclk_freq;

	struct {
		/*
		 * The current logical cdclk state.
		 * See intel_atomic_state.cdclk.logical
		 *
		 * For reading holding any crtc lock is sufficient,
		 * for writing must hold all of them.
		 */
		struct intel_cdclk_state logical;
		/*
		 * The current actual cdclk state.
		 * See intel_atomic_state.cdclk.actual
		 */
		struct intel_cdclk_state actual;
		/* The current hardware cdclk state */
		struct intel_cdclk_state hw;
	} cdclk;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* ordered wq for modesets */
	struct workqueue_struct *modeset_wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct mutex mm_lock;

	struct intel_ppat ppat;

	/* Kernel Modesetting */

	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	/* dpll and cdclk state is protected by connection_mutex */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	const struct intel_dpll_mgr *dpll_mgr;

	/*
	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms
	 * plls share registers.
	 */
	struct mutex dpll_lock;

	unsigned int active_crtcs;
	/* minimum acceptable cdclk for each pipe */
	int min_cdclk[I915_MAX_PIPES];
	/* minimum acceptable voltage level for each pipe */
	u8 min_voltage_level[I915_MAX_PIPES];

	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_wa_list gt_wa_list;

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	u32 edram_cap;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct mutex pcu_lock;

	/* gen6+ GT PM state */
	struct intel_gen6_power_mgmt gt_pm;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* fbdev registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 *
	 */
	struct mutex av_mutex;

	struct {
		struct mutex mutex;
		struct list_head list;
		struct llist_head free_list;
		struct work_struct free_work;

		/* The hw wants to have a stable context identifier for the
		 * lifetime of the context (for OA, PASID, faults, etc).
		 * This is limited in execlists to 21 bits.
		 */
		struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
		struct list_head hw_id_list;
	} contexts;
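
	/*
	 * Illustrative sketch (not driver code): hw context ids are handed
	 * out from the ida above, bounded by the exclusive limits defined
	 * with it, e.g.
	 *
	 *	id = ida_simple_get(&i915->contexts.hw_ida, 0,
	 *			    MAX_CONTEXT_HW_ID, GFP_KERNEL);
	 *
	 * (assuming the plain ida_simple_get() kernel API; the driver may
	 * use a different allocation path.)
	 */
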
	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool power_domains_suspended;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		u16 pri_latency[5];
		/* sprite */
		u16 spr_latency[5];
		/* cursor */
		u16 cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		u16 skl_latency[8];

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_ddb_values skl_hw;
			struct vlv_wm_values vlv;
			struct g4x_wm_values g4x;
		};

		u8 max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * cstate->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;

		/*
		 * Set during HW readout of watermarks/DDB. Some platforms
		 * need to know when we're still using BIOS-provided values
		 * (which we don't fully trust).
		 */
		bool distrust_bios_wm;
	} wm;

	struct dram_info {
		bool valid;
		bool is_16gb_dimm;
		u8 num_channels;
		u8 ranks;
		u32 bandwidth_kbps;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4
		} type;
	} dram_info;

	struct i915_runtime_pm runtime_pm;

	struct {
		bool initialized;

		struct kobject *metrics_kobj;
		struct ctl_table_header *sysctl_header;

		/*
		 * Lock associated with adding/modifying/removing OA configs
		 * in dev_priv->perf.metrics_idr.
		 */
		struct mutex metrics_lock;

		/*
		 * List of dynamic configurations, you need to hold
		 * dev_priv->perf.metrics_lock to access it.
		 */
		struct idr metrics_idr;

		/*
		 * Lock associated with anything below within this structure
		 * except exclusive_stream.
		 */
		struct mutex lock;
		struct list_head streams;

		struct {
			/*
			 * The stream currently using the OA unit. If accessed
			 * outside a syscall associated to its file
			 * descriptor, you need to hold
			 * dev_priv->drm.struct_mutex.
			 */
			struct i915_perf_stream *exclusive_stream;

			struct intel_context *pinned_ctx;
			u32 specific_ctx_id;
			u32 specific_ctx_id_mask;

			struct hrtimer poll_check_timer;
			wait_queue_head_t poll_wq;
			bool pollin;

			/**
			 * For rate limiting any notifications of spurious
			 * invalid OA reports
			 */
			struct ratelimit_state spurious_report_rs;

			bool periodic;
			int period_exponent;

			struct i915_oa_config test_config;

			struct {
				struct i915_vma *vma;
				u8 *vaddr;
				u32 last_ctx_id;
				int format;
				int format_size;

				/**
				 * Locks reads and writes to all head/tail state
				 *
				 * Consider: the head and tail pointer state
				 * needs to be read consistently from a hrtimer
				 * callback (atomic context) and read() fop
				 * (user context) with tail pointer updates
				 * happening in atomic context and head updates
				 * in user context and the (unlikely)
				 * possibility of read() errors needing to
				 * reset all head/tail state.
				 *
				 * Note: Contention or performance aren't
				 * currently a significant concern here
				 * considering the relatively low frequency of
				 * hrtimer callbacks (5ms period) and that
				 * reads typically only happen in response to a
				 * hrtimer event and likely complete before the
				 * next callback.
				 *
				 * Note: This lock is not held *while* reading
				 * and copying data to userspace so the value
				 * of head observed in hrtimer callbacks won't
				 * represent any partial consumption of data.
				 */
				spinlock_t ptr_lock;

				/**
				 * One 'aging' tail pointer and one 'aged'
				 * tail pointer ready to be used for reading.
				 *
				 * Initial values of 0xffffffff are invalid
				 * and imply that an update is required
				 * (and should be ignored by an attempted
				 * read)
				 */
				struct {
					u32 offset;
				} tails[2];

				/**
				 * Index for the aged tail ready to read()
				 * data up to.
				 */
				unsigned int aged_tail_idx;

				/**
				 * A monotonic timestamp for when the current
				 * aging tail pointer was read; used to
				 * determine when it is old enough to trust.
				 */
				u64 aging_timestamp;

				/**
				 * Although we can always read back the head
				 * pointer register, we prefer to avoid
				 * trusting the HW state, just to avoid any
				 * risk that some hardware condition could
				 * somehow bump the head pointer unpredictably
				 * and cause us to forward the wrong OA buffer
				 * data to userspace.
1975 */ 1976 u32 head; 1977 } oa_buffer; 1978 1979 u32 gen7_latched_oastatus1; 1980 u32 ctx_oactxctrl_offset; 1981 u32 ctx_flexeu0_offset; 1982 1983 /** 1984 * The RPT_ID/reason field for Gen8+ includes a bit 1985 * to determine if the CTX ID in the report is valid 1986 * but the specific bit differs between Gen 8 and 9 1987 */ 1988 u32 gen8_valid_ctx_bit; 1989 1990 struct i915_oa_ops ops; 1991 const struct i915_oa_format *oa_formats; 1992 } oa; 1993 } perf; 1994 1995 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1996 struct { 1997 void (*resume)(struct drm_i915_private *); 1998 void (*cleanup_engine)(struct intel_engine_cs *engine); 1999 2000 struct i915_gt_timelines { 2001 struct mutex mutex; /* protects list, tainted by GPU */ 2002 struct list_head active_list; 2003 2004 /* Pack multiple timelines' seqnos into the same page */ 2005 spinlock_t hwsp_lock; 2006 struct list_head hwsp_free_list; 2007 } timelines; 2008 2009 intel_engine_mask_t active_engines; 2010 struct list_head active_rings; 2011 struct list_head closed_vma; 2012 u32 active_requests; 2013 2014 /** 2015 * Is the GPU currently considered idle, or busy executing 2016 * userspace requests? Whilst idle, we allow runtime power 2017 * management to power down the hardware and display clocks. 2018 * In order to reduce the effect on performance, there 2019 * is a slight delay before we do so. 2020 */ 2021 intel_wakeref_t awake; 2022 2023 /** 2024 * We leave the user IRQ off as much as possible, 2025 * but this means that requests will finish and never 2026 * be retired once the system goes idle. Set a timer to 2027 * fire periodically while the ring is running. When it 2028 * fires, go retire requests. 2029 */ 2030 struct delayed_work retire_work; 2031 2032 /** 2033 * When we detect an idle GPU, we want to turn on 2034 * powersaving features. So once we see that there 2035 * are no more requests outstanding and no more 2036 * arrive within a small period of time, we fire 2037 * off the idle_work. 2038 */ 2039 struct delayed_work idle_work; 2040 2041 ktime_t last_init_time; 2042 2043 struct i915_vma *scratch; 2044 } gt; 2045 2046 /* For i945gm vblank irq vs. C3 workaround */ 2047 struct { 2048 struct work_struct work; 2049 struct pm_qos_request pm_qos; 2050 u8 c3_disable_latency; 2051 u8 enabled; 2052 } i945gm_vblank; 2053 2054 /* perform PHY state sanity checks? */ 2055 bool chv_phy_assert[2]; 2056 2057 bool ipc_enabled; 2058 2059 /* Used to save the pipe-to-encoder mapping for audio */ 2060 struct intel_encoder *av_enc_map[I915_MAX_PIPES]; 2061 2062 /* necessary resource sharing with HDMI LPE audio driver. */ 2063 struct { 2064 struct platform_device *platdev; 2065 int irq; 2066 } lpe_audio; 2067 2068 struct i915_pmu pmu; 2069 2070 struct i915_hdcp_comp_master *hdcp_master; 2071 bool hdcp_comp_added; 2072 2073 /* Mutex to protect the above hdcp component related values. */ 2074 struct mutex hdcp_comp_mutex; 2075 2076 /* 2077 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 2078 * will be rejected. Instead look for a better place. 
2079 */ 2080 }; 2081 2082 struct dram_dimm_info { 2083 u8 size, width, ranks; 2084 }; 2085 2086 struct dram_channel_info { 2087 struct dram_dimm_info dimm_l, dimm_s; 2088 u8 ranks; 2089 bool is_16gb_dimm; 2090 }; 2091 2092 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 2093 { 2094 return container_of(dev, struct drm_i915_private, drm); 2095 } 2096 2097 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 2098 { 2099 return to_i915(dev_get_drvdata(kdev)); 2100 } 2101 2102 static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm) 2103 { 2104 return container_of(wopcm, struct drm_i915_private, wopcm); 2105 } 2106 2107 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 2108 { 2109 return container_of(guc, struct drm_i915_private, guc); 2110 } 2111 2112 static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) 2113 { 2114 return container_of(huc, struct drm_i915_private, huc); 2115 } 2116 2117 static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore) 2118 { 2119 return container_of(uncore, struct drm_i915_private, uncore); 2120 } 2121 2122 /* Simple iterator over all initialised engines */ 2123 #define for_each_engine(engine__, dev_priv__, id__) \ 2124 for ((id__) = 0; \ 2125 (id__) < I915_NUM_ENGINES; \ 2126 (id__)++) \ 2127 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 2128 2129 /* Iterator over subset of engines selected by mask */ 2130 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ 2131 for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \ 2132 (tmp__) ? \ 2133 ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \ 2134 0;) 2135 2136 enum hdmi_force_audio { 2137 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 2138 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2139 HDMI_AUDIO_AUTO, /* trust EDID */ 2140 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2141 }; 2142 2143 #define I915_GTT_OFFSET_NONE ((u32)-1) 2144 2145 /* 2146 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2147 * considered to be the frontbuffer for the given plane interface-wise. This 2148 * doesn't mean that the hw necessarily already scans it out, but that any 2149 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2150 * 2151 * We have one bit per pipe and per scanout plane type. 
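 *
 * An illustrative sketch of the bit layout implied by the macros below,
 * assuming PIPE_A == 0, PIPE_B == 1 and PLANE_PRIMARY == 0 as defined in
 * intel_display.h:
 *
 *	INTEL_FRONTBUFFER(PIPE_A, PLANE_PRIMARY)	-> BIT(0)
 *	INTEL_FRONTBUFFER(PIPE_B, PLANE_PRIMARY)	-> BIT(8)
 *	INTEL_FRONTBUFFER_OVERLAY(PIPE_A)		-> BIT(7)
 *	INTEL_FRONTBUFFER_ALL_MASK(PIPE_A)		-> GENMASK(7, 0)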
2152 */ 2153 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2154 #define INTEL_FRONTBUFFER(pipe, plane_id) ({ \ 2155 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \ 2156 BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \ 2157 BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \ 2158 }) 2159 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2160 BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 2161 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2162 GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ 2163 INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 2164 2165 /* 2166 * Optimised SGL iterator for GEM objects 2167 */ 2168 static __always_inline struct sgt_iter { 2169 struct scatterlist *sgp; 2170 union { 2171 unsigned long pfn; 2172 dma_addr_t dma; 2173 }; 2174 unsigned int curr; 2175 unsigned int max; 2176 } __sgt_iter(struct scatterlist *sgl, bool dma) { 2177 struct sgt_iter s = { .sgp = sgl }; 2178 2179 if (s.sgp) { 2180 s.max = s.curr = s.sgp->offset; 2181 s.max += s.sgp->length; 2182 if (dma) 2183 s.dma = sg_dma_address(s.sgp); 2184 else 2185 s.pfn = page_to_pfn(sg_page(s.sgp)); 2186 } 2187 2188 return s; 2189 } 2190 2191 static inline struct scatterlist *____sg_next(struct scatterlist *sg) 2192 { 2193 ++sg; 2194 if (unlikely(sg_is_chain(sg))) 2195 sg = sg_chain_ptr(sg); 2196 return sg; 2197 } 2198 2199 /** 2200 * __sg_next - return the next scatterlist entry in a list 2201 * @sg: The current sg entry 2202 * 2203 * Description: 2204 * If the entry is the last, return NULL; otherwise, step to the next 2205 * element in the array (@sg@+1). If that's a chain pointer, follow it; 2206 * otherwise just return the pointer to the current element. 2207 **/ 2208 static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2209 { 2210 return sg_is_last(sg) ? NULL : ____sg_next(sg); 2211 } 2212 2213 /** 2214 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table 2215 * @__dmap: DMA address (output) 2216 * @__iter: 'struct sgt_iter' (iterator state, internal) 2217 * @__sgt: sg_table to iterate over (input) 2218 */ 2219 #define for_each_sgt_dma(__dmap, __iter, __sgt) \ 2220 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ 2221 ((__dmap) = (__iter).dma + (__iter).curr); \ 2222 (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? \ 2223 (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) 2224 2225 /** 2226 * for_each_sgt_page - iterate over the pages of the given sg_table 2227 * @__pp: page pointer (output) 2228 * @__iter: 'struct sgt_iter' (iterator state, internal) 2229 * @__sgt: sg_table to iterate over (input) 2230 */ 2231 #define for_each_sgt_page(__pp, __iter, __sgt) \ 2232 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ 2233 ((__pp) = (__iter).pfn == 0 ? NULL : \ 2234 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ 2235 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? 
\ 2236 (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) 2237 2238 bool i915_sg_trim(struct sg_table *orig_st); 2239 2240 static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) 2241 { 2242 unsigned int page_sizes; 2243 2244 page_sizes = 0; 2245 while (sg) { 2246 GEM_BUG_ON(sg->offset); 2247 GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE)); 2248 page_sizes |= sg->length; 2249 sg = __sg_next(sg); 2250 } 2251 2252 return page_sizes; 2253 } 2254 2255 static inline unsigned int i915_sg_segment_size(void) 2256 { 2257 unsigned int size = swiotlb_max_segment(); 2258 2259 if (size == 0) 2260 return SCATTERLIST_MAX_SEGMENT; 2261 2262 size = rounddown(size, PAGE_SIZE); 2263 /* swiotlb_max_segment_size can return 1 byte when it means one page. */ 2264 if (size < PAGE_SIZE) 2265 size = PAGE_SIZE; 2266 2267 return size; 2268 } 2269 2270 #define INTEL_INFO(dev_priv) (&(dev_priv)->__info) 2271 #define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime) 2272 #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) 2273 2274 #define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen) 2275 #define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id) 2276 2277 #define REVID_FOREVER 0xff 2278 #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) 2279 2280 #define INTEL_GEN_MASK(s, e) ( \ 2281 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ 2282 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ 2283 GENMASK((e) - 1, (s) - 1)) 2284 2285 /* Returns true if Gen is in inclusive range [Start, End] */ 2286 #define IS_GEN_RANGE(dev_priv, s, e) \ 2287 (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e)))) 2288 2289 #define IS_GEN(dev_priv, n) \ 2290 (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \ 2291 INTEL_INFO(dev_priv)->gen == (n)) 2292 2293 /* 2294 * Return true if revision is in range [since,until] inclusive. 2295 * 2296 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 
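 *
 * An illustrative sketch using the SKL_REVID_* values defined further below:
 *
 *	IS_REVID(dev_priv, 0, SKL_REVID_D0)		- revisions up to and including D0
 *	IS_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)	- E0 and onwards
 *
 * The platform specific wrappers below, e.g. IS_SKL_REVID(), additionally
 * check the platform before comparing the revision.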
2297 */ 2298 #define IS_REVID(p, since, until) \ 2299 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2300 2301 #define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p)) 2302 2303 #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830) 2304 #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G) 2305 #define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X) 2306 #define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G) 2307 #define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G) 2308 #define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM) 2309 #define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G) 2310 #define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM) 2311 #define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G) 2312 #define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM) 2313 #define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45) 2314 #define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45) 2315 #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) 2316 #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) 2317 #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) 2318 #define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW) 2319 #define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33) 2320 #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) 2321 #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE) 2322 #define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \ 2323 INTEL_INFO(dev_priv)->gt == 1) 2324 #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) 2325 #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW) 2326 #define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL) 2327 #define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL) 2328 #define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE) 2329 #define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON) 2330 #define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE) 2331 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) 2332 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) 2333 #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) 2334 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) 2335 #define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE) 2336 #define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile) 2337 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 2338 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 2339 #define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \ 2340 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \ 2341 (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \ 2342 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)) 2343 /* ULX machines are also considered ULT. */ 2344 #define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ 2345 (INTEL_DEVID(dev_priv) & 0xf) == 0xe) 2346 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 2347 INTEL_INFO(dev_priv)->gt == 3) 2348 #define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ 2349 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) 2350 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 2351 INTEL_INFO(dev_priv)->gt == 3) 2352 #define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \ 2353 INTEL_INFO(dev_priv)->gt == 1) 2354 /* ULX machines are also considered ULT. 
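 * For Haswell this is visible below: the ULX device ids 0x0A0E and 0x0A1E
 * also satisfy the 0x0Axx test used by IS_HSW_ULT() above.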
*/ 2355 #define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ 2356 INTEL_DEVID(dev_priv) == 0x0A1E) 2357 #define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \ 2358 INTEL_DEVID(dev_priv) == 0x1913 || \ 2359 INTEL_DEVID(dev_priv) == 0x1916 || \ 2360 INTEL_DEVID(dev_priv) == 0x1921 || \ 2361 INTEL_DEVID(dev_priv) == 0x1926) 2362 #define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \ 2363 INTEL_DEVID(dev_priv) == 0x1915 || \ 2364 INTEL_DEVID(dev_priv) == 0x191E) 2365 #define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \ 2366 INTEL_DEVID(dev_priv) == 0x5913 || \ 2367 INTEL_DEVID(dev_priv) == 0x5916 || \ 2368 INTEL_DEVID(dev_priv) == 0x5921 || \ 2369 INTEL_DEVID(dev_priv) == 0x5926) 2370 #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2371 INTEL_DEVID(dev_priv) == 0x5915 || \ 2372 INTEL_DEVID(dev_priv) == 0x591E) 2373 #define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ 2374 INTEL_DEVID(dev_priv) == 0x87C0 || \ 2375 INTEL_DEVID(dev_priv) == 0x87CA) 2376 #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2377 INTEL_INFO(dev_priv)->gt == 2) 2378 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2379 INTEL_INFO(dev_priv)->gt == 3) 2380 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2381 INTEL_INFO(dev_priv)->gt == 4) 2382 #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \ 2383 INTEL_INFO(dev_priv)->gt == 2) 2384 #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \ 2385 INTEL_INFO(dev_priv)->gt == 3) 2386 #define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 2387 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0) 2388 #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 2389 INTEL_INFO(dev_priv)->gt == 2) 2390 #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 2391 INTEL_INFO(dev_priv)->gt == 3) 2392 #define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \ 2393 (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004) 2394 #define IS_ICL_WITH_PORT_F(dev_priv) (IS_ICELAKE(dev_priv) && \ 2395 INTEL_DEVID(dev_priv) != 0x8A51) 2396 2397 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) 2398 2399 #define SKL_REVID_A0 0x0 2400 #define SKL_REVID_B0 0x1 2401 #define SKL_REVID_C0 0x2 2402 #define SKL_REVID_D0 0x3 2403 #define SKL_REVID_E0 0x4 2404 #define SKL_REVID_F0 0x5 2405 #define SKL_REVID_G0 0x6 2406 #define SKL_REVID_H0 0x7 2407 2408 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2409 2410 #define BXT_REVID_A0 0x0 2411 #define BXT_REVID_A1 0x1 2412 #define BXT_REVID_B0 0x3 2413 #define BXT_REVID_B_LAST 0x8 2414 #define BXT_REVID_C0 0x9 2415 2416 #define IS_BXT_REVID(dev_priv, since, until) \ 2417 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) 2418 2419 #define KBL_REVID_A0 0x0 2420 #define KBL_REVID_B0 0x1 2421 #define KBL_REVID_C0 0x2 2422 #define KBL_REVID_D0 0x3 2423 #define KBL_REVID_E0 0x4 2424 2425 #define IS_KBL_REVID(dev_priv, since, until) \ 2426 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2427 2428 #define GLK_REVID_A0 0x0 2429 #define GLK_REVID_A1 0x1 2430 2431 #define IS_GLK_REVID(dev_priv, since, until) \ 2432 (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2433 2434 #define CNL_REVID_A0 0x0 2435 #define CNL_REVID_B0 0x1 2436 #define CNL_REVID_C0 0x2 2437 2438 #define IS_CNL_REVID(p, since, until) \ 2439 (IS_CANNONLAKE(p) && IS_REVID(p, since, until)) 2440 2441 #define ICL_REVID_A0 0x0 2442 #define ICL_REVID_A2 0x1 2443 #define ICL_REVID_B0 0x3 2444 #define ICL_REVID_B2 
0x4 2445 #define ICL_REVID_C0 0x5 2446 2447 #define IS_ICL_REVID(p, since, until) \ 2448 (IS_ICELAKE(p) && IS_REVID(p, since, until)) 2449 2450 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) 2451 #define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) 2452 #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) 2453 2454 #define ALL_ENGINES (~0u) 2455 #define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id)) 2456 2457 #define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \ 2458 unsigned int first__ = (first); \ 2459 unsigned int count__ = (count); \ 2460 (INTEL_INFO(dev_priv)->engine_mask & \ 2461 GENMASK(first__ + count__ - 1, first__)) >> first__; \ 2462 }) 2463 #define VDBOX_MASK(dev_priv) \ 2464 ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS) 2465 #define VEBOX_MASK(dev_priv) \ 2466 ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS) 2467 2468 #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc) 2469 #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop) 2470 #define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED)) 2471 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ 2472 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) 2473 2474 #define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical) 2475 2476 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ 2477 (INTEL_INFO(dev_priv)->has_logical_ring_contexts) 2478 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \ 2479 (INTEL_INFO(dev_priv)->has_logical_ring_elsq) 2480 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \ 2481 (INTEL_INFO(dev_priv)->has_logical_ring_preemption) 2482 2483 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) 2484 2485 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type) 2486 #define HAS_PPGTT(dev_priv) \ 2487 (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE) 2488 #define HAS_FULL_PPGTT(dev_priv) \ 2489 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL) 2490 2491 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ 2492 GEM_BUG_ON((sizes) == 0); \ 2493 ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \ 2494 }) 2495 2496 #define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay) 2497 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ 2498 (INTEL_INFO(dev_priv)->display.overlay_needs_physical) 2499 2500 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2501 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) 2502 2503 /* WaRsDisableCoarsePowerGating:skl,cnl */ 2504 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 2505 (IS_CANNONLAKE(dev_priv) || \ 2506 IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) 2507 2508 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) 2509 #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ 2510 IS_GEMINILAKE(dev_priv) || \ 2511 IS_KABYLAKE(dev_priv)) 2512 2513 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2514 * rows, which changed the alignment requirements and fence programming. 
2515 */ 2516 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \ 2517 !(IS_I915G(dev_priv) || \ 2518 IS_I915GM(dev_priv))) 2519 #define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv) 2520 #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) 2521 2522 #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) 2523 #define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc) 2524 #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7) 2525 2526 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 2527 2528 #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst) 2529 2530 #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) 2531 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg) 2532 #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) 2533 #define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0) 2534 2535 #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) 2536 #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) 2537 #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */ 2538 2539 #define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr) 2540 2541 #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm) 2542 #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc) 2543 2544 #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) 2545 2546 /* 2547 * For now, anything with a GuC requires uCode loading, and then supports 2548 * command submission once loaded. But these are logically independent 2549 * properties, so we have separate macros to test them. 2550 */ 2551 #define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc) 2552 #define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct) 2553 #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) 2554 #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) 2555 2556 /* For now, anything with a GuC also has a HuC */ 2557 #define HAS_HUC(dev_priv) (HAS_GUC(dev_priv)) 2558 #define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) 2559 2560 /* Having a GuC is not the same as using a GuC */ 2561 #define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv) 2562 #define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv) 2563 #define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv) 2564 2565 #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) 2566 2567 #define INTEL_PCH_DEVICE_ID_MASK 0xff80 2568 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2569 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2570 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2571 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2572 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2573 #define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80 2574 #define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80 2575 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2576 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2577 #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280 2578 #define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300 2579 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 2580 #define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280 2581 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 2582 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2583 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2584 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2585 2586 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) 2587 #define
INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) 2588 #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) 2589 #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP) 2590 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) 2591 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) 2592 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) 2593 #define HAS_PCH_LPT_LP(dev_priv) \ 2594 (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \ 2595 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) 2596 #define HAS_PCH_LPT_H(dev_priv) \ 2597 (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \ 2598 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE) 2599 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) 2600 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) 2601 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) 2602 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) 2603 2604 #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) 2605 2606 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9) 2607 2608 /* DPF == dynamic parity feature */ 2609 #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf) 2610 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \ 2611 2 : HAS_L3_DPF(dev_priv)) 2612 2613 #define GT_FREQUENCY_MULTIPLIER 50 2614 #define GEN9_FREQ_SCALER 3 2615 2616 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0) 2617 2618 #include "i915_trace.h" 2619 2620 static inline bool intel_vtd_active(void) 2621 { 2622 #ifdef CONFIG_INTEL_IOMMU 2623 if (intel_iommu_gfx_mapped) 2624 return true; 2625 #endif 2626 return false; 2627 } 2628 2629 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) 2630 { 2631 return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active(); 2632 } 2633 2634 static inline bool 2635 intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) 2636 { 2637 return IS_BROXTON(dev_priv) && intel_vtd_active(); 2638 } 2639 2640 /* i915_drv.c */ 2641 void __printf(3, 4) 2642 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 2643 const char *fmt, ...); 2644 2645 #define i915_report_error(dev_priv, fmt, ...) 
\ 2646 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2647 2648 #ifdef CONFIG_COMPAT 2649 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2650 unsigned long arg); 2651 #else 2652 #define i915_compat_ioctl NULL 2653 #endif 2654 extern const struct dev_pm_ops i915_pm_ops; 2655 2656 extern int i915_driver_load(struct pci_dev *pdev, 2657 const struct pci_device_id *ent); 2658 extern void i915_driver_unload(struct drm_device *dev); 2659 2660 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2661 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); 2662 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2663 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2664 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2665 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2666 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2667 2668 int intel_engines_init_mmio(struct drm_i915_private *dev_priv); 2669 int intel_engines_init(struct drm_i915_private *dev_priv); 2670 2671 u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv); 2672 2673 /* intel_hotplug.c */ 2674 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, 2675 u32 pin_mask, u32 long_mask); 2676 void intel_hpd_init(struct drm_i915_private *dev_priv); 2677 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2678 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2679 enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, 2680 enum port port); 2681 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2682 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2683 2684 /* i915_irq.c */ 2685 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) 2686 { 2687 unsigned long delay; 2688 2689 if (unlikely(!i915_modparams.enable_hangcheck)) 2690 return; 2691 2692 /* Don't continually defer the hangcheck so that it is always run at 2693 * least once after work has been scheduled on any ring. Otherwise, 2694 * we will ignore a hung ring if a second ring is kept busy. 
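 *
 * Note that queue_delayed_work() leaves an already pending delayed work
 * item untouched, so repeated calls while a hangcheck is pending do not
 * push the existing timer further out.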
2695 */ 2696 2697 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); 2698 queue_delayed_work(system_long_wq, 2699 &dev_priv->gpu_error.hangcheck_work, delay); 2700 } 2701 2702 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2703 extern void intel_irq_fini(struct drm_i915_private *dev_priv); 2704 int intel_irq_install(struct drm_i915_private *dev_priv); 2705 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2706 2707 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) 2708 { 2709 return dev_priv->gvt; 2710 } 2711 2712 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) 2713 { 2714 return dev_priv->vgpu.active; 2715 } 2716 2717 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 2718 enum pipe pipe); 2719 void 2720 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2721 u32 status_mask); 2722 2723 void 2724 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2725 u32 status_mask); 2726 2727 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2728 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2729 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 2730 u32 mask, 2731 u32 bits); 2732 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 2733 u32 interrupt_mask, 2734 u32 enabled_irq_mask); 2735 static inline void 2736 ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits) 2737 { 2738 ilk_update_display_irq(dev_priv, bits, bits); 2739 } 2740 static inline void 2741 ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits) 2742 { 2743 ilk_update_display_irq(dev_priv, bits, 0); 2744 } 2745 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 2746 enum pipe pipe, 2747 u32 interrupt_mask, 2748 u32 enabled_irq_mask); 2749 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 2750 enum pipe pipe, u32 bits) 2751 { 2752 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 2753 } 2754 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 2755 enum pipe pipe, u32 bits) 2756 { 2757 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 2758 } 2759 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2760 u32 interrupt_mask, 2761 u32 enabled_irq_mask); 2762 static inline void 2763 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) 2764 { 2765 ibx_display_interrupt_update(dev_priv, bits, bits); 2766 } 2767 static inline void 2768 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) 2769 { 2770 ibx_display_interrupt_update(dev_priv, bits, 0); 2771 } 2772 2773 /* i915_gem.c */ 2774 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2775 struct drm_file *file_priv); 2776 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2777 struct drm_file *file_priv); 2778 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 2779 struct drm_file *file_priv); 2780 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 2781 struct drm_file *file_priv); 2782 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2783 struct drm_file *file_priv); 2784 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 2785 struct drm_file *file_priv); 2786 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2787 struct drm_file *file_priv); 2788 int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, 2789 struct drm_file *file_priv); 2790 int 
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, 2791 struct drm_file *file_priv); 2792 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2793 struct drm_file *file_priv); 2794 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 2795 struct drm_file *file); 2796 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 2797 struct drm_file *file); 2798 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2799 struct drm_file *file_priv); 2800 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2801 struct drm_file *file_priv); 2802 int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, 2803 struct drm_file *file_priv); 2804 int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 2805 struct drm_file *file_priv); 2806 int i915_gem_init_userptr(struct drm_i915_private *dev_priv); 2807 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); 2808 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 2809 struct drm_file *file); 2810 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 2811 struct drm_file *file_priv); 2812 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2813 struct drm_file *file_priv); 2814 void i915_gem_sanitize(struct drm_i915_private *i915); 2815 int i915_gem_init_early(struct drm_i915_private *dev_priv); 2816 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); 2817 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 2818 int i915_gem_freeze(struct drm_i915_private *dev_priv); 2819 int i915_gem_freeze_late(struct drm_i915_private *dev_priv); 2820 2821 void i915_gem_object_init(struct drm_i915_gem_object *obj, 2822 const struct drm_i915_gem_object_ops *ops); 2823 struct drm_i915_gem_object * 2824 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size); 2825 struct drm_i915_gem_object * 2826 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, 2827 const void *data, size_t size); 2828 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); 2829 void i915_gem_free_object(struct drm_gem_object *obj); 2830 2831 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) 2832 { 2833 if (!atomic_read(&i915->mm.free_count)) 2834 return; 2835 2836 /* A single pass should suffice to release all the freed objects (along 2837 most call paths), but be a little more paranoid in that freeing 2838 the objects does take a small amount of time, during which the rcu 2839 callbacks could have added new objects into the freed list, and 2840 armed the work again. 2841 */ 2842 do { 2843 rcu_barrier(); 2844 } while (flush_work(&i915->mm.free_work)); 2845 } 2846 2847 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) 2848 { 2849 /* 2850 * Similar to objects above (see i915_gem_drain_freed_objects), in 2851 * general we have workers that are armed by RCU and then rearm 2852 * themselves in their callbacks. To be paranoid, we need to 2853 * drain the workqueue a second time after waiting for the RCU 2854 * grace period so that we catch work queued via RCU from the first 2855 * pass. As neither drain_workqueue() nor flush_workqueue() report 2856 * a result, we assume that we do not require more 2857 than 2 passes to catch all recursive RCU delayed work.
2858 * 2859 */ 2860 int pass = 2; 2861 do { 2862 rcu_barrier(); 2863 drain_workqueue(i915->wq); 2864 } while (--pass); 2865 } 2866 2867 struct i915_vma * __must_check 2868 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 2869 const struct i915_ggtt_view *view, 2870 u64 size, 2871 u64 alignment, 2872 u64 flags); 2873 2874 int i915_gem_object_unbind(struct drm_i915_gem_object *obj); 2875 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2876 2877 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); 2878 2879 static inline int __sg_page_count(const struct scatterlist *sg) 2880 { 2881 return sg->length >> PAGE_SHIFT; 2882 } 2883 2884 struct scatterlist * 2885 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 2886 unsigned int n, unsigned int *offset); 2887 2888 struct page * 2889 i915_gem_object_get_page(struct drm_i915_gem_object *obj, 2890 unsigned int n); 2891 2892 struct page * 2893 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 2894 unsigned int n); 2895 2896 dma_addr_t 2897 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 2898 unsigned long n); 2899 2900 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 2901 struct sg_table *pages, 2902 unsigned int sg_page_sizes); 2903 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2904 2905 static inline int __must_check 2906 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 2907 { 2908 might_lock(&obj->mm.lock); 2909 2910 if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) 2911 return 0; 2912 2913 return __i915_gem_object_get_pages(obj); 2914 } 2915 2916 static inline bool 2917 i915_gem_object_has_pages(struct drm_i915_gem_object *obj) 2918 { 2919 return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages)); 2920 } 2921 2922 static inline void 2923 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 2924 { 2925 GEM_BUG_ON(!i915_gem_object_has_pages(obj)); 2926 2927 atomic_inc(&obj->mm.pages_pin_count); 2928 } 2929 2930 static inline bool 2931 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) 2932 { 2933 return atomic_read(&obj->mm.pages_pin_count); 2934 } 2935 2936 static inline void 2937 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2938 { 2939 GEM_BUG_ON(!i915_gem_object_has_pages(obj)); 2940 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 2941 2942 atomic_dec(&obj->mm.pages_pin_count); 2943 } 2944 2945 static inline void 2946 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2947 { 2948 __i915_gem_object_unpin_pages(obj); 2949 } 2950 2951 enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */ 2952 I915_MM_NORMAL = 0, 2953 I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */ 2954 }; 2955 2956 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 2957 enum i915_mm_subclass subclass); 2958 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); 2959 2960 enum i915_map_type { 2961 I915_MAP_WB = 0, 2962 I915_MAP_WC, 2963 #define I915_MAP_OVERRIDE BIT(31) 2964 I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, 2965 I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, 2966 }; 2967 2968 static inline enum i915_map_type 2969 i915_coherent_map_type(struct drm_i915_private *i915) 2970 { 2971 return HAS_LLC(i915) ? 
I915_MAP_WB : I915_MAP_WC; 2972 } 2973 2974 /** 2975 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 2976 * @obj: the object to map into kernel address space 2977 * @type: the type of mapping, used to select pgprot_t 2978 * 2979 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 2980 * pages and then returns a contiguous mapping of the backing storage into 2981 * the kernel address space. Based on the @type of mapping, the PTE will be 2982 * set to either WriteBack or WriteCombine (via pgprot_t). 2983 * 2984 * The caller is responsible for calling i915_gem_object_unpin_map() when the 2985 * mapping is no longer required. 2986 * 2987 * Returns the pointer through which to access the mapped object, or an 2988 * ERR_PTR() on error. 2989 */ 2990 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 2991 enum i915_map_type type); 2992 2993 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, 2994 unsigned long offset, 2995 unsigned long size); 2996 static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj) 2997 { 2998 __i915_gem_object_flush_map(obj, 0, obj->base.size); 2999 } 3000 3001 /** 3002 * i915_gem_object_unpin_map - releases an earlier mapping 3003 * @obj: the object to unmap 3004 * 3005 * After pinning the object and mapping its pages, once you are finished 3006 * with your access, call i915_gem_object_unpin_map() to release the pin 3007 * upon the mapping. Once the pin count reaches zero, that mapping may be 3008 * removed. 3009 */ 3010 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3011 { 3012 i915_gem_object_unpin_pages(obj); 3013 } 3014 3015 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 3016 unsigned int *needs_clflush); 3017 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 3018 unsigned int *needs_clflush); 3019 #define CLFLUSH_BEFORE BIT(0) 3020 #define CLFLUSH_AFTER BIT(1) 3021 #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) 3022 3023 static inline void 3024 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) 3025 { 3026 i915_gem_object_unpin_pages(obj); 3027 } 3028 3029 static inline int __must_check 3030 i915_mutex_lock_interruptible(struct drm_device *dev) 3031 { 3032 return mutex_lock_interruptible(&dev->struct_mutex); 3033 } 3034 3035 int i915_gem_dumb_create(struct drm_file *file_priv, 3036 struct drm_device *dev, 3037 struct drm_mode_create_dumb *args); 3038 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3039 u32 handle, u64 *offset); 3040 int i915_gem_mmap_gtt_version(void); 3041 3042 void i915_gem_track_fb(struct drm_i915_gem_object *old, 3043 struct drm_i915_gem_object *new, 3044 unsigned frontbuffer_bits); 3045 3046 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 3047 3048 static inline bool __i915_wedged(struct i915_gpu_error *error) 3049 { 3050 return unlikely(test_bit(I915_WEDGED, &error->flags)); 3051 } 3052 3053 static inline bool i915_reset_failed(struct drm_i915_private *i915) 3054 { 3055 return __i915_wedged(&i915->gpu_error); 3056 } 3057 3058 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3059 { 3060 return READ_ONCE(error->reset_count); 3061 } 3062 3063 static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, 3064 struct intel_engine_cs *engine) 3065 { 3066 return READ_ONCE(error->reset_engine_count[engine->id]); 3067 } 3068 3069 void i915_gem_set_wedged(struct 
drm_i915_private *dev_priv); 3070 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); 3071 3072 void i915_gem_init_mmio(struct drm_i915_private *i915); 3073 int __must_check i915_gem_init(struct drm_i915_private *dev_priv); 3074 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); 3075 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); 3076 void i915_gem_fini(struct drm_i915_private *dev_priv); 3077 void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); 3078 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, 3079 unsigned int flags, long timeout); 3080 void i915_gem_suspend(struct drm_i915_private *dev_priv); 3081 void i915_gem_suspend_late(struct drm_i915_private *dev_priv); 3082 void i915_gem_resume(struct drm_i915_private *dev_priv); 3083 vm_fault_t i915_gem_fault(struct vm_fault *vmf); 3084 int i915_gem_object_wait(struct drm_i915_gem_object *obj, 3085 unsigned int flags, 3086 long timeout); 3087 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 3088 unsigned int flags, 3089 const struct i915_sched_attr *attr); 3090 #define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX) 3091 3092 int __must_check 3093 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); 3094 int __must_check 3095 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); 3096 int __must_check 3097 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3098 struct i915_vma * __must_check 3099 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3100 u32 alignment, 3101 const struct i915_ggtt_view *view, 3102 unsigned int flags); 3103 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); 3104 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3105 int align); 3106 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); 3107 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3108 3109 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3110 enum i915_cache_level cache_level); 3111 3112 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3113 struct dma_buf *dma_buf); 3114 3115 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3116 struct drm_gem_object *gem_obj, int flags); 3117 3118 static inline struct i915_hw_ppgtt * 3119 i915_vm_to_ppgtt(struct i915_address_space *vm) 3120 { 3121 return container_of(vm, struct i915_hw_ppgtt, vm); 3122 } 3123 3124 /* i915_gem_fence_reg.c */ 3125 struct drm_i915_fence_reg * 3126 i915_reserve_fence(struct drm_i915_private *dev_priv); 3127 void i915_unreserve_fence(struct drm_i915_fence_reg *fence); 3128 3129 void i915_gem_restore_fences(struct drm_i915_private *dev_priv); 3130 3131 void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); 3132 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, 3133 struct sg_table *pages); 3134 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, 3135 struct sg_table *pages); 3136 3137 static inline struct i915_gem_context * 3138 __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id) 3139 { 3140 return idr_find(&file_priv->context_idr, id); 3141 } 3142 3143 static inline struct i915_gem_context * 3144 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) 3145 { 3146 struct i915_gem_context *ctx; 3147 3148 rcu_read_lock(); 3149 ctx = 
__i915_gem_context_lookup_rcu(file_priv, id); 3150 if (ctx && !kref_get_unless_zero(&ctx->ref)) 3151 ctx = NULL; 3152 rcu_read_unlock(); 3153 3154 return ctx; 3155 } 3156 3157 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 3158 struct drm_file *file); 3159 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, 3160 struct drm_file *file); 3161 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, 3162 struct drm_file *file); 3163 void i915_oa_init_reg_state(struct intel_engine_cs *engine, 3164 struct intel_context *ce, 3165 u32 *reg_state); 3166 3167 /* i915_gem_evict.c */ 3168 int __must_check i915_gem_evict_something(struct i915_address_space *vm, 3169 u64 min_size, u64 alignment, 3170 unsigned cache_level, 3171 u64 start, u64 end, 3172 unsigned flags); 3173 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, 3174 struct drm_mm_node *node, 3175 unsigned int flags); 3176 int i915_gem_evict_vm(struct i915_address_space *vm); 3177 3178 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv); 3179 3180 /* belongs in i915_gem_gtt.h */ 3181 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) 3182 { 3183 wmb(); 3184 if (INTEL_GEN(dev_priv) < 6) 3185 intel_gtt_chipset_flush(); 3186 } 3187 3188 /* i915_gem_stolen.c */ 3189 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3190 struct drm_mm_node *node, u64 size, 3191 unsigned alignment); 3192 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3193 struct drm_mm_node *node, u64 size, 3194 unsigned alignment, u64 start, 3195 u64 end); 3196 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3197 struct drm_mm_node *node); 3198 int i915_gem_init_stolen(struct drm_i915_private *dev_priv); 3199 void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv); 3200 struct drm_i915_gem_object * 3201 i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, 3202 resource_size_t size); 3203 struct drm_i915_gem_object * 3204 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, 3205 resource_size_t stolen_offset, 3206 resource_size_t gtt_offset, 3207 resource_size_t size); 3208 3209 /* i915_gem_internal.c */ 3210 struct drm_i915_gem_object * 3211 i915_gem_object_create_internal(struct drm_i915_private *dev_priv, 3212 phys_addr_t size); 3213 3214 /* i915_gem_shrinker.c */ 3215 unsigned long i915_gem_shrink(struct drm_i915_private *i915, 3216 unsigned long target, 3217 unsigned long *nr_scanned, 3218 unsigned flags); 3219 #define I915_SHRINK_PURGEABLE 0x1 3220 #define I915_SHRINK_UNBOUND 0x2 3221 #define I915_SHRINK_BOUND 0x4 3222 #define I915_SHRINK_ACTIVE 0x8 3223 #define I915_SHRINK_VMAPS 0x10 3224 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915); 3225 void i915_gem_shrinker_register(struct drm_i915_private *i915); 3226 void i915_gem_shrinker_unregister(struct drm_i915_private *i915); 3227 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, 3228 struct mutex *mutex); 3229 3230 /* i915_gem_tiling.c */ 3231 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3232 { 3233 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3234 3235 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3236 i915_gem_object_is_tiled(obj); 3237 } 3238 3239 u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size, 3240 unsigned int tiling, unsigned int stride); 3241 u32 
i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size, 3242 unsigned int tiling, unsigned int stride); 3243 3244 /* i915_debugfs.c */ 3245 #ifdef CONFIG_DEBUG_FS 3246 int i915_debugfs_register(struct drm_i915_private *dev_priv); 3247 int i915_debugfs_connector_add(struct drm_connector *connector); 3248 void intel_display_crc_init(struct drm_i915_private *dev_priv); 3249 #else 3250 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;} 3251 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3252 { return 0; } 3253 static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} 3254 #endif 3255 3256 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3257 3258 /* i915_cmd_parser.c */ 3259 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); 3260 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); 3261 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); 3262 int intel_engine_cmd_parser(struct intel_engine_cs *engine, 3263 struct drm_i915_gem_object *batch_obj, 3264 struct drm_i915_gem_object *shadow_batch_obj, 3265 u32 batch_start_offset, 3266 u32 batch_len, 3267 bool is_master); 3268 3269 /* i915_perf.c */ 3270 extern void i915_perf_init(struct drm_i915_private *dev_priv); 3271 extern void i915_perf_fini(struct drm_i915_private *dev_priv); 3272 extern void i915_perf_register(struct drm_i915_private *dev_priv); 3273 extern void i915_perf_unregister(struct drm_i915_private *dev_priv); 3274 3275 /* i915_suspend.c */ 3276 extern int i915_save_state(struct drm_i915_private *dev_priv); 3277 extern int i915_restore_state(struct drm_i915_private *dev_priv); 3278 3279 /* i915_sysfs.c */ 3280 void i915_setup_sysfs(struct drm_i915_private *dev_priv); 3281 void i915_teardown_sysfs(struct drm_i915_private *dev_priv); 3282 3283 /* intel_lpe_audio.c */ 3284 int intel_lpe_audio_init(struct drm_i915_private *dev_priv); 3285 void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv); 3286 void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv); 3287 void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, 3288 enum pipe pipe, enum port port, 3289 const void *eld, int ls_clock, bool dp_output); 3290 3291 /* intel_i2c.c */ 3292 extern int intel_setup_gmbus(struct drm_i915_private *dev_priv); 3293 extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv); 3294 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, 3295 unsigned int pin); 3296 extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter); 3297 3298 extern struct i2c_adapter * 3299 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); 3300 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3301 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3302 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3303 { 3304 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 3305 } 3306 extern void intel_i2c_reset(struct drm_i915_private *dev_priv); 3307 3308 /* intel_bios.c */ 3309 void intel_bios_init(struct drm_i915_private *dev_priv); 3310 void intel_bios_cleanup(struct drm_i915_private *dev_priv); 3311 bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3312 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3313 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3314 bool 
intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); 3315 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3316 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); 3317 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 3318 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, 3319 enum port port); 3320 bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, 3321 enum port port); 3322 enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port); 3323 3324 /* intel_acpi.c */ 3325 #ifdef CONFIG_ACPI 3326 extern void intel_register_dsm_handler(void); 3327 extern void intel_unregister_dsm_handler(void); 3328 #else 3329 static inline void intel_register_dsm_handler(void) { return; } 3330 static inline void intel_unregister_dsm_handler(void) { return; } 3331 #endif /* CONFIG_ACPI */ 3332 3333 /* intel_device_info.c */ 3334 static inline struct intel_device_info * 3335 mkwrite_device_info(struct drm_i915_private *dev_priv) 3336 { 3337 return (struct intel_device_info *)INTEL_INFO(dev_priv); 3338 } 3339 3340 static inline struct intel_sseu 3341 intel_device_default_sseu(struct drm_i915_private *i915) 3342 { 3343 const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; 3344 struct intel_sseu value = { 3345 .slice_mask = sseu->slice_mask, 3346 .subslice_mask = sseu->subslice_mask[0], 3347 .min_eus_per_subslice = sseu->max_eus_per_subslice, 3348 .max_eus_per_subslice = sseu->max_eus_per_subslice, 3349 }; 3350 3351 return value; 3352 } 3353 3354 /* modesetting */ 3355 extern void intel_modeset_init_hw(struct drm_device *dev); 3356 extern int intel_modeset_init(struct drm_device *dev); 3357 extern void intel_modeset_cleanup(struct drm_device *dev); 3358 extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, 3359 bool state); 3360 extern void intel_display_resume(struct drm_device *dev); 3361 extern void i915_redisable_vga(struct drm_i915_private *dev_priv); 3362 extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); 3363 extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); 3364 extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); 3365 extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val); 3366 extern void intel_rps_mark_interactive(struct drm_i915_private *i915, 3367 bool interactive); 3368 extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3369 bool enable); 3370 void intel_dsc_enable(struct intel_encoder *encoder, 3371 const struct intel_crtc_state *crtc_state); 3372 void intel_dsc_disable(const struct intel_crtc_state *crtc_state); 3373 3374 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3375 struct drm_file *file); 3376 3377 /* overlay */ 3378 extern struct intel_overlay_error_state * 3379 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); 3380 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3381 struct intel_overlay_error_state *error); 3382 3383 extern struct intel_display_error_state * 3384 intel_display_capture_error_state(struct drm_i915_private *dev_priv); 3385 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3386 struct intel_display_error_state *error); 3387 3388 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); 3389 int sandybridge_pcode_write_timeout(struct 
drm_i915_private *dev_priv, u32 mbox, 3390 u32 val, int fast_timeout_us, 3391 int slow_timeout_ms); 3392 #define sandybridge_pcode_write(dev_priv, mbox, val) \ 3393 sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0) 3394 3395 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, 3396 u32 reply_mask, u32 reply, int timeout_base_ms); 3397 3398 /* intel_sideband.c */ 3399 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); 3400 int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); 3401 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); 3402 u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg); 3403 void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val); 3404 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); 3405 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3406 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); 3407 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3408 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); 3409 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3410 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg); 3411 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val); 3412 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 3413 enum intel_sbi_destination destination); 3414 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 3415 enum intel_sbi_destination destination); 3416 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); 3417 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3418 3419 /* intel_dpio_phy.c */ 3420 void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, 3421 enum dpio_phy *phy, enum dpio_channel *ch); 3422 void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, 3423 enum port port, u32 margin, u32 scale, 3424 u32 enable, u32 deemphasis); 3425 void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); 3426 void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); 3427 bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, 3428 enum dpio_phy phy); 3429 bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, 3430 enum dpio_phy phy); 3431 u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count); 3432 void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, 3433 u8 lane_lat_optim_mask); 3434 u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); 3435 3436 void chv_set_phy_signal_level(struct intel_encoder *encoder, 3437 u32 deemph_reg_value, u32 margin_reg_value, 3438 bool uniq_trans_scale); 3439 void chv_data_lane_soft_reset(struct intel_encoder *encoder, 3440 const struct intel_crtc_state *crtc_state, 3441 bool reset); 3442 void chv_phy_pre_pll_enable(struct intel_encoder *encoder, 3443 const struct intel_crtc_state *crtc_state); 3444 void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, 3445 const struct intel_crtc_state *crtc_state); 3446 void chv_phy_release_cl2_override(struct intel_encoder *encoder); 3447 void chv_phy_post_pll_disable(struct intel_encoder *encoder, 3448 const struct intel_crtc_state *old_crtc_state); 3449 3450 void vlv_set_phy_signal_level(struct intel_encoder *encoder, 3451 u32 demph_reg_value, u32 preemph_reg_value, 3452 u32 
/* intel_dpio_phy.c */
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy);
u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				     u8 lane_lat_optim_mask);
u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
			 const struct intel_crtc_state *old_crtc_state);

/* intel_combo_phy.c */
void icl_combo_phys_init(struct drm_i915_private *dev_priv);
void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg);

u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);

static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
					 const i915_reg_t reg)
{
	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}

#define __I915_REG_OP(op__, dev_priv__, ...) \
	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)

#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__))
#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__))

#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__))
#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__))
#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__))
#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__))

#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__))
#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__))

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__))
#define I915_READ64_2x32(lower_reg__, upper_reg__) \
	__I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__))

#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__))
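/*
 * Minimal usage sketch for the wrappers above (illustrative only; the register
 * and bit names are placeholders, not definitions from i915_reg.h). The macros
 * expand to intel_uncore_*() calls on a local "dev_priv", so they can only be
 * used in functions that have one in scope:
 *
 *	u32 tmp;
 *
 *	tmp = I915_READ(EXAMPLE_REG);
 *	tmp |= EXAMPLE_ENABLE_BIT;
 *	I915_WRITE(EXAMPLE_REG, tmp);
 *	POSTING_READ(EXAMPLE_REG);
 *
 * A 64-bit counter split across two 32-bit registers should be read with
 *
 *	u64 count = I915_READ64_2x32(EXAMPLE_REG_LOW, EXAMPLE_REG_HIGH);
 *
 * which is intended to cope with the upper half changing between the reads.
 */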
/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__))

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return VLV_VGACNTRL;
	else if (INTEL_GEN(dev_priv) >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
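/*
 * Usage sketch for wait_remaining_ms_from_jiffies() above (illustrative only;
 * not lifted from a real caller): take the timestamp when event A happens,
 * then enforce the minimum spacing just before event B:
 *
 *	unsigned long event_a_jiffies = jiffies;
 *
 *	... unrelated work ...
 *
 *	wait_remaining_ms_from_jiffies(event_a_jiffies, 100);
 *	trigger_event_b();
 *
 * If at least 100 ms have already passed, the call returns without sleeping.
 */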
void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
 * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
 * perform the operation. To check beforehand, pass in the parameters to
 * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
 * you only need to pass in the minor offsets; page-aligned pointers are
 * always valid.
 *
 * For just checking for SSE4.1, in the foreknowledge that the future use
 * will be correctly aligned, just use i915_has_memcpy_from_wc().
 */
#define i915_can_memcpy_from_wc(dst, src, len) \
	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)

#define i915_has_memcpy_from_wc() \
	i915_memcpy_from_wc(NULL, NULL, 0)

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 10)
		return CNL_HWS_CSB_WRITE_INDEX;
	else
		return I915_HWS_CSB_WRITE_INDEX;
}

static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
{
	return i915_ggtt_offset(i915->gt.scratch);
}

#endif