/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include <linux/pm_qos.h>

#include <drm/drm_connector.h>
#include <drm/ttm/ttm_device.h>

#include "display/intel_cdclk.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dmc.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_fbc.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
#include "i915_params.h"
#include "i915_perf_types.h"
#include "i915_scheduler.h"
#include "i915_utils.h"
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
#include "intel_pm_types.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"

struct dpll;
struct drm_i915_clock_gating_funcs;
struct drm_i915_gem_object;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_audio_funcs;
struct intel_cdclk_config;
struct intel_cdclk_funcs;
struct intel_cdclk_state;
struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_connector;
struct intel_crtc;
struct intel_dp;
struct intel_dpll_funcs;
struct intel_encoder;
struct intel_fbdev;
struct intel_fdi_funcs;
struct intel_gmbus;
struct intel_hotplug_funcs;
struct intel_initial_plane_config;
struct intel_limit;
struct intel_overlay;
struct intel_overlay_error_state;
struct vlv_s0ix_state;

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
struct i915_hotplug {
	struct delayed_work hotplug_work;

	const u32 *hpd, *pch_hpd;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	u32 retry_bits;
	struct delayed_work reenable_work;

	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;
	/* Whether or not to count short HPD IRQs in HPD storms */
	u8 hpd_short_storm_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP work could block the workqueue on acquiring a mode config
	 * mutex that userspace may hold, while userspace in turn waits on
	 * the DP work to run, which is queued behind the non-DP one. Give
	 * DP its own workqueue to break that dependency.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)
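/*
 * Illustrative sketch (not from the driver): I915_GEM_GPU_DOMAINS is a plain
 * bitmask, so a typical use is testing whether a set of read/write domains
 * touches the GPU at all, e.g.
 *
 *	if (read_domains & I915_GEM_GPU_DOMAINS)
 *		; // object contents may be produced/consumed by the GPU
 */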
struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

/* Functions used for watermark calcs for display. */
struct drm_i915_wm_disp_funcs {
	/* update_wm is for legacy wm management */
	void (*update_wm)(struct drm_i915_private *dev_priv);
	int (*compute_pipe_wm)(struct intel_atomic_state *state,
			       struct intel_crtc *crtc);
	int (*compute_intermediate_wm)(struct intel_atomic_state *state,
				       struct intel_crtc *crtc);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc *crtc);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc *crtc);
	int (*compute_global_watermarks)(struct intel_atomic_state *state);
};

struct drm_i915_display_funcs {
	/*
	 * Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state.
	 */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	void (*crtc_enable)(struct intel_atomic_state *state,
			    struct intel_crtc *crtc);
	void (*crtc_disable)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc);
	void (*commit_modeset_enables)(struct intel_atomic_state *state);
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u16 saveGCDGMBUS;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/*
	 * Shortcut for the stolen region. This points to either
	 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
	 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device
	 * doesn't support stolen.
	 */
	struct intel_memory_region *stolen_region;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/**
	 * Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex.
	 */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/**
	 * List of objects which are purgeable.
	 */
	struct list_head purge_list;

	/**
	 * List of objects which have allocated pages and are shrinkable.
	 */
	struct list_head shrink_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	/**
	 * Count of objects pending destruction. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

#ifdef CONFIG_MMU_NOTIFIER
	/**
	 * notifier_lock for mmu notifiers, memory may not be allocated
	 * while holding this lock.
	 */
	rwlock_t notifier_lock;
#endif

	/* shrinker accounting, also useful for userland debugging */
	u64 shrink_memory;
	u32 shrink_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
					 u64 context);

static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
	return i915_fence_context_timeout(i915, U64_MAX);
}
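/*
 * Illustrative sketch (not from the driver), assuming the returned value is
 * a jiffies-based timeout as its unsigned long type suggests; it can then be
 * fed straight into a fence wait, e.g.
 *
 *	long ret = dma_fence_wait_timeout(fence, false,
 *					  i915_fence_timeout(i915));
 */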
/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8

#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))

/* Amount of PSF GV points, BSpec precisely defines this */
#define I915_NUM_PSF_GV_POINTS 3

struct intel_vbt_data {
	/* bdb version */
	u16 version;

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int int_lvds_support:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	enum drm_panel_orientation orientation;

	bool override_afc_startup;
	u8 override_afc_startup_val;

	int crt_ddc_pin;

	struct list_head display_devices;
	struct list_head bdb_blocks;

	struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
	struct sdvo_device_mapping sdvo_mappings[2];
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity
	 * or scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
	u32 *initial_mmio;
	u8 *initial_cfg_space;
	struct list_head entry;
};

struct i915_selftest_stash {
	atomic_t counter;
	struct ida mock_region_instances;
};

/* intel_audio.c private */
struct intel_audio_private {
	/* Display internal audio functions */
	const struct intel_audio_funcs *funcs;

	/* hda/i915 audio component */
	struct i915_audio_component *component;
	bool component_registered;
	/* mutex for audio/video sync */
	struct mutex mutex;
	int power_refcount;
	u32 freq_cntrl;

	/* Used to save the pipe-to-encoder mapping for audio */
	struct intel_encoder *encoder_map[I915_MAX_PIPES];

	/* necessary resource sharing with HDMI LPE audio driver. */
	struct {
		struct platform_device *platdev;
		int irq;
	} lpe;
};
struct drm_i915_private {
	struct drm_device drm;

	/* FIXME: Device release actions should all be moved to drmm_ */
	bool do_release;

	/* i915 device parameters */
	struct i915_params params;

	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
	 */
	struct resource dsm_reserved;

	/*
	 * Stolen memory is segmented in hardware with different portions
	 * off-limits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */

	struct intel_uncore uncore;
	struct intel_uncore_mmio_debug mmio_debug;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_wopcm wopcm;

	struct intel_dmc dmc;

	struct intel_gmbus *gmbus[GMBUS_NUM_PINS];

	/**
	 * gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses.
	 */
	struct mutex gmbus_mutex;

	/**
	 * Base address of where the gmbus and gpio blocks are located (either
	 * on PCH or on SoC for platforms without PCH).
	 */
	u32 gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	u32 mipi_mmio_base;

	u32 pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;

	struct rb_root uabi_engines;
	unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];

	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* Sideband mailbox protection */
	struct mutex sb_lock;
	struct pm_qos_request sb_qos;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc *fbc[I915_MAX_FBCS];
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_cdclk_freq;

	unsigned int max_dotclk_freq;
	unsigned int hpll_freq;
	unsigned int fdi_pll_freq;
	unsigned int czclk_freq;

	struct {
		/* The current hardware cdclk configuration */
		struct intel_cdclk_config hw;

		/* cdclk, divider, and ratio table from bspec */
		const struct intel_cdclk_vals *table;

		struct intel_global_obj obj;
	} cdclk;

	struct {
		/* The current hardware dbuf configuration */
		u8 enabled_slices;

		struct intel_global_obj obj;
	} dbuf;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* ordered wq for modesets */
	struct workqueue_struct *modeset_wq;
	/* unbound hipri wq for page flips/plane updates */
	struct workqueue_struct *flip_wq;
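	/*
	 * Illustrative sketch (not from the driver): GEM work is queued on
	 * the dedicated wq rather than the system workqueue, e.g.
	 *
	 *	queue_work(i915->wq, &some_gem_work);
	 *
	 * so it can be flushed without entangling modeset locks
	 * ("some_gem_work" is a hypothetical work item).
	 */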
	/* pm private clock gating functions */
	const struct drm_i915_clock_gating_funcs *clock_gating_funcs;

	/* pm display functions */
	const struct drm_i915_wm_disp_funcs *wm_disp;

	/* irq display functions */
	const struct intel_hotplug_funcs *hotplug_funcs;

	/* fdi display functions */
	const struct intel_fdi_funcs *fdi_funcs;

	/* display pll funcs */
	const struct intel_dpll_funcs *dpll_funcs;

	/* Display functions */
	const struct drm_i915_display_funcs *display;

	/* Display internal color functions */
	const struct intel_color_funcs *color_funcs;

	/* Display CDCLK functions */
	const struct intel_cdclk_funcs *cdclk_funcs;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	/**
	 * dpll and cdclk state is protected by connection_mutex.
	 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms plls
	 * share registers.
	 */
	struct {
		struct mutex lock;

		int num_shared_dpll;
		struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
		const struct intel_dpll_mgr *mgr;

		struct {
			int nssc;
			int ssc;
		} ref_clks;
	} dpll;

	struct list_head global_obj_list;

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/*
	 * HTI (aka HDPORT) state read during initial hw readout. Most
	 * platforms don't have HTI, so this will just stay 0. Those that do
	 * will use this later to figure out which PLLs and PHYs are
	 * unavailable for driver usage.
	 */
	u32 hti_state;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	struct i915_power_domains power_domains;

	struct i915_gpu_error gpu_error;

	/* fbdev registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state *vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	u32 sagv_block_time_us;
	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		u16 pri_latency[5];
		/* sprite */
		u16 spr_latency[5];
		/* cursor */
		u16 cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		u16 skl_latency[8];

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct vlv_wm_values vlv;
			struct g4x_wm_values g4x;
		};

		u8 max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * crtc_state->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;
	} wm;

	struct dram_info {
		bool wm_lv_0_adjust_needed;
		u8 num_channels;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4,
			INTEL_DRAM_DDR5,
			INTEL_DRAM_LPDDR5,
		} type;
		u8 num_qgv_points;
		u8 num_psf_gv_points;
	} dram_info;

	struct intel_bw_info {
		/* for each QGV point */
		unsigned int deratedbw[I915_NUM_QGV_POINTS];
		/* for each PSF GV point */
		unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
		u8 num_qgv_points;
		u8 num_psf_gv_points;
		u8 num_planes;
	} max_bw[6];

	struct intel_global_obj bw_obj;

	struct intel_runtime_pm runtime_pm;

	struct i915_perf perf;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct intel_gt gt0;

	/*
	 * i915->gt[0] == &i915->gt0
	 */
#define I915_MAX_GT 4
	struct intel_gt *gt[I915_MAX_GT];

	struct kobject *sysfs_gt;

	struct {
		struct i915_gem_contexts {
			spinlock_t lock; /* locks list */
			struct list_head list;
		} contexts;

		/*
		 * We replace the local file with a global mapping as the
		 * backing storage for the mmap is on the device and not
		 * on the struct file, and we do not want to prolong the
		 * lifetime of the local fd. To minimise the number of
		 * anonymous inodes we create, we use a global singleton to
		 * share the global mapping.
		 */
		struct file *mmap_singleton;
	} gem;

	/* Window2 specifies time required to program DSB (Window2) in number of scan lines */
	u8 window2_delay;

	u8 pch_ssc_use;

	/* For i915gm/i945gm vblank irq workaround */
	u8 vblank_enabled;

	bool irq_enabled;

	union {
		/* perform PHY state sanity checks? */
		bool chv_phy_assert[2];

		/*
		 * DG2: Mask of PHYs that were not calibrated by the firmware
		 * and should not be used.
		 */
		u8 snps_phy_failed_calibration;
	};

	bool ipc_enabled;

	struct intel_audio_private audio;

	struct i915_pmu pmu;

	struct i915_drm_clients clients;

	struct i915_hdcp_comp_master *hdcp_master;
	bool hdcp_comp_added;

	/* Mutex to protect the above hdcp component related values. */
	struct mutex hdcp_comp_mutex;

	/* The TTM device structure. */
	struct ttm_device bdev;

	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	return &i915->gt0;
}

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, gt__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (gt__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)

#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define for_each_uabi_class_engine(engine__, class__, i915__) \
	for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
	     (engine__) && (engine__)->uabi_class == (class__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define I915_GTT_OFFSET_NONE ((u32)-1)
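/*
 * Illustrative sketch (not from the driver): walking the uabi engine list
 * only needs a cursor variable, e.g.
 *
 *	struct intel_engine_cs *engine;
 *
 *	for_each_uabi_engine(engine, i915)
 *		pr_info("%s\n", engine->name);
 *
 * Iteration follows rb-tree (uabi exposure) order, and rb_entry_safe()
 * turns the terminating NULL node into a NULL cursor.
 */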
/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
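/*
 * Worked example (illustrative, derived from the macros above): with 8 bits
 * per pipe, pipe A owns bits 0-7 and pipe B bits 8-15. So
 * INTEL_FRONTBUFFER(PIPE_B, PLANE_PRIMARY) is BIT(0 + 8 * 1) = BIT(8),
 * INTEL_FRONTBUFFER_OVERLAY(PIPE_B) is the top bit of that byte, BIT(15),
 * and INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) is GENMASK(15, 8) = 0xff00.
 */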
#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)

#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)

#define IP_VER(ver, rel)	((ver) << 8 | (rel))

#define GRAPHICS_VER(i915)		(INTEL_INFO(i915)->graphics.ver)
#define GRAPHICS_VER_FULL(i915)		IP_VER(INTEL_INFO(i915)->graphics.ver, \
					       INTEL_INFO(i915)->graphics.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))

#define MEDIA_VER(i915)			(INTEL_INFO(i915)->media.ver)
#define MEDIA_VER_FULL(i915)		IP_VER(INTEL_INFO(i915)->media.ver, \
					       INTEL_INFO(i915)->media.rel)
#define IS_MEDIA_VER(i915, from, until) \
	(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))

#define DISPLAY_VER(i915)	(INTEL_INFO(i915)->display.ver)
#define IS_DISPLAY_VER(i915, from, until) \
	(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))

#define INTEL_REVID(dev_priv)	(to_pci_dev((dev_priv)->drm.dev)->revision)

#define HAS_DSB(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dsb)

#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
#define INTEL_BASEDIE_STEP(__i915) (RUNTIME_INFO(__i915)->step.basedie_step)

#define IS_DISPLAY_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
	 INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))

#define IS_GRAPHICS_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
	 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))

#define IS_MEDIA_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
	 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))

#define IS_BASEDIE_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_BASEDIE_STEP(__i915) == STEP_NONE), \
	 INTEL_BASEDIE_STEP(__i915) >= (since) && INTEL_BASEDIE_STEP(__i915) < (until))

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
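/*
 * Worked example (illustrative, assuming u32 platform_mask words and
 * INTEL_SUBPLATFORM_BITS == 3 from intel_device_info.h): each word then
 * carries pbits = 32 - 3 = 29 platform bits, with the low 3 bits reserved
 * for the probed platform's subplatform. For a platform enum value p == 30,
 * __platform_mask_index() returns 30 / 29 == 1 and __platform_mask_bit()
 * returns 30 % 29 + 3 == 4. IS_SUBPLATFORM() shifts both the platform bit
 * and the subplatform bit up to the MSB, so a single AND of the two shifted
 * copies tests "platform set AND subplatform set" in one expression.
 */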
#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
#define IS_DGFX(dev_priv)	(INTEL_INFO(dev_priv)->is_dgfx)

#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_JSL_EHL(dev_priv)	(IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
				 IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
#define IS_TIGERLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
#define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
#define IS_DG2(dev_priv)	IS_PLATFORM(dev_priv, INTEL_DG2)
#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, INTEL_PONTEVECCHIO)
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_METEORLAKE)

#define IS_METEORLAKE_M(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_M)
#define IS_METEORLAKE_P(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_P)
#define IS_DG2_G10(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
#define IS_ADLS_RPLS(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
#define IS_ADLP_N(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
#define IS_ADLP_RPLP(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)

#define IS_CML_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CML_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CML_GT2(dev_priv)	(IS_COMETLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)

#define IS_ICL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define IS_TGL_UY(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)

#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))

#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))

#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
	(IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
	(IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
	(IS_TIGERLAKE(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
	(IS_TGL_UY(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
	((IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_RKL_DISPLAY_STEP(p, since, until) \
	(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_DG1_GRAPHICS_STEP(p, since, until) \
	(IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_DG1_DISPLAY_STEP(p, since, until) \
	(IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
	(IS_ALDERLAKE_S(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
	(IS_ALDERLAKE_S(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))
#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
	(IS_ALDERLAKE_P(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
	(IS_ALDERLAKE_P(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
	(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))

/*
 * DG2 hardware steppings are a bit unusual. The hardware design was forked to
 * create three variants (G10, G11, and G12) which each have distinct
 * workaround sets. The G11 and G12 forks of the DG2 design reset the GT
 * stepping back to "A0" for their first iterations, even though they're more
 * similar to a G10 B0 stepping and G10 C0 stepping respectively in terms of
 * functionality and workarounds. However the display stepping does not reset
 * in the same manner: a specific stepping like "B0" has a consistent meaning
 * regardless of whether it belongs to a G10, G11, or G12 DG2.
 *
 * TLDR: All GT workarounds and stepping-specific logic must be applied in
 * relation to a specific subplatform (G10/G11/G12), whereas display
 * workarounds and stepping-specific logic will be applied with a general
 * DG2-wide stepping number.
 */
#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \
	(IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_DG2_DISPLAY_STEP(__i915, since, until) \
	(IS_DG2(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_PVC_BD_STEP(__i915, since, until) \
	(IS_PONTEVECCHIO(__i915) && \
	 IS_BASEDIE_STEP(__i915, since, until))

#define IS_PVC_CT_STEP(__i915, since, until) \
	(IS_PONTEVECCHIO(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))
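/*
 * Illustrative sketch (not from the driver): per the IS_*_STEP() macros
 * above, "until" is exclusive, so a workaround limited to DG2-G10 A0/A1
 * hardware would be gated as, e.g.,
 *
 *	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
 *		apply_wa(i915); // hypothetical helper
 *
 * where the STEP_A0/STEP_B0 values come from intel_step.h.
 */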
#define IS_LP(dev_priv)		(INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv)	(GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv)	(GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))

#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)

#define ENGINE_INSTANCES_MASK(gt, first, count) ({		\
	unsigned int first__ = (first);				\
	unsigned int count__ = (count);				\
	((gt)->info.engine_mask &				\
	 GENMASK(first__ + count__ - 1, first__)) >> first__;	\
})
#define RCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
#define BCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
#define VDBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
#define CCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)

#define HAS_MEDIA_RATIO_MODE(dev_priv) (INTEL_INFO(dev_priv)->has_media_ratio_mode)

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
 * All later gens can run the final buffer from the ppgtt.
 */
#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)

#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
#define HAS_4TILE(dev_priv)	(INTEL_INFO(dev_priv)->has_4tile)
#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
#define HAS_WT(dev_priv)	HAS_EDRAM(dev_priv)

#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})
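/*
 * Illustrative sketch (not from the driver): HAS_PAGE_SIZES() checks that
 * *every* requested size is supported, so probing for 64K support on top of
 * the baseline 4K would look like, e.g.,
 *
 *	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_4K |
 *				 I915_GTT_PAGE_SIZE_64K))
 *		; // both page sizes may be used for this device
 *
 * with the I915_GTT_PAGE_SIZE_* flags coming from the gt headers.
 */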
#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))

#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)	\
	(IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)			\
	(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))

#define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \
					IS_GEMINILAKE(dev_priv) || \
					IS_KABYLAKE(dev_priv))

/*
 * With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
					 !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)

#define HAS_FW_BLC(dev_priv)	(DISPLAY_VER(dev_priv) > 2)
#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.fbc_mask != 0)
#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)

#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)
#define HAS_DP20(dev_priv)	(IS_DG2(dev_priv))

#define HAS_CDCLK_CRAWL(dev_priv)	 (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
	(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv)	 (DISPLAY_VER(dev_priv) >= 12)
#define HAS_TRANSCODER(dev_priv, trans)	 ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)

#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

#define HAS_RPS(dev_priv)	(INTEL_INFO(dev_priv)->has_rps)

#define HAS_DMC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dmc)

#define HAS_HECI_PXP(dev_priv) \
	(INTEL_INFO(dev_priv)->has_heci_pxp)

#define HAS_HECI_GSCFI(dev_priv) \
	(INTEL_INFO(dev_priv)->has_heci_gscfi)

#define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv))

#define HAS_MSO(i915)		(DISPLAY_VER(i915) >= 12)

#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)

/*
 * Set this flag when the platform requires 64K GTT page sizes or larger for
 * device local memory access.
 */
#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)

/*
 * Set this flag when the platform doesn't allow both 64k pages and 4k pages
 * in the same PT. This flag means we need to support a compact PT layout for
 * the ppGTT when using 64K GTT pages.
 */
#define NEEDS_COMPACT_PT(dev_priv) (INTEL_INFO(dev_priv)->needs_compact_pt)

#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)

#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)

/*
 * Platform has the dedicated compression control state for each lmem surface
 * stored in lmem to support the 3D and media compression formats.
 */
#define HAS_FLAT_CCS(dev_priv)   (INTEL_INFO(dev_priv)->has_flat_ccs)

#define HAS_GT_UC(dev_priv)	(INTEL_INFO(dev_priv)->has_gt_uc)

#define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)	(INTEL_INFO(dev_priv)->has_global_mocs)

#define HAS_PXP(dev_priv)  ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
			     INTEL_INFO(dev_priv)->has_pxp) && \
			    VDBOX_MASK(to_gt(dev_priv)))

#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))

#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))

#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)

#define HAS_VRR(i915)	(DISPLAY_VER(i915) >= 11)

#define HAS_ASYNC_FLIPS(i915)		(DISPLAY_VER(i915) >= 5)

/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
	(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)),		\
	 !(dev_priv)->params.disable_display &&				\
	 !intel_opregion_headless_sku(dev_priv))

#define HAS_GUC_DEPRIVILEGE(dev_priv) \
	(INTEL_INFO(dev_priv)->has_guc_deprivilege)

#define HAS_PERCTX_PREEMPT_CTRL(i915) \
	((GRAPHICS_VER(i915) >= 9) && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))

#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
					      IS_ALDERLAKE_S(dev_priv))

#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))

#define HAS_3D_PIPELINE(i915)	(INTEL_INFO(i915)->has_3d_pipeline)

#define HAS_ONE_EU_PER_FUSE_BIT(i915)	(INTEL_INFO(i915)->has_one_eu_per_fuse_bit)

/* i915_gem.c */
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * A single pass should suffice to release all the freed objects (along
	 * most call paths), but be a little more paranoid in that freeing the
	 * objects does take a small amount of time, during which the rcu
	 * callbacks could have added new objects into the freed list, and
	 * armed the work again.
	 */
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		flush_delayed_work(&i915->bdev.wq);
		rcu_barrier();
	}
}
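/*
 * Illustrative walk-through (not from the driver) of why one pass can be
 * insufficient: an RCU callback that was already queued when flush_work()
 * ran may release another object afterwards, bumping mm.free_count and
 * re-arming mm.free_work. Hence the loop: flush, wait out a grace period
 * with rcu_barrier(), and re-check the counter until it stays at zero.
 */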
1398 * 1399 */ 1400 int pass = 3; 1401 do { 1402 flush_workqueue(i915->wq); 1403 rcu_barrier(); 1404 i915_gem_drain_freed_objects(i915); 1405 } while (--pass); 1406 drain_workqueue(i915->wq); 1407 } 1408 1409 struct i915_vma * __must_check 1410 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, 1411 struct i915_gem_ww_ctx *ww, 1412 const struct i915_ggtt_view *view, 1413 u64 size, u64 alignment, u64 flags); 1414 1415 struct i915_vma * __must_check 1416 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 1417 const struct i915_ggtt_view *view, 1418 u64 size, u64 alignment, u64 flags); 1419 1420 int i915_gem_object_unbind(struct drm_i915_gem_object *obj, 1421 unsigned long flags); 1422 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) 1423 #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) 1424 #define I915_GEM_OBJECT_UNBIND_TEST BIT(2) 1425 #define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3) 1426 #define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4) 1427 1428 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); 1429 1430 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 1431 1432 int __must_check i915_gem_init(struct drm_i915_private *dev_priv); 1433 void i915_gem_driver_register(struct drm_i915_private *i915); 1434 void i915_gem_driver_unregister(struct drm_i915_private *i915); 1435 void i915_gem_driver_remove(struct drm_i915_private *dev_priv); 1436 void i915_gem_driver_release(struct drm_i915_private *dev_priv); 1437 1438 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); 1439 1440 /* intel_device_info.c */ 1441 static inline struct intel_device_info * 1442 mkwrite_device_info(struct drm_i915_private *dev_priv) 1443 { 1444 return (struct intel_device_info *)INTEL_INFO(dev_priv); 1445 } 1446 1447 static inline enum i915_map_type 1448 i915_coherent_map_type(struct drm_i915_private *i915, 1449 struct drm_i915_gem_object *obj, bool always_coherent) 1450 { 1451 if (i915_gem_object_is_lmem(obj)) 1452 return I915_MAP_WC; 1453 if (HAS_LLC(i915) || always_coherent) 1454 return I915_MAP_WB; 1455 else 1456 return I915_MAP_WC; 1457 } 1458 1459 #endif 1460