1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ 32 33 #include <uapi/drm/i915_drm.h> 34 35 #include <linux/pm_qos.h> 36 37 #include <drm/drm_connector.h> 38 #include <drm/ttm/ttm_device.h> 39 40 #include "display/intel_bios.h" 41 #include "display/intel_cdclk.h" 42 #include "display/intel_display.h" 43 #include "display/intel_display_power.h" 44 #include "display/intel_dmc.h" 45 #include "display/intel_dpll_mgr.h" 46 #include "display/intel_dsb.h" 47 #include "display/intel_fbc.h" 48 #include "display/intel_frontbuffer.h" 49 #include "display/intel_global_state.h" 50 #include "display/intel_gmbus.h" 51 #include "display/intel_opregion.h" 52 53 #include "gem/i915_gem_context_types.h" 54 #include "gem/i915_gem_lmem.h" 55 #include "gem/i915_gem_shrinker.h" 56 #include "gem/i915_gem_stolen.h" 57 58 #include "gt/intel_engine.h" 59 #include "gt/intel_gt_types.h" 60 #include "gt/intel_region_lmem.h" 61 #include "gt/intel_workarounds.h" 62 #include "gt/uc/intel_uc.h" 63 64 #include "i915_drm_client.h" 65 #include "i915_gem.h" 66 #include "i915_gpu_error.h" 67 #include "i915_params.h" 68 #include "i915_perf_types.h" 69 #include "i915_scheduler.h" 70 #include "i915_utils.h" 71 #include "intel_device_info.h" 72 #include "intel_memory_region.h" 73 #include "intel_pch.h" 74 #include "intel_pm_types.h" 75 #include "intel_runtime_pm.h" 76 #include "intel_step.h" 77 #include "intel_uncore.h" 78 #include "intel_wopcm.h" 79 80 struct dpll; 81 struct drm_i915_clock_gating_funcs; 82 struct drm_i915_gem_object; 83 struct drm_i915_private; 84 struct intel_atomic_state; 85 struct intel_audio_funcs; 86 struct intel_cdclk_config; 87 struct intel_cdclk_funcs; 88 struct intel_cdclk_state; 89 struct intel_cdclk_vals; 90 struct intel_color_funcs; 91 struct intel_connector; 92 struct intel_crtc; 93 struct intel_dp; 94 struct intel_dpll_funcs; 95 struct intel_encoder; 96 struct intel_fbdev; 97 struct intel_fdi_funcs; 98 struct intel_gmbus; 99 struct intel_hotplug_funcs; 100 struct intel_initial_plane_config; 101 struct intel_limit; 102 struct intel_overlay; 103 struct intel_overlay_error_state; 104 struct vlv_s0ix_state; 105 106 /* Threshold == 5 for long IRQs, 50 for short */ 107 #define HPD_STORM_DEFAULT_THRESHOLD 50 108 109 struct i915_hotplug { 110 struct delayed_work hotplug_work; 111 112 const u32 *hpd, *pch_hpd; 113 114 struct { 115 unsigned long last_jiffies; 116 int count; 117 enum { 118 HPD_ENABLED = 0, 119 HPD_DISABLED = 1, 120 HPD_MARK_DISABLED = 2 121 } state; 122 } stats[HPD_NUM_PINS]; 123 u32 event_bits; 124 u32 retry_bits; 125 struct delayed_work reenable_work; 126 127 u32 long_port_mask; 128 u32 short_port_mask; 129 struct work_struct dig_port_work; 130 131 struct work_struct poll_init_work; 132 bool poll_enabled; 133 134 unsigned int hpd_storm_threshold; 135 /* Whether or not to count short HPD IRQs in HPD storms */ 136 u8 hpd_short_storm_enabled; 137 138 /* 139 * if we get a HPD irq from DP and a HPD irq from non-DP 140 * the non-DP HPD could block the workqueue on a mode config 141 * mutex getting, that userspace may have taken. However 142 * userspace is waiting on the DP workqueue to run which is 143 * blocked behind the non-DP one. 144 */ 145 struct workqueue_struct *dp_wq; 146 }; 147 148 #define I915_GEM_GPU_DOMAINS \ 149 (I915_GEM_DOMAIN_RENDER | \ 150 I915_GEM_DOMAIN_SAMPLER | \ 151 I915_GEM_DOMAIN_COMMAND | \ 152 I915_GEM_DOMAIN_INSTRUCTION | \ 153 I915_GEM_DOMAIN_VERTEX) 154 155 struct sdvo_device_mapping { 156 u8 initialized; 157 u8 dvo_port; 158 u8 slave_addr; 159 u8 dvo_wiring; 160 u8 i2c_pin; 161 u8 ddc_pin; 162 }; 163 164 /* functions used for watermark calcs for display. */ 165 struct drm_i915_wm_disp_funcs { 166 /* update_wm is for legacy wm management */ 167 void (*update_wm)(struct drm_i915_private *dev_priv); 168 int (*compute_pipe_wm)(struct intel_atomic_state *state, 169 struct intel_crtc *crtc); 170 int (*compute_intermediate_wm)(struct intel_atomic_state *state, 171 struct intel_crtc *crtc); 172 void (*initial_watermarks)(struct intel_atomic_state *state, 173 struct intel_crtc *crtc); 174 void (*atomic_update_watermarks)(struct intel_atomic_state *state, 175 struct intel_crtc *crtc); 176 void (*optimize_watermarks)(struct intel_atomic_state *state, 177 struct intel_crtc *crtc); 178 int (*compute_global_watermarks)(struct intel_atomic_state *state); 179 }; 180 181 struct drm_i915_display_funcs { 182 /* Returns the active state of the crtc, and if the crtc is active, 183 * fills out the pipe-config with the hw state. */ 184 bool (*get_pipe_config)(struct intel_crtc *, 185 struct intel_crtc_state *); 186 void (*get_initial_plane_config)(struct intel_crtc *, 187 struct intel_initial_plane_config *); 188 void (*crtc_enable)(struct intel_atomic_state *state, 189 struct intel_crtc *crtc); 190 void (*crtc_disable)(struct intel_atomic_state *state, 191 struct intel_crtc *crtc); 192 void (*commit_modeset_enables)(struct intel_atomic_state *state); 193 }; 194 195 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ 196 197 enum drrs_type { 198 DRRS_TYPE_NONE, 199 DRRS_TYPE_STATIC, 200 DRRS_TYPE_SEAMLESS, 201 }; 202 203 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 204 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 205 #define QUIRK_BACKLIGHT_PRESENT (1<<3) 206 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 207 #define QUIRK_INCREASE_T12_DELAY (1<<6) 208 #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) 209 #define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8) 210 211 struct i915_suspend_saved_registers { 212 u32 saveDSPARB; 213 u32 saveSWF0[16]; 214 u32 saveSWF1[16]; 215 u32 saveSWF3[3]; 216 u16 saveGCDGMBUS; 217 }; 218 219 #define MAX_L3_SLICES 2 220 struct intel_l3_parity { 221 u32 *remap_info[MAX_L3_SLICES]; 222 struct work_struct error_work; 223 int which_slice; 224 }; 225 226 struct i915_gem_mm { 227 /* 228 * Shortcut for the stolen region. This points to either 229 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or 230 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't 231 * support stolen. 232 */ 233 struct intel_memory_region *stolen_region; 234 /** Memory allocator for GTT stolen memory */ 235 struct drm_mm stolen; 236 /** Protects the usage of the GTT stolen memory allocator. This is 237 * always the inner lock when overlapping with struct_mutex. */ 238 struct mutex stolen_lock; 239 240 /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */ 241 spinlock_t obj_lock; 242 243 /** 244 * List of objects which are purgeable. 245 */ 246 struct list_head purge_list; 247 248 /** 249 * List of objects which have allocated pages and are shrinkable. 250 */ 251 struct list_head shrink_list; 252 253 /** 254 * List of objects which are pending destruction. 255 */ 256 struct llist_head free_list; 257 struct delayed_work free_work; 258 /** 259 * Count of objects pending destructions. Used to skip needlessly 260 * waiting on an RCU barrier if no objects are waiting to be freed. 261 */ 262 atomic_t free_count; 263 264 /** 265 * tmpfs instance used for shmem backed objects 266 */ 267 struct vfsmount *gemfs; 268 269 struct intel_memory_region *regions[INTEL_REGION_UNKNOWN]; 270 271 struct notifier_block oom_notifier; 272 struct notifier_block vmap_notifier; 273 struct shrinker shrinker; 274 275 #ifdef CONFIG_MMU_NOTIFIER 276 /** 277 * notifier_lock for mmu notifiers, memory may not be allocated 278 * while holding this lock. 279 */ 280 rwlock_t notifier_lock; 281 #endif 282 283 /* shrinker accounting, also useful for userland debugging */ 284 u64 shrink_memory; 285 u32 shrink_count; 286 }; 287 288 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */ 289 290 unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915, 291 u64 context); 292 293 static inline unsigned long 294 i915_fence_timeout(const struct drm_i915_private *i915) 295 { 296 return i915_fence_context_timeout(i915, U64_MAX); 297 } 298 299 /* Amount of SAGV/QGV points, BSpec precisely defines this */ 300 #define I915_NUM_QGV_POINTS 8 301 302 #define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915)) 303 304 /* Amount of PSF GV points, BSpec precisely defines this */ 305 #define I915_NUM_PSF_GV_POINTS 3 306 307 struct intel_vbt_data { 308 /* bdb version */ 309 u16 version; 310 311 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 312 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 313 314 /* Feature bits */ 315 unsigned int int_tv_support:1; 316 unsigned int lvds_dither:1; 317 unsigned int int_crt_support:1; 318 unsigned int lvds_use_ssc:1; 319 unsigned int int_lvds_support:1; 320 unsigned int display_clock_mode:1; 321 unsigned int fdi_rx_polarity_inverted:1; 322 unsigned int panel_type:4; 323 int lvds_ssc_freq; 324 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 325 enum drm_panel_orientation orientation; 326 327 bool override_afc_startup; 328 u8 override_afc_startup_val; 329 330 enum drrs_type drrs_type; 331 332 struct { 333 int rate; 334 int lanes; 335 int preemphasis; 336 int vswing; 337 int bpp; 338 struct edp_power_seq pps; 339 u8 drrs_msa_timing_delay; 340 bool low_vswing; 341 bool initialized; 342 bool hobl; 343 } edp; 344 345 struct { 346 bool enable; 347 bool full_link; 348 bool require_aux_wakeup; 349 int idle_frames; 350 int tp1_wakeup_time_us; 351 int tp2_tp3_wakeup_time_us; 352 int psr2_tp2_tp3_wakeup_time_us; 353 } psr; 354 355 struct { 356 u16 pwm_freq_hz; 357 u16 brightness_precision_bits; 358 bool present; 359 bool active_low_pwm; 360 u8 min_brightness; /* min_brightness/255 of max */ 361 u8 controller; /* brightness controller number */ 362 enum intel_backlight_type type; 363 } backlight; 364 365 /* MIPI DSI */ 366 struct { 367 u16 panel_id; 368 struct mipi_config *config; 369 struct mipi_pps_data *pps; 370 u16 bl_ports; 371 u16 cabc_ports; 372 u8 seq_version; 373 u32 size; 374 u8 *data; 375 const u8 *sequence[MIPI_SEQ_MAX]; 376 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ 377 enum drm_panel_orientation orientation; 378 } dsi; 379 380 int crt_ddc_pin; 381 382 struct list_head display_devices; 383 struct list_head bdb_blocks; 384 385 struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */ 386 struct sdvo_device_mapping sdvo_mappings[2]; 387 }; 388 389 struct i915_frontbuffer_tracking { 390 spinlock_t lock; 391 392 /* 393 * Tracking bits for delayed frontbuffer flushing du to gpu activity or 394 * scheduled flips. 395 */ 396 unsigned busy_bits; 397 unsigned flip_bits; 398 }; 399 400 struct i915_virtual_gpu { 401 struct mutex lock; /* serialises sending of g2v_notify command pkts */ 402 bool active; 403 u32 caps; 404 }; 405 406 struct i915_selftest_stash { 407 atomic_t counter; 408 struct ida mock_region_instances; 409 }; 410 411 /* intel_audio.c private */ 412 struct intel_audio_private { 413 /* Display internal audio functions */ 414 const struct intel_audio_funcs *funcs; 415 416 /* hda/i915 audio component */ 417 struct i915_audio_component *component; 418 bool component_registered; 419 /* mutex for audio/video sync */ 420 struct mutex mutex; 421 int power_refcount; 422 u32 freq_cntrl; 423 424 /* Used to save the pipe-to-encoder mapping for audio */ 425 struct intel_encoder *encoder_map[I915_MAX_PIPES]; 426 427 /* necessary resource sharing with HDMI LPE audio driver. */ 428 struct { 429 struct platform_device *platdev; 430 int irq; 431 } lpe; 432 }; 433 434 struct drm_i915_private { 435 struct drm_device drm; 436 437 /* FIXME: Device release actions should all be moved to drmm_ */ 438 bool do_release; 439 440 /* i915 device parameters */ 441 struct i915_params params; 442 443 const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ 444 struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */ 445 struct intel_driver_caps caps; 446 447 /** 448 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and 449 * end of stolen which we can optionally use to create GEM objects 450 * backed by stolen memory. Note that stolen_usable_size tells us 451 * exactly how much of this we are actually allowed to use, given that 452 * some portion of it is in fact reserved for use by hardware functions. 453 */ 454 struct resource dsm; 455 /** 456 * Reseved portion of Data Stolen Memory 457 */ 458 struct resource dsm_reserved; 459 460 /* 461 * Stolen memory is segmented in hardware with different portions 462 * offlimits to certain functions. 463 * 464 * The drm_mm is initialised to the total accessible range, as found 465 * from the PCI config. On Broadwell+, this is further restricted to 466 * avoid the first page! The upper end of stolen memory is reserved for 467 * hardware functions and similarly removed from the accessible range. 468 */ 469 resource_size_t stolen_usable_size; /* Total size minus reserved ranges */ 470 471 struct intel_uncore uncore; 472 struct intel_uncore_mmio_debug mmio_debug; 473 474 struct i915_virtual_gpu vgpu; 475 476 struct intel_gvt *gvt; 477 478 struct intel_wopcm wopcm; 479 480 struct intel_dmc dmc; 481 482 struct intel_gmbus *gmbus[GMBUS_NUM_PINS]; 483 484 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 485 * controller on different i2c buses. */ 486 struct mutex gmbus_mutex; 487 488 /** 489 * Base address of where the gmbus and gpio blocks are located (either 490 * on PCH or on SoC for platforms without PCH). 491 */ 492 u32 gpio_mmio_base; 493 494 /* MMIO base address for MIPI regs */ 495 u32 mipi_mmio_base; 496 497 u32 pps_mmio_base; 498 499 wait_queue_head_t gmbus_wait_queue; 500 501 struct pci_dev *bridge_dev; 502 503 struct rb_root uabi_engines; 504 unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1]; 505 506 struct resource mch_res; 507 508 /* protects the irq masks */ 509 spinlock_t irq_lock; 510 511 bool display_irqs_enabled; 512 513 /* Sideband mailbox protection */ 514 struct mutex sb_lock; 515 struct pm_qos_request sb_qos; 516 517 /** Cached value of IMR to avoid reads in updating the bitfield */ 518 union { 519 u32 irq_mask; 520 u32 de_irq_mask[I915_MAX_PIPES]; 521 }; 522 u32 pipestat_irq_mask[I915_MAX_PIPES]; 523 524 struct i915_hotplug hotplug; 525 struct intel_fbc *fbc[I915_MAX_FBCS]; 526 struct intel_opregion opregion; 527 struct intel_vbt_data vbt; 528 529 bool preserve_bios_swizzle; 530 531 /* overlay */ 532 struct intel_overlay *overlay; 533 534 /* backlight registers and fields in struct intel_panel */ 535 struct mutex backlight_lock; 536 537 /* protects panel power sequencer state */ 538 struct mutex pps_mutex; 539 540 unsigned int fsb_freq, mem_freq, is_ddr3; 541 unsigned int skl_preferred_vco_freq; 542 unsigned int max_cdclk_freq; 543 544 unsigned int max_dotclk_freq; 545 unsigned int hpll_freq; 546 unsigned int fdi_pll_freq; 547 unsigned int czclk_freq; 548 549 struct { 550 /* The current hardware cdclk configuration */ 551 struct intel_cdclk_config hw; 552 553 /* cdclk, divider, and ratio table from bspec */ 554 const struct intel_cdclk_vals *table; 555 556 struct intel_global_obj obj; 557 } cdclk; 558 559 struct { 560 /* The current hardware dbuf configuration */ 561 u8 enabled_slices; 562 563 struct intel_global_obj obj; 564 } dbuf; 565 566 /** 567 * wq - Driver workqueue for GEM. 568 * 569 * NOTE: Work items scheduled here are not allowed to grab any modeset 570 * locks, for otherwise the flushing done in the pageflip code will 571 * result in deadlocks. 572 */ 573 struct workqueue_struct *wq; 574 575 /* ordered wq for modesets */ 576 struct workqueue_struct *modeset_wq; 577 /* unbound hipri wq for page flips/plane updates */ 578 struct workqueue_struct *flip_wq; 579 580 /* pm private clock gating functions */ 581 const struct drm_i915_clock_gating_funcs *clock_gating_funcs; 582 583 /* pm display functions */ 584 const struct drm_i915_wm_disp_funcs *wm_disp; 585 586 /* irq display functions */ 587 const struct intel_hotplug_funcs *hotplug_funcs; 588 589 /* fdi display functions */ 590 const struct intel_fdi_funcs *fdi_funcs; 591 592 /* display pll funcs */ 593 const struct intel_dpll_funcs *dpll_funcs; 594 595 /* Display functions */ 596 const struct drm_i915_display_funcs *display; 597 598 /* Display internal color functions */ 599 const struct intel_color_funcs *color_funcs; 600 601 /* Display CDCLK functions */ 602 const struct intel_cdclk_funcs *cdclk_funcs; 603 604 /* PCH chipset type */ 605 enum intel_pch pch_type; 606 unsigned short pch_id; 607 608 unsigned long quirks; 609 610 struct drm_atomic_state *modeset_restore_state; 611 struct drm_modeset_acquire_ctx reset_ctx; 612 613 struct i915_gem_mm mm; 614 615 /* Kernel Modesetting */ 616 617 /** 618 * dpll and cdclk state is protected by connection_mutex 619 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll. 620 * Must be global rather than per dpll, because on some platforms plls 621 * share registers. 622 */ 623 struct { 624 struct mutex lock; 625 626 int num_shared_dpll; 627 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 628 const struct intel_dpll_mgr *mgr; 629 630 struct { 631 int nssc; 632 int ssc; 633 } ref_clks; 634 } dpll; 635 636 struct list_head global_obj_list; 637 638 struct i915_frontbuffer_tracking fb_tracking; 639 640 struct intel_atomic_helper { 641 struct llist_head free_list; 642 struct work_struct free_work; 643 } atomic_helper; 644 645 bool mchbar_need_disable; 646 647 struct intel_l3_parity l3_parity; 648 649 /* 650 * HTI (aka HDPORT) state read during initial hw readout. Most 651 * platforms don't have HTI, so this will just stay 0. Those that do 652 * will use this later to figure out which PLLs and PHYs are unavailable 653 * for driver usage. 654 */ 655 u32 hti_state; 656 657 /* 658 * edram size in MB. 659 * Cannot be determined by PCIID. You must always read a register. 660 */ 661 u32 edram_size_mb; 662 663 struct i915_power_domains power_domains; 664 665 struct i915_gpu_error gpu_error; 666 667 /* list of fbdev register on this device */ 668 struct intel_fbdev *fbdev; 669 struct work_struct fbdev_suspend_work; 670 671 struct drm_property *broadcast_rgb_property; 672 struct drm_property *force_audio_property; 673 674 u32 fdi_rx_config; 675 676 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ 677 u32 chv_phy_control; 678 /* 679 * Shadows for CHV DPLL_MD regs to keep the state 680 * checker somewhat working in the presence hardware 681 * crappiness (can't read out DPLL_MD for pipes B & C). 682 */ 683 u32 chv_dpll_md[I915_MAX_PIPES]; 684 u32 bxt_phy_grc; 685 686 u32 suspend_count; 687 struct i915_suspend_saved_registers regfile; 688 struct vlv_s0ix_state *vlv_s0ix_state; 689 690 enum { 691 I915_SAGV_UNKNOWN = 0, 692 I915_SAGV_DISABLED, 693 I915_SAGV_ENABLED, 694 I915_SAGV_NOT_CONTROLLED 695 } sagv_status; 696 697 u32 sagv_block_time_us; 698 699 struct { 700 /* 701 * Raw watermark latency values: 702 * in 0.1us units for WM0, 703 * in 0.5us units for WM1+. 704 */ 705 /* primary */ 706 u16 pri_latency[5]; 707 /* sprite */ 708 u16 spr_latency[5]; 709 /* cursor */ 710 u16 cur_latency[5]; 711 /* 712 * Raw watermark memory latency values 713 * for SKL for all 8 levels 714 * in 1us units. 715 */ 716 u16 skl_latency[8]; 717 718 /* current hardware state */ 719 union { 720 struct ilk_wm_values hw; 721 struct vlv_wm_values vlv; 722 struct g4x_wm_values g4x; 723 }; 724 725 u8 max_level; 726 727 /* 728 * Should be held around atomic WM register writing; also 729 * protects * intel_crtc->wm.active and 730 * crtc_state->wm.need_postvbl_update. 731 */ 732 struct mutex wm_mutex; 733 } wm; 734 735 struct dram_info { 736 bool wm_lv_0_adjust_needed; 737 u8 num_channels; 738 bool symmetric_memory; 739 enum intel_dram_type { 740 INTEL_DRAM_UNKNOWN, 741 INTEL_DRAM_DDR3, 742 INTEL_DRAM_DDR4, 743 INTEL_DRAM_LPDDR3, 744 INTEL_DRAM_LPDDR4, 745 INTEL_DRAM_DDR5, 746 INTEL_DRAM_LPDDR5, 747 } type; 748 u8 num_qgv_points; 749 u8 num_psf_gv_points; 750 } dram_info; 751 752 struct intel_bw_info { 753 /* for each QGV point */ 754 unsigned int deratedbw[I915_NUM_QGV_POINTS]; 755 /* for each PSF GV point */ 756 unsigned int psf_bw[I915_NUM_PSF_GV_POINTS]; 757 u8 num_qgv_points; 758 u8 num_psf_gv_points; 759 u8 num_planes; 760 } max_bw[6]; 761 762 struct intel_global_obj bw_obj; 763 764 struct intel_runtime_pm runtime_pm; 765 766 struct i915_perf perf; 767 768 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 769 struct intel_gt gt0; 770 771 /* 772 * i915->gt[0] == &i915->gt0 773 */ 774 #define I915_MAX_GT 4 775 struct intel_gt *gt[I915_MAX_GT]; 776 777 struct kobject *sysfs_gt; 778 779 struct { 780 struct i915_gem_contexts { 781 spinlock_t lock; /* locks list */ 782 struct list_head list; 783 } contexts; 784 785 /* 786 * We replace the local file with a global mappings as the 787 * backing storage for the mmap is on the device and not 788 * on the struct file, and we do not want to prolong the 789 * lifetime of the local fd. To minimise the number of 790 * anonymous inodes we create, we use a global singleton to 791 * share the global mapping. 792 */ 793 struct file *mmap_singleton; 794 } gem; 795 796 /* Window2 specifies time required to program DSB (Window2) in number of scan lines */ 797 u8 window2_delay; 798 799 u8 pch_ssc_use; 800 801 /* For i915gm/i945gm vblank irq workaround */ 802 u8 vblank_enabled; 803 804 bool irq_enabled; 805 806 union { 807 /* perform PHY state sanity checks? */ 808 bool chv_phy_assert[2]; 809 810 /* 811 * DG2: Mask of PHYs that were not calibrated by the firmware 812 * and should not be used. 813 */ 814 u8 snps_phy_failed_calibration; 815 }; 816 817 bool ipc_enabled; 818 819 struct intel_audio_private audio; 820 821 struct i915_pmu pmu; 822 823 struct i915_drm_clients clients; 824 825 struct i915_hdcp_comp_master *hdcp_master; 826 bool hdcp_comp_added; 827 828 /* Mutex to protect the above hdcp component related values. */ 829 struct mutex hdcp_comp_mutex; 830 831 /* The TTM device structure. */ 832 struct ttm_device bdev; 833 834 I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;) 835 836 /* 837 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 838 * will be rejected. Instead look for a better place. 839 */ 840 }; 841 842 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 843 { 844 return container_of(dev, struct drm_i915_private, drm); 845 } 846 847 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 848 { 849 return dev_get_drvdata(kdev); 850 } 851 852 static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) 853 { 854 return pci_get_drvdata(pdev); 855 } 856 857 static inline struct intel_gt *to_gt(struct drm_i915_private *i915) 858 { 859 return &i915->gt0; 860 } 861 862 /* Simple iterator over all initialised engines */ 863 #define for_each_engine(engine__, dev_priv__, id__) \ 864 for ((id__) = 0; \ 865 (id__) < I915_NUM_ENGINES; \ 866 (id__)++) \ 867 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 868 869 /* Iterator over subset of engines selected by mask */ 870 #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ 871 for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \ 872 (tmp__) ? \ 873 ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ 874 0;) 875 876 #define rb_to_uabi_engine(rb) \ 877 rb_entry_safe(rb, struct intel_engine_cs, uabi_node) 878 879 #define for_each_uabi_engine(engine__, i915__) \ 880 for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\ 881 (engine__); \ 882 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) 883 884 #define for_each_uabi_class_engine(engine__, class__, i915__) \ 885 for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \ 886 (engine__) && (engine__)->uabi_class == (class__); \ 887 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) 888 889 #define I915_GTT_OFFSET_NONE ((u32)-1) 890 891 /* 892 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 893 * considered to be the frontbuffer for the given plane interface-wise. This 894 * doesn't mean that the hw necessarily already scans it out, but that any 895 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 896 * 897 * We have one bit per pipe and per scanout plane type. 898 */ 899 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 900 #define INTEL_FRONTBUFFER(pipe, plane_id) ({ \ 901 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \ 902 BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \ 903 BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \ 904 }) 905 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 906 BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 907 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 908 GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ 909 INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 910 911 #define INTEL_INFO(dev_priv) (&(dev_priv)->__info) 912 #define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime) 913 #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) 914 915 #define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id) 916 917 #define IP_VER(ver, rel) ((ver) << 8 | (rel)) 918 919 #define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver) 920 #define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \ 921 INTEL_INFO(i915)->graphics.rel) 922 #define IS_GRAPHICS_VER(i915, from, until) \ 923 (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until)) 924 925 #define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver) 926 #define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.ver, \ 927 INTEL_INFO(i915)->media.rel) 928 #define IS_MEDIA_VER(i915, from, until) \ 929 (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until)) 930 931 #define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.ver) 932 #define IS_DISPLAY_VER(i915, from, until) \ 933 (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) 934 935 #define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision) 936 937 #define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb) 938 939 #define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step) 940 #define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step) 941 #define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step) 942 943 #define IS_DISPLAY_STEP(__i915, since, until) \ 944 (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \ 945 INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until)) 946 947 #define IS_GRAPHICS_STEP(__i915, since, until) \ 948 (drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \ 949 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until)) 950 951 #define IS_MEDIA_STEP(__i915, since, until) \ 952 (drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \ 953 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until)) 954 955 static __always_inline unsigned int 956 __platform_mask_index(const struct intel_runtime_info *info, 957 enum intel_platform p) 958 { 959 const unsigned int pbits = 960 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; 961 962 /* Expand the platform_mask array if this fails. */ 963 BUILD_BUG_ON(INTEL_MAX_PLATFORMS > 964 pbits * ARRAY_SIZE(info->platform_mask)); 965 966 return p / pbits; 967 } 968 969 static __always_inline unsigned int 970 __platform_mask_bit(const struct intel_runtime_info *info, 971 enum intel_platform p) 972 { 973 const unsigned int pbits = 974 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; 975 976 return p % pbits + INTEL_SUBPLATFORM_BITS; 977 } 978 979 static inline u32 980 intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) 981 { 982 const unsigned int pi = __platform_mask_index(info, p); 983 984 return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK; 985 } 986 987 static __always_inline bool 988 IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p) 989 { 990 const struct intel_runtime_info *info = RUNTIME_INFO(i915); 991 const unsigned int pi = __platform_mask_index(info, p); 992 const unsigned int pb = __platform_mask_bit(info, p); 993 994 BUILD_BUG_ON(!__builtin_constant_p(p)); 995 996 return info->platform_mask[pi] & BIT(pb); 997 } 998 999 static __always_inline bool 1000 IS_SUBPLATFORM(const struct drm_i915_private *i915, 1001 enum intel_platform p, unsigned int s) 1002 { 1003 const struct intel_runtime_info *info = RUNTIME_INFO(i915); 1004 const unsigned int pi = __platform_mask_index(info, p); 1005 const unsigned int pb = __platform_mask_bit(info, p); 1006 const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1; 1007 const u32 mask = info->platform_mask[pi]; 1008 1009 BUILD_BUG_ON(!__builtin_constant_p(p)); 1010 BUILD_BUG_ON(!__builtin_constant_p(s)); 1011 BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS); 1012 1013 /* Shift and test on the MSB position so sign flag can be used. */ 1014 return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb); 1015 } 1016 1017 #define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile) 1018 #define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx) 1019 1020 #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830) 1021 #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G) 1022 #define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X) 1023 #define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G) 1024 #define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G) 1025 #define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM) 1026 #define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G) 1027 #define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM) 1028 #define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G) 1029 #define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM) 1030 #define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45) 1031 #define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45) 1032 #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) 1033 #define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW) 1034 #define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33) 1035 #define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE) 1036 #define IS_IRONLAKE_M(dev_priv) \ 1037 (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv)) 1038 #define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE) 1039 #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE) 1040 #define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \ 1041 INTEL_INFO(dev_priv)->gt == 1) 1042 #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) 1043 #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW) 1044 #define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL) 1045 #define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL) 1046 #define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE) 1047 #define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON) 1048 #define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE) 1049 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) 1050 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) 1051 #define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE) 1052 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) 1053 #define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \ 1054 IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) 1055 #define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) 1056 #define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE) 1057 #define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1) 1058 #define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S) 1059 #define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P) 1060 #define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV) 1061 #define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG2) 1062 #define IS_DG2_G10(dev_priv) \ 1063 IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10) 1064 #define IS_DG2_G11(dev_priv) \ 1065 IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11) 1066 #define IS_DG2_G12(dev_priv) \ 1067 IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12) 1068 #define IS_ADLS_RPLS(dev_priv) \ 1069 IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S) 1070 #define IS_ADLP_N(dev_priv) \ 1071 IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N) 1072 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 1073 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 1074 #define IS_BDW_ULT(dev_priv) \ 1075 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT) 1076 #define IS_BDW_ULX(dev_priv) \ 1077 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX) 1078 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 1079 INTEL_INFO(dev_priv)->gt == 3) 1080 #define IS_HSW_ULT(dev_priv) \ 1081 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT) 1082 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 1083 INTEL_INFO(dev_priv)->gt == 3) 1084 #define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \ 1085 INTEL_INFO(dev_priv)->gt == 1) 1086 /* ULX machines are also considered ULT. */ 1087 #define IS_HSW_ULX(dev_priv) \ 1088 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX) 1089 #define IS_SKL_ULT(dev_priv) \ 1090 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT) 1091 #define IS_SKL_ULX(dev_priv) \ 1092 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX) 1093 #define IS_KBL_ULT(dev_priv) \ 1094 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT) 1095 #define IS_KBL_ULX(dev_priv) \ 1096 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX) 1097 #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ 1098 INTEL_INFO(dev_priv)->gt == 2) 1099 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 1100 INTEL_INFO(dev_priv)->gt == 3) 1101 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 1102 INTEL_INFO(dev_priv)->gt == 4) 1103 #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \ 1104 INTEL_INFO(dev_priv)->gt == 2) 1105 #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \ 1106 INTEL_INFO(dev_priv)->gt == 3) 1107 #define IS_CFL_ULT(dev_priv) \ 1108 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT) 1109 #define IS_CFL_ULX(dev_priv) \ 1110 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX) 1111 #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 1112 INTEL_INFO(dev_priv)->gt == 2) 1113 #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 1114 INTEL_INFO(dev_priv)->gt == 3) 1115 1116 #define IS_CML_ULT(dev_priv) \ 1117 IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT) 1118 #define IS_CML_ULX(dev_priv) \ 1119 IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX) 1120 #define IS_CML_GT2(dev_priv) (IS_COMETLAKE(dev_priv) && \ 1121 INTEL_INFO(dev_priv)->gt == 2) 1122 1123 #define IS_ICL_WITH_PORT_F(dev_priv) \ 1124 IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF) 1125 1126 #define IS_TGL_UY(dev_priv) \ 1127 IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY) 1128 1129 #define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until)) 1130 1131 #define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \ 1132 (IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until)) 1133 #define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \ 1134 (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until)) 1135 1136 #define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \ 1137 (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until)) 1138 #define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \ 1139 (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until)) 1140 1141 #define IS_TGL_DISPLAY_STEP(__i915, since, until) \ 1142 (IS_TIGERLAKE(__i915) && \ 1143 IS_DISPLAY_STEP(__i915, since, until)) 1144 1145 #define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \ 1146 (IS_TGL_UY(__i915) && \ 1147 IS_GRAPHICS_STEP(__i915, since, until)) 1148 1149 #define IS_TGL_GRAPHICS_STEP(__i915, since, until) \ 1150 (IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \ 1151 IS_GRAPHICS_STEP(__i915, since, until)) 1152 1153 #define IS_RKL_DISPLAY_STEP(p, since, until) \ 1154 (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until)) 1155 1156 #define IS_DG1_GRAPHICS_STEP(p, since, until) \ 1157 (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until)) 1158 #define IS_DG1_DISPLAY_STEP(p, since, until) \ 1159 (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until)) 1160 1161 #define IS_ADLS_DISPLAY_STEP(__i915, since, until) \ 1162 (IS_ALDERLAKE_S(__i915) && \ 1163 IS_DISPLAY_STEP(__i915, since, until)) 1164 1165 #define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \ 1166 (IS_ALDERLAKE_S(__i915) && \ 1167 IS_GRAPHICS_STEP(__i915, since, until)) 1168 1169 #define IS_ADLP_DISPLAY_STEP(__i915, since, until) \ 1170 (IS_ALDERLAKE_P(__i915) && \ 1171 IS_DISPLAY_STEP(__i915, since, until)) 1172 1173 #define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \ 1174 (IS_ALDERLAKE_P(__i915) && \ 1175 IS_GRAPHICS_STEP(__i915, since, until)) 1176 1177 #define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \ 1178 (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until)) 1179 1180 /* 1181 * DG2 hardware steppings are a bit unusual. The hardware design was forked to 1182 * create three variants (G10, G11, and G12) which each have distinct 1183 * workaround sets. The G11 and G12 forks of the DG2 design reset the GT 1184 * stepping back to "A0" for their first iterations, even though they're more 1185 * similar to a G10 B0 stepping and G10 C0 stepping respectively in terms of 1186 * functionality and workarounds. However the display stepping does not reset 1187 * in the same manner --- a specific stepping like "B0" has a consistent 1188 * meaning regardless of whether it belongs to a G10, G11, or G12 DG2. 1189 * 1190 * TLDR: All GT workarounds and stepping-specific logic must be applied in 1191 * relation to a specific subplatform (G10/G11/G12), whereas display workarounds 1192 * and stepping-specific logic will be applied with a general DG2-wide stepping 1193 * number. 1194 */ 1195 #define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \ 1196 (IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \ 1197 IS_GRAPHICS_STEP(__i915, since, until)) 1198 1199 #define IS_DG2_DISPLAY_STEP(__i915, since, until) \ 1200 (IS_DG2(__i915) && \ 1201 IS_DISPLAY_STEP(__i915, since, until)) 1202 1203 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) 1204 #define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv)) 1205 #define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv)) 1206 1207 #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) 1208 #define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id) 1209 1210 #define ENGINE_INSTANCES_MASK(gt, first, count) ({ \ 1211 unsigned int first__ = (first); \ 1212 unsigned int count__ = (count); \ 1213 ((gt)->info.engine_mask & \ 1214 GENMASK(first__ + count__ - 1, first__)) >> first__; \ 1215 }) 1216 #define RCS_MASK(gt) \ 1217 ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS) 1218 #define VDBOX_MASK(gt) \ 1219 ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS) 1220 #define VEBOX_MASK(gt) \ 1221 ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS) 1222 #define CCS_MASK(gt) \ 1223 ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS) 1224 1225 /* 1226 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution 1227 * All later gens can run the final buffer from the ppgtt 1228 */ 1229 #define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7) 1230 1231 #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc) 1232 #define HAS_4TILE(dev_priv) (INTEL_INFO(dev_priv)->has_4tile) 1233 #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop) 1234 #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb) 1235 #define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6) 1236 #define HAS_WT(dev_priv) HAS_EDRAM(dev_priv) 1237 1238 #define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical) 1239 1240 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ 1241 (INTEL_INFO(dev_priv)->has_logical_ring_contexts) 1242 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \ 1243 (INTEL_INFO(dev_priv)->has_logical_ring_elsq) 1244 1245 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) 1246 1247 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type) 1248 #define HAS_PPGTT(dev_priv) \ 1249 (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE) 1250 #define HAS_FULL_PPGTT(dev_priv) \ 1251 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL) 1252 1253 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ 1254 GEM_BUG_ON((sizes) == 0); \ 1255 ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \ 1256 }) 1257 1258 #define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay) 1259 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ 1260 (INTEL_INFO(dev_priv)->display.overlay_needs_physical) 1261 1262 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 1263 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) 1264 1265 #define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \ 1266 (IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9) 1267 1268 /* WaRsDisableCoarsePowerGating:skl,cnl */ 1269 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 1270 (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) 1271 1272 #define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4) 1273 #define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \ 1274 IS_GEMINILAKE(dev_priv) || \ 1275 IS_KABYLAKE(dev_priv)) 1276 1277 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1278 * rows, which changed the alignment requirements and fence programming. 1279 */ 1280 #define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \ 1281 !(IS_I915G(dev_priv) || IS_I915GM(dev_priv))) 1282 #define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv) 1283 #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) 1284 1285 #define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2) 1286 #define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0) 1287 #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7) 1288 1289 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 1290 1291 #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst) 1292 #define HAS_DP20(dev_priv) (IS_DG2(dev_priv)) 1293 1294 #define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl) 1295 #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) 1296 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg) 1297 #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) 1298 #define HAS_PSR_HW_TRACKING(dev_priv) \ 1299 (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) 1300 #define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12) 1301 #define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0) 1302 1303 #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) 1304 #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) 1305 #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */ 1306 1307 #define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps) 1308 1309 #define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc) 1310 1311 #define HAS_HECI_PXP(dev_priv) \ 1312 (INTEL_INFO(dev_priv)->has_heci_pxp) 1313 1314 #define HAS_HECI_GSCFI(dev_priv) \ 1315 (INTEL_INFO(dev_priv)->has_heci_gscfi) 1316 1317 #define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv)) 1318 1319 #define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12) 1320 1321 #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm) 1322 #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc) 1323 1324 #define HAS_MSLICES(dev_priv) \ 1325 (INTEL_INFO(dev_priv)->has_mslices) 1326 1327 /* 1328 * Set this flag, when platform requires 64K GTT page sizes or larger for 1329 * device local memory access. 1330 */ 1331 #define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages) 1332 1333 /* 1334 * Set this flag when platform doesn't allow both 64k pages and 4k pages in 1335 * the same PT. this flag means we need to support compact PT layout for the 1336 * ppGTT when using the 64K GTT pages. 1337 */ 1338 #define NEEDS_COMPACT_PT(dev_priv) (INTEL_INFO(dev_priv)->needs_compact_pt) 1339 1340 #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) 1341 1342 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i)) 1343 #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) 1344 1345 /* 1346 * Platform has the dedicated compression control state for each lmem surfaces 1347 * stored in lmem to support the 3D and media compression formats. 1348 */ 1349 #define HAS_FLAT_CCS(dev_priv) (INTEL_INFO(dev_priv)->has_flat_ccs) 1350 1351 #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) 1352 1353 #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) 1354 1355 #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs) 1356 1357 #define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \ 1358 INTEL_INFO(dev_priv)->has_pxp) && \ 1359 VDBOX_MASK(to_gt(dev_priv))) 1360 1361 #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) 1362 1363 #define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10)) 1364 1365 /* DPF == dynamic parity feature */ 1366 #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf) 1367 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \ 1368 2 : HAS_L3_DPF(dev_priv)) 1369 1370 #define GT_FREQUENCY_MULTIPLIER 50 1371 #define GEN9_FREQ_SCALER 3 1372 1373 #define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask)) 1374 1375 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0) 1376 1377 #define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) 1378 1379 #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) 1380 1381 /* Only valid when HAS_DISPLAY() is true */ 1382 #define INTEL_DISPLAY_ENABLED(dev_priv) \ 1383 (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display) 1384 1385 #define HAS_GUC_DEPRIVILEGE(dev_priv) \ 1386 (INTEL_INFO(dev_priv)->has_guc_deprivilege) 1387 1388 #define HAS_PERCTX_PREEMPT_CTRL(i915) \ 1389 ((GRAPHICS_VER(i915) >= 9) && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) 1390 1391 #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \ 1392 IS_ALDERLAKE_S(dev_priv)) 1393 1394 #define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915)) 1395 1396 /* i915_gem.c */ 1397 void i915_gem_init_early(struct drm_i915_private *dev_priv); 1398 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); 1399 1400 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) 1401 { 1402 /* 1403 * A single pass should suffice to release all the freed objects (along 1404 * most call paths) , but be a little more paranoid in that freeing 1405 * the objects does take a little amount of time, during which the rcu 1406 * callbacks could have added new objects into the freed list, and 1407 * armed the work again. 1408 */ 1409 while (atomic_read(&i915->mm.free_count)) { 1410 flush_delayed_work(&i915->mm.free_work); 1411 flush_delayed_work(&i915->bdev.wq); 1412 rcu_barrier(); 1413 } 1414 } 1415 1416 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) 1417 { 1418 /* 1419 * Similar to objects above (see i915_gem_drain_freed-objects), in 1420 * general we have workers that are armed by RCU and then rearm 1421 * themselves in their callbacks. To be paranoid, we need to 1422 * drain the workqueue a second time after waiting for the RCU 1423 * grace period so that we catch work queued via RCU from the first 1424 * pass. As neither drain_workqueue() nor flush_workqueue() report 1425 * a result, we make an assumption that we only don't require more 1426 * than 3 passes to catch all _recursive_ RCU delayed work. 1427 * 1428 */ 1429 int pass = 3; 1430 do { 1431 flush_workqueue(i915->wq); 1432 rcu_barrier(); 1433 i915_gem_drain_freed_objects(i915); 1434 } while (--pass); 1435 drain_workqueue(i915->wq); 1436 } 1437 1438 struct i915_vma * __must_check 1439 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, 1440 struct i915_gem_ww_ctx *ww, 1441 const struct i915_ggtt_view *view, 1442 u64 size, u64 alignment, u64 flags); 1443 1444 struct i915_vma * __must_check 1445 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 1446 const struct i915_ggtt_view *view, 1447 u64 size, u64 alignment, u64 flags); 1448 1449 int i915_gem_object_unbind(struct drm_i915_gem_object *obj, 1450 unsigned long flags); 1451 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) 1452 #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) 1453 #define I915_GEM_OBJECT_UNBIND_TEST BIT(2) 1454 #define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3) 1455 #define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4) 1456 1457 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); 1458 1459 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 1460 1461 int __must_check i915_gem_init(struct drm_i915_private *dev_priv); 1462 void i915_gem_driver_register(struct drm_i915_private *i915); 1463 void i915_gem_driver_unregister(struct drm_i915_private *i915); 1464 void i915_gem_driver_remove(struct drm_i915_private *dev_priv); 1465 void i915_gem_driver_release(struct drm_i915_private *dev_priv); 1466 1467 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); 1468 1469 /* intel_device_info.c */ 1470 static inline struct intel_device_info * 1471 mkwrite_device_info(struct drm_i915_private *dev_priv) 1472 { 1473 return (struct intel_device_info *)INTEL_INFO(dev_priv); 1474 } 1475 1476 static inline enum i915_map_type 1477 i915_coherent_map_type(struct drm_i915_private *i915, 1478 struct drm_i915_gem_object *obj, bool always_coherent) 1479 { 1480 if (i915_gem_object_is_lmem(obj)) 1481 return I915_MAP_WC; 1482 if (HAS_LLC(i915) || always_coherent) 1483 return I915_MAP_WB; 1484 else 1485 return I915_MAP_WC; 1486 } 1487 1488 #endif 1489