1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ 32 33 #include <uapi/drm/i915_drm.h> 34 #include <uapi/drm/drm_fourcc.h> 35 36 #include <asm/hypervisor.h> 37 38 #include <linux/io-mapping.h> 39 #include <linux/i2c.h> 40 #include <linux/i2c-algo-bit.h> 41 #include <linux/backlight.h> 42 #include <linux/hash.h> 43 #include <linux/intel-iommu.h> 44 #include <linux/kref.h> 45 #include <linux/mm_types.h> 46 #include <linux/perf_event.h> 47 #include <linux/pm_qos.h> 48 #include <linux/dma-resv.h> 49 #include <linux/shmem_fs.h> 50 #include <linux/stackdepot.h> 51 #include <linux/xarray.h> 52 53 #include <drm/drm_gem.h> 54 #include <drm/drm_auth.h> 55 #include <drm/drm_cache.h> 56 #include <drm/drm_util.h> 57 #include <drm/drm_dsc.h> 58 #include <drm/drm_atomic.h> 59 #include <drm/drm_connector.h> 60 #include <drm/i915_mei_hdcp_interface.h> 61 #include <drm/ttm/ttm_device.h> 62 63 #include "i915_params.h" 64 #include "i915_utils.h" 65 66 #include "display/intel_bios.h" 67 #include "display/intel_cdclk.h" 68 #include "display/intel_display.h" 69 #include "display/intel_display_power.h" 70 #include "display/intel_dmc.h" 71 #include "display/intel_dpll_mgr.h" 72 #include "display/intel_dsb.h" 73 #include "display/intel_fbc.h" 74 #include "display/intel_frontbuffer.h" 75 #include "display/intel_global_state.h" 76 #include "display/intel_gmbus.h" 77 #include "display/intel_opregion.h" 78 79 #include "gem/i915_gem_context_types.h" 80 #include "gem/i915_gem_shrinker.h" 81 #include "gem/i915_gem_stolen.h" 82 #include "gem/i915_gem_lmem.h" 83 84 #include "gt/intel_engine.h" 85 #include "gt/intel_gt_types.h" 86 #include "gt/intel_region_lmem.h" 87 #include "gt/intel_workarounds.h" 88 #include "gt/uc/intel_uc.h" 89 90 #include "intel_device_info.h" 91 #include "intel_memory_region.h" 92 #include "intel_pch.h" 93 #include "intel_pm_types.h" 94 #include "intel_runtime_pm.h" 95 #include "intel_step.h" 96 #include "intel_uncore.h" 97 #include "intel_wakeref.h" 98 #include "intel_wopcm.h" 99 100 #include "i915_gem.h" 101 #include "i915_gem_gtt.h" 102 #include "i915_gpu_error.h" 103 #include "i915_perf_types.h" 104 #include "i915_request.h" 105 #include "i915_scheduler.h" 106 #include "gt/intel_timeline.h" 107 #include "i915_vma.h" 108 109 110 /* General customization: 111 */ 112 113 #define DRIVER_NAME "i915" 114 #define DRIVER_DESC "Intel Graphics" 115 #define DRIVER_DATE "20201103" 116 #define DRIVER_TIMESTAMP 1604406085 117 118 struct drm_i915_gem_object; 119 120 /* Threshold == 5 for long IRQs, 50 for short */ 121 #define HPD_STORM_DEFAULT_THRESHOLD 50 122 123 struct i915_hotplug { 124 struct delayed_work hotplug_work; 125 126 const u32 *hpd, *pch_hpd; 127 128 struct { 129 unsigned long last_jiffies; 130 int count; 131 enum { 132 HPD_ENABLED = 0, 133 HPD_DISABLED = 1, 134 HPD_MARK_DISABLED = 2 135 } state; 136 } stats[HPD_NUM_PINS]; 137 u32 event_bits; 138 u32 retry_bits; 139 struct delayed_work reenable_work; 140 141 u32 long_port_mask; 142 u32 short_port_mask; 143 struct work_struct dig_port_work; 144 145 struct work_struct poll_init_work; 146 bool poll_enabled; 147 148 unsigned int hpd_storm_threshold; 149 /* Whether or not to count short HPD IRQs in HPD storms */ 150 u8 hpd_short_storm_enabled; 151 152 /* 153 * if we get a HPD irq from DP and a HPD irq from non-DP 154 * the non-DP HPD could block the workqueue on a mode config 155 * mutex getting, that userspace may have taken. However 156 * userspace is waiting on the DP workqueue to run which is 157 * blocked behind the non-DP one. 158 */ 159 struct workqueue_struct *dp_wq; 160 }; 161 162 #define I915_GEM_GPU_DOMAINS \ 163 (I915_GEM_DOMAIN_RENDER | \ 164 I915_GEM_DOMAIN_SAMPLER | \ 165 I915_GEM_DOMAIN_COMMAND | \ 166 I915_GEM_DOMAIN_INSTRUCTION | \ 167 I915_GEM_DOMAIN_VERTEX) 168 169 struct drm_i915_private; 170 171 struct drm_i915_file_private { 172 struct drm_i915_private *dev_priv; 173 174 union { 175 struct drm_file *file; 176 struct rcu_head rcu; 177 }; 178 179 /** @proto_context_lock: Guards all struct i915_gem_proto_context 180 * operations 181 * 182 * This not only guards @proto_context_xa, but is always held 183 * whenever we manipulate any struct i915_gem_proto_context, 184 * including finalizing it on first actual use of the GEM context. 185 * 186 * See i915_gem_proto_context. 187 */ 188 struct mutex proto_context_lock; 189 190 /** @proto_context_xa: xarray of struct i915_gem_proto_context 191 * 192 * Historically, the context uAPI allowed for two methods of 193 * setting context parameters: SET_CONTEXT_PARAM and 194 * CONTEXT_CREATE_EXT_SETPARAM. The former is allowed to be called 195 * at any time while the later happens as part of 196 * GEM_CONTEXT_CREATE. Everything settable via one was settable 197 * via the other. While some params are fairly simple and setting 198 * them on a live context is harmless such as the context priority, 199 * others are far trickier such as the VM or the set of engines. 200 * In order to swap out the VM, for instance, we have to delay 201 * until all current in-flight work is complete, swap in the new 202 * VM, and then continue. This leads to a plethora of potential 203 * race conditions we'd really rather avoid. 204 * 205 * We have since disallowed setting these more complex parameters 206 * on active contexts. This works by delaying the creation of the 207 * actual context until after the client is done configuring it 208 * with SET_CONTEXT_PARAM. From the perspective of the client, it 209 * has the same u32 context ID the whole time. From the 210 * perspective of i915, however, it's a struct i915_gem_proto_context 211 * right up until the point where we attempt to do something which 212 * the proto-context can't handle. Then the struct i915_gem_context 213 * gets created. 214 * 215 * This is accomplished via a little xarray dance. When 216 * GEM_CONTEXT_CREATE is called, we create a struct 217 * i915_gem_proto_context, reserve a slot in @context_xa but leave 218 * it NULL, and place the proto-context in the corresponding slot 219 * in @proto_context_xa. Then, in i915_gem_context_lookup(), we 220 * first check @context_xa. If it's there, we return the struct 221 * i915_gem_context and we're done. If it's not, we look in 222 * @proto_context_xa and, if we find it there, we create the actual 223 * context and kill the proto-context. 224 * 225 * In order for this dance to work properly, everything which ever 226 * touches a struct i915_gem_proto_context is guarded by 227 * @proto_context_lock, including context creation. Yes, this 228 * means context creation now takes a giant global lock but it 229 * can't really be helped and that should never be on any driver's 230 * fast-path anyway. 231 */ 232 struct xarray proto_context_xa; 233 234 /** @context_xa: xarray of fully created i915_gem_context 235 * 236 * Write access to this xarray is guarded by @proto_context_lock. 237 * Otherwise, writers may race with finalize_create_context_locked(). 238 * 239 * See @proto_context_xa. 240 */ 241 struct xarray context_xa; 242 struct xarray vm_xa; 243 244 unsigned int bsd_engine; 245 246 /* 247 * Every context ban increments per client ban score. Also 248 * hangs in short succession increments ban score. If ban threshold 249 * is reached, client is considered banned and submitting more work 250 * will fail. This is a stop gap measure to limit the badly behaving 251 * clients access to gpu. Note that unbannable contexts never increment 252 * the client ban score. 253 */ 254 #define I915_CLIENT_SCORE_HANG_FAST 1 255 #define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ) 256 #define I915_CLIENT_SCORE_CONTEXT_BAN 3 257 #define I915_CLIENT_SCORE_BANNED 9 258 /** ban_score: Accumulated score of all ctx bans and fast hangs. */ 259 atomic_t ban_score; 260 unsigned long hang_timestamp; 261 }; 262 263 /* Interface history: 264 * 265 * 1.1: Original. 266 * 1.2: Add Power Management 267 * 1.3: Add vblank support 268 * 1.4: Fix cmdbuffer path, add heap destroy 269 * 1.5: Add vblank pipe configuration 270 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 271 * - Support vertical blank on secondary display pipe 272 */ 273 #define DRIVER_MAJOR 1 274 #define DRIVER_MINOR 6 275 #define DRIVER_PATCHLEVEL 0 276 277 struct intel_overlay; 278 struct intel_overlay_error_state; 279 280 struct sdvo_device_mapping { 281 u8 initialized; 282 u8 dvo_port; 283 u8 slave_addr; 284 u8 dvo_wiring; 285 u8 i2c_pin; 286 u8 ddc_pin; 287 }; 288 289 struct intel_connector; 290 struct intel_encoder; 291 struct intel_atomic_state; 292 struct intel_cdclk_config; 293 struct intel_cdclk_funcs; 294 struct intel_cdclk_state; 295 struct intel_cdclk_vals; 296 struct intel_initial_plane_config; 297 struct intel_crtc; 298 struct intel_limit; 299 struct dpll; 300 301 /* functions used internal in intel_pm.c */ 302 struct drm_i915_clock_gating_funcs { 303 void (*init_clock_gating)(struct drm_i915_private *dev_priv); 304 }; 305 306 /* functions used for watermark calcs for display. */ 307 struct drm_i915_wm_disp_funcs { 308 /* update_wm is for legacy wm management */ 309 void (*update_wm)(struct drm_i915_private *dev_priv); 310 int (*compute_pipe_wm)(struct intel_atomic_state *state, 311 struct intel_crtc *crtc); 312 int (*compute_intermediate_wm)(struct intel_atomic_state *state, 313 struct intel_crtc *crtc); 314 void (*initial_watermarks)(struct intel_atomic_state *state, 315 struct intel_crtc *crtc); 316 void (*atomic_update_watermarks)(struct intel_atomic_state *state, 317 struct intel_crtc *crtc); 318 void (*optimize_watermarks)(struct intel_atomic_state *state, 319 struct intel_crtc *crtc); 320 int (*compute_global_watermarks)(struct intel_atomic_state *state); 321 }; 322 323 struct intel_color_funcs { 324 int (*color_check)(struct intel_crtc_state *crtc_state); 325 /* 326 * Program double buffered color management registers during 327 * vblank evasion. The registers should then latch during the 328 * next vblank start, alongside any other double buffered registers 329 * involved with the same commit. 330 */ 331 void (*color_commit)(const struct intel_crtc_state *crtc_state); 332 /* 333 * Load LUTs (and other single buffered color management 334 * registers). Will (hopefully) be called during the vblank 335 * following the latching of any double buffered registers 336 * involved with the same commit. 337 */ 338 void (*load_luts)(const struct intel_crtc_state *crtc_state); 339 void (*read_luts)(struct intel_crtc_state *crtc_state); 340 }; 341 342 struct intel_hotplug_funcs { 343 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); 344 }; 345 346 struct intel_fdi_funcs { 347 void (*fdi_link_train)(struct intel_crtc *crtc, 348 const struct intel_crtc_state *crtc_state); 349 }; 350 351 struct intel_dpll_funcs { 352 int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state); 353 }; 354 355 struct drm_i915_display_funcs { 356 /* Returns the active state of the crtc, and if the crtc is active, 357 * fills out the pipe-config with the hw state. */ 358 bool (*get_pipe_config)(struct intel_crtc *, 359 struct intel_crtc_state *); 360 void (*get_initial_plane_config)(struct intel_crtc *, 361 struct intel_initial_plane_config *); 362 void (*crtc_enable)(struct intel_atomic_state *state, 363 struct intel_crtc *crtc); 364 void (*crtc_disable)(struct intel_atomic_state *state, 365 struct intel_crtc *crtc); 366 void (*commit_modeset_enables)(struct intel_atomic_state *state); 367 }; 368 369 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ 370 371 /* 372 * HIGH_RR is the highest eDP panel refresh rate read from EDID 373 * LOW_RR is the lowest eDP panel refresh rate found from EDID 374 * parsing for same resolution. 375 */ 376 enum drrs_refresh_rate_type { 377 DRRS_HIGH_RR, 378 DRRS_LOW_RR, 379 DRRS_MAX_RR, /* RR count */ 380 }; 381 382 enum drrs_support_type { 383 DRRS_NOT_SUPPORTED = 0, 384 STATIC_DRRS_SUPPORT = 1, 385 SEAMLESS_DRRS_SUPPORT = 2 386 }; 387 388 struct intel_dp; 389 struct i915_drrs { 390 struct mutex mutex; 391 struct delayed_work work; 392 struct intel_dp *dp; 393 unsigned busy_frontbuffer_bits; 394 enum drrs_refresh_rate_type refresh_rate_type; 395 enum drrs_support_type type; 396 }; 397 398 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 399 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 400 #define QUIRK_BACKLIGHT_PRESENT (1<<3) 401 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 402 #define QUIRK_INCREASE_T12_DELAY (1<<6) 403 #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) 404 #define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8) 405 406 struct intel_fbdev; 407 408 struct intel_gmbus { 409 struct i2c_adapter adapter; 410 #define GMBUS_FORCE_BIT_RETRY (1U << 31) 411 u32 force_bit; 412 u32 reg0; 413 i915_reg_t gpio_reg; 414 struct i2c_algo_bit_data bit_algo; 415 struct drm_i915_private *dev_priv; 416 }; 417 418 struct i915_suspend_saved_registers { 419 u32 saveDSPARB; 420 u32 saveSWF0[16]; 421 u32 saveSWF1[16]; 422 u32 saveSWF3[3]; 423 u16 saveGCDGMBUS; 424 }; 425 426 struct vlv_s0ix_state; 427 428 #define MAX_L3_SLICES 2 429 struct intel_l3_parity { 430 u32 *remap_info[MAX_L3_SLICES]; 431 struct work_struct error_work; 432 int which_slice; 433 }; 434 435 struct i915_gem_mm { 436 /* 437 * Shortcut for the stolen region. This points to either 438 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or 439 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't 440 * support stolen. 441 */ 442 struct intel_memory_region *stolen_region; 443 /** Memory allocator for GTT stolen memory */ 444 struct drm_mm stolen; 445 /** Protects the usage of the GTT stolen memory allocator. This is 446 * always the inner lock when overlapping with struct_mutex. */ 447 struct mutex stolen_lock; 448 449 /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */ 450 spinlock_t obj_lock; 451 452 /** 453 * List of objects which are purgeable. 454 */ 455 struct list_head purge_list; 456 457 /** 458 * List of objects which have allocated pages and are shrinkable. 459 */ 460 struct list_head shrink_list; 461 462 /** 463 * List of objects which are pending destruction. 464 */ 465 struct llist_head free_list; 466 struct delayed_work free_work; 467 /** 468 * Count of objects pending destructions. Used to skip needlessly 469 * waiting on an RCU barrier if no objects are waiting to be freed. 470 */ 471 atomic_t free_count; 472 473 /** 474 * tmpfs instance used for shmem backed objects 475 */ 476 struct vfsmount *gemfs; 477 478 struct intel_memory_region *regions[INTEL_REGION_UNKNOWN]; 479 480 struct notifier_block oom_notifier; 481 struct notifier_block vmap_notifier; 482 struct shrinker shrinker; 483 484 #ifdef CONFIG_MMU_NOTIFIER 485 /** 486 * notifier_lock for mmu notifiers, memory may not be allocated 487 * while holding this lock. 488 */ 489 rwlock_t notifier_lock; 490 #endif 491 492 /* shrinker accounting, also useful for userland debugging */ 493 u64 shrink_memory; 494 u32 shrink_count; 495 }; 496 497 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */ 498 499 unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915, 500 u64 context); 501 502 static inline unsigned long 503 i915_fence_timeout(const struct drm_i915_private *i915) 504 { 505 return i915_fence_context_timeout(i915, U64_MAX); 506 } 507 508 /* Amount of SAGV/QGV points, BSpec precisely defines this */ 509 #define I915_NUM_QGV_POINTS 8 510 511 #define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915)) 512 513 /* Amount of PSF GV points, BSpec precisely defines this */ 514 #define I915_NUM_PSF_GV_POINTS 3 515 516 struct intel_vbt_data { 517 /* bdb version */ 518 u16 version; 519 520 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 521 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 522 523 /* Feature bits */ 524 unsigned int int_tv_support:1; 525 unsigned int lvds_dither:1; 526 unsigned int int_crt_support:1; 527 unsigned int lvds_use_ssc:1; 528 unsigned int int_lvds_support:1; 529 unsigned int display_clock_mode:1; 530 unsigned int fdi_rx_polarity_inverted:1; 531 unsigned int panel_type:4; 532 int lvds_ssc_freq; 533 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 534 enum drm_panel_orientation orientation; 535 536 enum drrs_support_type drrs_type; 537 538 struct { 539 int rate; 540 int lanes; 541 int preemphasis; 542 int vswing; 543 bool low_vswing; 544 bool initialized; 545 int bpp; 546 struct edp_power_seq pps; 547 bool hobl; 548 } edp; 549 550 struct { 551 bool enable; 552 bool full_link; 553 bool require_aux_wakeup; 554 int idle_frames; 555 int tp1_wakeup_time_us; 556 int tp2_tp3_wakeup_time_us; 557 int psr2_tp2_tp3_wakeup_time_us; 558 } psr; 559 560 struct { 561 u16 pwm_freq_hz; 562 u16 brightness_precision_bits; 563 bool present; 564 bool active_low_pwm; 565 u8 min_brightness; /* min_brightness/255 of max */ 566 u8 controller; /* brightness controller number */ 567 enum intel_backlight_type type; 568 } backlight; 569 570 /* MIPI DSI */ 571 struct { 572 u16 panel_id; 573 struct mipi_config *config; 574 struct mipi_pps_data *pps; 575 u16 bl_ports; 576 u16 cabc_ports; 577 u8 seq_version; 578 u32 size; 579 u8 *data; 580 const u8 *sequence[MIPI_SEQ_MAX]; 581 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ 582 enum drm_panel_orientation orientation; 583 } dsi; 584 585 int crt_ddc_pin; 586 587 struct list_head display_devices; 588 589 struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */ 590 struct sdvo_device_mapping sdvo_mappings[2]; 591 }; 592 593 struct i915_frontbuffer_tracking { 594 spinlock_t lock; 595 596 /* 597 * Tracking bits for delayed frontbuffer flushing du to gpu activity or 598 * scheduled flips. 599 */ 600 unsigned busy_bits; 601 unsigned flip_bits; 602 }; 603 604 struct i915_virtual_gpu { 605 struct mutex lock; /* serialises sending of g2v_notify command pkts */ 606 bool active; 607 u32 caps; 608 }; 609 610 struct i915_selftest_stash { 611 atomic_t counter; 612 struct ida mock_region_instances; 613 }; 614 615 /* intel_audio.c private */ 616 struct intel_audio_funcs; 617 struct intel_audio_private { 618 /* Display internal audio functions */ 619 const struct intel_audio_funcs *funcs; 620 621 /* hda/i915 audio component */ 622 struct i915_audio_component *component; 623 bool component_registered; 624 /* mutex for audio/video sync */ 625 struct mutex mutex; 626 int power_refcount; 627 u32 freq_cntrl; 628 629 /* Used to save the pipe-to-encoder mapping for audio */ 630 struct intel_encoder *encoder_map[I915_MAX_PIPES]; 631 632 /* necessary resource sharing with HDMI LPE audio driver. */ 633 struct { 634 struct platform_device *platdev; 635 int irq; 636 } lpe; 637 }; 638 639 struct drm_i915_private { 640 struct drm_device drm; 641 642 /* FIXME: Device release actions should all be moved to drmm_ */ 643 bool do_release; 644 645 /* i915 device parameters */ 646 struct i915_params params; 647 648 const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ 649 struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */ 650 struct intel_driver_caps caps; 651 652 /** 653 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and 654 * end of stolen which we can optionally use to create GEM objects 655 * backed by stolen memory. Note that stolen_usable_size tells us 656 * exactly how much of this we are actually allowed to use, given that 657 * some portion of it is in fact reserved for use by hardware functions. 658 */ 659 struct resource dsm; 660 /** 661 * Reseved portion of Data Stolen Memory 662 */ 663 struct resource dsm_reserved; 664 665 /* 666 * Stolen memory is segmented in hardware with different portions 667 * offlimits to certain functions. 668 * 669 * The drm_mm is initialised to the total accessible range, as found 670 * from the PCI config. On Broadwell+, this is further restricted to 671 * avoid the first page! The upper end of stolen memory is reserved for 672 * hardware functions and similarly removed from the accessible range. 673 */ 674 resource_size_t stolen_usable_size; /* Total size minus reserved ranges */ 675 676 struct intel_uncore uncore; 677 struct intel_uncore_mmio_debug mmio_debug; 678 679 struct i915_virtual_gpu vgpu; 680 681 struct intel_gvt *gvt; 682 683 struct intel_wopcm wopcm; 684 685 struct intel_dmc dmc; 686 687 struct intel_gmbus gmbus[GMBUS_NUM_PINS]; 688 689 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 690 * controller on different i2c buses. */ 691 struct mutex gmbus_mutex; 692 693 /** 694 * Base address of where the gmbus and gpio blocks are located (either 695 * on PCH or on SoC for platforms without PCH). 696 */ 697 u32 gpio_mmio_base; 698 699 /* MMIO base address for MIPI regs */ 700 u32 mipi_mmio_base; 701 702 u32 pps_mmio_base; 703 704 wait_queue_head_t gmbus_wait_queue; 705 706 struct pci_dev *bridge_dev; 707 708 struct rb_root uabi_engines; 709 710 struct resource mch_res; 711 712 /* protects the irq masks */ 713 spinlock_t irq_lock; 714 715 bool display_irqs_enabled; 716 717 /* Sideband mailbox protection */ 718 struct mutex sb_lock; 719 struct pm_qos_request sb_qos; 720 721 /** Cached value of IMR to avoid reads in updating the bitfield */ 722 union { 723 u32 irq_mask; 724 u32 de_irq_mask[I915_MAX_PIPES]; 725 }; 726 u32 pipestat_irq_mask[I915_MAX_PIPES]; 727 728 struct i915_hotplug hotplug; 729 struct intel_fbc *fbc[I915_MAX_FBCS]; 730 struct i915_drrs drrs; 731 struct intel_opregion opregion; 732 struct intel_vbt_data vbt; 733 734 bool preserve_bios_swizzle; 735 736 /* overlay */ 737 struct intel_overlay *overlay; 738 739 /* backlight registers and fields in struct intel_panel */ 740 struct mutex backlight_lock; 741 742 /* protects panel power sequencer state */ 743 struct mutex pps_mutex; 744 745 unsigned int fsb_freq, mem_freq, is_ddr3; 746 unsigned int skl_preferred_vco_freq; 747 unsigned int max_cdclk_freq; 748 749 unsigned int max_dotclk_freq; 750 unsigned int hpll_freq; 751 unsigned int fdi_pll_freq; 752 unsigned int czclk_freq; 753 754 struct { 755 /* The current hardware cdclk configuration */ 756 struct intel_cdclk_config hw; 757 758 /* cdclk, divider, and ratio table from bspec */ 759 const struct intel_cdclk_vals *table; 760 761 struct intel_global_obj obj; 762 } cdclk; 763 764 struct { 765 /* The current hardware dbuf configuration */ 766 u8 enabled_slices; 767 768 struct intel_global_obj obj; 769 } dbuf; 770 771 /** 772 * wq - Driver workqueue for GEM. 773 * 774 * NOTE: Work items scheduled here are not allowed to grab any modeset 775 * locks, for otherwise the flushing done in the pageflip code will 776 * result in deadlocks. 777 */ 778 struct workqueue_struct *wq; 779 780 /* ordered wq for modesets */ 781 struct workqueue_struct *modeset_wq; 782 /* unbound hipri wq for page flips/plane updates */ 783 struct workqueue_struct *flip_wq; 784 785 /* pm private clock gating functions */ 786 const struct drm_i915_clock_gating_funcs *clock_gating_funcs; 787 788 /* pm display functions */ 789 const struct drm_i915_wm_disp_funcs *wm_disp; 790 791 /* irq display functions */ 792 const struct intel_hotplug_funcs *hotplug_funcs; 793 794 /* fdi display functions */ 795 const struct intel_fdi_funcs *fdi_funcs; 796 797 /* display pll funcs */ 798 const struct intel_dpll_funcs *dpll_funcs; 799 800 /* Display functions */ 801 const struct drm_i915_display_funcs *display; 802 803 /* Display internal color functions */ 804 const struct intel_color_funcs *color_funcs; 805 806 /* Display CDCLK functions */ 807 const struct intel_cdclk_funcs *cdclk_funcs; 808 809 /* PCH chipset type */ 810 enum intel_pch pch_type; 811 unsigned short pch_id; 812 813 unsigned long quirks; 814 815 struct drm_atomic_state *modeset_restore_state; 816 struct drm_modeset_acquire_ctx reset_ctx; 817 818 struct i915_ggtt ggtt; /* VM representing the global address space */ 819 820 struct i915_gem_mm mm; 821 822 /* Kernel Modesetting */ 823 824 /** 825 * dpll and cdclk state is protected by connection_mutex 826 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll. 827 * Must be global rather than per dpll, because on some platforms plls 828 * share registers. 829 */ 830 struct { 831 struct mutex lock; 832 833 int num_shared_dpll; 834 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 835 const struct intel_dpll_mgr *mgr; 836 837 struct { 838 int nssc; 839 int ssc; 840 } ref_clks; 841 } dpll; 842 843 struct list_head global_obj_list; 844 845 /* 846 * For reading active_pipes holding any crtc lock is 847 * sufficient, for writing must hold all of them. 848 */ 849 u8 active_pipes; 850 851 struct i915_frontbuffer_tracking fb_tracking; 852 853 struct intel_atomic_helper { 854 struct llist_head free_list; 855 struct work_struct free_work; 856 } atomic_helper; 857 858 bool mchbar_need_disable; 859 860 struct intel_l3_parity l3_parity; 861 862 /* 863 * HTI (aka HDPORT) state read during initial hw readout. Most 864 * platforms don't have HTI, so this will just stay 0. Those that do 865 * will use this later to figure out which PLLs and PHYs are unavailable 866 * for driver usage. 867 */ 868 u32 hti_state; 869 870 /* 871 * edram size in MB. 872 * Cannot be determined by PCIID. You must always read a register. 873 */ 874 u32 edram_size_mb; 875 876 struct i915_power_domains power_domains; 877 878 struct i915_gpu_error gpu_error; 879 880 struct drm_i915_gem_object *vlv_pctx; 881 882 /* list of fbdev register on this device */ 883 struct intel_fbdev *fbdev; 884 struct work_struct fbdev_suspend_work; 885 886 struct drm_property *broadcast_rgb_property; 887 struct drm_property *force_audio_property; 888 889 u32 fdi_rx_config; 890 891 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ 892 u32 chv_phy_control; 893 /* 894 * Shadows for CHV DPLL_MD regs to keep the state 895 * checker somewhat working in the presence hardware 896 * crappiness (can't read out DPLL_MD for pipes B & C). 897 */ 898 u32 chv_dpll_md[I915_MAX_PIPES]; 899 u32 bxt_phy_grc; 900 901 u32 suspend_count; 902 bool power_domains_suspended; 903 struct i915_suspend_saved_registers regfile; 904 struct vlv_s0ix_state *vlv_s0ix_state; 905 906 enum { 907 I915_SAGV_UNKNOWN = 0, 908 I915_SAGV_DISABLED, 909 I915_SAGV_ENABLED, 910 I915_SAGV_NOT_CONTROLLED 911 } sagv_status; 912 913 u32 sagv_block_time_us; 914 915 struct { 916 /* 917 * Raw watermark latency values: 918 * in 0.1us units for WM0, 919 * in 0.5us units for WM1+. 920 */ 921 /* primary */ 922 u16 pri_latency[5]; 923 /* sprite */ 924 u16 spr_latency[5]; 925 /* cursor */ 926 u16 cur_latency[5]; 927 /* 928 * Raw watermark memory latency values 929 * for SKL for all 8 levels 930 * in 1us units. 931 */ 932 u16 skl_latency[8]; 933 934 /* current hardware state */ 935 union { 936 struct ilk_wm_values hw; 937 struct vlv_wm_values vlv; 938 struct g4x_wm_values g4x; 939 }; 940 941 u8 max_level; 942 943 /* 944 * Should be held around atomic WM register writing; also 945 * protects * intel_crtc->wm.active and 946 * crtc_state->wm.need_postvbl_update. 947 */ 948 struct mutex wm_mutex; 949 } wm; 950 951 struct dram_info { 952 bool wm_lv_0_adjust_needed; 953 u8 num_channels; 954 bool symmetric_memory; 955 enum intel_dram_type { 956 INTEL_DRAM_UNKNOWN, 957 INTEL_DRAM_DDR3, 958 INTEL_DRAM_DDR4, 959 INTEL_DRAM_LPDDR3, 960 INTEL_DRAM_LPDDR4, 961 INTEL_DRAM_DDR5, 962 INTEL_DRAM_LPDDR5, 963 } type; 964 u8 num_qgv_points; 965 u8 num_psf_gv_points; 966 } dram_info; 967 968 struct intel_bw_info { 969 /* for each QGV point */ 970 unsigned int deratedbw[I915_NUM_QGV_POINTS]; 971 /* for each PSF GV point */ 972 unsigned int psf_bw[I915_NUM_PSF_GV_POINTS]; 973 u8 num_qgv_points; 974 u8 num_psf_gv_points; 975 u8 num_planes; 976 } max_bw[6]; 977 978 struct intel_global_obj bw_obj; 979 980 struct intel_runtime_pm runtime_pm; 981 982 struct i915_perf perf; 983 984 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 985 struct intel_gt gt0; 986 987 struct { 988 struct i915_gem_contexts { 989 spinlock_t lock; /* locks list */ 990 struct list_head list; 991 } contexts; 992 993 /* 994 * We replace the local file with a global mappings as the 995 * backing storage for the mmap is on the device and not 996 * on the struct file, and we do not want to prolong the 997 * lifetime of the local fd. To minimise the number of 998 * anonymous inodes we create, we use a global singleton to 999 * share the global mapping. 1000 */ 1001 struct file *mmap_singleton; 1002 } gem; 1003 1004 u8 framestart_delay; 1005 1006 /* Window2 specifies time required to program DSB (Window2) in number of scan lines */ 1007 u8 window2_delay; 1008 1009 u8 pch_ssc_use; 1010 1011 /* For i915gm/i945gm vblank irq workaround */ 1012 u8 vblank_enabled; 1013 1014 bool irq_enabled; 1015 1016 /* perform PHY state sanity checks? */ 1017 bool chv_phy_assert[2]; 1018 1019 bool ipc_enabled; 1020 1021 struct intel_audio_private audio; 1022 1023 struct i915_pmu pmu; 1024 1025 struct i915_hdcp_comp_master *hdcp_master; 1026 bool hdcp_comp_added; 1027 1028 /* Mutex to protect the above hdcp component related values. */ 1029 struct mutex hdcp_comp_mutex; 1030 1031 /* The TTM device structure. */ 1032 struct ttm_device bdev; 1033 1034 I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;) 1035 1036 /* 1037 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 1038 * will be rejected. Instead look for a better place. 1039 */ 1040 }; 1041 1042 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 1043 { 1044 return container_of(dev, struct drm_i915_private, drm); 1045 } 1046 1047 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 1048 { 1049 return dev_get_drvdata(kdev); 1050 } 1051 1052 static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) 1053 { 1054 return pci_get_drvdata(pdev); 1055 } 1056 1057 static inline struct intel_gt *to_gt(struct drm_i915_private *i915) 1058 { 1059 return &i915->gt0; 1060 } 1061 1062 /* Simple iterator over all initialised engines */ 1063 #define for_each_engine(engine__, dev_priv__, id__) \ 1064 for ((id__) = 0; \ 1065 (id__) < I915_NUM_ENGINES; \ 1066 (id__)++) \ 1067 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 1068 1069 /* Iterator over subset of engines selected by mask */ 1070 #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ 1071 for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \ 1072 (tmp__) ? \ 1073 ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ 1074 0;) 1075 1076 #define rb_to_uabi_engine(rb) \ 1077 rb_entry_safe(rb, struct intel_engine_cs, uabi_node) 1078 1079 #define for_each_uabi_engine(engine__, i915__) \ 1080 for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\ 1081 (engine__); \ 1082 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) 1083 1084 #define for_each_uabi_class_engine(engine__, class__, i915__) \ 1085 for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \ 1086 (engine__) && (engine__)->uabi_class == (class__); \ 1087 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) 1088 1089 #define I915_GTT_OFFSET_NONE ((u32)-1) 1090 1091 /* 1092 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 1093 * considered to be the frontbuffer for the given plane interface-wise. This 1094 * doesn't mean that the hw necessarily already scans it out, but that any 1095 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 1096 * 1097 * We have one bit per pipe and per scanout plane type. 1098 */ 1099 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 1100 #define INTEL_FRONTBUFFER(pipe, plane_id) ({ \ 1101 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \ 1102 BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \ 1103 BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \ 1104 }) 1105 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 1106 BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 1107 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 1108 GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ 1109 INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 1110 1111 #define INTEL_INFO(dev_priv) (&(dev_priv)->__info) 1112 #define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime) 1113 #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) 1114 1115 #define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id) 1116 1117 #define IP_VER(ver, rel) ((ver) << 8 | (rel)) 1118 1119 #define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver) 1120 #define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \ 1121 INTEL_INFO(i915)->graphics.rel) 1122 #define IS_GRAPHICS_VER(i915, from, until) \ 1123 (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until)) 1124 1125 #define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver) 1126 #define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.arch, \ 1127 INTEL_INFO(i915)->media.rel) 1128 #define IS_MEDIA_VER(i915, from, until) \ 1129 (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until)) 1130 1131 #define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.ver) 1132 #define IS_DISPLAY_VER(i915, from, until) \ 1133 (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) 1134 1135 #define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision) 1136 1137 #define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb) 1138 1139 #define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step) 1140 #define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step) 1141 #define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step) 1142 1143 #define IS_DISPLAY_STEP(__i915, since, until) \ 1144 (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \ 1145 INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until)) 1146 1147 #define IS_GRAPHICS_STEP(__i915, since, until) \ 1148 (drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \ 1149 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until)) 1150 1151 #define IS_MEDIA_STEP(__i915, since, until) \ 1152 (drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \ 1153 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until)) 1154 1155 static __always_inline unsigned int 1156 __platform_mask_index(const struct intel_runtime_info *info, 1157 enum intel_platform p) 1158 { 1159 const unsigned int pbits = 1160 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; 1161 1162 /* Expand the platform_mask array if this fails. */ 1163 BUILD_BUG_ON(INTEL_MAX_PLATFORMS > 1164 pbits * ARRAY_SIZE(info->platform_mask)); 1165 1166 return p / pbits; 1167 } 1168 1169 static __always_inline unsigned int 1170 __platform_mask_bit(const struct intel_runtime_info *info, 1171 enum intel_platform p) 1172 { 1173 const unsigned int pbits = 1174 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; 1175 1176 return p % pbits + INTEL_SUBPLATFORM_BITS; 1177 } 1178 1179 static inline u32 1180 intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) 1181 { 1182 const unsigned int pi = __platform_mask_index(info, p); 1183 1184 return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK; 1185 } 1186 1187 static __always_inline bool 1188 IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p) 1189 { 1190 const struct intel_runtime_info *info = RUNTIME_INFO(i915); 1191 const unsigned int pi = __platform_mask_index(info, p); 1192 const unsigned int pb = __platform_mask_bit(info, p); 1193 1194 BUILD_BUG_ON(!__builtin_constant_p(p)); 1195 1196 return info->platform_mask[pi] & BIT(pb); 1197 } 1198 1199 static __always_inline bool 1200 IS_SUBPLATFORM(const struct drm_i915_private *i915, 1201 enum intel_platform p, unsigned int s) 1202 { 1203 const struct intel_runtime_info *info = RUNTIME_INFO(i915); 1204 const unsigned int pi = __platform_mask_index(info, p); 1205 const unsigned int pb = __platform_mask_bit(info, p); 1206 const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1; 1207 const u32 mask = info->platform_mask[pi]; 1208 1209 BUILD_BUG_ON(!__builtin_constant_p(p)); 1210 BUILD_BUG_ON(!__builtin_constant_p(s)); 1211 BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS); 1212 1213 /* Shift and test on the MSB position so sign flag can be used. */ 1214 return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb); 1215 } 1216 1217 #define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile) 1218 #define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx) 1219 1220 #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830) 1221 #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G) 1222 #define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X) 1223 #define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G) 1224 #define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G) 1225 #define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM) 1226 #define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G) 1227 #define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM) 1228 #define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G) 1229 #define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM) 1230 #define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45) 1231 #define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45) 1232 #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) 1233 #define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW) 1234 #define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33) 1235 #define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE) 1236 #define IS_IRONLAKE_M(dev_priv) \ 1237 (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv)) 1238 #define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE) 1239 #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE) 1240 #define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \ 1241 INTEL_INFO(dev_priv)->gt == 1) 1242 #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) 1243 #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW) 1244 #define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL) 1245 #define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL) 1246 #define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE) 1247 #define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON) 1248 #define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE) 1249 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) 1250 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) 1251 #define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE) 1252 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) 1253 #define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \ 1254 IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) 1255 #define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) 1256 #define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE) 1257 #define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1) 1258 #define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S) 1259 #define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P) 1260 #define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV) 1261 #define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG2) 1262 #define IS_DG2_G10(dev_priv) \ 1263 IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10) 1264 #define IS_DG2_G11(dev_priv) \ 1265 IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11) 1266 #define IS_ADLS_RPLS(dev_priv) \ 1267 IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S) 1268 #define IS_ADLP_N(dev_priv) \ 1269 IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N) 1270 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 1271 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 1272 #define IS_BDW_ULT(dev_priv) \ 1273 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT) 1274 #define IS_BDW_ULX(dev_priv) \ 1275 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX) 1276 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 1277 INTEL_INFO(dev_priv)->gt == 3) 1278 #define IS_HSW_ULT(dev_priv) \ 1279 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT) 1280 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 1281 INTEL_INFO(dev_priv)->gt == 3) 1282 #define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \ 1283 INTEL_INFO(dev_priv)->gt == 1) 1284 /* ULX machines are also considered ULT. */ 1285 #define IS_HSW_ULX(dev_priv) \ 1286 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX) 1287 #define IS_SKL_ULT(dev_priv) \ 1288 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT) 1289 #define IS_SKL_ULX(dev_priv) \ 1290 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX) 1291 #define IS_KBL_ULT(dev_priv) \ 1292 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT) 1293 #define IS_KBL_ULX(dev_priv) \ 1294 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX) 1295 #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ 1296 INTEL_INFO(dev_priv)->gt == 2) 1297 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 1298 INTEL_INFO(dev_priv)->gt == 3) 1299 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 1300 INTEL_INFO(dev_priv)->gt == 4) 1301 #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \ 1302 INTEL_INFO(dev_priv)->gt == 2) 1303 #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \ 1304 INTEL_INFO(dev_priv)->gt == 3) 1305 #define IS_CFL_ULT(dev_priv) \ 1306 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT) 1307 #define IS_CFL_ULX(dev_priv) \ 1308 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX) 1309 #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 1310 INTEL_INFO(dev_priv)->gt == 2) 1311 #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 1312 INTEL_INFO(dev_priv)->gt == 3) 1313 1314 #define IS_CML_ULT(dev_priv) \ 1315 IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT) 1316 #define IS_CML_ULX(dev_priv) \ 1317 IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX) 1318 #define IS_CML_GT2(dev_priv) (IS_COMETLAKE(dev_priv) && \ 1319 INTEL_INFO(dev_priv)->gt == 2) 1320 1321 #define IS_ICL_WITH_PORT_F(dev_priv) \ 1322 IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF) 1323 1324 #define IS_TGL_U(dev_priv) \ 1325 IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT) 1326 1327 #define IS_TGL_Y(dev_priv) \ 1328 IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX) 1329 1330 #define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until)) 1331 1332 #define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \ 1333 (IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until)) 1334 #define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \ 1335 (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until)) 1336 1337 #define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \ 1338 (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until)) 1339 #define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \ 1340 (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until)) 1341 1342 #define IS_TGL_DISPLAY_STEP(__i915, since, until) \ 1343 (IS_TIGERLAKE(__i915) && \ 1344 IS_DISPLAY_STEP(__i915, since, until)) 1345 1346 #define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \ 1347 ((IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \ 1348 IS_GRAPHICS_STEP(__i915, since, until)) 1349 1350 #define IS_TGL_GRAPHICS_STEP(__i915, since, until) \ 1351 (IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \ 1352 IS_GRAPHICS_STEP(__i915, since, until)) 1353 1354 #define IS_RKL_DISPLAY_STEP(p, since, until) \ 1355 (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until)) 1356 1357 #define IS_DG1_GRAPHICS_STEP(p, since, until) \ 1358 (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until)) 1359 #define IS_DG1_DISPLAY_STEP(p, since, until) \ 1360 (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until)) 1361 1362 #define IS_ADLS_DISPLAY_STEP(__i915, since, until) \ 1363 (IS_ALDERLAKE_S(__i915) && \ 1364 IS_DISPLAY_STEP(__i915, since, until)) 1365 1366 #define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \ 1367 (IS_ALDERLAKE_S(__i915) && \ 1368 IS_GRAPHICS_STEP(__i915, since, until)) 1369 1370 #define IS_ADLP_DISPLAY_STEP(__i915, since, until) \ 1371 (IS_ALDERLAKE_P(__i915) && \ 1372 IS_DISPLAY_STEP(__i915, since, until)) 1373 1374 #define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \ 1375 (IS_ALDERLAKE_P(__i915) && \ 1376 IS_GRAPHICS_STEP(__i915, since, until)) 1377 1378 #define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \ 1379 (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until)) 1380 1381 /* 1382 * DG2 hardware steppings are a bit unusual. The hardware design was forked 1383 * to create two variants (G10 and G11) which have distinct workaround sets. 1384 * The G11 fork of the DG2 design resets the GT stepping back to "A0" for its 1385 * first iteration, even though it's more similar to a G10 B0 stepping in terms 1386 * of functionality and workarounds. However the display stepping does not 1387 * reset in the same manner --- a specific stepping like "B0" has a consistent 1388 * meaning regardless of whether it belongs to a G10 or G11 DG2. 1389 * 1390 * TLDR: All GT workarounds and stepping-specific logic must be applied in 1391 * relation to a specific subplatform (G10 or G11), whereas display workarounds 1392 * and stepping-specific logic will be applied with a general DG2-wide stepping 1393 * number. 1394 */ 1395 #define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \ 1396 (IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \ 1397 IS_GRAPHICS_STEP(__i915, since, until)) 1398 1399 #define IS_DG2_DISPLAY_STEP(__i915, since, until) \ 1400 (IS_DG2(__i915) && \ 1401 IS_DISPLAY_STEP(__i915, since, until)) 1402 1403 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) 1404 #define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv)) 1405 #define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv)) 1406 1407 #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) 1408 #define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id) 1409 1410 #define ENGINE_INSTANCES_MASK(gt, first, count) ({ \ 1411 unsigned int first__ = (first); \ 1412 unsigned int count__ = (count); \ 1413 ((gt)->info.engine_mask & \ 1414 GENMASK(first__ + count__ - 1, first__)) >> first__; \ 1415 }) 1416 #define VDBOX_MASK(gt) \ 1417 ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS) 1418 #define VEBOX_MASK(gt) \ 1419 ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS) 1420 1421 /* 1422 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution 1423 * All later gens can run the final buffer from the ppgtt 1424 */ 1425 #define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7) 1426 1427 #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc) 1428 #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop) 1429 #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb) 1430 #define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6) 1431 #define HAS_WT(dev_priv) HAS_EDRAM(dev_priv) 1432 1433 #define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical) 1434 1435 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ 1436 (INTEL_INFO(dev_priv)->has_logical_ring_contexts) 1437 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \ 1438 (INTEL_INFO(dev_priv)->has_logical_ring_elsq) 1439 1440 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) 1441 1442 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type) 1443 #define HAS_PPGTT(dev_priv) \ 1444 (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE) 1445 #define HAS_FULL_PPGTT(dev_priv) \ 1446 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL) 1447 1448 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ 1449 GEM_BUG_ON((sizes) == 0); \ 1450 ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \ 1451 }) 1452 1453 #define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay) 1454 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ 1455 (INTEL_INFO(dev_priv)->display.overlay_needs_physical) 1456 1457 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 1458 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) 1459 1460 #define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \ 1461 (IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9) 1462 1463 /* WaRsDisableCoarsePowerGating:skl,cnl */ 1464 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 1465 (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) 1466 1467 #define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4) 1468 #define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \ 1469 IS_GEMINILAKE(dev_priv) || \ 1470 IS_KABYLAKE(dev_priv)) 1471 1472 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1473 * rows, which changed the alignment requirements and fence programming. 1474 */ 1475 #define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \ 1476 !(IS_I915G(dev_priv) || IS_I915GM(dev_priv))) 1477 #define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv) 1478 #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) 1479 1480 #define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2) 1481 #define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0) 1482 #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7) 1483 1484 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 1485 1486 #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst) 1487 #define HAS_DP20(dev_priv) (IS_DG2(dev_priv)) 1488 1489 #define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl) 1490 #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) 1491 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg) 1492 #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) 1493 #define HAS_PSR_HW_TRACKING(dev_priv) \ 1494 (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) 1495 #define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12) 1496 #define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0) 1497 1498 #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) 1499 #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) 1500 #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */ 1501 1502 #define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps) 1503 1504 #define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc) 1505 1506 #define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12) 1507 1508 #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm) 1509 #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc) 1510 1511 #define HAS_MSLICES(dev_priv) \ 1512 (INTEL_INFO(dev_priv)->has_mslices) 1513 1514 /* 1515 * Set this flag, when platform requires 64K GTT page sizes or larger for 1516 * device local memory access. Also this flag implies that we require or 1517 * at least support the compact PT layout for the ppGTT when using the 64K 1518 * GTT pages. 1519 */ 1520 #define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages) 1521 1522 #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) 1523 1524 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i)) 1525 #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) 1526 1527 #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) 1528 1529 #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) 1530 1531 #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs) 1532 1533 #define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \ 1534 INTEL_INFO(dev_priv)->has_pxp) && \ 1535 VDBOX_MASK(to_gt(dev_priv))) 1536 1537 #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) 1538 1539 #define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10)) 1540 1541 /* DPF == dynamic parity feature */ 1542 #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf) 1543 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \ 1544 2 : HAS_L3_DPF(dev_priv)) 1545 1546 #define GT_FREQUENCY_MULTIPLIER 50 1547 #define GEN9_FREQ_SCALER 3 1548 1549 #define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask)) 1550 1551 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0) 1552 1553 #define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) 1554 1555 #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) 1556 1557 /* Only valid when HAS_DISPLAY() is true */ 1558 #define INTEL_DISPLAY_ENABLED(dev_priv) \ 1559 (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display) 1560 1561 static inline bool run_as_guest(void) 1562 { 1563 return !hypervisor_is_type(X86_HYPER_NATIVE); 1564 } 1565 1566 #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \ 1567 IS_ALDERLAKE_S(dev_priv)) 1568 1569 static inline bool intel_vtd_active(struct drm_i915_private *i915) 1570 { 1571 if (device_iommu_mapped(i915->drm.dev)) 1572 return true; 1573 1574 /* Running as a guest, we assume the host is enforcing VT'd */ 1575 return run_as_guest(); 1576 } 1577 1578 void 1579 i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p); 1580 1581 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) 1582 { 1583 return DISPLAY_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv); 1584 } 1585 1586 static inline bool 1587 intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) 1588 { 1589 return IS_BROXTON(i915) && intel_vtd_active(i915); 1590 } 1591 1592 static inline bool 1593 intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) 1594 { 1595 return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915); 1596 } 1597 1598 /* i915_gem.c */ 1599 void i915_gem_init_early(struct drm_i915_private *dev_priv); 1600 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); 1601 1602 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) 1603 { 1604 /* 1605 * A single pass should suffice to release all the freed objects (along 1606 * most call paths) , but be a little more paranoid in that freeing 1607 * the objects does take a little amount of time, during which the rcu 1608 * callbacks could have added new objects into the freed list, and 1609 * armed the work again. 1610 */ 1611 while (atomic_read(&i915->mm.free_count)) { 1612 flush_delayed_work(&i915->mm.free_work); 1613 flush_delayed_work(&i915->bdev.wq); 1614 rcu_barrier(); 1615 } 1616 } 1617 1618 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) 1619 { 1620 /* 1621 * Similar to objects above (see i915_gem_drain_freed-objects), in 1622 * general we have workers that are armed by RCU and then rearm 1623 * themselves in their callbacks. To be paranoid, we need to 1624 * drain the workqueue a second time after waiting for the RCU 1625 * grace period so that we catch work queued via RCU from the first 1626 * pass. As neither drain_workqueue() nor flush_workqueue() report 1627 * a result, we make an assumption that we only don't require more 1628 * than 3 passes to catch all _recursive_ RCU delayed work. 1629 * 1630 */ 1631 int pass = 3; 1632 do { 1633 flush_workqueue(i915->wq); 1634 rcu_barrier(); 1635 i915_gem_drain_freed_objects(i915); 1636 } while (--pass); 1637 drain_workqueue(i915->wq); 1638 } 1639 1640 struct i915_vma * __must_check 1641 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, 1642 struct i915_gem_ww_ctx *ww, 1643 const struct i915_ggtt_view *view, 1644 u64 size, u64 alignment, u64 flags); 1645 1646 struct i915_vma * __must_check 1647 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 1648 const struct i915_ggtt_view *view, 1649 u64 size, u64 alignment, u64 flags); 1650 1651 int i915_gem_object_unbind(struct drm_i915_gem_object *obj, 1652 unsigned long flags); 1653 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) 1654 #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) 1655 #define I915_GEM_OBJECT_UNBIND_TEST BIT(2) 1656 #define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3) 1657 1658 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); 1659 1660 int i915_gem_dumb_create(struct drm_file *file_priv, 1661 struct drm_device *dev, 1662 struct drm_mode_create_dumb *args); 1663 1664 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 1665 1666 static inline u32 i915_reset_count(struct i915_gpu_error *error) 1667 { 1668 return atomic_read(&error->reset_count); 1669 } 1670 1671 static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, 1672 const struct intel_engine_cs *engine) 1673 { 1674 return atomic_read(&error->reset_engine_count[engine->uabi_class]); 1675 } 1676 1677 int __must_check i915_gem_init(struct drm_i915_private *dev_priv); 1678 void i915_gem_driver_register(struct drm_i915_private *i915); 1679 void i915_gem_driver_unregister(struct drm_i915_private *i915); 1680 void i915_gem_driver_remove(struct drm_i915_private *dev_priv); 1681 void i915_gem_driver_release(struct drm_i915_private *dev_priv); 1682 void i915_gem_suspend(struct drm_i915_private *dev_priv); 1683 void i915_gem_suspend_late(struct drm_i915_private *dev_priv); 1684 void i915_gem_resume(struct drm_i915_private *dev_priv); 1685 1686 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); 1687 1688 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 1689 enum i915_cache_level cache_level); 1690 1691 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 1692 struct dma_buf *dma_buf); 1693 1694 struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags); 1695 1696 static inline struct i915_address_space * 1697 i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) 1698 { 1699 struct i915_address_space *vm; 1700 1701 xa_lock(&file_priv->vm_xa); 1702 vm = xa_load(&file_priv->vm_xa, id); 1703 if (vm) 1704 kref_get(&vm->ref); 1705 xa_unlock(&file_priv->vm_xa); 1706 1707 return vm; 1708 } 1709 1710 /* i915_gem_internal.c */ 1711 struct drm_i915_gem_object * 1712 i915_gem_object_create_internal(struct drm_i915_private *dev_priv, 1713 phys_addr_t size); 1714 struct drm_i915_gem_object * 1715 __i915_gem_object_create_internal(struct drm_i915_private *dev_priv, 1716 const struct drm_i915_gem_object_ops *ops, 1717 phys_addr_t size); 1718 1719 /* i915_gem_tiling.c */ 1720 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 1721 { 1722 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1723 1724 return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 1725 i915_gem_object_is_tiled(obj); 1726 } 1727 1728 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 1729 1730 /* intel_device_info.c */ 1731 static inline struct intel_device_info * 1732 mkwrite_device_info(struct drm_i915_private *dev_priv) 1733 { 1734 return (struct intel_device_info *)INTEL_INFO(dev_priv); 1735 } 1736 1737 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) 1738 { 1739 if (GRAPHICS_VER(i915) >= 11) 1740 return ICL_HWS_CSB_WRITE_INDEX; 1741 else 1742 return I915_HWS_CSB_WRITE_INDEX; 1743 } 1744 1745 static inline enum i915_map_type 1746 i915_coherent_map_type(struct drm_i915_private *i915, 1747 struct drm_i915_gem_object *obj, bool always_coherent) 1748 { 1749 if (i915_gem_object_is_lmem(obj)) 1750 return I915_MAP_WC; 1751 if (HAS_LLC(i915) || always_coherent) 1752 return I915_MAP_WB; 1753 else 1754 return I915_MAP_WC; 1755 } 1756 1757 #endif 1758