/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <asm/hypervisor.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
#include <linux/xarray.h>

#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <drm/ttm/ttm_device.h>

#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "display/intel_bios.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dmc.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
#include "gem/i915_gem_lmem.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
#include "intel_pm_types.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_perf_types.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h" 108 109 110 /* General customization: 111 */ 112 113 #define DRIVER_NAME "i915" 114 #define DRIVER_DESC "Intel Graphics" 115 #define DRIVER_DATE "20201103" 116 #define DRIVER_TIMESTAMP 1604406085 117 118 struct drm_i915_gem_object; 119 120 /* Threshold == 5 for long IRQs, 50 for short */ 121 #define HPD_STORM_DEFAULT_THRESHOLD 50 122 123 struct i915_hotplug { 124 struct delayed_work hotplug_work; 125 126 const u32 *hpd, *pch_hpd; 127 128 struct { 129 unsigned long last_jiffies; 130 int count; 131 enum { 132 HPD_ENABLED = 0, 133 HPD_DISABLED = 1, 134 HPD_MARK_DISABLED = 2 135 } state; 136 } stats[HPD_NUM_PINS]; 137 u32 event_bits; 138 u32 retry_bits; 139 struct delayed_work reenable_work; 140 141 u32 long_port_mask; 142 u32 short_port_mask; 143 struct work_struct dig_port_work; 144 145 struct work_struct poll_init_work; 146 bool poll_enabled; 147 148 unsigned int hpd_storm_threshold; 149 /* Whether or not to count short HPD IRQs in HPD storms */ 150 u8 hpd_short_storm_enabled; 151 152 /* 153 * if we get a HPD irq from DP and a HPD irq from non-DP 154 * the non-DP HPD could block the workqueue on a mode config 155 * mutex getting, that userspace may have taken. However 156 * userspace is waiting on the DP workqueue to run which is 157 * blocked behind the non-DP one. 158 */ 159 struct workqueue_struct *dp_wq; 160 }; 161 162 #define I915_GEM_GPU_DOMAINS \ 163 (I915_GEM_DOMAIN_RENDER | \ 164 I915_GEM_DOMAIN_SAMPLER | \ 165 I915_GEM_DOMAIN_COMMAND | \ 166 I915_GEM_DOMAIN_INSTRUCTION | \ 167 I915_GEM_DOMAIN_VERTEX) 168 169 struct drm_i915_private; 170 171 struct drm_i915_file_private { 172 struct drm_i915_private *dev_priv; 173 174 union { 175 struct drm_file *file; 176 struct rcu_head rcu; 177 }; 178 179 /** @proto_context_lock: Guards all struct i915_gem_proto_context 180 * operations 181 * 182 * This not only guards @proto_context_xa, but is always held 183 * whenever we manipulate any struct i915_gem_proto_context, 184 * including finalizing it on first actual use of the GEM context. 185 * 186 * See i915_gem_proto_context. 187 */ 188 struct mutex proto_context_lock; 189 190 /** @proto_context_xa: xarray of struct i915_gem_proto_context 191 * 192 * Historically, the context uAPI allowed for two methods of 193 * setting context parameters: SET_CONTEXT_PARAM and 194 * CONTEXT_CREATE_EXT_SETPARAM. The former is allowed to be called 195 * at any time while the later happens as part of 196 * GEM_CONTEXT_CREATE. Everything settable via one was settable 197 * via the other. While some params are fairly simple and setting 198 * them on a live context is harmless such as the context priority, 199 * others are far trickier such as the VM or the set of engines. 200 * In order to swap out the VM, for instance, we have to delay 201 * until all current in-flight work is complete, swap in the new 202 * VM, and then continue. This leads to a plethora of potential 203 * race conditions we'd really rather avoid. 204 * 205 * We have since disallowed setting these more complex parameters 206 * on active contexts. This works by delaying the creation of the 207 * actual context until after the client is done configuring it 208 * with SET_CONTEXT_PARAM. From the perspective of the client, it 209 * has the same u32 context ID the whole time. From the 210 * perspective of i915, however, it's a struct i915_gem_proto_context 211 * right up until the point where we attempt to do something which 212 * the proto-context can't handle. Then the struct i915_gem_context 213 * gets created. 
	 *
	 * This is accomplished via a little xarray dance. When
	 * GEM_CONTEXT_CREATE is called, we create a struct
	 * i915_gem_proto_context, reserve a slot in @context_xa but leave
	 * it NULL, and place the proto-context in the corresponding slot
	 * in @proto_context_xa. Then, in i915_gem_context_lookup(), we
	 * first check @context_xa. If it's there, we return the struct
	 * i915_gem_context and we're done. If it's not, we look in
	 * @proto_context_xa and, if we find it there, we create the actual
	 * context and kill the proto-context.
	 *
	 * In order for this dance to work properly, everything which ever
	 * touches a struct i915_gem_proto_context is guarded by
	 * @proto_context_lock, including context creation. Yes, this
	 * means context creation now takes a giant global lock but it
	 * can't really be helped and that should never be on any driver's
	 * fast-path anyway. (An illustrative sketch of this lookup/finalize
	 * flow follows this struct.)
	 */
	struct xarray proto_context_xa;

	/** @context_xa: xarray of fully created i915_gem_context
	 *
	 * Write access to this xarray is guarded by @proto_context_lock.
	 * Otherwise, writers may race with finalize_create_context_locked().
	 *
	 * See @proto_context_xa.
	 */
	struct xarray context_xa;
	struct xarray vm_xa;

	unsigned int bsd_engine;

	/*
	 * Every context ban increments the per-client ban score. Hangs in
	 * short succession also increment the ban score. If the ban
	 * threshold is reached, the client is considered banned and
	 * submitting more work will fail. This is a stop-gap measure to
	 * limit badly behaving clients' access to the GPU. Note that
	 * unbannable contexts never increment the client ban score.
	 */
#define I915_CLIENT_SCORE_HANG_FAST	1
#define I915_CLIENT_FAST_HANG_JIFFIES	(60 * HZ)
#define I915_CLIENT_SCORE_CONTEXT_BAN	3
#define I915_CLIENT_SCORE_BANNED	9
	/** ban_score: Accumulated score of all ctx bans and fast hangs. */
	atomic_t ban_score;
	unsigned long hang_timestamp;
};
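
/*
 * Illustrative sketch (not built): a minimal userspace analog of the
 * proto-context "xarray dance" described above, using plain tables and a
 * pthread mutex in place of the driver's xarrays and proto_context_lock.
 * All names below (toy_*) are hypothetical and exist only to show the
 * lookup-then-finalize flow; they are not driver API.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

struct toy_proto { int priority; };	/* stand-in for i915_gem_proto_context */
struct toy_ctx   { int priority; };	/* stand-in for i915_gem_context */

#define TOY_MAX_CTX 16

static pthread_mutex_t toy_proto_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_proto *toy_proto_table[TOY_MAX_CTX];	/* ~ proto_context_xa */
static struct toy_ctx   *toy_ctx_table[TOY_MAX_CTX];	/* ~ context_xa */

/* GEM_CONTEXT_CREATE analog: reserve an id and park a proto-context there. */
static int toy_context_create(void)
{
	int id;

	pthread_mutex_lock(&toy_proto_lock);
	for (id = 0; id < TOY_MAX_CTX; id++) {
		if (!toy_proto_table[id] && !toy_ctx_table[id]) {
			toy_proto_table[id] = calloc(1, sizeof(struct toy_proto));
			break;
		}
	}
	pthread_mutex_unlock(&toy_proto_lock);

	return id < TOY_MAX_CTX ? id : -1;
}

/* Lookup analog: fast path hits the finalized table, else finalize under the lock. */
static struct toy_ctx *toy_context_lookup(int id)
{
	struct toy_ctx *ctx = toy_ctx_table[id];

	if (ctx)				/* already finalized */
		return ctx;

	pthread_mutex_lock(&toy_proto_lock);
	ctx = toy_ctx_table[id];		/* re-check under the lock */
	if (!ctx && toy_proto_table[id]) {
		ctx = calloc(1, sizeof(*ctx));
		ctx->priority = toy_proto_table[id]->priority;
		free(toy_proto_table[id]);	/* kill the proto-context */
		toy_proto_table[id] = NULL;
		toy_ctx_table[id] = ctx;
	}
	pthread_mutex_unlock(&toy_proto_lock);

	return ctx;
}
#endif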

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_cdclk_config;
struct intel_cdclk_state;
struct intel_cdclk_vals;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

/* functions used internally in intel_pm.c */
struct drm_i915_clock_gating_funcs {
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
};

/* functions used for watermark calcs for display. */
struct drm_i915_wm_disp_funcs {
	/* update_wm is for legacy wm management */
	void (*update_wm)(struct drm_i915_private *dev_priv);
	int (*compute_pipe_wm)(struct intel_atomic_state *state,
			       struct intel_crtc *crtc);
	int (*compute_intermediate_wm)(struct intel_atomic_state *state,
				       struct intel_crtc *crtc);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc *crtc);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc *crtc);
	int (*compute_global_watermarks)(struct intel_atomic_state *state);
};

struct intel_color_funcs {
	int (*color_check)(struct intel_crtc_state *crtc_state);
	/*
	 * Program double buffered color management registers during
	 * vblank evasion. The registers should then latch during the
	 * next vblank start, alongside any other double buffered registers
	 * involved with the same commit.
	 */
	void (*color_commit)(const struct intel_crtc_state *crtc_state);
	/*
	 * Load LUTs (and other single buffered color management
	 * registers). Will (hopefully) be called during the vblank
	 * following the latching of any double buffered registers
	 * involved with the same commit.
	 */
	void (*load_luts)(const struct intel_crtc_state *crtc_state);
	void (*read_luts)(struct intel_crtc_state *crtc_state);
};

struct intel_cdclk_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe);
	int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
	int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
	u8 (*calc_voltage_level)(int cdclk);
};

struct intel_hotplug_funcs {
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
};

struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

struct intel_dpll_funcs {
	int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
};

struct drm_i915_display_funcs {
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	void (*crtc_enable)(struct intel_atomic_state *state,
			    struct intel_crtc *crtc);
	void (*crtc_disable)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc);
	void (*commit_modeset_enables)(struct intel_atomic_state *state);
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)

struct intel_fbdev;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state;

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/*
	 * Shortcut for the stolen region. This points to either
	 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
	 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
	 * support stolen.
	 */
	struct intel_memory_region *stolen_region;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/**
	 * List of objects which are purgeable.
	 */
	struct list_head purge_list;

	/**
	 * List of objects which have allocated pages and are shrinkable.
	 */
	struct list_head shrink_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct delayed_work free_work;
	/**
	 * Count of objects pending destruction. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

#ifdef CONFIG_MMU_NOTIFIER
	/**
	 * notifier_lock for mmu notifiers, memory may not be allocated
	 * while holding this lock.
	 */
	rwlock_t notifier_lock;
#endif

	/* shrinker accounting, also useful for userland debugging */
	u64 shrink_memory;
	u32 shrink_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
					 u64 context);

static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
	return i915_fence_context_timeout(i915, U64_MAX);
}

/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8

#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))

/* Amount of PSF GV points, BSpec precisely defines this */
#define I915_NUM_PSF_GV_POINTS 3

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	/* bdb version */
	u16 version;

	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int int_lvds_support:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	enum drm_panel_orientation orientation;

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		int bpp;
		struct edp_power_seq pps;
		bool hobl;
	} edp;

	struct {
		bool enable;
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time_us;
		int tp2_tp3_wakeup_time_us;
		int psr2_tp2_tp3_wakeup_time_us;
	} psr;

	struct {
		u16 pwm_freq_hz;
		u16 brightness_precision_bits;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		u8 controller;		/* brightness controller number */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u16 bl_ports;
		u16 cabc_ports;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
		enum drm_panel_orientation orientation;
	} dsi;

	int crt_ddc_pin;

	struct list_head display_devices;

	struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
	struct sdvo_device_mapping sdvo_mappings[2];
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to GPU activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
};

struct intel_cdclk_config {
	unsigned int cdclk, vco, ref, bypass;
	u8 voltage_level;
};

struct i915_selftest_stash {
	atomic_t counter;
	struct ida mock_region_instances;
};

/* intel_audio.c private */
struct intel_audio_funcs;
struct intel_audio_private {
	/* Display internal audio functions */
	const struct intel_audio_funcs *funcs;

	/* hda/i915 audio component */
	struct i915_audio_component *component;
	bool component_registered;
	/* mutex for audio/video sync */
	struct mutex mutex;
	int power_refcount;
	u32 freq_cntrl;

	/* Used to save the pipe-to-encoder mapping for audio */
	struct intel_encoder *encoder_map[I915_MAX_PIPES];

	/* necessary resource sharing with HDMI LPE audio driver. */
	struct {
		struct platform_device *platdev;
		int irq;
	} lpe;
};

struct drm_i915_private {
	struct drm_device drm;

	/* FIXME: Device release actions should all be moved to drmm_ */
	bool do_release;

	/* i915 device parameters */
	struct i915_params params;

	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
	 */
	struct resource dsm_reserved;

	/*
	 * Stolen memory is segmented in hardware with different portions
	 * off-limits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */

	struct intel_uncore uncore;
	struct intel_uncore_mmio_debug mmio_debug;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_wopcm wopcm;

	struct intel_dmc dmc;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of where the gmbus and gpio blocks are located (either
	 * on PCH or on SoC for platforms without PCH).
	 */
	u32 gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	u32 mipi_mmio_base;

	u32 pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;

	struct rb_root uabi_engines;

	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* Sideband mailbox protection */
	struct mutex sb_lock;
	struct pm_qos_request sb_qos;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc *fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_cdclk_freq;

	unsigned int max_dotclk_freq;
	unsigned int hpll_freq;
	unsigned int fdi_pll_freq;
	unsigned int czclk_freq;

	struct {
		/* The current hardware cdclk configuration */
		struct intel_cdclk_config hw;

		/* cdclk, divider, and ratio table from bspec */
		const struct intel_cdclk_vals *table;

		struct intel_global_obj obj;
	} cdclk;

	struct {
		/* The current hardware dbuf configuration */
		u8 enabled_slices;

		struct intel_global_obj obj;
	} dbuf;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* ordered wq for modesets */
	struct workqueue_struct *modeset_wq;
	/* unbound hipri wq for page flips/plane updates */
	struct workqueue_struct *flip_wq;

	/* pm private clock gating functions */
	const struct drm_i915_clock_gating_funcs *clock_gating_funcs;

	/* pm display functions */
	const struct drm_i915_wm_disp_funcs *wm_disp;

	/* irq display functions */
	const struct intel_hotplug_funcs *hotplug_funcs;

	/* fdi display functions */
	const struct intel_fdi_funcs *fdi_funcs;

	/* display pll funcs */
	const struct intel_dpll_funcs *dpll_funcs;

	/* Display functions */
	const struct drm_i915_display_funcs *display;

	/* Display internal color functions */
	const struct intel_color_funcs *color_funcs;

	/* Display CDCLK functions */
	const struct intel_cdclk_funcs *cdclk_funcs;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	/**
	 * dpll and cdclk state is protected by connection_mutex.
	 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms plls
	 * share registers.
	 */
	struct {
		struct mutex lock;

		int num_shared_dpll;
		struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
		const struct intel_dpll_mgr *mgr;

		struct {
			int nssc;
			int ssc;
		} ref_clks;
	} dpll;

	struct list_head global_obj_list;

	/*
	 * For reading active_pipes holding any crtc lock is
	 * sufficient, for writing must hold all of them.
	 */
	u8 active_pipes;

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/*
	 * HTI (aka HDPORT) state read during initial hw readout. Most
	 * platforms don't have HTI, so this will just stay 0. Those that do
	 * will use this later to figure out which PLLs and PHYs are unavailable
	 * for driver usage.
	 */
	u32 hti_state;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	struct i915_power_domains power_domains;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool power_domains_suspended;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state *vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	u32 sagv_block_time_us;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		u16 pri_latency[5];
		/* sprite */
		u16 spr_latency[5];
		/* cursor */
		u16 cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		u16 skl_latency[8];

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct vlv_wm_values vlv;
			struct g4x_wm_values g4x;
		};

		u8 max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * crtc_state->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;
	} wm;

	struct dram_info {
		bool wm_lv_0_adjust_needed;
		u8 num_channels;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4,
			INTEL_DRAM_DDR5,
			INTEL_DRAM_LPDDR5,
		} type;
		u8 num_qgv_points;
		u8 num_psf_gv_points;
	} dram_info;

	struct intel_bw_info {
		/* for each QGV point */
		unsigned int deratedbw[I915_NUM_QGV_POINTS];
		/* for each PSF GV point */
		unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
		u8 num_qgv_points;
		u8 num_psf_gv_points;
		u8 num_planes;
	} max_bw[6];

	struct intel_global_obj bw_obj;

	struct intel_runtime_pm runtime_pm;

	struct i915_perf perf;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct intel_gt gt0;

	struct {
		struct i915_gem_contexts {
			spinlock_t lock; /* locks list */
			struct list_head list;
		} contexts;

		/*
		 * We replace the local file with a global mapping as the
		 * backing storage for the mmap is on the device and not
		 * on the struct file, and we do not want to prolong the
		 * lifetime of the local fd. To minimise the number of
		 * anonymous inodes we create, we use a global singleton to
		 * share the global mapping.
		 */
		struct file *mmap_singleton;
	} gem;

	u8 framestart_delay;

	/* Window2 specifies time required to program DSB (Window2) in number of scan lines */
	u8 window2_delay;

	u8 pch_ssc_use;

	/* For i915gm/i945gm vblank irq workaround */
	u8 vblank_enabled;

	bool irq_enabled;

	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];

	bool ipc_enabled;

	struct intel_audio_private audio;

	struct i915_pmu pmu;

	struct i915_hdcp_comp_master *hdcp_master;
	bool hdcp_comp_added;

	/* Mutex to protect the above hdcp component related values. */
	struct mutex hdcp_comp_mutex;

	/* The TTM device structure. */
	struct ttm_device bdev;

	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	return &i915->gt0;
}

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)

#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define for_each_uabi_class_engine(engine__, class__, i915__) \
	for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
	     (engine__) && (engine__)->uabi_class == (class__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define I915_GTT_OFFSET_NONE ((u32)-1)

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
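
/*
 * Illustrative sketch (not built): a standalone demonstration of the
 * frontbuffer bit layout defined above, with 8 bits per pipe as in
 * INTEL_FRONTBUFFER_BITS_PER_PIPE.  The fb_* helpers are hypothetical
 * userspace re-implementations, not driver API.
 */
#if 0
#include <assert.h>
#include <stdio.h>

#define FB_BITS_PER_PIPE 8

/* Bit for plane 'plane_id' on pipe 'pipe' (mirrors INTEL_FRONTBUFFER). */
static unsigned int fb_bit(unsigned int pipe, unsigned int plane_id)
{
	return 1u << (plane_id + FB_BITS_PER_PIPE * pipe);
}

/* Mask covering every plane of one pipe (mirrors INTEL_FRONTBUFFER_ALL_MASK). */
static unsigned int fb_all_mask(unsigned int pipe)
{
	return ((1u << FB_BITS_PER_PIPE) - 1) << (FB_BITS_PER_PIPE * pipe);
}

int main(void)
{
	/* Plane 2 on pipe 1 lands in the second group of 8 bits. */
	assert(fb_bit(1, 2) == 1u << 10);
	/* Pipe 1 owns bits 8..15. */
	assert(fb_all_mask(1) == 0x0000ff00u);
	/* The overlay uses the top bit of each per-pipe group. */
	assert(fb_bit(1, FB_BITS_PER_PIPE - 1) == 1u << 15);

	printf("pipe 1 mask: 0x%08x\n", fb_all_mask(1));
	return 0;
}
#endif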

#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)

#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)

#define IP_VER(ver, rel)		((ver) << 8 | (rel))

#define GRAPHICS_VER(i915)		(INTEL_INFO(i915)->graphics.ver)
#define GRAPHICS_VER_FULL(i915)		IP_VER(INTEL_INFO(i915)->graphics.ver, \
					       INTEL_INFO(i915)->graphics.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))

#define MEDIA_VER(i915)			(INTEL_INFO(i915)->media.ver)
#define MEDIA_VER_FULL(i915)		IP_VER(INTEL_INFO(i915)->media.arch, \
					       INTEL_INFO(i915)->media.rel)
#define IS_MEDIA_VER(i915, from, until) \
	(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))

#define DISPLAY_VER(i915)	(INTEL_INFO(i915)->display.ver)
#define IS_DISPLAY_VER(i915, from, until) \
	(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))

#define INTEL_REVID(dev_priv)	(to_pci_dev((dev_priv)->drm.dev)->revision)

#define HAS_DSB(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dsb)

#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)

#define IS_DISPLAY_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
	 INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))

#define IS_GRAPHICS_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
	 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))

#define IS_MEDIA_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
	 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
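
/*
 * Illustrative sketch (not built): standalone version of the arithmetic in
 * __platform_mask_index()/__platform_mask_bit() and the MSB trick used by
 * IS_SUBPLATFORM() above.  The value of SUB_BITS (3) is an assumption made
 * for this example only; the expressions otherwise mirror this header.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define SUB_BITS	3			/* stand-in for INTEL_SUBPLATFORM_BITS */
#define PBITS		(32 - SUB_BITS)		/* platform bits left per u32 word */

static unsigned int mask_index(unsigned int p) { return p / PBITS; }
static unsigned int mask_bit(unsigned int p)   { return p % PBITS + SUB_BITS; }

/* Mirrors IS_SUBPLATFORM(): are both the platform bit and subplatform bit set? */
static int is_subplatform(const uint32_t *platform_mask,
			  unsigned int p, unsigned int s)
{
	const unsigned int pi = mask_index(p);
	const unsigned int pb = mask_bit(p);
	const unsigned int msb = 31;
	const uint32_t mask = platform_mask[pi];

	return !!(((mask << (msb - pb)) & (mask << (msb - s))) & (1u << msb));
}

int main(void)
{
	uint32_t platform_mask[2] = { 0, 0 };
	unsigned int p = 20, s = 1;	/* arbitrary platform and subplatform ids */

	/* Device init would set the platform bit and its subplatform bit. */
	platform_mask[mask_index(p)] |= 1u << mask_bit(p);
	platform_mask[mask_index(p)] |= 1u << s;

	assert(mask_index(p) == 0 && mask_bit(p) == 23);
	assert(is_subplatform(platform_mask, p, s));
	assert(!is_subplatform(platform_mask, p, 2));	/* other subplatform unset */
	return 0;
}
#endif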

#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
#define IS_DGFX(dev_priv)	(INTEL_INFO(dev_priv)->is_dgfx)

#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_JSL_EHL(dev_priv)	(IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
				 IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
#define IS_TIGERLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv)	IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
#define IS_XEHPSDV(dev_priv)	IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
#define IS_DG2(dev_priv)	IS_PLATFORM(dev_priv, INTEL_DG2)
#define IS_DG2_G10(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_ADLS_RPLS(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)

#define IS_CML_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CML_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CML_GT2(dev_priv)	(IS_COMETLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)

#define IS_ICL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define IS_TGL_U(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT)

#define IS_TGL_Y(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)

#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))

#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))

#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
	(IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
	(IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
	(IS_TIGERLAKE(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
	((IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
	(IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_RKL_DISPLAY_STEP(p, since, until) \
	(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_DG1_GRAPHICS_STEP(p, since, until) \
	(IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_DG1_DISPLAY_STEP(p, since, until) \
	(IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
	(IS_ALDERLAKE_S(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
	(IS_ALDERLAKE_S(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
	(IS_ALDERLAKE_P(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
	(IS_ALDERLAKE_P(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
	(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))

/*
 * DG2 hardware steppings are a bit unusual. The hardware design was forked
 * to create two variants (G10 and G11) which have distinct workaround sets.
 * The G11 fork of the DG2 design resets the GT stepping back to "A0" for its
 * first iteration, even though it's more similar to a G10 B0 stepping in terms
 * of functionality and workarounds. However the display stepping does not
 * reset in the same manner --- a specific stepping like "B0" has a consistent
 * meaning regardless of whether it belongs to a G10 or G11 DG2.
 *
 * TLDR: All GT workarounds and stepping-specific logic must be applied in
 * relation to a specific subplatform (G10 or G11), whereas display workarounds
 * and stepping-specific logic will be applied with a general DG2-wide stepping
 * number.
 */
#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \
	(IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_DG2_DISPLAY_STEP(__i915, since, until) \
	(IS_DG2(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_LP(dev_priv)		(INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv)	(GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv)	(GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))

#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)

#define ENGINE_INSTANCES_MASK(gt, first, count) ({		\
	unsigned int first__ = (first);				\
	unsigned int count__ = (count);				\
	((gt)->info.engine_mask &				\
	 GENMASK(first__ + count__ - 1, first__)) >> first__;	\
})
#define VDBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
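
/*
 * Illustrative sketch (not built): standalone version of the
 * ENGINE_INSTANCES_MASK() arithmetic above, which slices a per-class
 * instance mask out of the global engine mask and normalises it to
 * instance numbering.  The engine ids and counts below are made-up
 * example values, not the driver's enums.
 */
#if 0
#include <assert.h>

/* GENMASK(h, l) for 32-bit values, as used by the macro above. */
#define EXAMPLE_GENMASK(h, l) \
	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

static unsigned int instances_mask(unsigned int engine_mask,
				   unsigned int first, unsigned int count)
{
	return (engine_mask & EXAMPLE_GENMASK(first + count - 1, first)) >> first;
}

int main(void)
{
	/*
	 * Suppose global engine bits 4..7 belong to the video class and
	 * only instances 0 and 2 are present: bits 4 and 6 set.
	 */
	unsigned int engine_mask = (1u << 4) | (1u << 6) | 1u; /* plus one other engine */

	/* The per-class mask comes back shifted down to instance numbering. */
	assert(instances_mask(engine_mask, 4, 4) == 0x5);
	return 0;
}
#endif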

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
 * All later gens can run the final buffer from the ppgtt.
 */
#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)

#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
#define HAS_WT(dev_priv)	HAS_EDRAM(dev_priv)

#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})

#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))

#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)	\
	(IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)	\
	(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))

#define HAS_GMBUS_IRQ(dev_priv) (GRAPHICS_VER(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (GRAPHICS_VER(dev_priv) >= 11 || \
					IS_GEMINILAKE(dev_priv) || \
					IS_KABYLAKE(dev_priv))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
					 !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)

#define HAS_FW_BLC(dev_priv)	(GRAPHICS_VER(dev_priv) > 2)
#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_fbc)
#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && GRAPHICS_VER(dev_priv) >= 7)

#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)
#define HAS_DP20(dev_priv)	(IS_DG2(dev_priv))

#define HAS_CDCLK_CRAWL(dev_priv)	 (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
	(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv)	 (GRAPHICS_VER(dev_priv) >= 12)
#define HAS_TRANSCODER(dev_priv, trans)	 ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)

#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

#define HAS_RPS(dev_priv)	(INTEL_INFO(dev_priv)->has_rps)

#define HAS_DMC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dmc)

#define HAS_MSO(i915)		(GRAPHICS_VER(i915) >= 12)

#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)

#define HAS_MSLICES(dev_priv) \
	(INTEL_INFO(dev_priv)->has_mslices)

/*
 * Set this flag when the platform requires 64K GTT page sizes or larger for
 * device local memory access. Also this flag implies that we require or
 * at least support the compact PT layout for the ppGTT when using the 64K
 * GTT pages.
 */
#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)

#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)

#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)

#define HAS_GT_UC(dev_priv)	(INTEL_INFO(dev_priv)->has_gt_uc)

#define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)	(INTEL_INFO(dev_priv)->has_global_mocs)

#define HAS_PXP(dev_priv)  ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
			     INTEL_INFO(dev_priv)->has_pxp) && \
			     VDBOX_MASK(to_gt(dev_priv)))

#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

#define HAS_LSPCON(dev_priv) (IS_GRAPHICS_VER(dev_priv, 9, 10))

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))

#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)

#define HAS_VRR(i915)	(GRAPHICS_VER(i915) >= 11)

#define HAS_ASYNC_FLIPS(i915)		(DISPLAY_VER(i915) >= 5)

/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
	(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)

static inline bool run_as_guest(void)
{
	return !hypervisor_is_type(X86_HYPER_NATIVE);
}

#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
					      IS_ALDERLAKE_S(dev_priv))

static inline bool intel_vtd_active(struct drm_i915_private *i915)
{
	if (device_iommu_mapped(i915->drm.dev))
		return true;

	/* Running as a guest, we assume the host is enforcing VT'd */
	return run_as_guest();
}

void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p);

static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv);
}

static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
	return IS_BROXTON(i915) && intel_vtd_active(i915);
}

static inline bool
intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
	return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}

/* i915_getparam.c */
int i915_getparam_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * A single pass should suffice to release all the freed objects (along
	 * most call paths), but be a little more paranoid in that freeing
	 * the objects does take a little amount of time, during which the rcu
	 * callbacks could have added new objects into the freed list, and
	 * armed the work again.
	 */
	while (atomic_read(&i915->mm.free_count)) {
		flush_delayed_work(&i915->mm.free_work);
		flush_delayed_work(&i915->bdev.wq);
		rcu_barrier();
	}
}

static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	/*
	 * Similar to objects above (see i915_gem_drain_freed_objects()), in
	 * general we have workers that are armed by RCU and then rearm
	 * themselves in their callbacks. To be paranoid, we need to
	 * drain the workqueue a second time after waiting for the RCU
	 * grace period so that we catch work queued via RCU from the first
	 * pass. As neither drain_workqueue() nor flush_workqueue() report
	 * a result, we assume that no more than 3 passes are required to
	 * catch all _recursive_ RCU delayed work.
	 */
	int pass = 3;
	do {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	} while (--pass);
	drain_workqueue(i915->wq);
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_ggtt_view *view,
			    u64 size, u64 alignment, u64 flags);

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size, u64 alignment, u64 flags);

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);

int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_count);
}

static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					   const struct intel_engine_cs *engine)
{
	return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}

int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);

static inline struct i915_address_space *
i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_address_space *vm;

	xa_lock(&file_priv->vm_xa);
	vm = xa_load(&file_priv->vm_xa, id);
	if (vm)
		kref_get(&vm->ref);
	xa_unlock(&file_priv->vm_xa);

	return vm;
}

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned long color,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					 struct drm_mm_node *node,
					 unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);

/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				phys_addr_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				  const struct drm_i915_gem_object_ops *ops,
				  phys_addr_t size);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
			unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
			     unsigned int tiling, unsigned int stride);

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct i915_vma *batch,
			    unsigned long batch_offset,
			    unsigned long batch_length,
			    struct i915_vma *shadow,
			    bool trampoline);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)INTEL_INFO(dev_priv);
}

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) >= 11)
		return ICL_HWS_CSB_WRITE_INDEX;
	else
		return I915_HWS_CSB_WRITE_INDEX;
}

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915,
		       struct drm_i915_gem_object *obj, bool always_coherent)
{
	if (i915_gem_object_is_lmem(obj))
		return I915_MAP_WC;
	if (HAS_LLC(i915) || always_coherent)
		return I915_MAP_WB;
	else
		return I915_MAP_WC;
}

#endif