/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>

#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "intel_bios.h"
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_uc.h"

#include "i915_gem.h"
#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#include "i915_vma.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20171222"
#define DRIVER_TIMESTAMP	1513971710

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
 * to enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({	\
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) \
		if (!WARN(i915_modparams.verbose_state_checks, format)) \
			DRM_ERROR(format); \
	unlikely(__ret_warn_on); \
})

#define I915_STATE_WARN_ON(x) \
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

typedef struct {
	uint32_t val;
} uint_fixed_16_16_t;

#define FP_16_16_MAX ({ \
	uint_fixed_16_16_t fp; \
	fp.val = UINT_MAX; \
	fp; \
})

static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
	if (val.val == 0)
		return true;
	return false;
}

static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
	uint_fixed_16_16_t fp;

	WARN_ON(val > U16_MAX);

	fp.val = val << 16;
	return fp;
}

static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
	return DIV_ROUND_UP(fp.val, 1 << 16);
}

static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
{
	return fp.val >> 16;
}

static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
					     uint_fixed_16_16_t min2)
{
	uint_fixed_16_16_t min;

	min.val = min(min1.val, min2.val);
	return min;
}

static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
					     uint_fixed_16_16_t max2)
{
	uint_fixed_16_16_t max;

	max.val = max(max1.val, max2.val);
	return max;
}

static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
{
	uint_fixed_16_16_t fp;
	WARN_ON(val > U32_MAX);
	fp.val = (uint32_t) val;
	return fp;
}

static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
					    uint_fixed_16_16_t d)
{
	return DIV_ROUND_UP(val.val, d.val);
}

static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
	WARN_ON(intermediate_val > U32_MAX);
	return (uint32_t) intermediate_val;
}

static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
					     uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val.val * mul.val;
	intermediate_val = intermediate_val >> 16;
	return clamp_u64_to_fixed16(intermediate_val);
}

static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
	return clamp_u64_to_fixed16(interm_val);
}

static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
	WARN_ON(interm_val > U32_MAX);
	return (uint32_t) interm_val;
}

static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
						 uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	return clamp_u64_to_fixed16(intermediate_val);
}

static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
					     uint_fixed_16_16_t add2)
{
	uint64_t interm_sum;

	interm_sum = (uint64_t) add1.val + add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}

static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
						 uint32_t add2)
{
	uint64_t interm_sum;
	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);

	interm_sum = (uint64_t) add1.val + interm_add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}
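/*
 * Usage sketch for the 16.16 fixed-point helpers above (illustrative only,
 * not part of the driver; the example_ name is hypothetical): scale a byte
 * count by a fractional num/den ratio and round the result up to a whole
 * integer, the kind of arithmetic the watermark code performs with these
 * helpers.
 */
static inline uint32_t example_scale_and_round_up(uint32_t bytes,
						  uint32_t num, uint32_t den)
{
	/* num/den as a 16.16 fixed-point ratio, rounded up */
	uint_fixed_16_16_t ratio = div_fixed16(num, den);

	/* bytes * ratio, rounded up to the next integer */
	return mul_round_up_u32_fixed16(bytes, ratio);
}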
enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,		/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

#define HPD_STORM_DEFAULT_THRESHOLD 5

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD could block the workqueue while acquiring a mode
	 * config mutex that userspace may have taken. However, userspace
	 * is waiting on the DP workqueue to run, which is blocked behind
	 * the non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		atomic_t boosts;
	} rps_client;

	unsigned int bsd_engine;

	/*
	 * A client can have a maximum of 3 contexts banned before it is
	 * denied the creation of new contexts. As one context ban needs 4
	 * consecutive hangs, and more if there is progress in between,
	 * this is a last resort stop gap measure to limit badly behaving
	 * clients' access to the gpu.
	 */
#define I915_MAX_CLIENT_CONTEXT_BANS 3
	atomic_t context_bans;
};
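/*
 * Illustrative only (the example_ helper is hypothetical, not part of the
 * driver): the check implied by I915_MAX_CLIENT_CONTEXT_BANS above, applied
 * before a client is allowed to create another context.
 */
static inline bool example_client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->context_bans) >
	       I915_MAX_CLIENT_CONTEXT_BANS;
}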
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct intel_cdclk_state;

struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_state *cdclk_state);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_state *cdclk_state);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state.
*/ 404 bool (*get_pipe_config)(struct intel_crtc *, 405 struct intel_crtc_state *); 406 void (*get_initial_plane_config)(struct intel_crtc *, 407 struct intel_initial_plane_config *); 408 int (*crtc_compute_clock)(struct intel_crtc *crtc, 409 struct intel_crtc_state *crtc_state); 410 void (*crtc_enable)(struct intel_crtc_state *pipe_config, 411 struct drm_atomic_state *old_state); 412 void (*crtc_disable)(struct intel_crtc_state *old_crtc_state, 413 struct drm_atomic_state *old_state); 414 void (*update_crtcs)(struct drm_atomic_state *state); 415 void (*audio_codec_enable)(struct intel_encoder *encoder, 416 const struct intel_crtc_state *crtc_state, 417 const struct drm_connector_state *conn_state); 418 void (*audio_codec_disable)(struct intel_encoder *encoder, 419 const struct intel_crtc_state *old_crtc_state, 420 const struct drm_connector_state *old_conn_state); 421 void (*fdi_link_train)(struct intel_crtc *crtc, 422 const struct intel_crtc_state *crtc_state); 423 void (*init_clock_gating)(struct drm_i915_private *dev_priv); 424 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); 425 /* clock updates for mode set */ 426 /* cursor updates */ 427 /* render clock increase/decrease */ 428 /* display clock increase/decrease */ 429 /* pll clock increase/decrease */ 430 431 void (*load_csc_matrix)(struct drm_crtc_state *crtc_state); 432 void (*load_luts)(struct drm_crtc_state *crtc_state); 433 }; 434 435 #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) 436 #define CSR_VERSION_MAJOR(version) ((version) >> 16) 437 #define CSR_VERSION_MINOR(version) ((version) & 0xffff) 438 439 struct intel_csr { 440 struct work_struct work; 441 const char *fw_path; 442 uint32_t *dmc_payload; 443 uint32_t dmc_fw_size; 444 uint32_t version; 445 uint32_t mmio_count; 446 i915_reg_t mmioaddr[8]; 447 uint32_t mmiodata[8]; 448 uint32_t dc_state; 449 uint32_t allowed_dc_mask; 450 }; 451 452 struct intel_display_error_state; 453 454 struct i915_gpu_state { 455 struct kref ref; 456 struct timeval time; 457 struct timeval boottime; 458 struct timeval uptime; 459 460 struct drm_i915_private *i915; 461 462 char error_msg[128]; 463 bool simulated; 464 bool awake; 465 bool wakelock; 466 bool suspended; 467 int iommu; 468 u32 reset_count; 469 u32 suspend_count; 470 struct intel_device_info device_info; 471 struct i915_params params; 472 473 struct i915_error_uc { 474 struct intel_uc_fw guc_fw; 475 struct intel_uc_fw huc_fw; 476 struct drm_i915_error_object *guc_log; 477 } uc; 478 479 /* Generic register state */ 480 u32 eir; 481 u32 pgtbl_er; 482 u32 ier; 483 u32 gtier[4], ngtier; 484 u32 ccid; 485 u32 derrmr; 486 u32 forcewake; 487 u32 error; /* gen6+ */ 488 u32 err_int; /* gen7 */ 489 u32 fault_data0; /* gen8, gen9 */ 490 u32 fault_data1; /* gen8, gen9 */ 491 u32 done_reg; 492 u32 gac_eco; 493 u32 gam_ecochk; 494 u32 gab_ctl; 495 u32 gfx_mode; 496 497 u32 nfence; 498 u64 fence[I915_MAX_NUM_FENCES]; 499 struct intel_overlay_error_state *overlay; 500 struct intel_display_error_state *display; 501 502 struct drm_i915_error_engine { 503 int engine_id; 504 /* Software tracked state */ 505 bool idle; 506 bool waiting; 507 int num_waiters; 508 unsigned long hangcheck_timestamp; 509 bool hangcheck_stalled; 510 enum intel_engine_hangcheck_action hangcheck_action; 511 struct i915_address_space *vm; 512 int num_requests; 513 u32 reset_count; 514 515 /* position of active request inside the ring */ 516 u32 rq_head, rq_post, rq_tail; 517 518 /* our own tracking of ring head and tail */ 519 u32 cpu_ring_head; 520 u32 
cpu_ring_tail; 521 522 u32 last_seqno; 523 524 /* Register state */ 525 u32 start; 526 u32 tail; 527 u32 head; 528 u32 ctl; 529 u32 mode; 530 u32 hws; 531 u32 ipeir; 532 u32 ipehr; 533 u32 bbstate; 534 u32 instpm; 535 u32 instps; 536 u32 seqno; 537 u64 bbaddr; 538 u64 acthd; 539 u32 fault_reg; 540 u64 faddr; 541 u32 rc_psmi; /* sleep state */ 542 u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; 543 struct intel_instdone instdone; 544 545 struct drm_i915_error_context { 546 char comm[TASK_COMM_LEN]; 547 pid_t pid; 548 u32 handle; 549 u32 hw_id; 550 int priority; 551 int ban_score; 552 int active; 553 int guilty; 554 } context; 555 556 struct drm_i915_error_object { 557 u64 gtt_offset; 558 u64 gtt_size; 559 int page_count; 560 int unused; 561 u32 *pages[0]; 562 } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; 563 564 struct drm_i915_error_object **user_bo; 565 long user_bo_count; 566 567 struct drm_i915_error_object *wa_ctx; 568 struct drm_i915_error_object *default_state; 569 570 struct drm_i915_error_request { 571 long jiffies; 572 pid_t pid; 573 u32 context; 574 int priority; 575 int ban_score; 576 u32 seqno; 577 u32 head; 578 u32 tail; 579 } *requests, execlist[EXECLIST_MAX_PORTS]; 580 unsigned int num_ports; 581 582 struct drm_i915_error_waiter { 583 char comm[TASK_COMM_LEN]; 584 pid_t pid; 585 u32 seqno; 586 } *waiters; 587 588 struct { 589 u32 gfx_mode; 590 union { 591 u64 pdp[4]; 592 u32 pp_dir_base; 593 }; 594 } vm_info; 595 } engine[I915_NUM_ENGINES]; 596 597 struct drm_i915_error_buffer { 598 u32 size; 599 u32 name; 600 u32 rseqno[I915_NUM_ENGINES], wseqno; 601 u64 gtt_offset; 602 u32 read_domains; 603 u32 write_domain; 604 s32 fence_reg:I915_MAX_NUM_FENCE_BITS; 605 u32 tiling:2; 606 u32 dirty:1; 607 u32 purgeable:1; 608 u32 userptr:1; 609 s32 engine:4; 610 u32 cache_level:3; 611 } *active_bo[I915_NUM_ENGINES], *pinned_bo; 612 u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; 613 struct i915_address_space *active_vm[I915_NUM_ENGINES]; 614 }; 615 616 enum i915_cache_level { 617 I915_CACHE_NONE = 0, 618 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ 619 I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc 620 caches, eg sampler/render caches, and the 621 large Last-Level-Cache. LLC is coherent with 622 the CPU, but L3 is only visible to the GPU. */ 623 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ 624 }; 625 626 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ 627 628 enum fb_op_origin { 629 ORIGIN_GTT, 630 ORIGIN_CPU, 631 ORIGIN_CS, 632 ORIGIN_FLIP, 633 ORIGIN_DIRTYFB, 634 }; 635 636 struct intel_fbc { 637 /* This is always the inner lock when overlapping with struct_mutex and 638 * it's the outer lock when overlapping with stolen_lock. */ 639 struct mutex lock; 640 unsigned threshold; 641 unsigned int possible_framebuffer_bits; 642 unsigned int busy_bits; 643 unsigned int visible_pipes_mask; 644 struct intel_crtc *crtc; 645 646 struct drm_mm_node compressed_fb; 647 struct drm_mm_node *compressed_llb; 648 649 bool false_color; 650 651 bool enabled; 652 bool active; 653 654 bool underrun_detected; 655 struct work_struct underrun_work; 656 657 /* 658 * Due to the atomic rules we can't access some structures without the 659 * appropriate locking, so we cache information here in order to avoid 660 * these problems. 
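	 *
	 * Lock ordering note: per the comments on fbc->lock and stolen_lock,
	 * when these locks overlap they nest as struct_mutex, then fbc->lock,
	 * then stolen_lock.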
661 */ 662 struct intel_fbc_state_cache { 663 struct i915_vma *vma; 664 665 struct { 666 unsigned int mode_flags; 667 uint32_t hsw_bdw_pixel_rate; 668 } crtc; 669 670 struct { 671 unsigned int rotation; 672 int src_w; 673 int src_h; 674 bool visible; 675 /* 676 * Display surface base address adjustement for 677 * pageflips. Note that on gen4+ this only adjusts up 678 * to a tile, offsets within a tile are handled in 679 * the hw itself (with the TILEOFF register). 680 */ 681 int adjusted_x; 682 int adjusted_y; 683 684 int y; 685 } plane; 686 687 struct { 688 const struct drm_format_info *format; 689 unsigned int stride; 690 } fb; 691 } state_cache; 692 693 /* 694 * This structure contains everything that's relevant to program the 695 * hardware registers. When we want to figure out if we need to disable 696 * and re-enable FBC for a new configuration we just check if there's 697 * something different in the struct. The genx_fbc_activate functions 698 * are supposed to read from it in order to program the registers. 699 */ 700 struct intel_fbc_reg_params { 701 struct i915_vma *vma; 702 703 struct { 704 enum pipe pipe; 705 enum i9xx_plane_id i9xx_plane; 706 unsigned int fence_y_offset; 707 } crtc; 708 709 struct { 710 const struct drm_format_info *format; 711 unsigned int stride; 712 } fb; 713 714 int cfb_size; 715 unsigned int gen9_wa_cfb_stride; 716 } params; 717 718 struct intel_fbc_work { 719 bool scheduled; 720 u32 scheduled_vblank; 721 struct work_struct work; 722 } work; 723 724 const char *no_fbc_reason; 725 }; 726 727 /* 728 * HIGH_RR is the highest eDP panel refresh rate read from EDID 729 * LOW_RR is the lowest eDP panel refresh rate found from EDID 730 * parsing for same resolution. 731 */ 732 enum drrs_refresh_rate_type { 733 DRRS_HIGH_RR, 734 DRRS_LOW_RR, 735 DRRS_MAX_RR, /* RR count */ 736 }; 737 738 enum drrs_support_type { 739 DRRS_NOT_SUPPORTED = 0, 740 STATIC_DRRS_SUPPORT = 1, 741 SEAMLESS_DRRS_SUPPORT = 2 742 }; 743 744 struct intel_dp; 745 struct i915_drrs { 746 struct mutex mutex; 747 struct delayed_work work; 748 struct intel_dp *dp; 749 unsigned busy_frontbuffer_bits; 750 enum drrs_refresh_rate_type refresh_rate_type; 751 enum drrs_support_type type; 752 }; 753 754 struct i915_psr { 755 struct mutex lock; 756 bool sink_support; 757 bool source_ok; 758 struct intel_dp *enabled; 759 bool active; 760 struct delayed_work work; 761 unsigned busy_frontbuffer_bits; 762 bool psr2_support; 763 bool aux_frame_sync; 764 bool link_standby; 765 bool y_cord_support; 766 bool colorimetry_support; 767 bool alpm; 768 769 void (*enable_source)(struct intel_dp *, 770 const struct intel_crtc_state *); 771 void (*disable_source)(struct intel_dp *, 772 const struct intel_crtc_state *); 773 void (*enable_sink)(struct intel_dp *); 774 void (*activate)(struct intel_dp *); 775 void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *); 776 }; 777 778 enum intel_pch { 779 PCH_NONE = 0, /* No PCH present */ 780 PCH_IBX, /* Ibexpeak PCH */ 781 PCH_CPT, /* Cougarpoint/Pantherpoint PCH */ 782 PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */ 783 PCH_SPT, /* Sunrisepoint PCH */ 784 PCH_KBP, /* Kaby Lake PCH */ 785 PCH_CNP, /* Cannon Lake PCH */ 786 PCH_NOP, 787 }; 788 789 enum intel_sbi_destination { 790 SBI_ICLK, 791 SBI_MPHY, 792 }; 793 794 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 795 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 796 #define QUIRK_BACKLIGHT_PRESENT (1<<3) 797 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 798 #define QUIRK_INCREASE_T12_DELAY (1<<6) 799 800 struct intel_fbdev; 801 struct 
intel_fbc_work; 802 803 struct intel_gmbus { 804 struct i2c_adapter adapter; 805 #define GMBUS_FORCE_BIT_RETRY (1U << 31) 806 u32 force_bit; 807 u32 reg0; 808 i915_reg_t gpio_reg; 809 struct i2c_algo_bit_data bit_algo; 810 struct drm_i915_private *dev_priv; 811 }; 812 813 struct i915_suspend_saved_registers { 814 u32 saveDSPARB; 815 u32 saveFBC_CONTROL; 816 u32 saveCACHE_MODE_0; 817 u32 saveMI_ARB_STATE; 818 u32 saveSWF0[16]; 819 u32 saveSWF1[16]; 820 u32 saveSWF3[3]; 821 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 822 u32 savePCH_PORT_HOTPLUG; 823 u16 saveGCDGMBUS; 824 }; 825 826 struct vlv_s0ix_state { 827 /* GAM */ 828 u32 wr_watermark; 829 u32 gfx_prio_ctrl; 830 u32 arb_mode; 831 u32 gfx_pend_tlb0; 832 u32 gfx_pend_tlb1; 833 u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM]; 834 u32 media_max_req_count; 835 u32 gfx_max_req_count; 836 u32 render_hwsp; 837 u32 ecochk; 838 u32 bsd_hwsp; 839 u32 blt_hwsp; 840 u32 tlb_rd_addr; 841 842 /* MBC */ 843 u32 g3dctl; 844 u32 gsckgctl; 845 u32 mbctl; 846 847 /* GCP */ 848 u32 ucgctl1; 849 u32 ucgctl3; 850 u32 rcgctl1; 851 u32 rcgctl2; 852 u32 rstctl; 853 u32 misccpctl; 854 855 /* GPM */ 856 u32 gfxpause; 857 u32 rpdeuhwtc; 858 u32 rpdeuc; 859 u32 ecobus; 860 u32 pwrdwnupctl; 861 u32 rp_down_timeout; 862 u32 rp_deucsw; 863 u32 rcubmabdtmr; 864 u32 rcedata; 865 u32 spare2gh; 866 867 /* Display 1 CZ domain */ 868 u32 gt_imr; 869 u32 gt_ier; 870 u32 pm_imr; 871 u32 pm_ier; 872 u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM]; 873 874 /* GT SA CZ domain */ 875 u32 tilectl; 876 u32 gt_fifoctl; 877 u32 gtlc_wake_ctrl; 878 u32 gtlc_survive; 879 u32 pmwgicz; 880 881 /* Display 2 CZ domain */ 882 u32 gu_ctl0; 883 u32 gu_ctl1; 884 u32 pcbr; 885 u32 clock_gate_dis2; 886 }; 887 888 struct intel_rps_ei { 889 ktime_t ktime; 890 u32 render_c0; 891 u32 media_c0; 892 }; 893 894 struct intel_rps { 895 /* 896 * work, interrupts_enabled and pm_iir are protected by 897 * dev_priv->irq_lock 898 */ 899 struct work_struct work; 900 bool interrupts_enabled; 901 u32 pm_iir; 902 903 /* PM interrupt bits that should never be masked */ 904 u32 pm_intrmsk_mbz; 905 906 /* Frequencies are stored in potentially platform dependent multiples. 907 * In other words, *_freq needs to be multiplied by X to be interesting. 908 * Soft limits are those which are used for the dynamic reclocking done 909 * by the driver (raise frequencies under heavy loads, and lower for 910 * lighter loads). Hard limits are those imposed by the hardware. 911 * 912 * A distinction is made for overclocking, which is never enabled by 913 * default, and is considered to be above the hard limit if it's 914 * possible at all. 915 */ 916 u8 cur_freq; /* Current frequency (cached, may not == HW) */ 917 u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ 918 u8 max_freq_softlimit; /* Max frequency permitted by the driver */ 919 u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ 920 u8 min_freq; /* AKA RPn. Minimum frequency */ 921 u8 boost_freq; /* Frequency to request when wait boosting */ 922 u8 idle_freq; /* Frequency to request when we are idle */ 923 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ 924 u8 rp1_freq; /* "less than" RP0 power/freqency */ 925 u8 rp0_freq; /* Non-overclocked max frequency. 
*/ 926 u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ 927 928 u8 up_threshold; /* Current %busy required to uplock */ 929 u8 down_threshold; /* Current %busy required to downclock */ 930 931 int last_adj; 932 enum { LOW_POWER, BETWEEN, HIGH_POWER } power; 933 934 bool enabled; 935 atomic_t num_waiters; 936 atomic_t boosts; 937 938 /* manual wa residency calculations */ 939 struct intel_rps_ei ei; 940 }; 941 942 struct intel_rc6 { 943 bool enabled; 944 }; 945 946 struct intel_llc_pstate { 947 bool enabled; 948 }; 949 950 struct intel_gen6_power_mgmt { 951 struct intel_rps rps; 952 struct intel_rc6 rc6; 953 struct intel_llc_pstate llc_pstate; 954 }; 955 956 /* defined intel_pm.c */ 957 extern spinlock_t mchdev_lock; 958 959 struct intel_ilk_power_mgmt { 960 u8 cur_delay; 961 u8 min_delay; 962 u8 max_delay; 963 u8 fmax; 964 u8 fstart; 965 966 u64 last_count1; 967 unsigned long last_time1; 968 unsigned long chipset_power; 969 u64 last_count2; 970 u64 last_time2; 971 unsigned long gfx_power; 972 u8 corr; 973 974 int c_m; 975 int r_t; 976 }; 977 978 struct drm_i915_private; 979 struct i915_power_well; 980 981 struct i915_power_well_ops { 982 /* 983 * Synchronize the well's hw state to match the current sw state, for 984 * example enable/disable it based on the current refcount. Called 985 * during driver init and resume time, possibly after first calling 986 * the enable/disable handlers. 987 */ 988 void (*sync_hw)(struct drm_i915_private *dev_priv, 989 struct i915_power_well *power_well); 990 /* 991 * Enable the well and resources that depend on it (for example 992 * interrupts located on the well). Called after the 0->1 refcount 993 * transition. 994 */ 995 void (*enable)(struct drm_i915_private *dev_priv, 996 struct i915_power_well *power_well); 997 /* 998 * Disable the well and resources that depend on it. Called after 999 * the 1->0 refcount transition. 1000 */ 1001 void (*disable)(struct drm_i915_private *dev_priv, 1002 struct i915_power_well *power_well); 1003 /* Returns the hw enabled state. */ 1004 bool (*is_enabled)(struct drm_i915_private *dev_priv, 1005 struct i915_power_well *power_well); 1006 }; 1007 1008 /* Power well structure for haswell */ 1009 struct i915_power_well { 1010 const char *name; 1011 bool always_on; 1012 /* power well enable/disable usage count */ 1013 int count; 1014 /* cached hw enabled state */ 1015 bool hw_enabled; 1016 u64 domains; 1017 /* unique identifier for this power well */ 1018 enum i915_power_well_id id; 1019 /* 1020 * Arbitraty data associated with this power well. Platform and power 1021 * well specific. 1022 */ 1023 union { 1024 struct { 1025 enum dpio_phy phy; 1026 } bxt; 1027 struct { 1028 /* Mask of pipes whose IRQ logic is backed by the pw */ 1029 u8 irq_pipe_mask; 1030 /* The pw is backing the VGA functionality */ 1031 bool has_vga:1; 1032 bool has_fuses:1; 1033 } hsw; 1034 }; 1035 const struct i915_power_well_ops *ops; 1036 }; 1037 1038 struct i915_power_domains { 1039 /* 1040 * Power wells needed for initialization at driver init and suspend 1041 * time are on. They are kept on until after the first modeset. 
1042 */ 1043 bool init_power_on; 1044 bool initializing; 1045 int power_well_count; 1046 1047 struct mutex lock; 1048 int domain_use_count[POWER_DOMAIN_NUM]; 1049 struct i915_power_well *power_wells; 1050 }; 1051 1052 #define MAX_L3_SLICES 2 1053 struct intel_l3_parity { 1054 u32 *remap_info[MAX_L3_SLICES]; 1055 struct work_struct error_work; 1056 int which_slice; 1057 }; 1058 1059 struct i915_gem_mm { 1060 /** Memory allocator for GTT stolen memory */ 1061 struct drm_mm stolen; 1062 /** Protects the usage of the GTT stolen memory allocator. This is 1063 * always the inner lock when overlapping with struct_mutex. */ 1064 struct mutex stolen_lock; 1065 1066 /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */ 1067 spinlock_t obj_lock; 1068 1069 /** List of all objects in gtt_space. Used to restore gtt 1070 * mappings on resume */ 1071 struct list_head bound_list; 1072 /** 1073 * List of objects which are not bound to the GTT (thus 1074 * are idle and not used by the GPU). These objects may or may 1075 * not actually have any pages attached. 1076 */ 1077 struct list_head unbound_list; 1078 1079 /** List of all objects in gtt_space, currently mmaped by userspace. 1080 * All objects within this list must also be on bound_list. 1081 */ 1082 struct list_head userfault_list; 1083 1084 /** 1085 * List of objects which are pending destruction. 1086 */ 1087 struct llist_head free_list; 1088 struct work_struct free_work; 1089 spinlock_t free_lock; 1090 1091 /** 1092 * Small stash of WC pages 1093 */ 1094 struct pagevec wc_stash; 1095 1096 /** 1097 * tmpfs instance used for shmem backed objects 1098 */ 1099 struct vfsmount *gemfs; 1100 1101 /** PPGTT used for aliasing the PPGTT with the GTT */ 1102 struct i915_hw_ppgtt *aliasing_ppgtt; 1103 1104 struct notifier_block oom_notifier; 1105 struct notifier_block vmap_notifier; 1106 struct shrinker shrinker; 1107 1108 /** LRU list of objects with fence regs on them. */ 1109 struct list_head fence_list; 1110 1111 /** 1112 * Workqueue to fault in userptr pages, flushed by the execbuf 1113 * when required but otherwise left to userspace to try again 1114 * on EAGAIN. 1115 */ 1116 struct workqueue_struct *userptr_wq; 1117 1118 u64 unordered_timeline; 1119 1120 /* the indicator for dispatch video commands on two BSD rings */ 1121 atomic_t bsd_engine_dispatch_index; 1122 1123 /** Bit 6 swizzling required for X tiling */ 1124 uint32_t bit_6_swizzle_x; 1125 /** Bit 6 swizzling required for Y tiling */ 1126 uint32_t bit_6_swizzle_y; 1127 1128 /* accounting, useful for userland debugging */ 1129 spinlock_t object_stat_lock; 1130 u64 object_memory; 1131 u32 object_count; 1132 }; 1133 1134 struct drm_i915_error_state_buf { 1135 struct drm_i915_private *i915; 1136 unsigned bytes; 1137 unsigned size; 1138 int err; 1139 u8 *buf; 1140 loff_t start; 1141 loff_t pos; 1142 }; 1143 1144 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */ 1145 1146 #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */ 1147 #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */ 1148 1149 #define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */ 1150 #define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */ 1151 1152 struct i915_gpu_error { 1153 /* For hangcheck timer */ 1154 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 1155 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 1156 1157 struct delayed_work hangcheck_work; 1158 1159 /* For reset and error_state handling. 
 */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	atomic_t pending_fb_pin;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_BACKOFF bit is set
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and that it needs to restart the entire ioctl (since
	 * most likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;

	/**
	 * flags: Control various stages of the GPU reset
	 *
	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
	 * other users acquiring the struct_mutex. To do this we set the
	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
	 * and then check for that bit before acquiring the struct_mutex (in
	 * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
	 * secondary role in preventing two concurrent global reset attempts.
	 *
	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
	 * but it may be held by some long running waiter (that we cannot
	 * interrupt without causing trouble). Once we are ready to do the GPU
	 * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
	 * they already hold the struct_mutex and want to participate they can
	 * inspect the bit and do the reset directly, otherwise the worker
	 * waits for the struct_mutex.
	 *
	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
	 * acquire the struct_mutex to reset an engine, we need an explicit
	 * flag to prevent two concurrent reset attempts in the same engine.
	 * As the number of engines continues to grow, allocate the flags from
	 * the most significant bits.
	 *
	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
	 * i915_gem_request_alloc(), this bit is checked and the sequence
	 * aborted (with -EIO reported to userspace) if set.
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_HANDOFF	1
#define I915_RESET_MODESET	2
#define I915_WEDGED		(BITS_PER_LONG - 1)
#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)

	/** Number of times an engine has been reset */
	u32 reset_engine_count[I915_NUM_ENGINES];

	/**
	 * Waitqueue to signal when a hang is detected. Used for waiters
	 * to release the struct_mutex for the reset to proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed.
Used by clients 1240 * that wait for dev_priv->mm.wedged to settle. 1241 */ 1242 wait_queue_head_t reset_queue; 1243 1244 /* For missed irq/seqno simulation. */ 1245 unsigned long test_irq_rings; 1246 }; 1247 1248 enum modeset_restore { 1249 MODESET_ON_LID_OPEN, 1250 MODESET_DONE, 1251 MODESET_SUSPENDED, 1252 }; 1253 1254 #define DP_AUX_A 0x40 1255 #define DP_AUX_B 0x10 1256 #define DP_AUX_C 0x20 1257 #define DP_AUX_D 0x30 1258 1259 #define DDC_PIN_B 0x05 1260 #define DDC_PIN_C 0x04 1261 #define DDC_PIN_D 0x06 1262 1263 struct ddi_vbt_port_info { 1264 int max_tmds_clock; 1265 1266 /* 1267 * This is an index in the HDMI/DVI DDI buffer translation table. 1268 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't 1269 * populate this field. 1270 */ 1271 #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff 1272 uint8_t hdmi_level_shift; 1273 1274 uint8_t supports_dvi:1; 1275 uint8_t supports_hdmi:1; 1276 uint8_t supports_dp:1; 1277 uint8_t supports_edp:1; 1278 1279 uint8_t alternate_aux_channel; 1280 uint8_t alternate_ddc_pin; 1281 1282 uint8_t dp_boost_level; 1283 uint8_t hdmi_boost_level; 1284 }; 1285 1286 enum psr_lines_to_wait { 1287 PSR_0_LINES_TO_WAIT = 0, 1288 PSR_1_LINE_TO_WAIT, 1289 PSR_4_LINES_TO_WAIT, 1290 PSR_8_LINES_TO_WAIT 1291 }; 1292 1293 struct intel_vbt_data { 1294 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1295 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1296 1297 /* Feature bits */ 1298 unsigned int int_tv_support:1; 1299 unsigned int lvds_dither:1; 1300 unsigned int lvds_vbt:1; 1301 unsigned int int_crt_support:1; 1302 unsigned int lvds_use_ssc:1; 1303 unsigned int display_clock_mode:1; 1304 unsigned int fdi_rx_polarity_inverted:1; 1305 unsigned int panel_type:4; 1306 int lvds_ssc_freq; 1307 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 1308 1309 enum drrs_support_type drrs_type; 1310 1311 struct { 1312 int rate; 1313 int lanes; 1314 int preemphasis; 1315 int vswing; 1316 bool low_vswing; 1317 bool initialized; 1318 bool support; 1319 int bpp; 1320 struct edp_power_seq pps; 1321 } edp; 1322 1323 struct { 1324 bool full_link; 1325 bool require_aux_wakeup; 1326 int idle_frames; 1327 enum psr_lines_to_wait lines_to_wait; 1328 int tp1_wakeup_time; 1329 int tp2_tp3_wakeup_time; 1330 } psr; 1331 1332 struct { 1333 u16 pwm_freq_hz; 1334 bool present; 1335 bool active_low_pwm; 1336 u8 min_brightness; /* min_brightness/255 of max */ 1337 u8 controller; /* brightness controller number */ 1338 enum intel_backlight_type type; 1339 } backlight; 1340 1341 /* MIPI DSI */ 1342 struct { 1343 u16 panel_id; 1344 struct mipi_config *config; 1345 struct mipi_pps_data *pps; 1346 u16 bl_ports; 1347 u16 cabc_ports; 1348 u8 seq_version; 1349 u32 size; 1350 u8 *data; 1351 const u8 *sequence[MIPI_SEQ_MAX]; 1352 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ 1353 } dsi; 1354 1355 int crt_ddc_pin; 1356 1357 int child_dev_num; 1358 struct child_device_config *child_dev; 1359 1360 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; 1361 struct sdvo_device_mapping sdvo_mappings[2]; 1362 }; 1363 1364 enum intel_ddb_partitioning { 1365 INTEL_DDB_PART_1_2, 1366 INTEL_DDB_PART_5_6, /* IVB+ */ 1367 }; 1368 1369 struct intel_wm_level { 1370 bool enable; 1371 uint32_t pri_val; 1372 uint32_t spr_val; 1373 uint32_t cur_val; 1374 uint32_t fbc_val; 1375 }; 1376 1377 struct ilk_wm_values { 1378 uint32_t wm_pipe[3]; 1379 uint32_t wm_lp[3]; 1380 uint32_t wm_lp_spr[3]; 1381 uint32_t wm_linetime[3]; 1382 bool enable_fbc_wm; 1383 enum intel_ddb_partitioning 
partitioning; 1384 }; 1385 1386 struct g4x_pipe_wm { 1387 uint16_t plane[I915_MAX_PLANES]; 1388 uint16_t fbc; 1389 }; 1390 1391 struct g4x_sr_wm { 1392 uint16_t plane; 1393 uint16_t cursor; 1394 uint16_t fbc; 1395 }; 1396 1397 struct vlv_wm_ddl_values { 1398 uint8_t plane[I915_MAX_PLANES]; 1399 }; 1400 1401 struct vlv_wm_values { 1402 struct g4x_pipe_wm pipe[3]; 1403 struct g4x_sr_wm sr; 1404 struct vlv_wm_ddl_values ddl[3]; 1405 uint8_t level; 1406 bool cxsr; 1407 }; 1408 1409 struct g4x_wm_values { 1410 struct g4x_pipe_wm pipe[2]; 1411 struct g4x_sr_wm sr; 1412 struct g4x_sr_wm hpll; 1413 bool cxsr; 1414 bool hpll_en; 1415 bool fbc_en; 1416 }; 1417 1418 struct skl_ddb_entry { 1419 uint16_t start, end; /* in number of blocks, 'end' is exclusive */ 1420 }; 1421 1422 static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) 1423 { 1424 return entry->end - entry->start; 1425 } 1426 1427 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, 1428 const struct skl_ddb_entry *e2) 1429 { 1430 if (e1->start == e2->start && e1->end == e2->end) 1431 return true; 1432 1433 return false; 1434 } 1435 1436 struct skl_ddb_allocation { 1437 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */ 1438 struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; 1439 }; 1440 1441 struct skl_wm_values { 1442 unsigned dirty_pipes; 1443 struct skl_ddb_allocation ddb; 1444 }; 1445 1446 struct skl_wm_level { 1447 bool plane_en; 1448 uint16_t plane_res_b; 1449 uint8_t plane_res_l; 1450 }; 1451 1452 /* Stores plane specific WM parameters */ 1453 struct skl_wm_params { 1454 bool x_tiled, y_tiled; 1455 bool rc_surface; 1456 uint32_t width; 1457 uint8_t cpp; 1458 uint32_t plane_pixel_rate; 1459 uint32_t y_min_scanlines; 1460 uint32_t plane_bytes_per_line; 1461 uint_fixed_16_16_t plane_blocks_per_line; 1462 uint_fixed_16_16_t y_tile_minimum; 1463 uint32_t linetime_us; 1464 }; 1465 1466 /* 1467 * This struct helps tracking the state needed for runtime PM, which puts the 1468 * device in PCI D3 state. Notice that when this happens, nothing on the 1469 * graphics device works, even register access, so we don't get interrupts nor 1470 * anything else. 1471 * 1472 * Every piece of our code that needs to actually touch the hardware needs to 1473 * either call intel_runtime_pm_get or call intel_display_power_get with the 1474 * appropriate power domain. 1475 * 1476 * Our driver uses the autosuspend delay feature, which means we'll only really 1477 * suspend if we stay with zero refcount for a certain amount of time. The 1478 * default value is currently very conservative (see intel_runtime_pm_enable), but 1479 * it can be changed with the standard runtime PM files from sysfs. 1480 * 1481 * The irqs_disabled variable becomes true exactly after we disable the IRQs and 1482 * goes back to false exactly before we reenable the IRQs. We use this variable 1483 * to check if someone is trying to enable/disable IRQs while they're supposed 1484 * to be disabled. This shouldn't happen and we'll print some error messages in 1485 * case it happens. 1486 * 1487 * For more, read the Documentation/power/runtime_pm.txt. 
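/*
 * A minimal sketch of the pattern the comment above describes (illustrative
 * only, not a definitive implementation): any path that touches registers
 * brackets the access with a wakeref, roughly
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... register access, the device is guaranteed awake here ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * or, for display hardware, takes the specific power domain it needs with
 * intel_display_power_get()/intel_display_power_put() instead.
 */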
1488 */ 1489 struct i915_runtime_pm { 1490 atomic_t wakeref_count; 1491 bool suspended; 1492 bool irqs_enabled; 1493 }; 1494 1495 enum intel_pipe_crc_source { 1496 INTEL_PIPE_CRC_SOURCE_NONE, 1497 INTEL_PIPE_CRC_SOURCE_PLANE1, 1498 INTEL_PIPE_CRC_SOURCE_PLANE2, 1499 INTEL_PIPE_CRC_SOURCE_PF, 1500 INTEL_PIPE_CRC_SOURCE_PIPE, 1501 /* TV/DP on pre-gen5/vlv can't use the pipe source. */ 1502 INTEL_PIPE_CRC_SOURCE_TV, 1503 INTEL_PIPE_CRC_SOURCE_DP_B, 1504 INTEL_PIPE_CRC_SOURCE_DP_C, 1505 INTEL_PIPE_CRC_SOURCE_DP_D, 1506 INTEL_PIPE_CRC_SOURCE_AUTO, 1507 INTEL_PIPE_CRC_SOURCE_MAX, 1508 }; 1509 1510 struct intel_pipe_crc_entry { 1511 uint32_t frame; 1512 uint32_t crc[5]; 1513 }; 1514 1515 #define INTEL_PIPE_CRC_ENTRIES_NR 128 1516 struct intel_pipe_crc { 1517 spinlock_t lock; 1518 bool opened; /* exclusive access to the result file */ 1519 struct intel_pipe_crc_entry *entries; 1520 enum intel_pipe_crc_source source; 1521 int head, tail; 1522 wait_queue_head_t wq; 1523 int skipped; 1524 }; 1525 1526 struct i915_frontbuffer_tracking { 1527 spinlock_t lock; 1528 1529 /* 1530 * Tracking bits for delayed frontbuffer flushing du to gpu activity or 1531 * scheduled flips. 1532 */ 1533 unsigned busy_bits; 1534 unsigned flip_bits; 1535 }; 1536 1537 struct i915_wa_reg { 1538 i915_reg_t addr; 1539 u32 value; 1540 /* bitmask representing WA bits */ 1541 u32 mask; 1542 }; 1543 1544 #define I915_MAX_WA_REGS 16 1545 1546 struct i915_workarounds { 1547 struct i915_wa_reg reg[I915_MAX_WA_REGS]; 1548 u32 count; 1549 u32 hw_whitelist_count[I915_NUM_ENGINES]; 1550 }; 1551 1552 struct i915_virtual_gpu { 1553 bool active; 1554 u32 caps; 1555 }; 1556 1557 /* used in computing the new watermarks state */ 1558 struct intel_wm_config { 1559 unsigned int num_pipes_active; 1560 bool sprites_enabled; 1561 bool sprites_scaled; 1562 }; 1563 1564 struct i915_oa_format { 1565 u32 format; 1566 int size; 1567 }; 1568 1569 struct i915_oa_reg { 1570 i915_reg_t addr; 1571 u32 value; 1572 }; 1573 1574 struct i915_oa_config { 1575 char uuid[UUID_STRING_LEN + 1]; 1576 int id; 1577 1578 const struct i915_oa_reg *mux_regs; 1579 u32 mux_regs_len; 1580 const struct i915_oa_reg *b_counter_regs; 1581 u32 b_counter_regs_len; 1582 const struct i915_oa_reg *flex_regs; 1583 u32 flex_regs_len; 1584 1585 struct attribute_group sysfs_metric; 1586 struct attribute *attrs[2]; 1587 struct device_attribute sysfs_metric_id; 1588 1589 atomic_t ref_count; 1590 }; 1591 1592 struct i915_perf_stream; 1593 1594 /** 1595 * struct i915_perf_stream_ops - the OPs to support a specific stream type 1596 */ 1597 struct i915_perf_stream_ops { 1598 /** 1599 * @enable: Enables the collection of HW samples, either in response to 1600 * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened 1601 * without `I915_PERF_FLAG_DISABLED`. 1602 */ 1603 void (*enable)(struct i915_perf_stream *stream); 1604 1605 /** 1606 * @disable: Disables the collection of HW samples, either in response 1607 * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying 1608 * the stream. 1609 */ 1610 void (*disable)(struct i915_perf_stream *stream); 1611 1612 /** 1613 * @poll_wait: Call poll_wait, passing a wait queue that will be woken 1614 * once there is something ready to read() for the stream 1615 */ 1616 void (*poll_wait)(struct i915_perf_stream *stream, 1617 struct file *file, 1618 poll_table *wait); 1619 1620 /** 1621 * @wait_unlocked: For handling a blocking read, wait until there is 1622 * something to ready to read() for the stream. E.g. 
wait on the same 1623 * wait queue that would be passed to poll_wait(). 1624 */ 1625 int (*wait_unlocked)(struct i915_perf_stream *stream); 1626 1627 /** 1628 * @read: Copy buffered metrics as records to userspace 1629 * **buf**: the userspace, destination buffer 1630 * **count**: the number of bytes to copy, requested by userspace 1631 * **offset**: zero at the start of the read, updated as the read 1632 * proceeds, it represents how many bytes have been copied so far and 1633 * the buffer offset for copying the next record. 1634 * 1635 * Copy as many buffered i915 perf samples and records for this stream 1636 * to userspace as will fit in the given buffer. 1637 * 1638 * Only write complete records; returning -%ENOSPC if there isn't room 1639 * for a complete record. 1640 * 1641 * Return any error condition that results in a short read such as 1642 * -%ENOSPC or -%EFAULT, even though these may be squashed before 1643 * returning to userspace. 1644 */ 1645 int (*read)(struct i915_perf_stream *stream, 1646 char __user *buf, 1647 size_t count, 1648 size_t *offset); 1649 1650 /** 1651 * @destroy: Cleanup any stream specific resources. 1652 * 1653 * The stream will always be disabled before this is called. 1654 */ 1655 void (*destroy)(struct i915_perf_stream *stream); 1656 }; 1657 1658 /** 1659 * struct i915_perf_stream - state for a single open stream FD 1660 */ 1661 struct i915_perf_stream { 1662 /** 1663 * @dev_priv: i915 drm device 1664 */ 1665 struct drm_i915_private *dev_priv; 1666 1667 /** 1668 * @link: Links the stream into ``&drm_i915_private->streams`` 1669 */ 1670 struct list_head link; 1671 1672 /** 1673 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*` 1674 * properties given when opening a stream, representing the contents 1675 * of a single sample as read() by userspace. 1676 */ 1677 u32 sample_flags; 1678 1679 /** 1680 * @sample_size: Considering the configured contents of a sample 1681 * combined with the required header size, this is the total size 1682 * of a single sample record. 1683 */ 1684 int sample_size; 1685 1686 /** 1687 * @ctx: %NULL if measuring system-wide across all contexts or a 1688 * specific context that is being monitored. 1689 */ 1690 struct i915_gem_context *ctx; 1691 1692 /** 1693 * @enabled: Whether the stream is currently enabled, considering 1694 * whether the stream was opened in a disabled state and based 1695 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls. 1696 */ 1697 bool enabled; 1698 1699 /** 1700 * @ops: The callbacks providing the implementation of this specific 1701 * type of configured stream. 1702 */ 1703 const struct i915_perf_stream_ops *ops; 1704 1705 /** 1706 * @oa_config: The OA configuration used by the stream. 1707 */ 1708 struct i915_oa_config *oa_config; 1709 }; 1710 1711 /** 1712 * struct i915_oa_ops - Gen specific implementation of an OA unit stream 1713 */ 1714 struct i915_oa_ops { 1715 /** 1716 * @is_valid_b_counter_reg: Validates register's address for 1717 * programming boolean counters for a particular platform. 1718 */ 1719 bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv, 1720 u32 addr); 1721 1722 /** 1723 * @is_valid_mux_reg: Validates register's address for programming mux 1724 * for a particular platform. 1725 */ 1726 bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr); 1727 1728 /** 1729 * @is_valid_flex_reg: Validates register's address for programming 1730 * flex EU filtering for a particular platform. 
1731 */ 1732 bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr); 1733 1734 /** 1735 * @init_oa_buffer: Resets the head and tail pointers of the 1736 * circular buffer for periodic OA reports. 1737 * 1738 * Called when first opening a stream for OA metrics, but also may be 1739 * called in response to an OA buffer overflow or other error 1740 * condition. 1741 * 1742 * Note it may be necessary to clear the full OA buffer here as part of 1743 * maintaining the invariable that new reports must be written to 1744 * zeroed memory for us to be able to reliable detect if an expected 1745 * report has not yet landed in memory. (At least on Haswell the OA 1746 * buffer tail pointer is not synchronized with reports being visible 1747 * to the CPU) 1748 */ 1749 void (*init_oa_buffer)(struct drm_i915_private *dev_priv); 1750 1751 /** 1752 * @enable_metric_set: Selects and applies any MUX configuration to set 1753 * up the Boolean and Custom (B/C) counters that are part of the 1754 * counter reports being sampled. May apply system constraints such as 1755 * disabling EU clock gating as required. 1756 */ 1757 int (*enable_metric_set)(struct drm_i915_private *dev_priv, 1758 const struct i915_oa_config *oa_config); 1759 1760 /** 1761 * @disable_metric_set: Remove system constraints associated with using 1762 * the OA unit. 1763 */ 1764 void (*disable_metric_set)(struct drm_i915_private *dev_priv); 1765 1766 /** 1767 * @oa_enable: Enable periodic sampling 1768 */ 1769 void (*oa_enable)(struct drm_i915_private *dev_priv); 1770 1771 /** 1772 * @oa_disable: Disable periodic sampling 1773 */ 1774 void (*oa_disable)(struct drm_i915_private *dev_priv); 1775 1776 /** 1777 * @read: Copy data from the circular OA buffer into a given userspace 1778 * buffer. 1779 */ 1780 int (*read)(struct i915_perf_stream *stream, 1781 char __user *buf, 1782 size_t count, 1783 size_t *offset); 1784 1785 /** 1786 * @oa_hw_tail_read: read the OA tail pointer register 1787 * 1788 * In particular this enables us to share all the fiddly code for 1789 * handling the OA unit tail pointer race that affects multiple 1790 * generations. 1791 */ 1792 u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv); 1793 }; 1794 1795 struct intel_cdclk_state { 1796 unsigned int cdclk, vco, ref; 1797 u8 voltage_level; 1798 }; 1799 1800 struct drm_i915_private { 1801 struct drm_device drm; 1802 1803 struct kmem_cache *objects; 1804 struct kmem_cache *vmas; 1805 struct kmem_cache *luts; 1806 struct kmem_cache *requests; 1807 struct kmem_cache *dependencies; 1808 struct kmem_cache *priorities; 1809 1810 const struct intel_device_info info; 1811 1812 /** 1813 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and 1814 * end of stolen which we can optionally use to create GEM objects 1815 * backed by stolen memory. Note that stolen_usable_size tells us 1816 * exactly how much of this we are actually allowed to use, given that 1817 * some portion of it is in fact reserved for use by hardware functions. 1818 */ 1819 struct resource dsm; 1820 /** 1821 * Reseved portion of Data Stolen Memory 1822 */ 1823 struct resource dsm_reserved; 1824 1825 /* 1826 * Stolen memory is segmented in hardware with different portions 1827 * offlimits to certain functions. 1828 * 1829 * The drm_mm is initialised to the total accessible range, as found 1830 * from the PCI config. On Broadwell+, this is further restricted to 1831 * avoid the first page! 
The upper end of stolen memory is reserved for 1832 * hardware functions and similarly removed from the accessible range. 1833 */ 1834 resource_size_t stolen_usable_size; /* Total size minus reserved ranges */ 1835 1836 void __iomem *regs; 1837 1838 struct intel_uncore uncore; 1839 1840 struct i915_virtual_gpu vgpu; 1841 1842 struct intel_gvt *gvt; 1843 1844 struct intel_huc huc; 1845 struct intel_guc guc; 1846 1847 struct intel_csr csr; 1848 1849 struct intel_gmbus gmbus[GMBUS_NUM_PINS]; 1850 1851 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 1852 * controller on different i2c buses. */ 1853 struct mutex gmbus_mutex; 1854 1855 /** 1856 * Base address of the gmbus and gpio block. 1857 */ 1858 uint32_t gpio_mmio_base; 1859 1860 /* MMIO base address for MIPI regs */ 1861 uint32_t mipi_mmio_base; 1862 1863 uint32_t psr_mmio_base; 1864 1865 uint32_t pps_mmio_base; 1866 1867 wait_queue_head_t gmbus_wait_queue; 1868 1869 struct pci_dev *bridge_dev; 1870 struct intel_engine_cs *engine[I915_NUM_ENGINES]; 1871 /* Context used internally to idle the GPU and setup initial state */ 1872 struct i915_gem_context *kernel_context; 1873 /* Context only to be used for injecting preemption commands */ 1874 struct i915_gem_context *preempt_context; 1875 struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] 1876 [MAX_ENGINE_INSTANCE + 1]; 1877 1878 struct drm_dma_handle *status_page_dmah; 1879 struct resource mch_res; 1880 1881 /* protects the irq masks */ 1882 spinlock_t irq_lock; 1883 1884 bool display_irqs_enabled; 1885 1886 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 1887 struct pm_qos_request pm_qos; 1888 1889 /* Sideband mailbox protection */ 1890 struct mutex sb_lock; 1891 1892 /** Cached value of IMR to avoid reads in updating the bitfield */ 1893 union { 1894 u32 irq_mask; 1895 u32 de_irq_mask[I915_MAX_PIPES]; 1896 }; 1897 u32 gt_irq_mask; 1898 u32 pm_imr; 1899 u32 pm_ier; 1900 u32 pm_rps_events; 1901 u32 pm_guc_events; 1902 u32 pipestat_irq_mask[I915_MAX_PIPES]; 1903 1904 struct i915_hotplug hotplug; 1905 struct intel_fbc fbc; 1906 struct i915_drrs drrs; 1907 struct intel_opregion opregion; 1908 struct intel_vbt_data vbt; 1909 1910 bool preserve_bios_swizzle; 1911 1912 /* overlay */ 1913 struct intel_overlay *overlay; 1914 1915 /* backlight registers and fields in struct intel_panel */ 1916 struct mutex backlight_lock; 1917 1918 /* LVDS info */ 1919 bool no_aux_handshake; 1920 1921 /* protects panel power sequencer state */ 1922 struct mutex pps_mutex; 1923 1924 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1925 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1926 1927 unsigned int fsb_freq, mem_freq, is_ddr3; 1928 unsigned int skl_preferred_vco_freq; 1929 unsigned int max_cdclk_freq; 1930 1931 unsigned int max_dotclk_freq; 1932 unsigned int rawclk_freq; 1933 unsigned int hpll_freq; 1934 unsigned int fdi_pll_freq; 1935 unsigned int czclk_freq; 1936 1937 struct { 1938 /* 1939 * The current logical cdclk state. 1940 * See intel_atomic_state.cdclk.logical 1941 * 1942 * For reading holding any crtc lock is sufficient, 1943 * for writing must hold all of them. 1944 */ 1945 struct intel_cdclk_state logical; 1946 /* 1947 * The current actual cdclk state. 1948 * See intel_atomic_state.cdclk.actual 1949 */ 1950 struct intel_cdclk_state actual; 1951 /* The current hardware cdclk state */ 1952 struct intel_cdclk_state hw; 1953 } cdclk; 1954 1955 /** 1956 * wq - Driver workqueue for GEM. 
1957 * 1958 * NOTE: Work items scheduled here are not allowed to grab any modeset 1959 * locks, for otherwise the flushing done in the pageflip code will 1960 * result in deadlocks. 1961 */ 1962 struct workqueue_struct *wq; 1963 1964 /* ordered wq for modesets */ 1965 struct workqueue_struct *modeset_wq; 1966 1967 /* Display functions */ 1968 struct drm_i915_display_funcs display; 1969 1970 /* PCH chipset type */ 1971 enum intel_pch pch_type; 1972 unsigned short pch_id; 1973 1974 unsigned long quirks; 1975 1976 enum modeset_restore modeset_restore; 1977 struct mutex modeset_restore_lock; 1978 struct drm_atomic_state *modeset_restore_state; 1979 struct drm_modeset_acquire_ctx reset_ctx; 1980 1981 struct list_head vm_list; /* Global list of all address spaces */ 1982 struct i915_ggtt ggtt; /* VM representing the global address space */ 1983 1984 struct i915_gem_mm mm; 1985 DECLARE_HASHTABLE(mm_structs, 7); 1986 struct mutex mm_lock; 1987 1988 struct intel_ppat ppat; 1989 1990 /* Kernel Modesetting */ 1991 1992 struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1993 struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; 1994 1995 #ifdef CONFIG_DEBUG_FS 1996 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; 1997 #endif 1998 1999 /* dpll and cdclk state is protected by connection_mutex */ 2000 int num_shared_dpll; 2001 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 2002 const struct intel_dpll_mgr *dpll_mgr; 2003 2004 /* 2005 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. 2006 * Must be global rather than per dpll, because on some platforms 2007 * plls share registers. 2008 */ 2009 struct mutex dpll_lock; 2010 2011 unsigned int active_crtcs; 2012 /* minimum acceptable cdclk for each pipe */ 2013 int min_cdclk[I915_MAX_PIPES]; 2014 /* minimum acceptable voltage level for each pipe */ 2015 u8 min_voltage_level[I915_MAX_PIPES]; 2016 2017 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 2018 2019 struct i915_workarounds workarounds; 2020 2021 struct i915_frontbuffer_tracking fb_tracking; 2022 2023 struct intel_atomic_helper { 2024 struct llist_head free_list; 2025 struct work_struct free_work; 2026 } atomic_helper; 2027 2028 u16 orig_clock; 2029 2030 bool mchbar_need_disable; 2031 2032 struct intel_l3_parity l3_parity; 2033 2034 /* Cannot be determined by PCIID. You must always read a register. */ 2035 u32 edram_cap; 2036 2037 /* 2038 * Protects RPS/RC6 register access and PCU communication. 2039 * Must be taken after struct_mutex if nested. Note that 2040 * this lock may be held for long periods of time when 2041 * talking to hw - so only take it when talking to hw! 2042 */ 2043 struct mutex pcu_lock; 2044 2045 /* gen6+ GT PM state */ 2046 struct intel_gen6_power_mgmt gt_pm; 2047 2048 /* ilk-only ips/rps state. 
Everything in here is protected by the global 2049 * mchdev_lock in intel_pm.c */ 2050 struct intel_ilk_power_mgmt ips; 2051 2052 struct i915_power_domains power_domains; 2053 2054 struct i915_psr psr; 2055 2056 struct i915_gpu_error gpu_error; 2057 2058 struct drm_i915_gem_object *vlv_pctx; 2059 2060 /* list of fbdev register on this device */ 2061 struct intel_fbdev *fbdev; 2062 struct work_struct fbdev_suspend_work; 2063 2064 struct drm_property *broadcast_rgb_property; 2065 struct drm_property *force_audio_property; 2066 2067 /* hda/i915 audio component */ 2068 struct i915_audio_component *audio_component; 2069 bool audio_component_registered; 2070 /** 2071 * av_mutex - mutex for audio/video sync 2072 * 2073 */ 2074 struct mutex av_mutex; 2075 2076 struct { 2077 struct list_head list; 2078 struct llist_head free_list; 2079 struct work_struct free_work; 2080 2081 /* The hw wants to have a stable context identifier for the 2082 * lifetime of the context (for OA, PASID, faults, etc). 2083 * This is limited in execlists to 21 bits. 2084 */ 2085 struct ida hw_ida; 2086 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ 2087 } contexts; 2088 2089 u32 fdi_rx_config; 2090 2091 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ 2092 u32 chv_phy_control; 2093 /* 2094 * Shadows for CHV DPLL_MD regs to keep the state 2095 * checker somewhat working in the presence hardware 2096 * crappiness (can't read out DPLL_MD for pipes B & C). 2097 */ 2098 u32 chv_dpll_md[I915_MAX_PIPES]; 2099 u32 bxt_phy_grc; 2100 2101 u32 suspend_count; 2102 bool suspended_to_idle; 2103 struct i915_suspend_saved_registers regfile; 2104 struct vlv_s0ix_state vlv_s0ix_state; 2105 2106 enum { 2107 I915_SAGV_UNKNOWN = 0, 2108 I915_SAGV_DISABLED, 2109 I915_SAGV_ENABLED, 2110 I915_SAGV_NOT_CONTROLLED 2111 } sagv_status; 2112 2113 struct { 2114 /* 2115 * Raw watermark latency values: 2116 * in 0.1us units for WM0, 2117 * in 0.5us units for WM1+. 2118 */ 2119 /* primary */ 2120 uint16_t pri_latency[5]; 2121 /* sprite */ 2122 uint16_t spr_latency[5]; 2123 /* cursor */ 2124 uint16_t cur_latency[5]; 2125 /* 2126 * Raw watermark memory latency values 2127 * for SKL for all 8 levels 2128 * in 1us units. 2129 */ 2130 uint16_t skl_latency[8]; 2131 2132 /* current hardware state */ 2133 union { 2134 struct ilk_wm_values hw; 2135 struct skl_wm_values skl_hw; 2136 struct vlv_wm_values vlv; 2137 struct g4x_wm_values g4x; 2138 }; 2139 2140 uint8_t max_level; 2141 2142 /* 2143 * Should be held around atomic WM register writing; also 2144 * protects * intel_crtc->wm.active and 2145 * cstate->wm.need_postvbl_update. 2146 */ 2147 struct mutex wm_mutex; 2148 2149 /* 2150 * Set during HW readout of watermarks/DDB. Some platforms 2151 * need to know when we're still using BIOS-provided values 2152 * (which we don't fully trust). 2153 */ 2154 bool distrust_bios_wm; 2155 } wm; 2156 2157 struct i915_runtime_pm runtime_pm; 2158 2159 struct { 2160 bool initialized; 2161 2162 struct kobject *metrics_kobj; 2163 struct ctl_table_header *sysctl_header; 2164 2165 /* 2166 * Lock associated with adding/modifying/removing OA configs 2167 * in dev_priv->perf.metrics_idr. 2168 */ 2169 struct mutex metrics_lock; 2170 2171 /* 2172 * List of dynamic configurations, you need to hold 2173 * dev_priv->perf.metrics_lock to access it. 2174 */ 2175 struct idr metrics_idr; 2176 2177 /* 2178 * Lock associated with anything below within this structure 2179 * except exclusive_stream. 
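 *
 * A hedged sketch (not quoted from the driver; the stream "link" member
 * is assumed here purely for illustration) of registering a new stream
 * under this lock:
 *
 *	mutex_lock(&dev_priv->perf.lock);
 *	list_add(&stream->link, &dev_priv->perf.streams);
 *	mutex_unlock(&dev_priv->perf.lock);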
2180 */ 2181 struct mutex lock; 2182 struct list_head streams; 2183 2184 struct { 2185 /* 2186 * The stream currently using the OA unit. If accessed 2187 * outside a syscall associated with its file 2188 * descriptor, you need to hold 2189 * dev_priv->drm.struct_mutex. 2190 */ 2191 struct i915_perf_stream *exclusive_stream; 2192 2193 u32 specific_ctx_id; 2194 2195 struct hrtimer poll_check_timer; 2196 wait_queue_head_t poll_wq; 2197 bool pollin; 2198 2199 /** 2200 * For rate limiting any notifications of spurious 2201 * invalid OA reports 2202 */ 2203 struct ratelimit_state spurious_report_rs; 2204 2205 bool periodic; 2206 int period_exponent; 2207 2208 struct i915_oa_config test_config; 2209 2210 struct { 2211 struct i915_vma *vma; 2212 u8 *vaddr; 2213 u32 last_ctx_id; 2214 int format; 2215 int format_size; 2216 2217 /** 2218 * Locks reads and writes to all head/tail state 2219 * 2220 * Consider: the head and tail pointer state 2221 * needs to be read consistently from a hrtimer 2222 * callback (atomic context) and read() fop 2223 * (user context) with tail pointer updates 2224 * happening in atomic context and head updates 2225 * in user context and the (unlikely) 2226 * possibility of read() errors needing to 2227 * reset all head/tail state. 2228 * 2229 * Note: Contention or performance aren't 2230 * currently a significant concern here 2231 * considering the relatively low frequency of 2232 * hrtimer callbacks (5ms period) and that 2233 * reads typically only happen in response to a 2234 * hrtimer event and likely complete before the 2235 * next callback. 2236 * 2237 * Note: This lock is not held *while* reading 2238 * and copying data to userspace so the value 2239 * of head observed in hrtimer callbacks won't 2240 * represent any partial consumption of data. 2241 */ 2242 spinlock_t ptr_lock; 2243 2244 /** 2245 * One 'aging' tail pointer and one 'aged' 2246 * tail pointer ready to be used for reading. 2247 * 2248 * Initial values of 0xffffffff are invalid 2249 * and imply that an update is required 2250 * (and should be ignored by an attempted 2251 * read) 2252 */ 2253 struct { 2254 u32 offset; 2255 } tails[2]; 2256 2257 /** 2258 * Index for the aged tail ready to read() 2259 * data up to. 2260 */ 2261 unsigned int aged_tail_idx; 2262 2263 /** 2264 * A monotonic timestamp for when the current 2265 * aging tail pointer was read; used to 2266 * determine when it is old enough to trust. 2267 */ 2268 u64 aging_timestamp; 2269 2270 /** 2271 * Although we can always read back the head 2272 * pointer register, we prefer to avoid 2273 * trusting the HW state, just to avoid any 2274 * risk that some hardware condition could 2275 * somehow bump the head pointer unpredictably 2276 * and cause us to forward the wrong OA buffer 2277 * data to userspace.
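 *
 * As a hedged illustration (new_head and flags are hypothetical locals,
 * not code from this file), updates to this software-tracked head are
 * expected to happen under oa_buffer.ptr_lock above:
 *
 *	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
 *	dev_priv->perf.oa.oa_buffer.head = new_head;
 *	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);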
2278 */ 2279 u32 head; 2280 } oa_buffer; 2281 2282 u32 gen7_latched_oastatus1; 2283 u32 ctx_oactxctrl_offset; 2284 u32 ctx_flexeu0_offset; 2285 2286 /** 2287 * The RPT_ID/reason field for Gen8+ includes a bit 2288 * to determine if the CTX ID in the report is valid 2289 * but the specific bit differs between Gen 8 and 9 2290 */ 2291 u32 gen8_valid_ctx_bit; 2292 2293 struct i915_oa_ops ops; 2294 const struct i915_oa_format *oa_formats; 2295 } oa; 2296 } perf; 2297 2298 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 2299 struct { 2300 void (*resume)(struct drm_i915_private *); 2301 void (*cleanup_engine)(struct intel_engine_cs *engine); 2302 2303 struct list_head timelines; 2304 struct i915_gem_timeline global_timeline; 2305 u32 active_requests; 2306 2307 /** 2308 * Is the GPU currently considered idle, or busy executing 2309 * userspace requests? Whilst idle, we allow runtime power 2310 * management to power down the hardware and display clocks. 2311 * In order to reduce the effect on performance, there 2312 * is a slight delay before we do so. 2313 */ 2314 bool awake; 2315 2316 /** 2317 * We leave the user IRQ off as much as possible, 2318 * but this means that requests will finish and never 2319 * be retired once the system goes idle. Set a timer to 2320 * fire periodically while the ring is running. When it 2321 * fires, go retire requests. 2322 */ 2323 struct delayed_work retire_work; 2324 2325 /** 2326 * When we detect an idle GPU, we want to turn on 2327 * powersaving features. So once we see that there 2328 * are no more requests outstanding and no more 2329 * arrive within a small period of time, we fire 2330 * off the idle_work. 2331 */ 2332 struct delayed_work idle_work; 2333 2334 ktime_t last_init_time; 2335 } gt; 2336 2337 /* perform PHY state sanity checks? */ 2338 bool chv_phy_assert[2]; 2339 2340 bool ipc_enabled; 2341 2342 /* Used to save the pipe-to-encoder mapping for audio */ 2343 struct intel_encoder *av_enc_map[I915_MAX_PIPES]; 2344 2345 /* necessary resource sharing with HDMI LPE audio driver. */ 2346 struct { 2347 struct platform_device *platdev; 2348 int irq; 2349 } lpe_audio; 2350 2351 struct i915_pmu pmu; 2352 2353 /* 2354 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 2355 * will be rejected. Instead look for a better place. 2356 */ 2357 }; 2358 2359 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 2360 { 2361 return container_of(dev, struct drm_i915_private, drm); 2362 } 2363 2364 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 2365 { 2366 return to_i915(dev_get_drvdata(kdev)); 2367 } 2368 2369 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 2370 { 2371 return container_of(guc, struct drm_i915_private, guc); 2372 } 2373 2374 static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) 2375 { 2376 return container_of(huc, struct drm_i915_private, huc); 2377 } 2378 2379 /* Simple iterator over all initialised engines */ 2380 #define for_each_engine(engine__, dev_priv__, id__) \ 2381 for ((id__) = 0; \ 2382 (id__) < I915_NUM_ENGINES; \ 2383 (id__)++) \ 2384 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 2385 2386 /* Iterator over subset of engines selected by mask */ 2387 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ 2388 for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \ 2389 tmp__ ? 
(engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; ) 2390 2391 enum hdmi_force_audio { 2392 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 2393 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2394 HDMI_AUDIO_AUTO, /* trust EDID */ 2395 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2396 }; 2397 2398 #define I915_GTT_OFFSET_NONE ((u32)-1) 2399 2400 /* 2401 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2402 * considered to be the frontbuffer for the given plane interface-wise. This 2403 * doesn't mean that the hw necessarily already scans it out, but that any 2404 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2405 * 2406 * We have one bit per pipe and per scanout plane type. 2407 */ 2408 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 2409 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2410 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2411 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2412 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2413 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2414 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ 2415 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2416 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2417 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2418 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2419 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2420 2421 /* 2422 * Optimised SGL iterator for GEM objects 2423 */ 2424 static __always_inline struct sgt_iter { 2425 struct scatterlist *sgp; 2426 union { 2427 unsigned long pfn; 2428 dma_addr_t dma; 2429 }; 2430 unsigned int curr; 2431 unsigned int max; 2432 } __sgt_iter(struct scatterlist *sgl, bool dma) { 2433 struct sgt_iter s = { .sgp = sgl }; 2434 2435 if (s.sgp) { 2436 s.max = s.curr = s.sgp->offset; 2437 s.max += s.sgp->length; 2438 if (dma) 2439 s.dma = sg_dma_address(s.sgp); 2440 else 2441 s.pfn = page_to_pfn(sg_page(s.sgp)); 2442 } 2443 2444 return s; 2445 } 2446 2447 static inline struct scatterlist *____sg_next(struct scatterlist *sg) 2448 { 2449 ++sg; 2450 if (unlikely(sg_is_chain(sg))) 2451 sg = sg_chain_ptr(sg); 2452 return sg; 2453 } 2454 2455 /** 2456 * __sg_next - return the next scatterlist entry in a list 2457 * @sg: The current sg entry 2458 * 2459 * Description: 2460 * If the entry is the last, return NULL; otherwise, step to the next 2461 * element in the array (@sg@+1). If that's a chain pointer, follow it; 2462 * otherwise just return the pointer to the current element. 2463 **/ 2464 static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2465 { 2466 #ifdef CONFIG_DEBUG_SG 2467 BUG_ON(sg->sg_magic != SG_MAGIC); 2468 #endif 2469 return sg_is_last(sg) ? NULL : ____sg_next(sg); 2470 } 2471 2472 /** 2473 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table 2474 * @__dmap: DMA address (output) 2475 * @__iter: 'struct sgt_iter' (iterator state, internal) 2476 * @__sgt: sg_table to iterate over (input) 2477 */ 2478 #define for_each_sgt_dma(__dmap, __iter, __sgt) \ 2479 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ 2480 ((__dmap) = (__iter).dma + (__iter).curr); \ 2481 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? 
\ 2482 (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) 2483 2484 /** 2485 * for_each_sgt_page - iterate over the pages of the given sg_table 2486 * @__pp: page pointer (output) 2487 * @__iter: 'struct sgt_iter' (iterator state, internal) 2488 * @__sgt: sg_table to iterate over (input) 2489 */ 2490 #define for_each_sgt_page(__pp, __iter, __sgt) \ 2491 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ 2492 ((__pp) = (__iter).pfn == 0 ? NULL : \ 2493 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ 2494 (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ 2495 (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) 2496 2497 static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) 2498 { 2499 unsigned int page_sizes; 2500 2501 page_sizes = 0; 2502 while (sg) { 2503 GEM_BUG_ON(sg->offset); 2504 GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE)); 2505 page_sizes |= sg->length; 2506 sg = __sg_next(sg); 2507 } 2508 2509 return page_sizes; 2510 } 2511 2512 static inline unsigned int i915_sg_segment_size(void) 2513 { 2514 unsigned int size = swiotlb_max_segment(); 2515 2516 if (size == 0) 2517 return SCATTERLIST_MAX_SEGMENT; 2518 2519 size = rounddown(size, PAGE_SIZE); 2520 /* swiotlb_max_segment_size can return 1 byte when it means one page. */ 2521 if (size < PAGE_SIZE) 2522 size = PAGE_SIZE; 2523 2524 return size; 2525 } 2526 2527 static inline const struct intel_device_info * 2528 intel_info(const struct drm_i915_private *dev_priv) 2529 { 2530 return &dev_priv->info; 2531 } 2532 2533 #define INTEL_INFO(dev_priv) intel_info((dev_priv)) 2534 2535 #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) 2536 #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) 2537 2538 #define REVID_FOREVER 0xff 2539 #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) 2540 2541 #define GEN_FOREVER (0) 2542 2543 #define INTEL_GEN_MASK(s, e) ( \ 2544 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ 2545 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ 2546 GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \ 2547 (s) != GEN_FOREVER ? (s) - 1 : 0) \ 2548 ) 2549 2550 /* 2551 * Returns true if Gen is in inclusive range [Start, End]. 2552 * 2553 * Use GEN_FOREVER for unbound start and or end. 2554 */ 2555 #define IS_GEN(dev_priv, s, e) \ 2556 (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e)))) 2557 2558 /* 2559 * Return true if revision is in range [since,until] inclusive. 2560 * 2561 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 
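 *
 * Illustrative usage: IS_REVID(dev_priv, 0, SKL_REVID_B0) is true only for
 * the A0 and B0 revisions, while IS_REVID(dev_priv, SKL_REVID_C0,
 * REVID_FOREVER) matches C0 and anything newer (the SKL_REVID_* values are
 * defined below; in practice such checks are usually wrapped as
 * IS_SKL_REVID() and friends).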
2562 */ 2563 #define IS_REVID(p, since, until) \ 2564 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2565 2566 #define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p)) 2567 2568 #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830) 2569 #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G) 2570 #define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X) 2571 #define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G) 2572 #define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G) 2573 #define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM) 2574 #define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G) 2575 #define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM) 2576 #define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G) 2577 #define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM) 2578 #define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45) 2579 #define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45) 2580 #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) 2581 #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) 2582 #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) 2583 #define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW) 2584 #define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33) 2585 #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) 2586 #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE) 2587 #define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \ 2588 (dev_priv)->info.gt == 1) 2589 #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) 2590 #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW) 2591 #define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL) 2592 #define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL) 2593 #define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE) 2594 #define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON) 2595 #define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE) 2596 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) 2597 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) 2598 #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) 2599 #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) 2600 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 2601 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 2602 #define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \ 2603 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \ 2604 (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \ 2605 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)) 2606 /* ULX machines are also considered ULT. */ 2607 #define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ 2608 (INTEL_DEVID(dev_priv) & 0xf) == 0xe) 2609 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 2610 (dev_priv)->info.gt == 3) 2611 #define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ 2612 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) 2613 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 2614 (dev_priv)->info.gt == 3) 2615 /* ULX machines are also considered ULT. 
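 * (Concretely, the 0x0A0E/0x0A1E ULX device IDs below also satisfy the
 * 0x0A00-range test used by IS_HSW_ULT() above.)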
*/ 2616 #define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ 2617 INTEL_DEVID(dev_priv) == 0x0A1E) 2618 #define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \ 2619 INTEL_DEVID(dev_priv) == 0x1913 || \ 2620 INTEL_DEVID(dev_priv) == 0x1916 || \ 2621 INTEL_DEVID(dev_priv) == 0x1921 || \ 2622 INTEL_DEVID(dev_priv) == 0x1926) 2623 #define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \ 2624 INTEL_DEVID(dev_priv) == 0x1915 || \ 2625 INTEL_DEVID(dev_priv) == 0x191E) 2626 #define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \ 2627 INTEL_DEVID(dev_priv) == 0x5913 || \ 2628 INTEL_DEVID(dev_priv) == 0x5916 || \ 2629 INTEL_DEVID(dev_priv) == 0x5921 || \ 2630 INTEL_DEVID(dev_priv) == 0x5926) 2631 #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2632 INTEL_DEVID(dev_priv) == 0x5915 || \ 2633 INTEL_DEVID(dev_priv) == 0x591E) 2634 #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2635 (dev_priv)->info.gt == 2) 2636 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2637 (dev_priv)->info.gt == 3) 2638 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2639 (dev_priv)->info.gt == 4) 2640 #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \ 2641 (dev_priv)->info.gt == 2) 2642 #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \ 2643 (dev_priv)->info.gt == 3) 2644 #define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 2645 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0) 2646 #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 2647 (dev_priv)->info.gt == 2) 2648 #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 2649 (dev_priv)->info.gt == 3) 2650 2651 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) 2652 2653 #define SKL_REVID_A0 0x0 2654 #define SKL_REVID_B0 0x1 2655 #define SKL_REVID_C0 0x2 2656 #define SKL_REVID_D0 0x3 2657 #define SKL_REVID_E0 0x4 2658 #define SKL_REVID_F0 0x5 2659 #define SKL_REVID_G0 0x6 2660 #define SKL_REVID_H0 0x7 2661 2662 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2663 2664 #define BXT_REVID_A0 0x0 2665 #define BXT_REVID_A1 0x1 2666 #define BXT_REVID_B0 0x3 2667 #define BXT_REVID_B_LAST 0x8 2668 #define BXT_REVID_C0 0x9 2669 2670 #define IS_BXT_REVID(dev_priv, since, until) \ 2671 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) 2672 2673 #define KBL_REVID_A0 0x0 2674 #define KBL_REVID_B0 0x1 2675 #define KBL_REVID_C0 0x2 2676 #define KBL_REVID_D0 0x3 2677 #define KBL_REVID_E0 0x4 2678 2679 #define IS_KBL_REVID(dev_priv, since, until) \ 2680 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2681 2682 #define GLK_REVID_A0 0x0 2683 #define GLK_REVID_A1 0x1 2684 2685 #define IS_GLK_REVID(dev_priv, since, until) \ 2686 (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2687 2688 #define CNL_REVID_A0 0x0 2689 #define CNL_REVID_B0 0x1 2690 #define CNL_REVID_C0 0x2 2691 2692 #define IS_CNL_REVID(p, since, until) \ 2693 (IS_CANNONLAKE(p) && IS_REVID(p, since, until)) 2694 2695 /* 2696 * The genX designation typically refers to the render engine, so render 2697 * capability related checks should use IS_GEN, while display and other checks 2698 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2699 * chips, etc.). 
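 *
 * E.g. IS_GEN6(dev_priv) below answers a render-generation question,
 * whereas a display-side question such as "is there a PCH?" goes through
 * HAS_PCH_SPLIT(dev_priv) instead.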
2700 */ 2701 #define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1))) 2702 #define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2))) 2703 #define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3))) 2704 #define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4))) 2705 #define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5))) 2706 #define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6))) 2707 #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) 2708 #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) 2709 #define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9))) 2710 2711 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) 2712 #define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv)) 2713 #define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv)) 2714 2715 #define ENGINE_MASK(id) BIT(id) 2716 #define RENDER_RING ENGINE_MASK(RCS) 2717 #define BSD_RING ENGINE_MASK(VCS) 2718 #define BLT_RING ENGINE_MASK(BCS) 2719 #define VEBOX_RING ENGINE_MASK(VECS) 2720 #define BSD2_RING ENGINE_MASK(VCS2) 2721 #define ALL_ENGINES (~0) 2722 2723 #define HAS_ENGINE(dev_priv, id) \ 2724 (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id))) 2725 2726 #define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) 2727 #define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) 2728 #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) 2729 #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) 2730 2731 #define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv) 2732 2733 #define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc) 2734 #define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop) 2735 #define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED)) 2736 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ 2737 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) 2738 2739 #define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical) 2740 2741 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ 2742 ((dev_priv)->info.has_logical_ring_contexts) 2743 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \ 2744 ((dev_priv)->info.has_logical_ring_preemption) 2745 2746 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) 2747 2748 #define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt) 2749 #define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2) 2750 #define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3) 2751 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ 2752 GEM_BUG_ON((sizes) == 0); \ 2753 ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \ 2754 }) 2755 2756 #define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) 2757 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ 2758 ((dev_priv)->info.overlay_needs_physical) 2759 2760 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2761 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) 2762 2763 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2764 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 2765 (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) 2766 2767 /* 2768 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2769 * even when in MSI mode. This results in spurious interrupt warnings if the 2770 * legacy irq no. is shared with another device. The kernel then disables that 2771 * interrupt source and so prevents the other device from working properly. 2772 * 2773 * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX 2774 * interrupts. 
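 *
 * (Hence HAS_AUX_IRQ() below is simply hard-coded to true, and only
 * HAS_GMBUS_IRQ() remains gated on gen4+.)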
2775 */ 2776 #define HAS_AUX_IRQ(dev_priv) true 2777 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) 2778 2779 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2780 * rows, which changed the alignment requirements and fence programming. 2781 */ 2782 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ 2783 !(IS_I915G(dev_priv) || \ 2784 IS_I915GM(dev_priv))) 2785 #define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv) 2786 #define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug) 2787 2788 #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) 2789 #define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc) 2790 #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7) 2791 2792 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 2793 2794 #define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst) 2795 2796 #define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) 2797 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg) 2798 #define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr) 2799 2800 #define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6) 2801 #define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p) 2802 #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */ 2803 2804 #define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr) 2805 2806 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) 2807 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) 2808 2809 #define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc) 2810 2811 /* 2812 * For now, anything with a GuC requires uCode loading, and then supports 2813 * command submission once loaded. But these are logically independent 2814 * properties, so we have separate macros to test them. 
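 *
 * In other words, a platform could in principle report HAS_GUC_UCODE()
 * without HAS_GUC_SCHED(); today both simply follow HAS_GUC() below.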
2815 */ 2816 #define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc) 2817 #define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct) 2818 #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) 2819 #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) 2820 2821 /* For now, anything with a GuC has also HuC */ 2822 #define HAS_HUC(dev_priv) (HAS_GUC(dev_priv)) 2823 #define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) 2824 2825 /* Having a GuC is not the same as using a GuC */ 2826 #define USES_GUC(dev_priv) intel_uc_is_using_guc() 2827 #define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission() 2828 #define USES_HUC(dev_priv) intel_uc_is_using_huc() 2829 2830 #define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer) 2831 2832 #define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) 2833 2834 #define INTEL_PCH_DEVICE_ID_MASK 0xff80 2835 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2836 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2837 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2838 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2839 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2840 #define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80 2841 #define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80 2842 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2843 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2844 #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280 2845 #define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300 2846 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 2847 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2848 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2849 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2850 2851 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) 2852 #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP) 2853 #define HAS_PCH_CNP_LP(dev_priv) \ 2854 ((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) 2855 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) 2856 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) 2857 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) 2858 #define HAS_PCH_LPT_LP(dev_priv) \ 2859 ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \ 2860 (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) 2861 #define HAS_PCH_LPT_H(dev_priv) \ 2862 ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \ 2863 (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE) 2864 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) 2865 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) 2866 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) 2867 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) 2868 2869 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display) 2870 2871 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9) 2872 2873 /* DPF == dynamic parity feature */ 2874 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf) 2875 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? 
\ 2876 2 : HAS_L3_DPF(dev_priv)) 2877 2878 #define GT_FREQUENCY_MULTIPLIER 50 2879 #define GEN9_FREQ_SCALER 3 2880 2881 #include "i915_trace.h" 2882 2883 static inline bool intel_vtd_active(void) 2884 { 2885 #ifdef CONFIG_INTEL_IOMMU 2886 if (intel_iommu_gfx_mapped) 2887 return true; 2888 #endif 2889 return false; 2890 } 2891 2892 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) 2893 { 2894 return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active(); 2895 } 2896 2897 static inline bool 2898 intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) 2899 { 2900 return IS_BROXTON(dev_priv) && intel_vtd_active(); 2901 } 2902 2903 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 2904 int enable_ppgtt); 2905 2906 /* i915_drv.c */ 2907 void __printf(3, 4) 2908 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 2909 const char *fmt, ...); 2910 2911 #define i915_report_error(dev_priv, fmt, ...) \ 2912 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2913 2914 #ifdef CONFIG_COMPAT 2915 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2916 unsigned long arg); 2917 #else 2918 #define i915_compat_ioctl NULL 2919 #endif 2920 extern const struct dev_pm_ops i915_pm_ops; 2921 2922 extern int i915_driver_load(struct pci_dev *pdev, 2923 const struct pci_device_id *ent); 2924 extern void i915_driver_unload(struct drm_device *dev); 2925 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); 2926 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); 2927 2928 #define I915_RESET_QUIET BIT(0) 2929 extern void i915_reset(struct drm_i915_private *i915, unsigned int flags); 2930 extern int i915_reset_engine(struct intel_engine_cs *engine, 2931 unsigned int flags); 2932 2933 extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv); 2934 extern int intel_reset_guc(struct drm_i915_private *dev_priv); 2935 extern int intel_guc_reset_engine(struct intel_guc *guc, 2936 struct intel_engine_cs *engine); 2937 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2938 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); 2939 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2940 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2941 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2942 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2943 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2944 2945 int intel_engines_init_mmio(struct drm_i915_private *dev_priv); 2946 int intel_engines_init(struct drm_i915_private *dev_priv); 2947 2948 /* intel_hotplug.c */ 2949 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, 2950 u32 pin_mask, u32 long_mask); 2951 void intel_hpd_init(struct drm_i915_private *dev_priv); 2952 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2953 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2954 enum port intel_hpd_pin_to_port(enum hpd_pin pin); 2955 enum hpd_pin intel_hpd_pin(enum port port); 2956 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2957 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2958 2959 /* i915_irq.c */ 2960 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) 2961 { 2962 unsigned long delay; 2963 2964 if (unlikely(!i915_modparams.enable_hangcheck)) 2965 return; 2966 2967 /* Don't continually 
defer the hangcheck so that it is always run at 2968 * least once after work has been scheduled on any ring. Otherwise, 2969 * we will ignore a hung ring if a second ring is kept busy. 2970 */ 2971 2972 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); 2973 queue_delayed_work(system_long_wq, 2974 &dev_priv->gpu_error.hangcheck_work, delay); 2975 } 2976 2977 __printf(3, 4) 2978 void i915_handle_error(struct drm_i915_private *dev_priv, 2979 u32 engine_mask, 2980 const char *fmt, ...); 2981 2982 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2983 extern void intel_irq_fini(struct drm_i915_private *dev_priv); 2984 int intel_irq_install(struct drm_i915_private *dev_priv); 2985 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2986 2987 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) 2988 { 2989 return dev_priv->gvt; 2990 } 2991 2992 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) 2993 { 2994 return dev_priv->vgpu.active; 2995 } 2996 2997 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 2998 enum pipe pipe); 2999 void 3000 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 3001 u32 status_mask); 3002 3003 void 3004 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 3005 u32 status_mask); 3006 3007 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 3008 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 3009 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 3010 uint32_t mask, 3011 uint32_t bits); 3012 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 3013 uint32_t interrupt_mask, 3014 uint32_t enabled_irq_mask); 3015 static inline void 3016 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 3017 { 3018 ilk_update_display_irq(dev_priv, bits, bits); 3019 } 3020 static inline void 3021 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 3022 { 3023 ilk_update_display_irq(dev_priv, bits, 0); 3024 } 3025 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 3026 enum pipe pipe, 3027 uint32_t interrupt_mask, 3028 uint32_t enabled_irq_mask); 3029 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 3030 enum pipe pipe, uint32_t bits) 3031 { 3032 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 3033 } 3034 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 3035 enum pipe pipe, uint32_t bits) 3036 { 3037 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 3038 } 3039 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 3040 uint32_t interrupt_mask, 3041 uint32_t enabled_irq_mask); 3042 static inline void 3043 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 3044 { 3045 ibx_display_interrupt_update(dev_priv, bits, bits); 3046 } 3047 static inline void 3048 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 3049 { 3050 ibx_display_interrupt_update(dev_priv, bits, 0); 3051 } 3052 3053 /* i915_gem.c */ 3054 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 3055 struct drm_file *file_priv); 3056 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 3057 struct drm_file *file_priv); 3058 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 3059 struct drm_file *file_priv); 3060 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 3061 struct drm_file *file_priv); 3062 int 
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 3063 struct drm_file *file_priv); 3064 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 3065 struct drm_file *file_priv); 3066 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 3067 struct drm_file *file_priv); 3068 int i915_gem_execbuffer(struct drm_device *dev, void *data, 3069 struct drm_file *file_priv); 3070 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 3071 struct drm_file *file_priv); 3072 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 3073 struct drm_file *file_priv); 3074 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3075 struct drm_file *file); 3076 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3077 struct drm_file *file); 3078 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 3079 struct drm_file *file_priv); 3080 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 3081 struct drm_file *file_priv); 3082 int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, 3083 struct drm_file *file_priv); 3084 int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 3085 struct drm_file *file_priv); 3086 int i915_gem_init_userptr(struct drm_i915_private *dev_priv); 3087 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); 3088 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 3089 struct drm_file *file); 3090 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 3091 struct drm_file *file_priv); 3092 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 3093 struct drm_file *file_priv); 3094 void i915_gem_sanitize(struct drm_i915_private *i915); 3095 int i915_gem_load_init(struct drm_i915_private *dev_priv); 3096 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv); 3097 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 3098 int i915_gem_freeze(struct drm_i915_private *dev_priv); 3099 int i915_gem_freeze_late(struct drm_i915_private *dev_priv); 3100 3101 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv); 3102 void i915_gem_object_free(struct drm_i915_gem_object *obj); 3103 void i915_gem_object_init(struct drm_i915_gem_object *obj, 3104 const struct drm_i915_gem_object_ops *ops); 3105 struct drm_i915_gem_object * 3106 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size); 3107 struct drm_i915_gem_object * 3108 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, 3109 const void *data, size_t size); 3110 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); 3111 void i915_gem_free_object(struct drm_gem_object *obj); 3112 3113 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) 3114 { 3115 /* A single pass should suffice to release all the freed objects (along 3116 * most call paths) , but be a little more paranoid in that freeing 3117 * the objects does take a little amount of time, during which the rcu 3118 * callbacks could have added new objects into the freed list, and 3119 * armed the work again. 3120 */ 3121 do { 3122 rcu_barrier(); 3123 } while (flush_work(&i915->mm.free_work)); 3124 } 3125 3126 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) 3127 { 3128 /* 3129 * Similar to objects above (see i915_gem_drain_freed-objects), in 3130 * general we have workers that are armed by RCU and then rearm 3131 * themselves in their callbacks. 
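 * (A concrete instance of the pattern, sketched rather than quoted: an RCU
 * callback queues work on i915->wq, and that worker may arm another RCU
 * callback before it completes.)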
To be paranoid, we need to 3132 * drain the workqueue a second time after waiting for the RCU 3133 * grace period so that we catch work queued via RCU from the first 3134 * pass. As neither drain_workqueue() nor flush_workqueue() report 3135 * a result, we make an assumption that we only don't require more 3136 * than 2 passes to catch all recursive RCU delayed work. 3137 * 3138 */ 3139 int pass = 2; 3140 do { 3141 rcu_barrier(); 3142 drain_workqueue(i915->wq); 3143 } while (--pass); 3144 } 3145 3146 struct i915_vma * __must_check 3147 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 3148 const struct i915_ggtt_view *view, 3149 u64 size, 3150 u64 alignment, 3151 u64 flags); 3152 3153 int i915_gem_object_unbind(struct drm_i915_gem_object *obj); 3154 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 3155 3156 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); 3157 3158 static inline int __sg_page_count(const struct scatterlist *sg) 3159 { 3160 return sg->length >> PAGE_SHIFT; 3161 } 3162 3163 struct scatterlist * 3164 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 3165 unsigned int n, unsigned int *offset); 3166 3167 struct page * 3168 i915_gem_object_get_page(struct drm_i915_gem_object *obj, 3169 unsigned int n); 3170 3171 struct page * 3172 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 3173 unsigned int n); 3174 3175 dma_addr_t 3176 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 3177 unsigned long n); 3178 3179 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 3180 struct sg_table *pages, 3181 unsigned int sg_page_sizes); 3182 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 3183 3184 static inline int __must_check 3185 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 3186 { 3187 might_lock(&obj->mm.lock); 3188 3189 if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) 3190 return 0; 3191 3192 return __i915_gem_object_get_pages(obj); 3193 } 3194 3195 static inline bool 3196 i915_gem_object_has_pages(struct drm_i915_gem_object *obj) 3197 { 3198 return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages)); 3199 } 3200 3201 static inline void 3202 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 3203 { 3204 GEM_BUG_ON(!i915_gem_object_has_pages(obj)); 3205 3206 atomic_inc(&obj->mm.pages_pin_count); 3207 } 3208 3209 static inline bool 3210 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) 3211 { 3212 return atomic_read(&obj->mm.pages_pin_count); 3213 } 3214 3215 static inline void 3216 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 3217 { 3218 GEM_BUG_ON(!i915_gem_object_has_pages(obj)); 3219 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 3220 3221 atomic_dec(&obj->mm.pages_pin_count); 3222 } 3223 3224 static inline void 3225 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 3226 { 3227 __i915_gem_object_unpin_pages(obj); 3228 } 3229 3230 enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */ 3231 I915_MM_NORMAL = 0, 3232 I915_MM_SHRINKER 3233 }; 3234 3235 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 3236 enum i915_mm_subclass subclass); 3237 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); 3238 3239 enum i915_map_type { 3240 I915_MAP_WB = 0, 3241 I915_MAP_WC, 3242 #define I915_MAP_OVERRIDE BIT(31) 3243 I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, 3244 I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, 3245 }; 3246 3247 /** 3248 * i915_gem_object_pin_map - return a 
contiguous mapping of the entire object 3249 * @obj: the object to map into kernel address space 3250 * @type: the type of mapping, used to select pgprot_t 3251 * 3252 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 3253 * pages and then returns a contiguous mapping of the backing storage into 3254 * the kernel address space. Based on the @type of mapping, the PTE will be 3255 * set to either WriteBack or WriteCombine (via pgprot_t). 3256 * 3257 * The caller is responsible for calling i915_gem_object_unpin_map() when the 3258 * mapping is no longer required. 3259 * 3260 * Returns the pointer through which to access the mapped object, or an 3261 * ERR_PTR() on error. 3262 */ 3263 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 3264 enum i915_map_type type); 3265 3266 /** 3267 * i915_gem_object_unpin_map - releases an earlier mapping 3268 * @obj: the object to unmap 3269 * 3270 * After pinning the object and mapping its pages, once you are finished 3271 * with your access, call i915_gem_object_unpin_map() to release the pin 3272 * upon the mapping. Once the pin count reaches zero, that mapping may be 3273 * removed. 3274 */ 3275 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3276 { 3277 i915_gem_object_unpin_pages(obj); 3278 } 3279 3280 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 3281 unsigned int *needs_clflush); 3282 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 3283 unsigned int *needs_clflush); 3284 #define CLFLUSH_BEFORE BIT(0) 3285 #define CLFLUSH_AFTER BIT(1) 3286 #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) 3287 3288 static inline void 3289 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) 3290 { 3291 i915_gem_object_unpin_pages(obj); 3292 } 3293 3294 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3295 void i915_vma_move_to_active(struct i915_vma *vma, 3296 struct drm_i915_gem_request *req, 3297 unsigned int flags); 3298 int i915_gem_dumb_create(struct drm_file *file_priv, 3299 struct drm_device *dev, 3300 struct drm_mode_create_dumb *args); 3301 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3302 uint32_t handle, uint64_t *offset); 3303 int i915_gem_mmap_gtt_version(void); 3304 3305 void i915_gem_track_fb(struct drm_i915_gem_object *old, 3306 struct drm_i915_gem_object *new, 3307 unsigned frontbuffer_bits); 3308 3309 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 3310 3311 struct drm_i915_gem_request * 3312 i915_gem_find_active_request(struct intel_engine_cs *engine); 3313 3314 void i915_gem_retire_requests(struct drm_i915_private *dev_priv); 3315 3316 static inline bool i915_reset_backoff(struct i915_gpu_error *error) 3317 { 3318 return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags)); 3319 } 3320 3321 static inline bool i915_reset_handoff(struct i915_gpu_error *error) 3322 { 3323 return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags)); 3324 } 3325 3326 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 3327 { 3328 return unlikely(test_bit(I915_WEDGED, &error->flags)); 3329 } 3330 3331 static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error) 3332 { 3333 return i915_reset_backoff(error) | i915_terminally_wedged(error); 3334 } 3335 3336 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3337 { 3338 return READ_ONCE(error->reset_count); 3339 } 3340 3341 static inline u32 
i915_reset_engine_count(struct i915_gpu_error *error, 3342 struct intel_engine_cs *engine) 3343 { 3344 return READ_ONCE(error->reset_engine_count[engine->id]); 3345 } 3346 3347 struct drm_i915_gem_request * 3348 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine); 3349 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); 3350 void i915_gem_reset(struct drm_i915_private *dev_priv); 3351 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine); 3352 void i915_gem_reset_finish(struct drm_i915_private *dev_priv); 3353 void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3354 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); 3355 void i915_gem_reset_engine(struct intel_engine_cs *engine, 3356 struct drm_i915_gem_request *request); 3357 3358 void i915_gem_init_mmio(struct drm_i915_private *i915); 3359 int __must_check i915_gem_init(struct drm_i915_private *dev_priv); 3360 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); 3361 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); 3362 void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); 3363 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, 3364 unsigned int flags); 3365 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); 3366 void i915_gem_resume(struct drm_i915_private *dev_priv); 3367 int i915_gem_fault(struct vm_fault *vmf); 3368 int i915_gem_object_wait(struct drm_i915_gem_object *obj, 3369 unsigned int flags, 3370 long timeout, 3371 struct intel_rps_client *rps); 3372 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 3373 unsigned int flags, 3374 int priority); 3375 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX 3376 3377 int __must_check 3378 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); 3379 int __must_check 3380 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); 3381 int __must_check 3382 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3383 struct i915_vma * __must_check 3384 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3385 u32 alignment, 3386 const struct i915_ggtt_view *view); 3387 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); 3388 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3389 int align); 3390 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); 3391 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3392 3393 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3394 enum i915_cache_level cache_level); 3395 3396 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3397 struct dma_buf *dma_buf); 3398 3399 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3400 struct drm_gem_object *gem_obj, int flags); 3401 3402 static inline struct i915_hw_ppgtt * 3403 i915_vm_to_ppgtt(struct i915_address_space *vm) 3404 { 3405 return container_of(vm, struct i915_hw_ppgtt, base); 3406 } 3407 3408 /* i915_gem_fence_reg.c */ 3409 struct drm_i915_fence_reg * 3410 i915_reserve_fence(struct drm_i915_private *dev_priv); 3411 void i915_unreserve_fence(struct drm_i915_fence_reg *fence); 3412 3413 void i915_gem_revoke_fences(struct drm_i915_private *dev_priv); 3414 void i915_gem_restore_fences(struct drm_i915_private *dev_priv); 3415 3416 void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); 3417 void i915_gem_object_do_bit_17_swizzle(struct 
drm_i915_gem_object *obj, 3418 struct sg_table *pages); 3419 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, 3420 struct sg_table *pages); 3421 3422 static inline struct i915_gem_context * 3423 __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id) 3424 { 3425 return idr_find(&file_priv->context_idr, id); 3426 } 3427 3428 static inline struct i915_gem_context * 3429 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) 3430 { 3431 struct i915_gem_context *ctx; 3432 3433 rcu_read_lock(); 3434 ctx = __i915_gem_context_lookup_rcu(file_priv, id); 3435 if (ctx && !kref_get_unless_zero(&ctx->ref)) 3436 ctx = NULL; 3437 rcu_read_unlock(); 3438 3439 return ctx; 3440 } 3441 3442 static inline struct intel_timeline * 3443 i915_gem_context_lookup_timeline(struct i915_gem_context *ctx, 3444 struct intel_engine_cs *engine) 3445 { 3446 struct i915_address_space *vm; 3447 3448 vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base; 3449 return &vm->timeline.engine[engine->id]; 3450 } 3451 3452 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 3453 struct drm_file *file); 3454 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, 3455 struct drm_file *file); 3456 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, 3457 struct drm_file *file); 3458 void i915_oa_init_reg_state(struct intel_engine_cs *engine, 3459 struct i915_gem_context *ctx, 3460 uint32_t *reg_state); 3461 3462 /* i915_gem_evict.c */ 3463 int __must_check i915_gem_evict_something(struct i915_address_space *vm, 3464 u64 min_size, u64 alignment, 3465 unsigned cache_level, 3466 u64 start, u64 end, 3467 unsigned flags); 3468 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, 3469 struct drm_mm_node *node, 3470 unsigned int flags); 3471 int i915_gem_evict_vm(struct i915_address_space *vm); 3472 3473 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv); 3474 3475 /* belongs in i915_gem_gtt.h */ 3476 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) 3477 { 3478 wmb(); 3479 if (INTEL_GEN(dev_priv) < 6) 3480 intel_gtt_chipset_flush(); 3481 } 3482 3483 /* i915_gem_stolen.c */ 3484 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3485 struct drm_mm_node *node, u64 size, 3486 unsigned alignment); 3487 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3488 struct drm_mm_node *node, u64 size, 3489 unsigned alignment, u64 start, 3490 u64 end); 3491 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3492 struct drm_mm_node *node); 3493 int i915_gem_init_stolen(struct drm_i915_private *dev_priv); 3494 void i915_gem_cleanup_stolen(struct drm_device *dev); 3495 struct drm_i915_gem_object * 3496 i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, 3497 resource_size_t size); 3498 struct drm_i915_gem_object * 3499 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, 3500 resource_size_t stolen_offset, 3501 resource_size_t gtt_offset, 3502 resource_size_t size); 3503 3504 /* i915_gem_internal.c */ 3505 struct drm_i915_gem_object * 3506 i915_gem_object_create_internal(struct drm_i915_private *dev_priv, 3507 phys_addr_t size); 3508 3509 /* i915_gem_shrinker.c */ 3510 unsigned long i915_gem_shrink(struct drm_i915_private *i915, 3511 unsigned long target, 3512 unsigned long *nr_scanned, 3513 unsigned flags); 3514 #define I915_SHRINK_PURGEABLE 0x1 3515 #define I915_SHRINK_UNBOUND 0x2 3516 
#define I915_SHRINK_BOUND 0x4 3517 #define I915_SHRINK_ACTIVE 0x8 3518 #define I915_SHRINK_VMAPS 0x10 3519 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915); 3520 void i915_gem_shrinker_register(struct drm_i915_private *i915); 3521 void i915_gem_shrinker_unregister(struct drm_i915_private *i915); 3522 3523 3524 /* i915_gem_tiling.c */ 3525 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3526 { 3527 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3528 3529 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3530 i915_gem_object_is_tiled(obj); 3531 } 3532 3533 u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size, 3534 unsigned int tiling, unsigned int stride); 3535 u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size, 3536 unsigned int tiling, unsigned int stride); 3537 3538 /* i915_debugfs.c */ 3539 #ifdef CONFIG_DEBUG_FS 3540 int i915_debugfs_register(struct drm_i915_private *dev_priv); 3541 int i915_debugfs_connector_add(struct drm_connector *connector); 3542 void intel_display_crc_init(struct drm_i915_private *dev_priv); 3543 #else 3544 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;} 3545 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3546 { return 0; } 3547 static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} 3548 #endif 3549 3550 /* i915_gpu_error.c */ 3551 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 3552 3553 __printf(2, 3) 3554 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3555 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3556 const struct i915_gpu_state *gpu); 3557 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3558 struct drm_i915_private *i915, 3559 size_t count, loff_t pos); 3560 static inline void i915_error_state_buf_release( 3561 struct drm_i915_error_state_buf *eb) 3562 { 3563 kfree(eb->buf); 3564 } 3565 3566 struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); 3567 void i915_capture_error_state(struct drm_i915_private *dev_priv, 3568 u32 engine_mask, 3569 const char *error_msg); 3570 3571 static inline struct i915_gpu_state * 3572 i915_gpu_state_get(struct i915_gpu_state *gpu) 3573 { 3574 kref_get(&gpu->ref); 3575 return gpu; 3576 } 3577 3578 void __i915_gpu_state_free(struct kref *kref); 3579 static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) 3580 { 3581 if (gpu) 3582 kref_put(&gpu->ref, __i915_gpu_state_free); 3583 } 3584 3585 struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); 3586 void i915_reset_error_state(struct drm_i915_private *i915); 3587 3588 #else 3589 3590 static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, 3591 u32 engine_mask, 3592 const char *error_msg) 3593 { 3594 } 3595 3596 static inline struct i915_gpu_state * 3597 i915_first_error_state(struct drm_i915_private *i915) 3598 { 3599 return NULL; 3600 } 3601 3602 static inline void i915_reset_error_state(struct drm_i915_private *i915) 3603 { 3604 } 3605 3606 #endif 3607 3608 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3609 3610 /* i915_cmd_parser.c */ 3611 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); 3612 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); 3613 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); 3614 int 
intel_engine_cmd_parser(struct intel_engine_cs *engine, 3615 struct drm_i915_gem_object *batch_obj, 3616 struct drm_i915_gem_object *shadow_batch_obj, 3617 u32 batch_start_offset, 3618 u32 batch_len, 3619 bool is_master); 3620 3621 /* i915_perf.c */ 3622 extern void i915_perf_init(struct drm_i915_private *dev_priv); 3623 extern void i915_perf_fini(struct drm_i915_private *dev_priv); 3624 extern void i915_perf_register(struct drm_i915_private *dev_priv); 3625 extern void i915_perf_unregister(struct drm_i915_private *dev_priv); 3626 3627 /* i915_suspend.c */ 3628 extern int i915_save_state(struct drm_i915_private *dev_priv); 3629 extern int i915_restore_state(struct drm_i915_private *dev_priv); 3630 3631 /* i915_sysfs.c */ 3632 void i915_setup_sysfs(struct drm_i915_private *dev_priv); 3633 void i915_teardown_sysfs(struct drm_i915_private *dev_priv); 3634 3635 /* intel_lpe_audio.c */ 3636 int intel_lpe_audio_init(struct drm_i915_private *dev_priv); 3637 void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv); 3638 void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv); 3639 void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, 3640 enum pipe pipe, enum port port, 3641 const void *eld, int ls_clock, bool dp_output); 3642 3643 /* intel_i2c.c */ 3644 extern int intel_setup_gmbus(struct drm_i915_private *dev_priv); 3645 extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv); 3646 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, 3647 unsigned int pin); 3648 3649 extern struct i2c_adapter * 3650 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); 3651 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3652 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3653 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3654 { 3655 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 3656 } 3657 extern void intel_i2c_reset(struct drm_i915_private *dev_priv); 3658 3659 /* intel_bios.c */ 3660 void intel_bios_init(struct drm_i915_private *dev_priv); 3661 void intel_bios_cleanup(struct drm_i915_private *dev_priv); 3662 bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3663 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3664 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3665 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); 3666 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3667 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); 3668 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 3669 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, 3670 enum port port); 3671 bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, 3672 enum port port); 3673 3674 /* intel_acpi.c */ 3675 #ifdef CONFIG_ACPI 3676 extern void intel_register_dsm_handler(void); 3677 extern void intel_unregister_dsm_handler(void); 3678 #else 3679 static inline void intel_register_dsm_handler(void) { return; } 3680 static inline void intel_unregister_dsm_handler(void) { return; } 3681 #endif /* CONFIG_ACPI */ 3682 3683 /* intel_device_info.c */ 3684 static inline struct intel_device_info * 3685 mkwrite_device_info(struct drm_i915_private *dev_priv) 3686 { 3687 return (struct intel_device_info *)&dev_priv->info; 3688 } 3689 3690 /* 
modesetting */ 3691 extern void intel_modeset_init_hw(struct drm_device *dev); 3692 extern int intel_modeset_init(struct drm_device *dev); 3693 extern void intel_modeset_cleanup(struct drm_device *dev); 3694 extern int intel_connector_register(struct drm_connector *); 3695 extern void intel_connector_unregister(struct drm_connector *); 3696 extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, 3697 bool state); 3698 extern void intel_display_resume(struct drm_device *dev); 3699 extern void i915_redisable_vga(struct drm_i915_private *dev_priv); 3700 extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); 3701 extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); 3702 extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); 3703 extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val); 3704 extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3705 bool enable); 3706 3707 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3708 struct drm_file *file); 3709 3710 /* overlay */ 3711 extern struct intel_overlay_error_state * 3712 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); 3713 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3714 struct intel_overlay_error_state *error); 3715 3716 extern struct intel_display_error_state * 3717 intel_display_capture_error_state(struct drm_i915_private *dev_priv); 3718 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3719 struct intel_display_error_state *error); 3720 3721 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); 3722 int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox, 3723 u32 val, int timeout_us); 3724 #define sandybridge_pcode_write(dev_priv, mbox, val) \ 3725 sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500) 3726 3727 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, 3728 u32 reply_mask, u32 reply, int timeout_base_ms); 3729 3730 /* intel_sideband.c */ 3731 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); 3732 int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); 3733 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); 3734 u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg); 3735 void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val); 3736 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); 3737 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3738 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); 3739 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3740 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); 3741 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3742 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg); 3743 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val); 3744 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 3745 enum intel_sbi_destination destination); 3746 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 3747 enum intel_sbi_destination destination); 3748 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); 3749 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3750 3751 /* intel_dpio_phy.c */ 3752 
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, 3753 enum dpio_phy *phy, enum dpio_channel *ch); 3754 void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, 3755 enum port port, u32 margin, u32 scale, 3756 u32 enable, u32 deemphasis); 3757 void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); 3758 void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); 3759 bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, 3760 enum dpio_phy phy); 3761 bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, 3762 enum dpio_phy phy); 3763 uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count); 3764 void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, 3765 uint8_t lane_lat_optim_mask); 3766 uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); 3767 3768 void chv_set_phy_signal_level(struct intel_encoder *encoder, 3769 u32 deemph_reg_value, u32 margin_reg_value, 3770 bool uniq_trans_scale); 3771 void chv_data_lane_soft_reset(struct intel_encoder *encoder, 3772 const struct intel_crtc_state *crtc_state, 3773 bool reset); 3774 void chv_phy_pre_pll_enable(struct intel_encoder *encoder, 3775 const struct intel_crtc_state *crtc_state); 3776 void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, 3777 const struct intel_crtc_state *crtc_state); 3778 void chv_phy_release_cl2_override(struct intel_encoder *encoder); 3779 void chv_phy_post_pll_disable(struct intel_encoder *encoder, 3780 const struct intel_crtc_state *old_crtc_state); 3781 3782 void vlv_set_phy_signal_level(struct intel_encoder *encoder, 3783 u32 demph_reg_value, u32 preemph_reg_value, 3784 u32 uniqtranscale_reg_value, u32 tx3_demph); 3785 void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, 3786 const struct intel_crtc_state *crtc_state); 3787 void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, 3788 const struct intel_crtc_state *crtc_state); 3789 void vlv_phy_reset_lanes(struct intel_encoder *encoder, 3790 const struct intel_crtc_state *old_crtc_state); 3791 3792 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3793 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3794 u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, 3795 const i915_reg_t reg); 3796 3797 u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1); 3798 3799 static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, 3800 const i915_reg_t reg) 3801 { 3802 return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000); 3803 } 3804 3805 #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) 3806 #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) 3807 3808 #define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) 3809 #define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) 3810 #define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) 3811 #define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) 3812 3813 #define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) 3814 #define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) 3815 #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) 3816 #define I915_WRITE_NOTRACE(reg, val) 
dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) 3817 3818 /* Be very careful with read/write 64-bit values. On 32-bit machines, they 3819 * will be implemented using 2 32-bit writes in an arbitrary order with 3820 * an arbitrary delay between them. This can cause the hardware to 3821 * act upon the intermediate value, possibly leading to corruption and 3822 * machine death. For this reason we do not support I915_WRITE64, or 3823 * dev_priv->uncore.funcs.mmio_writeq. 3824 * 3825 * When reading a 64-bit value as two 32-bit values, the delay may cause 3826 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that 3827 * occasionally a 64-bit register does not actually support a full readq 3828 * and must be read using two 32-bit reads. 3829 * 3830 * You have been warned. 3831 */ 3832 #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3833 3834 #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3835 u32 upper, lower, old_upper, loop = 0; \ 3836 upper = I915_READ(upper_reg); \ 3837 do { \ 3838 old_upper = upper; \ 3839 lower = I915_READ(lower_reg); \ 3840 upper = I915_READ(upper_reg); \ 3841 } while (upper != old_upper && loop++ < 2); \ 3842 (u64)upper << 32 | lower; }) 3843 3844 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3845 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3846 3847 #define __raw_read(x, s) \ 3848 static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \ 3849 i915_reg_t reg) \ 3850 { \ 3851 return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ 3852 } 3853 3854 #define __raw_write(x, s) \ 3855 static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \ 3856 i915_reg_t reg, uint##x##_t val) \ 3857 { \ 3858 write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \ 3859 } 3860 __raw_read(8, b) 3861 __raw_read(16, w) 3862 __raw_read(32, l) 3863 __raw_read(64, q) 3864 3865 __raw_write(8, b) 3866 __raw_write(16, w) 3867 __raw_write(32, l) 3868 __raw_write(64, q) 3869 3870 #undef __raw_read 3871 #undef __raw_write 3872 3873 /* These are untraced mmio-accessors that are only valid to be used inside 3874 * critical sections, such as inside IRQ handlers, where forcewake is explicitly 3875 * controlled. 3876 * 3877 * Think twice, and think again, before using these. 3878 * 3879 * As an example, these accessors can possibly be used between: 3880 * 3881 * spin_lock_irq(&dev_priv->uncore.lock); 3882 * intel_uncore_forcewake_get__locked(); 3883 * 3884 * and 3885 * 3886 * intel_uncore_forcewake_put__locked(); 3887 * spin_unlock_irq(&dev_priv->uncore.lock); 3888 * 3889 * 3890 * Note: some registers may not need forcewake held, so 3891 * intel_uncore_forcewake_{get,put} can be omitted, see 3892 * intel_uncore_forcewake_for_reg(). 3893 * 3894 * Certain architectures will die if the same cacheline is concurrently accessed 3895 * by different clients (e.g. on Ivybridge). Access to registers should 3896 * therefore generally be serialised, by either the dev_priv->uncore.lock or 3897 * a more localised lock guarding all access to that bank of registers.
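 *
 * As a purely illustrative sketch (reg and val are placeholders, and
 * FORCEWAKE_ALL is assumed only for brevity), such a section might look
 * like:
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	I915_WRITE_FW(reg, val);
 *	POSTING_READ_FW(reg);
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);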
3898 */ 3899 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__)) 3900 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__)) 3901 #define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__)) 3902 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) 3903 3904 /* "Broadcast RGB" property */ 3905 #define INTEL_BROADCAST_RGB_AUTO 0 3906 #define INTEL_BROADCAST_RGB_FULL 1 3907 #define INTEL_BROADCAST_RGB_LIMITED 2 3908 3909 static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv) 3910 { 3911 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3912 return VLV_VGACNTRL; 3913 else if (INTEL_GEN(dev_priv) >= 5) 3914 return CPU_VGACNTRL; 3915 else 3916 return VGACNTRL; 3917 } 3918 3919 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) 3920 { 3921 unsigned long j = msecs_to_jiffies(m); 3922 3923 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3924 } 3925 3926 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) 3927 { 3928 /* nsecs_to_jiffies64() does not guard against overflow */ 3929 if (NSEC_PER_SEC % HZ && 3930 div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) 3931 return MAX_JIFFY_OFFSET; 3932 3933 return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); 3934 } 3935 3936 static inline unsigned long 3937 timespec_to_jiffies_timeout(const struct timespec *value) 3938 { 3939 unsigned long j = timespec_to_jiffies(value); 3940 3941 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3942 } 3943 3944 /* 3945 * If you need to wait X milliseconds between events A and B, but event B 3946 * doesn't happen exactly after event A, you record the timestamp (jiffies) of 3947 * when event A happened, then just before event B you call this function and 3948 * pass the timestamp as the first argument, and X as the second argument. 3949 */ 3950 static inline void 3951 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) 3952 { 3953 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; 3954 3955 /* 3956 * Don't re-read the value of "jiffies" every time since it may change 3957 * behind our back and break the math. 3958 */ 3959 tmp_jiffies = jiffies; 3960 target_jiffies = timestamp_jiffies + 3961 msecs_to_jiffies_timeout(to_wait_ms); 3962 3963 if (time_after(target_jiffies, tmp_jiffies)) { 3964 remaining_jiffies = target_jiffies - tmp_jiffies; 3965 while (remaining_jiffies) 3966 remaining_jiffies = 3967 schedule_timeout_uninterruptible(remaining_jiffies); 3968 } 3969 } 3970 3971 static inline bool 3972 __i915_request_irq_complete(const struct drm_i915_gem_request *req) 3973 { 3974 struct intel_engine_cs *engine = req->engine; 3975 u32 seqno; 3976 3977 /* Note that the engine may have wrapped around the seqno, and 3978 * so our request->global_seqno will be ahead of the hardware, 3979 * even though it completed the request before wrapping. We catch 3980 * this by kicking all the waiters before resetting the seqno 3981 * in hardware, and also signal the fence. 3982 */ 3983 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags)) 3984 return true; 3985 3986 /* The request was dequeued before we were awoken. We check after 3987 * inspecting the hw to confirm that this was the same request 3988 * that generated the HWS update. The memory barriers within 3989 * the request execution are sufficient to ensure that a check 3990 * after reading the value from hw matches this request. 
3991 */ 3992 seqno = i915_gem_request_global_seqno(req); 3993 if (!seqno) 3994 return false; 3995 3996 /* Before we do the heavier coherent read of the seqno, 3997 * check the value (hopefully) in the CPU cacheline. 3998 */ 3999 if (__i915_gem_request_completed(req, seqno)) 4000 return true; 4001 4002 /* Ensure our read of the seqno is coherent so that we 4003 * do not "miss an interrupt" (i.e. if this is the last 4004 * request and the seqno write from the GPU is not visible 4005 * by the time the interrupt fires, we will see that the 4006 * request is incomplete and go back to sleep awaiting 4007 * another interrupt that will never come.) 4008 * 4009 * Strictly, we only need to do this once after an interrupt, 4010 * but it is easier and safer to do it every time the waiter 4011 * is woken. 4012 */ 4013 if (engine->irq_seqno_barrier && 4014 test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) { 4015 struct intel_breadcrumbs *b = &engine->breadcrumbs; 4016 4017 /* The ordering of irq_posted versus applying the barrier 4018 * is crucial. The clearing of the current irq_posted must 4019 * be visible before we perform the barrier operation, 4020 * such that if a subsequent interrupt arrives, irq_posted 4021 * is reasserted and our task rewoken (which causes us to 4022 * do another __i915_request_irq_complete() immediately 4023 * and reapply the barrier). Conversely, if the clear 4024 * occurs after the barrier, then an interrupt that arrived 4025 * whilst we waited on the barrier would not trigger a 4026 * barrier on the next pass, and the read may not see the 4027 * seqno update. 4028 */ 4029 engine->irq_seqno_barrier(engine); 4030 4031 /* If we consume the irq, but we are no longer the bottom-half, 4032 * the real bottom-half may not have serialised their own 4033 * seqno check with the irq-barrier (i.e. may have inspected 4034 * the seqno before we believe it coherent since they see 4035 * irq_posted == false but we are still running). 4036 */ 4037 spin_lock_irq(&b->irq_lock); 4038 if (b->irq_wait && b->irq_wait->tsk != current) 4039 /* Note that if the bottom-half is changed as we 4040 * are sending the wake-up, the new bottom-half will 4041 * be woken by whoever made the change. We only have 4042 * to worry about when we steal the irq-posted for 4043 * ourselves. 4044 */ 4045 wake_up_process(b->irq_wait->tsk); 4046 spin_unlock_irq(&b->irq_lock); 4047 4048 if (__i915_gem_request_completed(req, seqno)) 4049 return true; 4050 } 4051 4052 return false; 4053 } 4054 4055 void i915_memcpy_init_early(struct drm_i915_private *dev_priv); 4056 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len); 4057 4058 /* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment, 4059 * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot 4060 * perform the operation. To check beforehand, pass in the parameters 4061 * to i915_can_memcpy_from_wc() - since we only care about the low 4 bits, 4062 * you only need to pass in the minor offsets; page-aligned pointers are 4063 * always valid. 4064 * 4065 * For just checking for SSE4.1, in the foreknowledge that the future use 4066 * will be correctly aligned, just use i915_has_memcpy_from_wc().
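 *
 * As a purely illustrative sketch (dst, src and len stand for whatever the
 * caller already has; they are not defined here), the intended usage
 * pattern is:
 *
 *	if (i915_can_memcpy_from_wc(dst, src, len))
 *		i915_memcpy_from_wc(dst, src, len);
 *	else
 *		memcpy(dst, src, len);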
4067 */ 4068 #define i915_can_memcpy_from_wc(dst, src, len) \ 4069 i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0) 4070 4071 #define i915_has_memcpy_from_wc() \ 4072 i915_memcpy_from_wc(NULL, NULL, 0) 4073 4074 /* i915_mm.c */ 4075 int remap_io_mapping(struct vm_area_struct *vma, 4076 unsigned long addr, unsigned long pfn, unsigned long size, 4077 struct io_mapping *iomap); 4078 4079 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) 4080 { 4081 if (INTEL_GEN(i915) >= 10) 4082 return CNL_HWS_CSB_WRITE_INDEX; 4083 else 4084 return I915_HWS_CSB_WRITE_INDEX; 4085 } 4086 4087 #endif 4088