/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)	((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)	(I915_PTES(pte_len) - 1)
#define I915_PDES		512
#define I915_PDE_MASK		(I915_PDES - 1)

/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)		(1 << ((pde_shift) - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define GEN12_PPGTT_PTE_LM		BIT_ULL(11)

#define GEN12_GGTT_PTE_LM		BIT_ULL(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
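/*
 * Worked example (editor's illustration, not from the programming docs):
 * HSW_WB_ELLC_LLC_AGE0 expands as
 *
 *	HSW_CACHEABILITY_CONTROL(0xb)
 *	  == ((0xb & 0x7) << 1) | ((0xb & 0x8) << (11 - 3))
 *	  == (0x3 << 1) | (0x8 << 8)
 *	  == 0x806
 *
 * i.e. the low three bits of the 4-bit value land in PTE bits 3:1 and the
 * high bit lands in PTE bit 11, matching the comment above.
 */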
/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_3LVL_PDPES			4

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x) << 4)
#define GEN8_PPAT_LLCeLLC		(3 << 2)
#define GEN8_PPAT_LLCELLC		(2 << 2)
#define GEN8_PPAT_LLC			(1 << 2)
#define GEN8_PPAT_WB			(3 << 0)
#define GEN8_PPAT_WT			(2 << 0)
#define GEN8_PPAT_WC			(1 << 0)
#define GEN8_PPAT_UC			(0 << 0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0 << 2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PAGE_PRESENT		BIT_ULL(0)
#define GEN8_PAGE_RW			BIT_ULL(1)

#define GEN8_PDE_IPS_64K		BIT(11)
#define GEN8_PDE_PS_2M			BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)
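/*
 * Illustrative sketch (editor's addition, not a helper in the driver):
 * the px_*() macros are type-generic, so the same expression works on a
 * page table, a page directory, or the raw GEM object:
 *
 *	static void example_log_pd(struct i915_page_directory *pd)
 *	{
 *		dma_addr_t daddr = px_dma(pd);
 *
 *		DBG("pd dma %pad, used %d\n",
 *		    &daddr, atomic_read(px_used(pd)));
 *	}
 *
 * Here px_dma(pd) resolves through pd->pt.base, whereas px_dma(pt) would
 * resolve through pt->base; any other type fails to compile because
 * __px_choose_expr() falls through to (void)0.
 */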
struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma *vma);
};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total;	/* size addr space maps (ex. 2GB for ggtt) */
	u64 reserved;	/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shutdown our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};
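/*
 * Editor's sketch of how the vfuncs above cooperate when binding into a
 * ppgtt (hand-waved; the real sequencing lives in the vma code and the
 * per-generation backends): page tables covering the va range are
 * populated first from a preallocated stash, then the PTEs are written:
 *
 *	vm->allocate_va_range(vm, stash, vma->node.start, vma->size);
 *	vm->insert_entries(vm, vma, cache_level, pte_flags);
 *
 * On unbind, clear_range() points the PTEs back at the scratch page.
 */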
/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}
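/*
 * Typical usage (editor's illustration): any long-lived pointer to a vm
 * holds a reference, taken and dropped in pairs:
 *
 *	struct i915_address_space *vm = i915_vm_get(other_vm);
 *	... use vm ...
 *	i915_vm_put(vm);
 *
 * i915_vm_resv_get()/i915_vm_resv_put() work the same way for the shared
 * reservation lock, which may need to outlive the vm itself.
 */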
void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference, obtained from
 *	i915_vm_resv_get(), is to be released.
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline bool
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return false;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	__i915_vm_close(vm);

	i915_vm_put(vm);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}
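/*
 * Worked example for i915_pte_count() (editor's illustration): with gen6
 * parameters (pde_shift == GEN6_PDE_SHIFT == 22, so 1024 PTEs per page
 * table), addr = 0x3ff000 and length = 0x3000 cross the 4M boundary at
 * 0x400000, so only the stretch up to the boundary is counted:
 *
 *	1024 - i915_pte_index(0x3ff000, 22) == 1024 - 1023 == 1
 *
 * Callers are expected to loop, advancing addr by the returned number of
 * pages, until the whole length has been consumed.
 */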
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *ggtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);
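/*
 * Sketch of the stash lifecycle (editor's illustration; error handling
 * and locking elided): page-table memory is preallocated and mapped up
 * front, then consumed by allocate_va_range(), with any surplus returned:
 *
 *	err = i915_vm_alloc_pt_stash(vm, &stash, size);
 *	if (!err)
 *		err = i915_vm_map_pt_stash(vm, &stash);
 *	if (!err)
 *		vm->allocate_va_range(vm, &stash, start, size);
 *	i915_vm_free_pt_stash(vm, &stash);	(frees whatever is left)
 */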
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

/*
 * Snapshot the first scatterlist segment of a vma as a [dma, max) cursor;
 * used by the per-generation PTE insertion loops.
 */
static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma)
{
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}

#endif /* __INTEL_GTT_H__ */