/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation, ascending from top
 * to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE	I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE	I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK	-I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT	I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE	-1
#define I915_MAX_NUM_FENCES	32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS	6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)	((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)	(I915_PTES(pte_len) - 1)
#define I915_PDES		512
#define I915_PDE_MASK		(I915_PDES - 1)

/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES		I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE		(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN		(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT		22
#define GEN6_PDE_VALID		REG_BIT(0)
#define NUM_PTE(pde_shift)	(1 << (pde_shift - PAGE_SHIFT))
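/*
 * Worked example (illustrative): GEN6_GTT_ADDR_ENCODE() folds physical
 * address bits 39:32 into PTE bits 11:4, which a 4K-aligned page address
 * leaves zero. For a page at 0x12_3456_7000:
 *
 *   (0x1234567000 >> 28) & 0xff0 == 0x120
 *   0x1234567000 | 0x120        == 0x1234567120
 *
 * i.e. addr bits 39:32 (0x12) now sit in bits 11:4, clear of the cache and
 * valid bits below.
 */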
#define GEN7_PTE_CACHE_L3_LLC	(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference compared to a normal x86 3-level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
 * PML4E | PDPE  | PDE   | PTE   | offset
 */
#define GEN8_3LVL_PDPES		4
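/*
 * Worked example (illustrative): decomposing the 48b address 0x0123_4567_89ab
 * with the 9-bit-per-level layout above:
 *
 *   PML4E  = (addr >> 39) & 0x1ff = 0x002
 *   PDPE   = (addr >> 30) & 0x1ff = 0x08d
 *   PDE    = (addr >> 21) & 0x1ff = 0x02b
 *   PTE    = (addr >> 12) & 0x1ff = 0x078
 *   offset =  addr        & 0xfff = 0x9ab
 */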
#define PPAT_UNCACHED		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE		0 /* WB LLC */
#define PPAT_CACHED		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC	_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP		REG_BIT(6)
#define GEN8_PPAT_AGE(x)	((x)<<4)
#define GEN8_PPAT_LLCeLLC	(3<<2)
#define GEN8_PPAT_LLCELLC	(2<<2)
#define GEN8_PPAT_LLC		(1<<2)
#define GEN8_PPAT_WB		(3<<0)
#define GEN8_PPAT_WT		(2<<0)
#define GEN8_PPAT_WC		(1<<0)
#define GEN8_PPAT_UC		(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
#define GEN8_PPAT(i, x)		((u64)(x) << ((i) * 8))

#define GEN8_PDE_IPS_64K	BIT(11)
#define GEN8_PDE_PS_2M		BIT(7)

struct i915_fence_reg;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/*
		 * For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		u32 ggtt_offset;
	};
};

struct i915_page_scratch {
	struct i915_page_dma base;
	u64 encode;
};

struct i915_page_table {
	struct i915_page_dma base;
	atomic_t used;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void *entry[512];
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct i915_page_dma *, __x, \
	__px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
	__px_choose_expr(px, struct i915_page_table *, &__x->base, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
	(void)0))))
#define px_dma(px) (px_base(px)->daddr)

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)
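/*
 * Note: the px_*() accessors above are type-generic. __px_choose_expr()
 * compares the static type of its argument at compile time, so e.g.
 * px_dma(pd) on a struct i915_page_directory * evaluates to
 * pd->pt.base.daddr, while px_dma(pt) on a struct i915_page_table *
 * evaluates to pt->base.daddr, all with no runtime cost. An unsupported
 * type selects the (void)0 arm and fails to compile.
 */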
enum i915_cache_level;

struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_vma;
struct intel_gt;

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

struct pagestash {
	spinlock_t lock;
	struct pagevec pvec;
};

void stash_init(struct pagestash *stash);

struct i915_address_space {
	struct kref ref;
	struct rcu_work rcu;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	/*
	 * Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shutdown our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

	struct i915_page_scratch scratch[4];
	unsigned int scratch_order;
	unsigned int top;

	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	struct pagestash free_pages;

	/* Global GTT */
	bool is_ggtt:1;

	/* Some systems require uncached updates of the page directories */
	bool pt_kmap_wc:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)

	int (*allocate_va_range)(struct i915_address_space *vm,
				 u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates
 * a Graphics Virtual Address into a Physical Address. In addition to the
 * normal collateral associated with any va->pa translations, GEN hardware
 * also has a portion of the GTT which can be mapped by the CPU and remain
 * both coherent and correct (in cases like swizzling). That region is
 * referred to as GMADR in the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the global GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	/* Anything addressing above 4GiB needs the 4-level page tables. */
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

void i915_vm_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/*
 * An address space carries two counts: @ref keeps the struct itself alive,
 * while @open counts users that may still add new mappings. The open/close
 * helpers below manage both together.
 */
static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline struct i915_address_space *
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return NULL;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	__i915_vm_close(vm);

	i915_vm_put(vm);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
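/*
 * Worked example (illustrative): with GEN6_PDE_SHIFT (22) a page table spans
 * 4MiB and holds NUM_PTE(22) == 1024 PTEs. For addr = 0x3ff000 and
 * length = 0x3000, the range crosses the 4MiB boundary at 0x400000, so
 * i915_pte_count() returns only the single PTE left in the first table
 * (1024 - i915_pte_index(0x3ff000, 22) == 1024 - 1023 == 1) and the caller
 * is expected to loop over the remainder.
 */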
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_dma *pt = ppgtt->pd->entry[n];

	/* An unallocated slot points at the scratch page for its level. */
	return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);

void i915_ggtt_suspend(struct i915_ggtt *ggtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

void
fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
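/*
 * For example (illustrative): fill32_px(pt, 0xdeadbeef) replicates the 32b
 * value into both halves of a 64b word and writes 0xdeadbeefdeadbeef into
 * all PAGE_SIZE / sizeof(u64) qwords of the page, so the one helper can seed
 * both 32b (gen6) and 64b (gen8) PTE layouts with the same pattern.
 */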
int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
void cleanup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(size_t sz);

void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);

#define free_px(vm, px) free_pd(vm, px_base(px))

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_dma * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct i915_page_scratch * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct i915_page_scratch * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

/* Iterator state for walking a vma's backing store by dma address */
static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg->length };
}

#endif /* __INTEL_GTT_H__ */