/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

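/*
 * Note (illustrative): I915_GTT_PAGE_MASK relies on two's complement, so
 * -I915_GTT_PAGE_SIZE == ~(I915_GTT_PAGE_SIZE - 1) == 0xfffffffffffff000,
 * e.g. 0x12345678 & I915_GTT_PAGE_MASK == 0x12345000 (rounded down to 4K).
 */
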
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

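/*
 * Worked example (illustrative): for a 40-bit physical address with bit 32
 * set, addr = 0x1_0000_0000, ((addr) >> 28) & 0xff0 == 0x010, i.e. physical
 * bit 32 lands in PTE bit 4, and physical bit 39 would land in PTE bit 11.
 * The HSW variant masks with 0x7f0 instead because PTE bit 11 is reserved
 * there for the fourth cacheability-control bit described above.
 */
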
/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_3LVL_PDPES			4

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

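/*
 * Illustrative decomposition of a 48b GEN8 address (each level indexes
 * 512 == I915_PDES entries, per the layout comment above):
 *
 *	pml4e = (addr >> 39) & I915_PDE_MASK;
 *	pdpe  = (addr >> 30) & I915_PDE_MASK;
 *	pde   = (addr >> 21) & I915_PDE_MASK;
 *	pte   = (addr >> 12) & I915_PDE_MASK;
 */
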
struct i915_fence_reg;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/*
		 * For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		u32 ggtt_offset;
	};
};

struct i915_page_scratch {
	struct i915_page_dma base;
	u64 encode;
};

struct i915_page_table {
	struct i915_page_dma base;
	atomic_t used;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void *entry[512];
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct i915_page_dma *, __x, \
	__px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
	__px_choose_expr(px, struct i915_page_table *, &__x->base, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
	(void)0))))
#define px_dma(px) (px_base(px)->daddr)

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

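/*
 * Illustrative use of the px_*() accessors: dispatch happens at compile
 * time via __builtin_choose_expr, so given
 *
 *	struct i915_page_directory *pd;
 *	struct i915_page_table *pt;
 *
 * px_base(pd) expands to &pd->pt.base, px_base(pt) to &pt->base, and
 * px_dma(pd) reads pd->pt.base.daddr. Passing an unsupported type
 * evaluates to (void)0 and fails to compile at the point of use.
 */
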
enum i915_cache_level;

struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_vma;
struct intel_gt;

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

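/*
 * Illustrative wiring (a sketch; the bind/unbind names here are
 * placeholders, while ppgtt_set_pages() and clear_pages() are declared
 * later in this header):
 *
 *	vm->vma_ops.bind_vma    = my_ppgtt_bind_vma;
 *	vm->vma_ops.unbind_vma  = my_ppgtt_unbind_vma;
 *	vm->vma_ops.set_pages   = ppgtt_set_pages;
 *	vm->vma_ops.clear_pages = clear_pages;
 */
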
struct pagestash {
	spinlock_t lock;
	struct pagevec pvec;
};

void stash_init(struct pagestash *stash);

struct i915_address_space {
	struct kref ref;
	struct rcu_work rcu;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	/*
	 * Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shutdown our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

	struct i915_page_scratch scratch[4];
	unsigned int scratch_order;
	unsigned int top;

	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	struct pagestash free_pages;

	/* Global GTT */
	bool is_ggtt:1;

	/* Some systems require uncached updates of the page directories */
	bool pt_kmap_wc:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)

	int (*allocate_va_range)(struct i915_address_space *vm,
				 u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

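/*
 * Illustrative call flow (a sketch of how the vfuncs above cooperate, not
 * any particular gen's implementation): on bind, allocate_va_range()
 * populates the page tables backing [start, start + length) and
 * insert_entries() writes the PTEs from the vma's sg list; on unbind,
 * clear_range() points the PTEs back at scratch. scratch[] holds one
 * scratch entry per page-table level (scratch[0] being the data page),
 * with vm->top indexing the highest level in use.
 */
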
/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

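/*
 * Illustrative: a 48b PPGTT has vm->total == 1ULL << 48, so
 * (vm->total - 1) >> 32 is non-zero and the vm is 4-level; a 4 GiB
 * 3-level vm has vm->total == 1ULL << 32 and the expression is 0.
 */
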
static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

void i915_vm_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline struct i915_address_space *
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return NULL;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	__i915_vm_close(vm);

	i915_vm_put(vm);
}

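/*
 * Illustrative lifetime pairing (a sketch, not lifted from a particular
 * caller): i915_vm_get()/i915_vm_put() manage only the memory reference,
 * while the open count tracks users that may still allocate from the vm.
 *
 *	struct i915_address_space *vm;
 *
 *	vm = i915_vm_open(ctx->vm);	// +1 open, +1 ref
 *	... bind vma, use the vm ...
 *	i915_vm_close(vm);		// -1 open (may shut down), -1 ref
 */
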
void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}

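/*
 * Worked example (illustrative), with pde_shift == GEN6_PDE_SHIFT (22),
 * so one page table spans 4M and holds NUM_PTE(22) == 1024 PTEs:
 *
 *	i915_pte_count(0x000000, 0x3000, 22) == 3   // 3 PTEs, no crossing
 *	i915_pte_count(0x3ff000, 0x3000, 22) == 1   // clamped at the 4M
 *						    // boundary; the caller
 *						    // loops for the rest
 */
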
static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_dma *pt = ppgtt->pd->entry[n];

	return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);

void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

void
fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)

int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
void cleanup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(size_t sz);

void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);

#define free_px(vm, px) free_pd(vm, px_base(px))

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_dma * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct i915_page_scratch * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct i915_page_scratch * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg->length };
}

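/*
 * Illustrative consumer of sgt_dma() (a sketch of the usual PTE-write
 * walk; write_pte() is a stand-in for a gen-specific PTE writer):
 *
 *	struct sgt_dma iter = sgt_dma(vma);
 *	u64 offset = vma->node.start;
 *
 *	do {
 *		write_pte(vm, offset, iter.dma | pte_flags);
 *		offset += I915_GTT_PAGE_SIZE;
 *
 *		iter.dma += I915_GTT_PAGE_SIZE;
 *		if (iter.dma >= iter.max) {
 *			iter.sg = sg_next(iter.sg);
 *			if (!iter.sg)
 *				break;
 *			iter.dma = sg_dma_address(iter.sg);
 *			iter.max = iter.dma + iter.sg->length;
 *		}
 *	} while (1);
 */
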
#endif