/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#include "i915_params.h"
#include "intel_memory_region.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
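/*
 * Negating the page size yields an all-ones mask above the page bits: with
 * 4K pages, -I915_GTT_PAGE_SIZE == ~0xfffull, so (addr & I915_GTT_PAGE_MASK)
 * rounds addr down to a page boundary.
 */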

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw stores physical address bits 39:32 in PTE bits 11:4 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
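/*
 * Worked example: address bit 32 (BIT_ULL(32)) shifted right by 28 lands on
 * bit 4, and bit 39 lands on bit 11; the 0xff0 mask keeps exactly those
 * relocated bits 11:4, while the low OR term keeps the page-aligned address
 * bits in place.
 */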
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define GEN12_PPGTT_PTE_LM	BIT_ULL(11)

#define GEN12_GGTT_PTE_LM	BIT_ULL(1)

#define GEN12_PDE_64K BIT(6)
#define GEN12_PTE_PS64 BIT(8)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
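/*
 * For example, HSW_CACHEABILITY_CONTROL(0x7) == 0xe (all three low bits in
 * PTE bits 3:1), while HSW_CACHEABILITY_CONTROL(0x8) == 0x800 (the fourth
 * bit relocated to PTE bit 11).
 */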
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
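/*
 * For a 48b address, each level above the 4K page offset indexes 512
 * entries (9 bits): PML4E = (addr >> 39) & 0x1ff, PDPE = (addr >> 30) & 0x1ff,
 * PDE = (addr >> 21) & 0x1ff and PTE = (addr >> 12) & 0x1ff. The 32b layout
 * only has 2 bits of PDPE, hence the 4 PDPEs below.
 */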
#define GEN8_3LVL_PDPES			4

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PAGE_PRESENT		BIT_ULL(0)
#define GEN8_PAGE_RW			BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
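/*
 * A typical use, iterating the DMA address of every 4K page in a
 * scatterlist (a sketch; __iter is a struct sgt_iter, and insert_pte()
 * stands in for whatever per-page work the caller does):
 *
 *	struct sgt_iter iter;
 *	dma_addr_t daddr;
 *
 *	for_each_sgt_daddr(daddr, iter, sgt)
 *		insert_pte(vm, daddr);
 */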

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
	bool is_compact;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
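/*
 * px_base() is resolved entirely at compile time: given a GEM object it is
 * a no-op, given a struct i915_page_table * it expands to pt->base, and
 * given a struct i915_page_directory * it expands to pd->pt.base. Any other
 * pointer type evaluates to (void)0 and fails to build wherever a pointer
 * is required.
 */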

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
	/*
	 * Optionally override the alignment/size of the physical page that
	 * contains each PT. If not set, this defaults back to the usual
	 * I915_GTT_PAGE_SIZE_4K. It does not influence the other paging
	 * structures. MUST be a power-of-two. ONLY applicable on discrete
	 * platforms.
	 */
	int pt_sz;
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vmas currently bound.
	 */
	struct list_head bound_list;

	/**
	 * List of vmas not yet bound or evicted.
	 */
	struct list_head unbound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	/* Skip pte rewrite on unbind for suspend. Protected by @mutex */
	bool skip_pte_rewrite:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*raw_insert_page)(struct i915_address_space *vm,
				dma_addr_t addr,
				u64 offset,
				enum i915_cache_level cache_level,
				u32 flags);
	void (*raw_insert_entries)(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   enum i915_cache_level cache_level,
				   u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;          /* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	/**
	 * @pte_lost: Are ptes lost on resume?
	 *
	 * Whether the system was recently restored from hibernate and
	 * thus may have lost pte content.
	 */
	bool pte_lost;

	/**
	 * @probed_pte: Probed pte value on suspend. Re-checked on resume.
	 */
	u64 probed_pte;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;

	/** List of GTs mapping this GGTT */
	struct list_head gt_list;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

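/*
 * (vm->total - 1) has bits set above bit 31 whenever the address space is
 * larger than 4 GiB, which is exactly when the 4-level page-table layout
 * is in use; a 4 GiB (or smaller) vm uses the 3-level layout.
 */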
static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
					enum intel_memory_type type)
{
	/* avoid INTEL_MEMORY_MOCK overflow */
	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
		type = INTEL_MEMORY_SYSTEM;

	return vm->min_alignment[type];
}

static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

static inline struct i915_address_space *
i915_vm_tryget(struct i915_address_space *vm)
{
	return kref_get_unless_zero(&vm->ref) ? vm : NULL;
}

static inline void assert_vm_alive(struct i915_address_space *vm)
{
	GEM_BUG_ON(!kref_read(&vm->ref));
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference should be released, as
 * obtained from i915_vm_resv_get()
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
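/*
 * Worked example with the gen6 layout (pde_shift == 22, 1024 PTEs per page
 * table): addr == 0x3ff000 and length == 0x2000 would cross into the next
 * page table, so only the single PTE up to the 4M boundary is counted
 * (1024 - 1023 == 1); the remainder must be handled by a further call.
 */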

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);
void intel_ggtt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

/**
 * i915_ggtt_mark_pte_lost - Mark ggtt ptes as lost or clear such a marking
 * @i915: The device private.
 * @val: Whether the ptes should be marked as lost.
 *
 * In some cases pte content is retained across suspend, but typically lost
 * across hibernate. Ptes should therefore be marked as lost on hibernation
 * restore, and such marking cleared on suspend.
 */
void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
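/*
 * fill32_px() replicates the low 32 bits of v into both halves of a 64-bit
 * pattern, so e.g. fill32_px(pt, 0xdeadbeef) writes 0xdeadbeefdeadbeef to
 * every qword of the page, i.e. 0xdeadbeef to every 32-bit PTE slot.
 */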

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_gt *gt);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);
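/*
 * A sketch of the usual stash lifecycle when populating a range (error
 * handling elided; see ppgtt_bind_vma() and its callers for the real flow):
 *
 *	struct i915_vm_pt_stash stash = {};
 *
 *	err = i915_vm_alloc_pt_stash(vm, &stash, vma_res->vma_size);
 *	if (!err)
 *		err = i915_vm_map_pt_stash(vm, &stash);
 *	if (!err)
 *		vm->allocate_va_range(vm, &stash, vma_res->start,
 *				      vma_res->vma_size);
 *	i915_vm_free_pt_stash(vm, &stash);
 *
 * i915_vm_free_pt_stash() releases whatever allocate_va_range() did not
 * consume.
 */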

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res) {
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}

#endif