/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw carry physical address bits 39:32 in PTE bits 11:4 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
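
/*
 * Worked example (illustrative): for a page-aligned addr of
 * 0x12_3456_7000, physical address bits 39:32 are 0x12, and
 * (addr >> 28) & 0xff0 folds them into bits 11:4 as 0x120, so the
 * 32-bit PTE address field becomes 0x3456_7120.
 */
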
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define GEN12_PPGTT_PTE_LM	BIT_ULL(11)

#define GEN12_GGTT_PTE_LM	BIT_ULL(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
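
/*
 * Worked example (illustrative): HSW_CACHEABILITY_CONTROL(0xb) places
 * the low three bits (0x3) in PTE bits 3:1 giving 0x6, and the fourth
 * bit (0x8) in PTE bit 11 giving 0x800, for a combined value of 0x806.
 */
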
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference from a normal x86 3 level page table is that the PDPEs
 * are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
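
/*
 * Decomposition example (illustrative) for the 48b layout: a GPU virtual
 * address of 0x0080_8060_4005 splits into PML4E = 1, PDPE = 2, PDE = 3,
 * PTE = 4 and a page offset of 0x005.
 */
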
#define GEN8_3LVL_PDPES			4

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
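
/*
 * Entries for the private PAT are composed by OR'ing one GEN8_PPAT()
 * slot per index, e.g. (a sketch, not necessarily the exact table the
 * driver programs):
 *
 *	u64 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC | GEN8_PPAT_AGE(3)) |
 *		  GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |
 *		  GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |
 *		  GEN8_PPAT(3, GEN8_PPAT_UC);
 */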

#define GEN8_PAGE_PRESENT		BIT_ULL(0)
#define GEN8_PAGE_RW			BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
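
/*
 * px_base() is compile-time polymorphic via __builtin_choose_expr: handed
 * a GEM object it is a no-op, handed a page table it yields pt->base, and
 * handed a page directory it yields pd->pt.base, e.g. (a sketch):
 *
 *	struct i915_page_directory *pd = alloc_pd(vm);
 *	struct drm_i915_gem_object *obj = px_base(pd); - yields pd->pt.base
 */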

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
};
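
/*
 * Based on i915_vm_alloc_pt_stash(): pt[0] chains the preallocated page
 * tables and pt[1] the preallocated page directories, linked through
 * i915_page_table.stash.
 */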

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * pointing the valid PTEs at a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);
};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shut down our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** The aliasing PPGTT overlaying the global GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}
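
/*
 * e.g. a vm with total <= 4GiB has (total - 1) >> 32 == 0 and so uses the
 * 3 level layout, while a 48b full-ppgtt (total == 1ULL << 48) yields a
 * non-zero value and selects the 4 level layout.
 */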

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference we want to release.
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline bool
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return false;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	__i915_vm_close(vm);

	i915_vm_put(vm);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
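
/*
 * Worked example (illustrative), with GEN6_PDE_SHIFT == 22 so one page
 * table maps 4MiB through 1024 PTEs: addr = 0x3ff000, length = 0x2000
 * crosses a 4MiB boundary, so the count clamps to
 * NUM_PTE(22) - i915_pte_index(0x3ff000, 22) = 1024 - 1023 = 1 and the
 * caller must loop to cover the remainder.
 */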

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
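
/*
 * fill32_px() replicates a 32-bit value into both halves of each qword,
 * e.g. (a sketch) fill32_px(pt, 0xdeadbeef) writes 0xdeadbeefdeadbeef to
 * all 512 qwords (for 4K pages) of the backing page.
 */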

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res)
{
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
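
/*
 * Typical consumption (a sketch of how an insert_entries() implementation
 * might walk the backing store one GTT page at a time; write_pte() is a
 * hypothetical per-generation PTE writer):
 *
 *	struct sgt_dma it = sgt_dma(vma_res);
 *
 *	do {
 *		write_pte(vm, offset, it.dma);
 *		offset += I915_GTT_PAGE_SIZE;
 *
 *		it.dma += I915_GTT_PAGE_SIZE;
 *		if (it.dma >= it.max) {
 *			it.sg = __sg_next(it.sg);
 *			if (!it.sg)
 *				break;
 *			it.dma = sg_dma_address(it.sg);
 *			it.max = it.dma + sg_dma_len(it.sg);
 *		}
 *	} while (1);
 */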

#endif