/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
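
/*
 * Example (illustrative numbers): a 4GB GGTT has vm.total == 1ULL << 32,
 * so ggtt_total_entries() == (1ULL << 32) >> PAGE_SHIFT == 1048576 PTEs,
 * one per 4K page of address space.
 */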

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw PTEs use bits 11:4 to store physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
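/*
 * Worked example (illustrative): for addr = 0x1_2345_6000, physical address
 * bits 39:32 are 0x01; (addr >> 28) & 0xff0 == 0x010, so PTE bit 4 carries
 * physical bit 32 while address bits 31:12 occupy their usual PTE positions.
 */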
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE		        (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define GEN12_PPGTT_PTE_LM	BIT_ULL(11)

#define GEN12_GGTT_PTE_LM	BIT_ULL(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
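/*
 * Worked example (illustrative): HSW_CACHEABILITY_CONTROL(0x8) keeps no low
 * bits ((0x8 & 0x7) << 1 == 0) and moves the fourth bit up by eight places
 * ((0x8 & 0x8) << 8 == 0x800), i.e. only PTE bit 11 is set.
 */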
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * Unlike a normal x86 3-level page table, here the PDPEs are programmed via
 * register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
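
/*
 * Worked example (illustrative): in the 48b layout a GPU virtual address va
 * decomposes as:
 *
 *	pml4e  = (va >> 39) & 0x1ff;
 *	pdpe   = (va >> 30) & 0x1ff;
 *	pde    = (va >> 21) & 0x1ff;
 *	pte    = (va >> 12) & 0x1ff;
 *	offset = va & 0xfff;
 */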
#define GEN8_3LVL_PDPES			4

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
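/*
 * Example (illustrative, reading the field names above):
 * GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_LLC) places the 8-bit entry value
 * (3 | 4 == 0x07) in bits 23:16 of the 64-bit PPAT image, i.e. 0x070000
 * describes a write-back, LLC-cached entry at PAT index 2.
 */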

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
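
/*
 * px_base() is type-generic: __px_choose_expr() picks, at compile time, the
 * branch whose type matches its argument. A sketch of the resulting dispatch:
 *
 *	struct i915_page_directory *pd = ...;
 *	struct i915_page_table *pt = ...;
 *
 *	px_base(pd);	// expands to pd->pt.base
 *	px_base(pt);	// expands to pt->base
 */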

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
};
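
/*
 * A sketch of the intended flow (assuming the filling convention of
 * i915_vm_alloc_pt_stash(): pt[0] chains spare page tables, pt[1] spare page
 * directories): the stash is populated up front, then consumed under the vm
 * mutex by vm->allocate_va_range(), with spares linked through
 * i915_page_table.stash.
 */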

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shut down our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;          /* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}
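
/*
 * Example (illustrative): a 48b ppgtt has vm->total == 1ULL << 48, so
 * (vm->total - 1) >> 32 is non-zero and i915_vm_is_4lvl() returns true;
 * a 32b vm (total == 1ULL << 32) yields zero, i.e. three levels.
 */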

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to release.
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline struct i915_address_space *
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return NULL;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	__i915_vm_close(vm);

	i915_vm_put(vm);
}
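
/*
 * Usage sketch (illustrative): a context keeps its vm "open" while attached,
 * dropping both the open count and its reference on detach:
 *
 *	vm = i915_vm_open(ctx_vm);	// +1 open, +1 ref
 *	...				// vma stay usable while open
 *	i915_vm_close(vm);		// -1 open, -1 ref; the last close
 *					// tears down vma and page directories
 */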

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
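
/*
 * Worked example (illustrative, gen6: pde_shift == 22, 1024 PTEs per page
 * table): i915_pte_count(0x3ff000, 0x3000, 22) crosses the 4M boundary at
 * 0x400000, so it returns 1024 - 1023 == 1 PTE; a caller iterating over the
 * range picks up the remaining two pages on its next pass.
 */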

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
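
/*
 * Example (illustrative): fill32_px(pt, 0xdeadbeef) replicates the 32-bit
 * value into both halves of each qword (0xdeadbeefdeadbeef) and writes it
 * across the whole page, e.g. to point every 32-bit gen6 PTE of a page
 * table at scratch.
 */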

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
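
/*
 * Usage sketch (illustrative, modelled on the insert_entries() loops): walk
 * the vma's backing store one GTT page at a time, hopping to the next sg
 * chunk when the current one is exhausted:
 *
 *	struct sgt_dma iter = sgt_dma(vma);
 *
 *	do {
 *		// emit a PTE for iter.dma
 *		iter.dma += I915_GTT_PAGE_SIZE;
 *		if (iter.dma >= iter.max) {
 *			iter.sg = sg_next(iter.sg);
 *			if (!iter.sg)
 *				break;
 *			iter.dma = sg_dma_address(iter.sg);
 *			iter.max = iter.dma + sg_dma_len(iter.sg);
 *		}
 *	} while (more PTEs to emit);
 */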

#endif
638