/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_params.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
#include "intel_memory_region.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
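/*
 * Illustrative note: negating the (unsigned 64b) page size yields a mask with
 * all bits above the page offset set, e.g. -BIT_ULL(12) == ~0xfffULL, so
 * (addr & I915_GTT_PAGE_MASK) rounds addr down to a page boundary.
 */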

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw stores physical address bits 39:32 in PTE bits 11:4 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
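/*
 * Worked example (illustrative): for addr == 0x34_0000_0000 (address bits
 * 39:32 == 0x34), (addr >> 28) == 0x340 and masking with 0xff0 keeps bits
 * 11:4, so GEN6_GTT_ADDR_ENCODE() folds the high address byte into PTE bits
 * 11:4 while the low 32 bits pass through unchanged.
 */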
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)		(1 << ((pde_shift) - PAGE_SHIFT))
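/*
 * Illustrative: with a 4K PAGE_SHIFT of 12, NUM_PTE(GEN6_PDE_SHIFT) ==
 * 1 << (22 - 12) == 1024, i.e. each gen6 PDE spans 1024 PTEs (4MiB of VA).
 */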

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define GEN12_PPGTT_PTE_LM	BIT_ULL(11)

#define GEN12_GGTT_PTE_LM	BIT_ULL(1)

#define GEN12_PDE_64K BIT(6)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
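/*
 * Worked example (illustrative): HSW_CACHEABILITY_CONTROL(0xb) ==
 * ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8) == 0x6 | 0x800 == 0x806,
 * i.e. the low three control bits land in PTE bits 3:1 and the top control
 * bit lands in PTE bit 11.
 */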
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference from a normal x86 3-level page table is that the PDPEs are
 * programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
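/*
 * Illustrative index extraction for the 48b layout (field widths as in the
 * diagram above; 0x1ff masks a 9-bit index):
 *
 *	pml4e  = (addr >> 39) & 0x1ff;
 *	pdpe   = (addr >> 30) & 0x1ff;
 *	pde    = (addr >> 21) & 0x1ff;
 *	pte    = (addr >> 12) & 0x1ff;
 *	offset = addr & 0xfff;
 */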
#define GEN8_3LVL_PDPES			4

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x) << 4)
#define GEN8_PPAT_LLCeLLC		(3 << 2)
#define GEN8_PPAT_LLCELLC		(2 << 2)
#define GEN8_PPAT_LLC			(1 << 2)
#define GEN8_PPAT_WB			(3 << 0)
#define GEN8_PPAT_WT			(2 << 0)
#define GEN8_PPAT_WC			(1 << 0)
#define GEN8_PPAT_UC			(0 << 0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0 << 2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PAGE_PRESENT		BIT_ULL(0)
#define GEN8_PAGE_RW			BIT_ULL(1)

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

enum i915_cache_level;

struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
	bool is_compact;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
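/*
 * Illustrative: px_base() is type-generic. Given a struct i915_page_table *pt
 * or a struct i915_page_directory *pd, both px_base(pt) and px_base(pd)
 * resolve at compile time to the backing struct drm_i915_gem_object *, so
 * helpers such as px_dma() below work on any paging-structure handle.
 */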

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
	/*
	 * Optionally override the alignment/size of the physical page that
	 * contains each PT. If unset, this defaults to the usual
	 * I915_GTT_PAGE_SIZE_4K. This does not influence the other paging
	 * structures. MUST be a power-of-two. ONLY applicable on discrete
	 * platforms.
	 */
	int pt_sz;
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res);
};

struct i915_address_space {
	struct kref ref;
	struct work_struct release_work;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */
	u64 min_alignment[INTEL_MEMORY_STOLEN_LOCAL + 1];

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shutdown our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */

	struct kref resv_ref; /* kref to keep the reservation lock alive. */
	struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	/* Flags used when creating page-table objects for this vm */
	unsigned long lmem_pt_obj_flags;

	/* Interval tree for pending unbind vma resources */
	struct rb_root_cached pending_unbind;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
	struct drm_i915_gem_object *
		(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
			u64 start, u64 length,
			void (*fn)(struct i915_address_space *vm,
				   struct i915_page_table *pt,
				   void *data),
			void *data);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the GGTT, if any */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))

bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915);

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	/* An address space larger than 4GiB requires a 4th page-table level */
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
					enum intel_memory_type type)
{
	/* avoid INTEL_MEMORY_MOCK overflow */
	if ((int)type >= ARRAY_SIZE(vm->min_alignment))
		type = INTEL_MEMORY_SYSTEM;

	return vm->min_alignment[type];
}

static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
					    struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
	enum intel_memory_type type = mr ? mr->type : INTEL_MEMORY_SYSTEM;

	return i915_vm_min_alignment(vm, type);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

/**
 * i915_vm_resv_get - Obtain a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock we want to share.
 *
 * Return: A pointer to the vm's reservation lock.
 */
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
{
	kref_get(&vm->resv_ref);
	return &vm->_resv;
}

void i915_vm_release(struct kref *kref);

void i915_vm_resv_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

/**
 * i915_vm_resv_put - Release a reference on the vm's reservation lock
 * @vm: The vm whose reservation lock reference we want to release.
 */
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
	kref_put(&vm->resv_ref, i915_vm_resv_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline struct i915_address_space *
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return NULL;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	__i915_vm_close(vm);

	i915_vm_put(vm);
}
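/*
 * Illustrative lifecycle (a sketch, not a prescribed API contract): a user
 * that wants to attach to a vm takes both an open count and a reference,
 * then drops both when detaching:
 *
 *	vm = i915_vm_open(vm);	// open count + ref
 *	... bind vma, use the address space ...
 *	i915_vm_close(vm);	// drops the open count, then the ref
 *
 * Once the last open count is dropped, the vm stops accepting new
 * attachments and proceeds to tear down its vma and page directories.
 */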

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}
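/*
 * Worked example (illustrative): with pde_shift == GEN6_PDE_SHIFT (22) and
 * PAGE_SHIFT == 12, address 0x605000 yields page frame 0x605, and
 * i915_pte_index() == 0x605 & 0x3ff == 0x205, i.e. PTE 517 within its
 * page table.
 */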

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
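/*
 * Worked example (illustrative): for gen6 (pde_shift == 22, 1024 PTEs per
 * page table), addr == 0x3ff000 with length == 0x3000 crosses the 4MiB
 * boundary at 0x400000, so the count is clamped to 1024 - 1023 == 1 PTE;
 * the same length starting at 0x3fc000 stays within one table and counts 3.
 */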

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags);

void i915_ggtt_suspend_vm(struct i915_address_space *vm);
bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *ggtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
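/*
 * Illustrative: fill32_px() replicates a 32b value into both halves of each
 * 64b qword before filling the page, e.g. v == 0xdeadbeef fills the page
 * with 0xdeadbeefdeadbeef, so a page of 32b gen6 PTEs can be initialised
 * using the same 64b fill helper as gen8.
 */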

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);
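/*
 * Illustrative flow (a sketch of how these helpers compose; error handling
 * elided): preallocate enough page tables for a range, map them for CPU
 * access, hand them to allocate_va_range(), then return any unused tables:
 *
 *	struct i915_vm_pt_stash stash = {};
 *
 *	if (!i915_vm_alloc_pt_stash(vm, &stash, size) &&
 *	    !i915_vm_map_pt_stash(vm, &stash))
 *		vm->allocate_va_range(vm, &stash, start, size);
 *	i915_vm_free_pt_stash(vm, &stash);
 */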

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma_resource *vma_res) {
	struct scatterlist *sg = vma_res->bi.pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
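/*
 * Illustrative use (a sketch; the PTE-writing details vary per generation):
 * insert_entries() implementations typically walk the scatterlist one dma
 * page at a time, advancing to the next sg chunk when the current one is
 * exhausted:
 *
 *	struct sgt_dma iter = sgt_dma(vma_res);
 *
 *	do {
 *		... emit a PTE for iter.dma ...
 *		iter.dma += I915_GTT_PAGE_SIZE;
 *		if (iter.dma >= iter.max) {
 *			iter.sg = __sg_next(iter.sg);
 *			if (!iter.sg)
 *				break;
 *			iter.dma = sg_dma_address(iter.sg);
 *			iter.max = iter.dma + sg_dma_len(iter.sg);
 *		}
 *	} while (...);
 */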

#endif /* __INTEL_GTT_H__ */