/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
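/*
 * Note that -I915_GTT_PAGE_SIZE == ~(I915_GTT_PAGE_SIZE - 1), so with 4K
 * pages the mask above clears the low 12 (page offset) bits.
 */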

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw has bits 11:4 for physical addr bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
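/*
 * For illustration, encoding the 40b address 0x1234567000: bits 39:32
 * (0x12) fold down into PTE bits 11:4, so
 * GEN6_GTT_ADDR_ENCODE(0x1234567000) == 0x1234567000 | 0x120.
 */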
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))
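/*
 * Sanity check on the gen6 numbers: with GEN6_PDE_SHIFT == 22 and 4K
 * pages, NUM_PTE(GEN6_PDE_SHIFT) == 1 << 10 == 1024, matching
 * GEN6_PTES == PAGE_SIZE / sizeof(gen6_pte_t); each PDE thus maps 4MiB.
 */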

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define GEN12_PPGTT_PTE_LM	BIT_ULL(11)

#define GEN12_GGTT_PTE_LM	BIT_ULL(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
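/*
 * Worked example: HSW_CACHEABILITY_CONTROL(0xb) takes 0b1011 and yields
 * ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8) == 0x6 | 0x800 == 0x806,
 * matching the layout above (low three bits in PTE bits 3:1, the fourth
 * bit in PTE bit 11).
 */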
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3-level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
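/*
 * In index form, each level selects one of 512 entries (see I915_PDES /
 * I915_PDE_MASK above), so for the 48b layout:
 *	pml4e = (addr >> 39) & 0x1ff;
 *	pdpe  = (addr >> 30) & 0x1ff;
 *	pde   = (addr >> 21) & 0x1ff;
 *	pte   = (addr >> 12) & 0x1ff;
 */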
#define GEN8_3LVL_PDPES			4

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
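/*
 * GEN8_PPAT() positions one 8-bit PPAT entry within the 64-bit register
 * image. For illustration (not a table taken from the spec), a write-back
 * LLC entry with age 3 in slot 0 would be:
 *	GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC | GEN8_PPAT_AGE(3))
 * which evaluates to (0x3 | (1 << 2) | (3 << 4)) << 0 == 0x37.
 */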

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

enum i915_cache_level;

struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))
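/*
 * px_base() (and px_pt()/px_dma()/px_vaddr() below) are polymorphic over
 * the page-table types: given a GEM object it is the identity, given a
 * struct i915_page_table it yields pt->base, and given a struct
 * i915_page_directory it yields pd->pt.base. Any other argument type
 * fails to compile via the final (void)0 arm.
 */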

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

void *__px_vaddr(struct drm_i915_gem_object *p);
#define px_vaddr(px) (__px_vaddr(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * pointing the valid PTEs at a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

struct i915_address_space {
	struct kref ref;
	struct rcu_work rcu;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	/*
	 * Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	u64 total;		/* size addr space maps (e.g. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shut down our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */
	struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define VM_CLASS_DPT 2

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Display page table */
	bool is_dpt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};
302 
303 /*
304  * The Graphics Translation Table is the way in which GEN hardware translates a
305  * Graphics Virtual Address into a Physical Address. In addition to the normal
306  * collateral associated with any va->pa translations GEN hardware also has a
307  * portion of the GTT which can be mapped by the CPU and remain both coherent
308  * and correct (in cases like swizzling). That region is referred to as GMADR in
309  * the spec.
310  */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;          /* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the GGTT (the "aliasing" PPGTT) */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)
#define i915_is_dpt(vm) ((vm)->is_dpt)

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}
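
/*
 * For example, a full 48b ppgtt has vm->total == 1ull << 48, so
 * (total - 1) >> 32 is non-zero and the vm is 4-level, while a 32b vm
 * (total <= 1ull << 32) yields 0.
 */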

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

void i915_vm_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline struct i915_address_space *
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return NULL;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	__i915_vm_close(vm);

	i915_vm_put(vm);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
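
/*
 * Worked example for gen6 (pde_shift == 22, so each PDE covers 4MiB):
 * addr == 0x3ff000 with length == 0x2000 ends at 0x401000 and crosses the
 * 4MiB boundary, so only the PTEs up to that boundary are counted:
 * NUM_PTE(22) - i915_pte_index(0x3ff000, 22) == 1024 - 1023 == 1.
 */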

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);

void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
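/*
 * fill32_px() replicates a 32-bit pattern into both halves of each 64-bit
 * entry, e.g. fill32_px(pt, 0xdeadbeef) writes 0xdeadbeefdeadbeef to every
 * qword of the page; this lets 4-byte gen6 PTEs share the 64-bit fill
 * helper used for 8-byte gen8 PTEs.
 */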

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
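
/*
 * A typical consumer walks the scatterlist one GTT page at a time. A
 * minimal sketch of the pattern (write_pte() here is a stand-in for a
 * real PTE writer, not an existing helper):
 *
 *	struct sgt_dma iter = sgt_dma(vma);
 *
 *	do {
 *		write_pte(iter.dma);
 *		iter.dma += I915_GTT_PAGE_SIZE;
 *		if (iter.dma >= iter.max) {
 *			iter.sg = __sg_next(iter.sg);
 *			if (!iter.sg)
 *				break;
 *			iter.dma = sg_dma_address(iter.sg);
 *			iter.max = iter.dma + sg_dma_len(iter.sg);
 *		}
 *	} while (1);
 */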

#endif