1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6 
7 #ifndef __I915_GEM_OBJECT_TYPES_H__
8 #define __I915_GEM_OBJECT_TYPES_H__
9 
10 #include <linux/mmu_notifier.h>
11 
12 #include <drm/drm_gem.h>
13 #include <drm/ttm/ttm_bo_api.h>
14 #include <uapi/drm/i915_drm.h>
15 
16 #include "i915_active.h"
17 #include "i915_selftest.h"
18 
19 struct drm_i915_gem_object;
struct intel_frontbuffer;
21 struct intel_memory_region;
22 
23 /*
24  * struct i915_lut_handle tracks the fast lookups from handle to vma used
25  * for execbuf. Although we use a radixtree for that mapping, in order to
26  * remove them as the object or context is closed, we need a secondary list
27  * and a translation entry (i915_lut_handle).
28  */
29 struct i915_lut_handle {
30 	struct list_head obj_link;
31 	struct i915_gem_context *ctx;
32 	u32 handle;
33 };
34 
35 struct drm_i915_gem_object_ops {
36 	unsigned int flags;
37 #define I915_GEM_OBJECT_IS_SHRINKABLE			BIT(1)
38 /* Skip the shrinker management in set_pages/unset_pages */
39 #define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST	BIT(2)
40 #define I915_GEM_OBJECT_IS_PROXY			BIT(3)
41 #define I915_GEM_OBJECT_NO_MMAP				BIT(4)
42 
43 	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
48 	 * and making them available for the GPU (e.g. clflush), we may hold
49 	 * onto the pages after they are no longer referenced by the GPU
50 	 * in case they may be used again shortly (for example migrating the
51 	 * pages to a different memory domain within the GTT). put_pages()
52 	 * will therefore most likely be called when the object itself is
53 	 * being released or under memory pressure (where we attempt to
54 	 * reap pages for the shrinker).
55 	 */
56 	int (*get_pages)(struct drm_i915_gem_object *obj);
57 	void (*put_pages)(struct drm_i915_gem_object *obj,
58 			  struct sg_table *pages);
59 	int (*truncate)(struct drm_i915_gem_object *obj);
60 	void (*writeback)(struct drm_i915_gem_object *obj);
61 	int (*shrinker_release_pages)(struct drm_i915_gem_object *obj,
62 				      bool no_gpu_wait,
63 				      bool should_writeback);
64 
65 	int (*pread)(struct drm_i915_gem_object *obj,
66 		     const struct drm_i915_gem_pread *arg);
67 	int (*pwrite)(struct drm_i915_gem_object *obj,
68 		      const struct drm_i915_gem_pwrite *arg);
69 	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
70 	void (*unmap_virtual)(struct drm_i915_gem_object *obj);
71 
72 	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
73 
74 	/**
75 	 * adjust_lru - notify that the madvise value was updated
76 	 * @obj: The gem object
77 	 *
	 * The madvise value may have been updated, or the object may have
	 * recently been referenced, so act accordingly (perhaps changing an
	 * LRU list, etc.).
80 	 */
81 	void (*adjust_lru)(struct drm_i915_gem_object *obj);
82 
83 	/**
84 	 * delayed_free - Override the default delayed free implementation
85 	 */
86 	void (*delayed_free)(struct drm_i915_gem_object *obj);
87 
88 	/**
89 	 * migrate - Migrate object to a different region either for
90 	 * pinning or for as long as the object lock is held.
91 	 */
92 	int (*migrate)(struct drm_i915_gem_object *obj,
93 		       struct intel_memory_region *mr);
94 
95 	void (*release)(struct drm_i915_gem_object *obj);
96 
97 	const struct vm_operations_struct *mmap_ops;
98 	const char *name; /* friendly name for debug, e.g. lockdep classes */
99 };
100 
101 /**
102  * enum i915_cache_level - The supported GTT caching values for system memory
103  * pages.
104  *
105  * These translate to some special GTT PTE bits when binding pages into some
 * address space. It also determines whether an object, or rather its pages,
 * are coherent with the GPU when reading or writing through the CPU cache
 * with those pages.
109  *
110  * Userspace can also control this through struct drm_i915_gem_caching.
111  */
112 enum i915_cache_level {
113 	/**
114 	 * @I915_CACHE_NONE:
115 	 *
116 	 * GPU access is not coherent with the CPU cache. If the cache is dirty
117 	 * and we need the underlying pages to be coherent with some later GPU
118 	 * access then we need to manually flush the pages.
119 	 *
120 	 * On shared LLC platforms reads and writes through the CPU cache are
121 	 * still coherent even with this setting. See also
122 	 * &drm_i915_gem_object.cache_coherent for more details. Due to this we
123 	 * should only ever use uncached for scanout surfaces, otherwise we end
124 	 * up over-flushing in some places.
125 	 *
126 	 * This is the default on non-LLC platforms.
127 	 */
128 	I915_CACHE_NONE = 0,
129 	/**
130 	 * @I915_CACHE_LLC:
131 	 *
132 	 * GPU access is coherent with the CPU cache. If the cache is dirty,
133 	 * then the GPU will ensure that access remains coherent, when both
134 	 * reading and writing through the CPU cache. GPU writes can dirty the
135 	 * CPU cache.
136 	 *
137 	 * Not used for scanout surfaces.
138 	 *
	 * Applies to both platforms with shared LLC (HAS_LLC), and snooping
	 * based platforms (HAS_SNOOP).
141 	 *
142 	 * This is the default on shared LLC platforms.  The only exception is
143 	 * scanout objects, where the display engine is not coherent with the
144 	 * CPU cache. For such objects I915_CACHE_NONE or I915_CACHE_WT is
145 	 * automatically applied by the kernel in pin_for_display, if userspace
146 	 * has not done so already.
147 	 */
148 	I915_CACHE_LLC,
149 	/**
150 	 * @I915_CACHE_L3_LLC:
151 	 *
152 	 * Explicitly enable the Gfx L3 cache, with coherent LLC.
153 	 *
	 * The Gfx L3 sits between the domain-specific caches, e.g.
155 	 * sampler/render caches, and the larger LLC. LLC is coherent with the
156 	 * GPU, but L3 is only visible to the GPU, so likely needs to be flushed
157 	 * when the workload completes.
158 	 *
159 	 * Not used for scanout surfaces.
160 	 *
161 	 * Only exposed on some gen7 + GGTT. More recent hardware has dropped
162 	 * this explicit setting, where it should now be enabled by default.
163 	 */
164 	I915_CACHE_L3_LLC,
165 	/**
166 	 * @I915_CACHE_WT:
167 	 *
168 	 * Write-through. Used for scanout surfaces.
169 	 *
170 	 * The GPU can utilise the caches, while still having the display engine
171 	 * be coherent with GPU writes, as a result we don't need to flush the
172 	 * CPU caches when moving out of the render domain. This is the default
173 	 * setting chosen by the kernel, if supported by the HW, otherwise we
174 	 * fallback to I915_CACHE_NONE. On the CPU side writes through the CPU
175 	 * cache still need to be flushed, to remain coherent with the display
176 	 * engine.
177 	 */
178 	I915_CACHE_WT,
179 };
180 
181 enum i915_map_type {
182 	I915_MAP_WB = 0,
183 	I915_MAP_WC,
184 #define I915_MAP_OVERRIDE BIT(31)
185 	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
186 	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
187 };
188 
189 enum i915_mmap_type {
190 	I915_MMAP_TYPE_GTT = 0,
191 	I915_MMAP_TYPE_WC,
192 	I915_MMAP_TYPE_WB,
193 	I915_MMAP_TYPE_UC,
194 	I915_MMAP_TYPE_FIXED,
195 };
196 
197 struct i915_mmap_offset {
198 	struct drm_vma_offset_node vma_node;
199 	struct drm_i915_gem_object *obj;
200 	enum i915_mmap_type mmap_type;
201 
202 	struct rb_node offset;
203 };
204 
205 struct i915_gem_object_page_iter {
206 	struct scatterlist *sg_pos;
207 	unsigned int sg_idx; /* in pages, but 32bit eek! */
208 
209 	struct radix_tree_root radix;
210 	struct mutex lock; /* protects this cache */
211 };
212 
213 struct drm_i915_gem_object {
214 	/*
215 	 * We might have reason to revisit the below since it wastes
216 	 * a lot of space for non-ttm gem objects.
217 	 * In any case, always use the accessors for the ttm_buffer_object
218 	 * when accessing it.
219 	 */
220 	union {
221 		struct drm_gem_object base;
222 		struct ttm_buffer_object __do_not_access;
223 	};
224 
225 	const struct drm_i915_gem_object_ops *ops;
226 
227 	struct {
228 		/**
229 		 * @vma.lock: protect the list/tree of vmas
230 		 */
231 		spinlock_t lock;
232 
233 		/**
234 		 * @vma.list: List of VMAs backed by this object
235 		 *
		 * The VMAs on this list are ordered by type: all GGTT VMAs are
		 * placed at the head and all ppGTT VMAs are placed at the tail.
		 * The different types of GGTT VMA are unordered between
		 * themselves; use the @vma.tree (which has a defined order
		 * between all VMAs) to quickly find an exact match.
241 		 */
242 		struct list_head list;
243 
244 		/**
245 		 * @vma.tree: Ordered tree of VMAs backed by this object
246 		 *
247 		 * All VMA created for this object are placed in the @vma.tree
248 		 * for fast retrieval via a binary search in
249 		 * i915_vma_instance(). They are also added to @vma.list for
250 		 * easy iteration.
251 		 */
252 		struct rb_root tree;
253 	} vma;
254 
255 	/**
256 	 * @lut_list: List of vma lookup entries in use for this object.
257 	 *
258 	 * If this object is closed, we need to remove all of its VMA from
259 	 * the fast lookup index in associated contexts; @lut_list provides
260 	 * this translation from object to context->handles_vma.
261 	 */
262 	struct list_head lut_list;
263 	spinlock_t lut_lock; /* guards lut_list */
264 
265 	/**
266 	 * @obj_link: Link into @i915_gem_ww_ctx.obj_list
267 	 *
268 	 * When we lock this object through i915_gem_object_lock() with a
269 	 * context, we add it to the list to ensure we can unlock everything
	 * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() is called.
271 	 */
272 	struct list_head obj_link;
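
	/*
	 * A sketch of the ww transaction pattern referred to above, using
	 * the standard i915_gem_ww_ctx helpers (simplified; interruptible
	 * waits and error handling trimmed):
	 *
	 *	struct i915_gem_ww_ctx ww;
	 *	int err;
	 *
	 *	i915_gem_ww_ctx_init(&ww, true);
	 * retry:
	 *	err = i915_gem_object_lock(obj, &ww);
	 *	if (!err) {
	 *		... use the object ...
	 *	}
	 *	if (err == -EDEADLK) {
	 *		err = i915_gem_ww_ctx_backoff(&ww);
	 *		if (!err)
	 *			goto retry;
	 *	}
	 *	i915_gem_ww_ctx_fini(&ww);
	 */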
273 	/**
	 * @shares_resv_from: The object shares the resv from this vm.
275 	 */
276 	struct i915_address_space *shares_resv_from;
277 
278 	union {
279 		struct rcu_head rcu;
280 		struct llist_node freed;
281 	};
282 
283 	/**
284 	 * Whether the object is currently in the GGTT mmap.
285 	 */
286 	unsigned int userfault_count;
287 	struct list_head userfault_link;
288 
289 	struct {
290 		spinlock_t lock; /* Protects access to mmo offsets */
291 		struct rb_root offsets;
292 	} mmo;
293 
294 	I915_SELFTEST_DECLARE(struct list_head st_link);
295 
296 	unsigned long flags;
297 #define I915_BO_ALLOC_CONTIGUOUS  BIT(0)
298 #define I915_BO_ALLOC_VOLATILE    BIT(1)
299 #define I915_BO_ALLOC_CPU_CLEAR   BIT(2)
300 #define I915_BO_ALLOC_USER        BIT(3)
301 /* Object is allowed to lose its contents on suspend / resume, even if pinned */
302 #define I915_BO_ALLOC_PM_VOLATILE BIT(4)
303 /* Object needs to be restored early using memcpy during resume */
304 #define I915_BO_ALLOC_PM_EARLY    BIT(5)
305 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
306 			     I915_BO_ALLOC_VOLATILE | \
307 			     I915_BO_ALLOC_CPU_CLEAR | \
308 			     I915_BO_ALLOC_USER | \
309 			     I915_BO_ALLOC_PM_VOLATILE | \
310 			     I915_BO_ALLOC_PM_EARLY)
311 #define I915_BO_READONLY          BIT(6)
312 #define I915_TILING_QUIRK_BIT     7 /* unknown swizzling; do not release! */
313 #define I915_BO_PROTECTED         BIT(8)
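
	/*
	 * Illustrative sketch: the allocation flags are supplied at object
	 * creation time, e.g. when creating a contiguous, CPU-cleared buffer
	 * in local memory (assuming the i915_gem_object_create_lmem() helper
	 * from the rest of the driver):
	 *
	 *	obj = i915_gem_object_create_lmem(i915, size,
	 *					  I915_BO_ALLOC_CONTIGUOUS |
	 *					  I915_BO_ALLOC_CPU_CLEAR);
	 */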
314 	/**
	 * @mem_flags: Mutable placement-related flags
316 	 *
317 	 * These are flags that indicate specifics of the memory region
318 	 * the object is currently in. As such they are only stable
319 	 * either under the object lock or if the object is pinned.
320 	 */
321 	unsigned int mem_flags;
322 #define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
323 #define I915_BO_FLAG_IOMEM       BIT(1) /* Object backed by IO memory */
324 	/**
325 	 * @cache_level: The desired GTT caching level.
326 	 *
327 	 * See enum i915_cache_level for possible values, along with what
328 	 * each does.
329 	 */
330 	unsigned int cache_level:3;
331 	/**
332 	 * @cache_coherent:
333 	 *
	 * Track whether the pages are coherent with the GPU if reading or
	 * writing through the CPU caches. This largely depends on the
	 * @cache_level setting.
337 	 *
	 * On platforms which don't have the shared LLC (HAS_SNOOP), like on
	 * Atom platforms, coherency must be explicitly requested with some
	 * special GTT caching bits (see enum i915_cache_level). Enabling
	 * coherency comes at a performance and power cost on such platforms.
	 * On the flip side the kernel does not need to manually flush any
	 * buffers which are coherent with the GPU; only when the object is
	 * not coherent, i.e. @cache_coherent is zero, is a manual flush needed.
345 	 *
	 * On platforms that share the LLC with the CPU (HAS_LLC), all GT memory
	 * access will automatically snoop the CPU caches (even with CACHE_NONE).
348 	 * The one exception is when dealing with the display engine, like with
349 	 * scanout surfaces. To handle this the kernel will always flush the
350 	 * surface out of the CPU caches when preparing it for scanout.  Also
351 	 * note that since scanout surfaces are only ever read by the display
352 	 * engine we only need to care about flushing any writes through the CPU
353 	 * cache, reads on the other hand will always be coherent.
354 	 *
	 * It may seem strange that @cache_coherent is not a simple boolean,
	 * i.e. coherent vs non-coherent. The reason for this goes back to
	 * the display engine not being fully coherent. As a result scanout
358 	 * surfaces will either be marked as I915_CACHE_NONE or I915_CACHE_WT.
359 	 * In the case of seeing I915_CACHE_NONE the kernel makes the assumption
360 	 * that this is likely a scanout surface, and will set @cache_coherent
361 	 * as only I915_BO_CACHE_COHERENT_FOR_READ, on platforms with the shared
362 	 * LLC. The kernel uses this to always flush writes through the CPU
363 	 * cache as early as possible, where it can, in effect keeping
364 	 * @cache_dirty clean, so we can potentially avoid stalling when
365 	 * flushing the surface just before doing the scanout.  This does mean
366 	 * we might unnecessarily flush non-scanout objects in some places, but
367 	 * the default assumption is that all normal objects should be using
368 	 * I915_CACHE_LLC, at least on platforms with the shared LLC.
369 	 *
370 	 * Supported values:
371 	 *
372 	 * I915_BO_CACHE_COHERENT_FOR_READ:
373 	 *
374 	 * On shared LLC platforms, we use this for special scanout surfaces,
375 	 * where the display engine is not coherent with the CPU cache. As such
376 	 * we need to ensure we flush any writes before doing the scanout. As an
377 	 * optimisation we try to flush any writes as early as possible to avoid
378 	 * stalling later.
379 	 *
380 	 * Thus for scanout surfaces using I915_CACHE_NONE, on shared LLC
381 	 * platforms, we use:
382 	 *
383 	 *	cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ
384 	 *
385 	 * While for normal objects that are fully coherent, including special
386 	 * scanout surfaces marked as I915_CACHE_WT, we use:
387 	 *
388 	 *	cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ |
389 	 *			 I915_BO_CACHE_COHERENT_FOR_WRITE
390 	 *
391 	 * And then for objects that are not coherent at all we use:
392 	 *
393 	 *	cache_coherent = 0
394 	 *
395 	 * I915_BO_CACHE_COHERENT_FOR_WRITE:
396 	 *
397 	 * When writing through the CPU cache, the GPU is still coherent. Note
398 	 * that this also implies I915_BO_CACHE_COHERENT_FOR_READ.
399 	 */
400 #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
401 #define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
402 	unsigned int cache_coherent:2;
403 
404 	/**
405 	 * @cache_dirty:
406 	 *
	 * Track if we are dirty with writes through the CPU cache for this
408 	 * object. As a result reading directly from main memory might yield
409 	 * stale data.
410 	 *
411 	 * This also ties into whether the kernel is tracking the object as
412 	 * coherent with the GPU, as per @cache_coherent, as it determines if
413 	 * flushing might be needed at various points.
414 	 *
415 	 * Another part of @cache_dirty is managing flushing when first
416 	 * acquiring the pages for system memory, at this point the pages are
417 	 * considered foreign, so the default assumption is that the cache is
	 * dirty; for example, the page zeroing done by the kernel, or
	 * swapping-in, might leave writes in the CPU cache while the actual
	 * data in main memory is potentially stale.  Note that this is a
	 * potential security issue when dealing with userspace objects and
	 * zeroing. Now, whether we actually need to apply the big sledgehammer
	 * of flushing all the pages on acquire depends on whether
	 * @cache_coherent is marked as I915_BO_CACHE_COHERENT_FOR_WRITE, i.e.
	 * that the GPU will be coherent for both reads and writes through the
	 * CPU cache.
426 	 *
427 	 * Note that on shared LLC platforms we still apply the heavy flush for
428 	 * I915_CACHE_NONE objects, under the assumption that this is going to
429 	 * be used for scanout.
430 	 *
431 	 * Update: On some hardware there is now also the 'Bypass LLC' MOCS
432 	 * entry, which defeats our @cache_coherent tracking, since userspace
433 	 * can freely bypass the CPU cache when touching the pages with the GPU,
	 * where the kernel is completely unaware. On such platforms we need
	 * to apply the sledgehammer-on-acquire regardless of @cache_coherent.
436 	 *
437 	 * Special care is taken on non-LLC platforms, to prevent potential
438 	 * information leak. The driver currently ensures:
439 	 *
440 	 *   1. All userspace objects, by default, have @cache_level set as
441 	 *   I915_CACHE_NONE. The only exception is userptr objects, where we
442 	 *   instead force I915_CACHE_LLC, but we also don't allow userspace to
443 	 *   ever change the @cache_level for such objects. Another special case
	 *   is dma-buf, which doesn't rely on @cache_dirty, but there we
445 	 *   always do a forced flush when acquiring the pages, if there is a
446 	 *   chance that the pages can be read directly from main memory with
447 	 *   the GPU.
448 	 *
449 	 *   2. All I915_CACHE_NONE objects have @cache_dirty initially true.
450 	 *
	 *   3. All swapped-out objects (i.e. shmem) have @cache_dirty set to
452 	 *   true.
453 	 *
454 	 *   4. The @cache_dirty is never freely reset before the initial
455 	 *   flush, even if userspace adjusts the @cache_level through the
456 	 *   i915_gem_set_caching_ioctl.
457 	 *
	 *   5. All @cache_dirty objects (including swapped-in) are initially
	 *   flushed with a synchronous call to drm_clflush_sg in
	 *   __i915_gem_object_set_pages. The @cache_dirty can be freely reset
	 *   at this point. All further asynchronous clflushes are never
	 *   security critical, i.e. userspace is free to race against itself.
463 	 */
464 	unsigned int cache_dirty:1;
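
	/*
	 * A simplified sketch of the acquire-time flush decision described
	 * above (the real logic lives in the set_pages/get_pages paths):
	 *
	 *	if (obj->cache_dirty &&
	 *	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) {
	 *		drm_clflush_sg(pages);
	 *		obj->cache_dirty = false;
	 *	}
	 */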
465 
466 	/**
467 	 * @read_domains: Read memory domains.
468 	 *
469 	 * These monitor which caches contain read/write data related to the
470 	 * object. When transitioning from one set of domains to another,
471 	 * the driver is called to ensure that caches are suitably flushed and
472 	 * invalidated.
473 	 */
474 	u16 read_domains;
475 
476 	/**
477 	 * @write_domain: Corresponding unique write memory domain.
478 	 */
479 	u16 write_domain;
480 
481 	struct intel_frontbuffer __rcu *frontbuffer;
482 
483 	/** Current tiling stride for the object, if it's tiled. */
484 	unsigned int tiling_and_stride;
485 #define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
486 #define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
487 #define STRIDE_MASK (~TILING_MASK)
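
	/*
	 * Decoding is a simple mask, as done by the
	 * i915_gem_object_get_tiling() and i915_gem_object_get_stride()
	 * helpers:
	 *
	 *	tiling = obj->tiling_and_stride & TILING_MASK;
	 *	stride = obj->tiling_and_stride & STRIDE_MASK;
	 */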
488 
489 	struct {
490 		/*
491 		 * Protects the pages and their use. Do not use directly, but
492 		 * instead go through the pin/unpin interfaces.
493 		 */
494 		atomic_t pages_pin_count;
495 
496 		/**
497 		 * @shrink_pin: Prevents the pages from being made visible to
498 		 * the shrinker, while the shrink_pin is non-zero. Most users
499 		 * should pretty much never have to care about this, outside of
500 		 * some special use cases.
501 		 *
		 * By default most objects will start out as visible to the
		 * shrinker (if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the
		 * backing pages are attached to the object, like in
		 * __i915_gem_object_set_pages(). They will then be removed
		 * from the shrinker list once the pages are released.
507 		 *
508 		 * The @shrink_pin is incremented by calling
509 		 * i915_gem_object_make_unshrinkable(), which will also remove
510 		 * the object from the shrinker list, if the pin count was zero.
511 		 *
512 		 * Callers will then typically call
513 		 * i915_gem_object_make_shrinkable() or
514 		 * i915_gem_object_make_purgeable() to decrement the pin count,
515 		 * and make the pages visible again.
516 		 */
517 		atomic_t shrink_pin;
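
		/*
		 * Sketch of the typical pairing described above:
		 *
		 *	i915_gem_object_make_unshrinkable(obj);
		 *	... do work the shrinker must not disturb ...
		 *	i915_gem_object_make_shrinkable(obj);
		 */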
518 
519 		/**
520 		 * @ttm_shrinkable: True when the object is using shmem pages
521 		 * underneath. Protected by the object lock.
522 		 */
523 		bool ttm_shrinkable;
524 
525 		/**
526 		 * Priority list of potential placements for this object.
527 		 */
528 		struct intel_memory_region **placements;
529 		int n_placements;
530 
531 		/**
532 		 * Memory region for this object.
533 		 */
534 		struct intel_memory_region *region;
535 
536 		/**
537 		 * Memory manager resource allocated for this object. Only
538 		 * needed for the mock region.
539 		 */
540 		struct ttm_resource *res;
541 
542 		/**
543 		 * Element within memory_region->objects or region->purgeable
544 		 * if the object is marked as DONTNEED. Access is protected by
545 		 * region->obj_lock.
546 		 */
547 		struct list_head region_link;
548 
549 		struct i915_refct_sgt *rsgt;
550 		struct sg_table *pages;
551 		void *mapping;
552 
553 		struct i915_page_sizes {
554 			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
557 			 */
558 			unsigned int phys;
559 
560 			/**
561 			 * The gtt page sizes we are allowed to use given the
562 			 * sg mask and the supported page sizes. This will
563 			 * express the smallest unit we can use for the whole
564 			 * object, as well as the larger sizes we may be able
565 			 * to use opportunistically.
566 			 */
567 			unsigned int sg;
568 
569 			/**
570 			 * The actual gtt page size usage. Since we can have
571 			 * multiple vma associated with this object we need to
572 			 * prevent any trampling of state, hence a copy of this
			 * struct also lives in each vma; therefore the gtt
			 * value here should only be read/written through the vma.
575 			 */
576 			unsigned int gtt;
577 		} page_sizes;
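
		/*
		 * Worked example (illustrative): an object backed by a single
		 * physically contiguous 2M chunk has phys = SZ_2M. If the HW
		 * supports 4K, 64K and 2M GTT pages, then sg = SZ_2M |
		 * SZ_64K | SZ_4K, since any of those units can cover the
		 * whole object; the size actually chosen for each mapping is
		 * recorded in the per-vma copy of gtt.
		 */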
578 
579 		I915_SELFTEST_DECLARE(unsigned int page_mask);
580 
581 		struct i915_gem_object_page_iter get_page;
582 		struct i915_gem_object_page_iter get_dma_page;
583 
584 		/**
585 		 * Element within i915->mm.shrink_list or i915->mm.purge_list,
586 		 * locked by i915->mm.obj_lock.
587 		 */
588 		struct list_head link;
589 
590 		/**
591 		 * Advice: are the backing pages purgeable?
592 		 */
593 		unsigned int madv:2;
594 
595 		/**
596 		 * This is set if the object has been written to since the
597 		 * pages were last acquired.
598 		 */
599 		bool dirty:1;
600 	} mm;
601 
602 	struct {
603 		struct i915_refct_sgt *cached_io_rsgt;
604 		struct i915_gem_object_page_iter get_io_page;
605 		struct drm_i915_gem_object *backup;
606 		bool created:1;
607 	} ttm;
608 
609 	/*
610 	 * Record which PXP key instance this object was created against (if
611 	 * any), so we can use it to determine if the encryption is valid by
612 	 * comparing against the current key instance.
613 	 */
614 	u32 pxp_key_instance;
615 
616 	/** Record of address bit 17 of each page at last unbind. */
617 	unsigned long *bit_17;
618 
619 	union {
620 #ifdef CONFIG_MMU_NOTIFIER
621 		struct i915_gem_userptr {
622 			uintptr_t ptr;
623 			unsigned long notifier_seq;
624 
625 			struct mmu_interval_notifier notifier;
626 			struct page **pvec;
627 			int page_ref;
628 		} userptr;
629 #endif
630 
631 		struct drm_mm_node *stolen;
632 
633 		unsigned long scratch;
634 		u64 encode;
635 
636 		void *gvt_info;
637 	};
638 };
639 
640 static inline struct drm_i915_gem_object *
641 to_intel_bo(struct drm_gem_object *gem)
642 {
643 	/* Assert that to_intel_bo(NULL) == NULL */
644 	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
645 
646 	return container_of(gem, struct drm_i915_gem_object, base);
647 }
648 
649 #endif
650