/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

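/*
 * Note that @obj in i915_gem_object_size_2big() below is never dereferenced;
 * it only supplies the type of obj->base.size for the compile-time
 * overflows_type() check.
 */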
static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
				    enum i915_cache_level level);
bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
				     enum i915_cache_level lvl);
void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
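
/*
 * A minimal usage sketch of the lookup helpers above (hypothetical caller;
 * the reference must be released with i915_gem_object_put() once done):
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... use obj ...
 *
 *	i915_gem_object_put(obj);
 */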

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker exists, assert that the
 * object is held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
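
/*
 * The ww locking helpers above are normally driven by the retry loop from
 * i915_gem_ww.h. A sketch of the usual pattern, assuming the
 * for_i915_gem_ww() helper (on -EDEADLK the loop unwinds and retries; on
 * exit the ctx unlocks and puts every tracked object):
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;
 *
 *		... operate on the locked object ...
 *	}
 */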

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

/**
 * __i915_gem_object_page_iter_get_sg - helper to find the target scatterlist
 * entry and the target page position for page offset @n, using an
 * i915_gem_object_page_iter
 * @obj: i915 GEM buffer object
 * @iter: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: on return, the offset of page @n within the returned scatterlist
 *          entry, in pages
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_page_iter_get_sg()
 */
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset);

/**
 * i915_gem_object_page_iter_get_sg - wrapper macro for
 * __i915_gem_object_page_iter_get_sg()
 * @obj: i915 GEM buffer object
 * @it: i915 GEM buffer object page iterator
 * @n: page offset
 * @offset: on return, the offset of page @n within the returned scatterlist
 *          entry, in pages
 *
 * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
 *          Takes and releases the RCU lock to search the radix_tree of
 *          i915_gem_object_page_iter.
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * To avoid truncation of the page offset, the type of @n is checked at
 * compile time before calling __i915_gem_object_page_iter_get_sg().
 */
#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));		\
	__i915_gem_object_page_iter_get_sg(obj, it, n, offset);	\
})

/**
 * __i915_gem_object_get_sg - helper to find the target scatterlist entry and
 * the target page position for page offset @n within a drm_i915_gem_object
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: on return, the offset of page @n within the returned scatterlist
 *          entry, in pages
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
			 unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}

/**
 * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: on return, the offset of page @n within the returned scatterlist
 *          entry, in pages
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * To avoid truncation of the page offset, the type of @n is checked at
 * compile time before calling __i915_gem_object_get_sg().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg(obj, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_sg(obj, n, offset);	\
})

/**
 * __i915_gem_object_get_sg_dma - helper to find the target scatterlist entry
 * and the target page position for page offset @n within a drm_i915_gem_object
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: on return, the offset of page @n within the returned scatterlist
 *          entry, in pages
 *
 * It uses drm_i915_gem_object's internal DMA mapped scatterlist lookup function
 * as i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_sg_dma()
 * See also __i915_gem_object_page_iter_get_sg()
 */
static inline struct scatterlist *
__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
			     unsigned int *offset)
{
	return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}

/**
 * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @offset: on return, the offset of page @n within the returned scatterlist
 *          entry, in pages
 *
 * Returns:
 * The target scatterlist pointer and the target page position.
 *
 * To avoid truncation of the page offset, the type of @n is checked at
 * compile time before calling __i915_gem_object_get_sg_dma().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_sg_dma(obj, n, offset) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_sg_dma(obj, n, offset);	\
})

/**
 * __i915_gem_object_get_page - helper to find the target page with a page offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
 * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
 * internally.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_page()
 * See also __i915_gem_object_page_iter_get_sg()
 */
struct page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * To avoid truncation of the page offset, the type of @n is checked at
 * compile time before calling __i915_gem_object_get_page().
 * See also __i915_gem_object_page_iter_get_sg()
 */
#define i915_gem_object_get_page(obj, n) ({		\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_page(obj, n);		\
})

/**
 * __i915_gem_object_get_dirty_page - helper to find the target page with a page
 * offset
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
 *
 * Returns:
 * The target page pointer.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dirty_page()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
struct page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * The target page pointer.
 *
 * To avoid truncation of the page offset, the type of @n is checked at
 * compile time before calling __i915_gem_object_get_dirty_page().
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
 */
#define i915_gem_object_get_dirty_page(obj, n) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_dirty_page(obj, n);	\
})

/**
 * __i915_gem_object_get_dma_address_len - helper to get the bus address of a
 * targeted DMA mapped scatterlist entry from an i915 GEM buffer object, and
 * its length
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: on return, the remaining length in bytes of the DMA mapped
 *       scatterlist entry from the returned bus address
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist entry.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address_len()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
				      unsigned int *len);

/**
 * i915_gem_object_get_dma_address_len - wrapper macro for
 * __i915_gem_object_get_dma_address_len
 * @obj: i915 GEM buffer object
 * @n: page offset
 * @len: on return, the remaining length in bytes of the DMA mapped
 *       scatterlist entry from the returned bus address
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist entry.
 *
 * To avoid truncation of the page offset, the type of @n is checked at
 * compile time before calling __i915_gem_object_get_dma_address_len().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address_len()
 */
#define i915_gem_object_get_dma_address_len(obj, n, len) ({	\
	static_assert(castable_to_type(n, pgoff_t));		\
	__i915_gem_object_get_dma_address_len(obj, n, len);	\
})

/**
 * __i915_gem_object_get_dma_address - helper to get the bus address of a
 * targeted DMA mapped scatterlist entry from an i915 GEM buffer object
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist entry.
 *
 * Recommended to use wrapper macro: i915_gem_object_get_dma_address()
 * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
 */
dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);

/**
 * i915_gem_object_get_dma_address - wrapper macro for
 * __i915_gem_object_get_dma_address
 * @obj: i915 GEM buffer object
 * @n: page offset
 *
 * Returns:
 * Bus address of the targeted DMA mapped scatterlist entry.
 *
 * To avoid truncation of the page offset, the type of @n is checked at
 * compile time before calling __i915_gem_object_get_dma_address().
 * See also __i915_gem_object_page_iter_get_sg() and
 * __i915_gem_object_get_dma_address()
 */
#define i915_gem_object_get_dma_address(obj, n) ({	\
	static_assert(castable_to_type(n, pgoff_t));	\
	__i915_gem_object_get_dma_address(obj, n);	\
})
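
/*
 * A sketch of how these lookup helpers are typically used (hypothetical
 * caller; the object's pages must already be pinned, e.g. via
 * i915_gem_object_pin_pages()):
 *
 *	struct page *page = i915_gem_object_get_page(obj, n);
 *	dma_addr_t daddr = i915_gem_object_get_dma_address(obj, n);
 *
 * Passing a page offset wider than pgoff_t would trip the static_assert()
 * in the wrapper macros rather than silently truncate.
 */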

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
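
/*
 * A sketch of the pin/unpin pairing (hypothetical caller; note that
 * i915_gem_object_pin_pages() asserts the object lock is held, whereas
 * i915_gem_object_pin_pages_unlocked() acquires it internally):
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *
 *	... access the backing store, e.g. via i915_gem_object_get_page() ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */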

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
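
/*
 * A sketch of a pin_map/unpin_map round trip (hypothetical caller; the
 * I915_MAP_WB enumerator comes from i915_gem_object_types.h):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, obj->base.size);
 *
 *	i915_gem_object_unpin_map(obj);
 */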

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
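
/*
 * A sketch of CPU access bracketed by the prepare/finish helpers
 * (hypothetical caller; prepare_write() pins the pages and reports via the
 * CLFLUSH_* flags whether the caller must flush around its access):
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	... write through the CPU, honouring CLFLUSH_BEFORE/CLFLUSH_AFTER ...
 *
 *	i915_gem_object_finish_access(obj);
 */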

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
				   unsigned int pat_index);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_gtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);
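
/*
 * A sketch of waiting for outstanding work on an object (hypothetical
 * caller; the I915_WAIT_* flags and MAX_SCHEDULE_TIMEOUT are assumed to come
 * from i915_gem.h and linux/sched.h respectively):
 *
 *	err = i915_gem_object_wait(obj,
 *				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
 *				   MAX_SCHEDULE_TIMEOUT);
 *	if (err)
 *		return err;
 */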

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

/**
 * i915_gem_object_get_frontbuffer - Get the object's frontbuffer
 * @obj: The object whose frontbuffer to get.
 *
 * Get a pointer to the object's frontbuffer if such exists. Please note that
 * an RCU mechanism is used to handle e.g. the ongoing removal of the
 * frontbuffer pointer.
 *
 * Return: pointer to the object's frontbuffer if such exists or NULL
 */
898  */
899 static inline struct intel_frontbuffer *
900 i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
901 {
902 	struct intel_frontbuffer *front;
903 
904 	if (likely(!rcu_access_pointer(obj->frontbuffer)))
905 		return NULL;
906 
907 	rcu_read_lock();
908 	do {
909 		front = rcu_dereference(obj->frontbuffer);
910 		if (!front)
911 			break;
912 
913 		if (unlikely(!kref_get_unless_zero(&front->ref)))
914 			continue;
915 
916 		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
917 			break;
918 
919 		intel_frontbuffer_put(front);
920 	} while (1);
921 	rcu_read_unlock();
922 
923 	return front;
924 }
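
/*
 * A sketch of consuming the frontbuffer reference returned above
 * (hypothetical caller; the reference must be dropped with
 * intel_frontbuffer_put() once done):
 *
 *	struct intel_frontbuffer *front;
 *
 *	front = i915_gem_object_get_frontbuffer(obj);
 *	if (front) {
 *		... use front ...
 *		intel_frontbuffer_put(front);
 *	}
 */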

/**
 * i915_gem_object_set_frontbuffer - Set the object's frontbuffer
 * @obj: The object whose frontbuffer to set.
 * @front: The frontbuffer to set
 *
 * Set the object's frontbuffer pointer. If a frontbuffer is already set for
 * the object, keep it and return its pointer to the caller. Please note that
 * an RCU mechanism is used to handle e.g. the ongoing removal of the
 * frontbuffer pointer. This function is protected by
 * i915->display.fb_tracking.lock.
 *
 * Return: pointer to the frontbuffer which was set.
 */
static inline struct intel_frontbuffer *
i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
				struct intel_frontbuffer *front)
{
	struct intel_frontbuffer *cur = front;

	if (!front) {
		RCU_INIT_POINTER(obj->frontbuffer, NULL);
	} else if (rcu_access_pointer(obj->frontbuffer)) {
		cur = rcu_dereference_protected(obj->frontbuffer, true);
		kref_get(&cur->ref);
	} else {
		drm_gem_object_get(intel_bo_to_drm_bo(obj));
		rcu_assign_pointer(obj->frontbuffer, front);
	}

	return cur;
}

#endif