1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6 
7 #ifndef __I915_GEM_OBJECT_H__
8 #define __I915_GEM_OBJECT_H__
9 
10 #include <drm/drm_gem.h>
11 #include <drm/drm_file.h>
12 #include <drm/drm_device.h>
13 
14 #include "display/intel_frontbuffer.h"
15 #include "i915_gem_object_types.h"
16 #include "i915_gem_gtt.h"
17 #include "i915_vma_types.h"
18 
19 void i915_gem_init__objects(struct drm_i915_private *i915);
20 
21 struct drm_i915_gem_object *i915_gem_object_alloc(void);
22 void i915_gem_object_free(struct drm_i915_gem_object *obj);
23 
24 void i915_gem_object_init(struct drm_i915_gem_object *obj,
25 			  const struct drm_i915_gem_object_ops *ops,
26 			  struct lock_class_key *key);
27 struct drm_i915_gem_object *
28 i915_gem_object_create_shmem(struct drm_i915_private *i915,
29 			     resource_size_t size);
30 struct drm_i915_gem_object *
31 i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
32 				       const void *data, resource_size_t size);
33 
34 extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
35 void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
36 				     struct sg_table *pages,
37 				     bool needs_clflush);
38 
39 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
40 
41 void i915_gem_flush_free_objects(struct drm_i915_private *i915);
42 
43 struct sg_table *
44 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
46 
47 /**
48  * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
49  * @filp: DRM file private date
50  * @handle: userspace handle
51  *
52  * Returns:
53  *
54  * A pointer to the object named by the handle if such exists on @filp, NULL
55  * otherwise. This object is only valid whilst under the RCU read lock, and
56  * note carefully the object may be in the process of being destroyed.
57  */
58 static inline struct drm_i915_gem_object *
59 i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
60 {
61 #ifdef CONFIG_LOCKDEP
62 	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
63 #endif
64 	return idr_find(&file->object_idr, handle);
65 }
66 
67 static inline struct drm_i915_gem_object *
68 i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
69 {
70 	if (obj && !kref_get_unless_zero(&obj->base.refcount))
71 		obj = NULL;
72 
73 	return obj;
74 }
75 
76 static inline struct drm_i915_gem_object *
77 i915_gem_object_lookup(struct drm_file *file, u32 handle)
78 {
79 	struct drm_i915_gem_object *obj;
80 
81 	rcu_read_lock();
82 	obj = i915_gem_object_lookup_rcu(file, handle);
83 	obj = i915_gem_object_get_rcu(obj);
84 	rcu_read_unlock();
85 
86 	return obj;
87 }
88 
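/*
 * Note: drm_gem_object_lookup() is re-declared here as __deprecated so that
 * i915 code reaches for the typed i915_gem_object_lookup() helpers above
 * instead of the raw drm core lookup.
 */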
89 __deprecated
90 struct drm_gem_object *
91 drm_gem_object_lookup(struct drm_file *file, u32 handle);
92 
93 __attribute__((nonnull))
94 static inline struct drm_i915_gem_object *
95 i915_gem_object_get(struct drm_i915_gem_object *obj)
96 {
97 	drm_gem_object_get(&obj->base);
98 	return obj;
99 }
100 
101 __attribute__((nonnull))
102 static inline void
103 i915_gem_object_put(struct drm_i915_gem_object *obj)
104 {
105 	__drm_gem_object_put(&obj->base);
106 }
107 
108 #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
109 
110 static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
111 					 struct i915_gem_ww_ctx *ww,
112 					 bool intr)
113 {
114 	int ret;
115 
116 	if (intr)
117 		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
118 	else
119 		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
120 
121 	if (!ret && ww)
122 		list_add_tail(&obj->obj_link, &ww->obj_list);
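	/*
	 * -EALREADY means the object was already locked as part of this ww
	 * transaction; report success so callers can blindly lock everything
	 * they need.
	 */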
123 	if (ret == -EALREADY)
124 		ret = 0;
125 
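	/*
	 * -EDEADLK signals ww-mutex backoff: remember the contended object so
	 * the caller can unwind its other locks and slow-lock this one before
	 * retrying.
	 */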
126 	if (ret == -EDEADLK)
127 		ww->contended = obj;
128 
129 	return ret;
130 }
131 
132 static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
133 				       struct i915_gem_ww_ctx *ww)
134 {
135 	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
136 }
137 
138 static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
139 						     struct i915_gem_ww_ctx *ww)
140 {
141 	WARN_ON(ww && !ww->intr);
142 	return __i915_gem_object_lock(obj, ww, true);
143 }
144 
145 static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
146 {
147 	return dma_resv_trylock(obj->base.resv);
148 }
149 
150 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
151 {
152 	dma_resv_unlock(obj->base.resv);
153 }
154 
155 struct dma_fence *
156 i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
157 void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
158 				  struct dma_fence *fence);
159 
160 static inline void
161 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
162 {
163 	obj->flags |= I915_BO_READONLY;
164 }
165 
166 static inline bool
167 i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
168 {
169 	return obj->flags & I915_BO_READONLY;
170 }
171 
172 static inline bool
173 i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
174 {
175 	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
176 }
177 
178 static inline bool
179 i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
180 {
181 	return obj->flags & I915_BO_ALLOC_VOLATILE;
182 }
183 
184 static inline void
185 i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
186 {
187 	obj->flags |= I915_BO_ALLOC_VOLATILE;
188 }
189 
190 static inline bool
191 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
192 			 unsigned long flags)
193 {
194 	return obj->ops->flags & flags;
195 }
196 
197 static inline bool
198 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
199 {
200 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
201 }
202 
203 static inline bool
204 i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
205 {
206 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
207 }
208 
209 static inline bool
210 i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
211 {
212 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
213 }
214 
215 static inline bool
216 i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
217 {
218 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
219 }
220 
221 static inline bool
222 i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
223 {
224 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
225 }
226 
227 static inline bool
228 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
229 {
230 	return READ_ONCE(obj->frontbuffer);
231 }
232 
233 static inline unsigned int
234 i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
235 {
236 	return obj->tiling_and_stride & TILING_MASK;
237 }
238 
239 static inline bool
240 i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
241 {
242 	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
243 }
244 
245 static inline unsigned int
246 i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
247 {
248 	return obj->tiling_and_stride & STRIDE_MASK;
249 }
250 
251 static inline unsigned int
252 i915_gem_tile_height(unsigned int tiling)
253 {
254 	GEM_BUG_ON(!tiling);
255 	return tiling == I915_TILING_Y ? 32 : 8;
256 }
257 
258 static inline unsigned int
259 i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
260 {
261 	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
262 }
263 
264 static inline unsigned int
265 i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
266 {
267 	return (i915_gem_object_get_stride(obj) *
268 		i915_gem_object_get_tile_height(obj));
269 }
270 
271 int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
272 			       unsigned int tiling, unsigned int stride);
273 
274 struct scatterlist *
275 __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
276 			 struct i915_gem_object_page_iter *iter,
277 			 unsigned int n,
278 			 unsigned int *offset);
279 
280 static inline struct scatterlist *
281 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
282 		       unsigned int n,
283 		       unsigned int *offset)
284 {
285 	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset);
286 }
287 
288 static inline struct scatterlist *
289 i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
290 			   unsigned int n,
291 			   unsigned int *offset)
292 {
293 	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset);
294 }
295 
296 struct page *
297 i915_gem_object_get_page(struct drm_i915_gem_object *obj,
298 			 unsigned int n);
299 
300 struct page *
301 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
302 			       unsigned int n);
303 
304 dma_addr_t
305 i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
306 				    unsigned long n,
307 				    unsigned int *len);
308 
309 dma_addr_t
310 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
311 				unsigned long n);
312 
313 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
314 				 struct sg_table *pages,
315 				 unsigned int sg_page_sizes);
316 
317 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
318 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
319 
320 enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
321 	I915_MM_NORMAL = 0,
322 	/*
323 	 * Only used by struct_mutex, when called "recursively" from
324 	 * direct-reclaim-esque. Safe because there is only every one
325 	 * struct_mutex in the entire system.
326 	 */
327 	I915_MM_SHRINKER = 1,
328 	/*
329 	 * Used for obj->mm.lock when allocating pages. Safe because the object
330 	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
331 	 * it. As soon as the object has pages, obj->mm.lock nests within
332 	 * fs_reclaim.
333 	 */
334 	I915_MM_GET_PAGES = 1,
335 };
336 
337 static inline int __must_check
338 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
339 {
340 	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
341 
342 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
343 		return 0;
344 
345 	return __i915_gem_object_get_pages(obj);
346 }
347 
348 static inline bool
349 i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
350 {
351 	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
352 }
353 
354 static inline void
355 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
356 {
357 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
358 
359 	atomic_inc(&obj->mm.pages_pin_count);
360 }
361 
362 static inline bool
363 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
364 {
365 	return atomic_read(&obj->mm.pages_pin_count);
366 }
367 
368 static inline void
369 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
370 {
371 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
372 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
373 
374 	atomic_dec(&obj->mm.pages_pin_count);
375 }
376 
377 static inline void
378 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
379 {
380 	__i915_gem_object_unpin_pages(obj);
381 }
382 
383 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
384 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
385 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
386 
387 enum i915_map_type {
388 	I915_MAP_WB = 0,
389 	I915_MAP_WC,
390 #define I915_MAP_OVERRIDE BIT(31)
391 	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
392 	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
393 };
394 
395 /**
396  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
397  * @obj: the object to map into kernel address space
398  * @type: the type of mapping, used to select pgprot_t
399  *
400  * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
401  * pages and then returns a contiguous mapping of the backing storage into
402  * the kernel address space. Based on the @type of mapping, the PTE will be
403  * set to either WriteBack or WriteCombine (via pgprot_t).
404  *
405  * The caller is responsible for calling i915_gem_object_unpin_map() when the
406  * mapping is no longer required.
407  *
 * Returns: the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
410  */
411 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
412 					   enum i915_map_type type);
413 
414 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
415 				 unsigned long offset,
416 				 unsigned long size);
417 static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
418 {
419 	__i915_gem_object_flush_map(obj, 0, obj->base.size);
420 }
421 
422 /**
423  * i915_gem_object_unpin_map - releases an earlier mapping
424  * @obj: the object to unmap
425  *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
430  */
431 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
432 {
433 	i915_gem_object_unpin_pages(obj);
434 }
435 
436 void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
437 
438 void
439 i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
440 				   unsigned int flush_domains);
441 
442 int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
443 				 unsigned int *needs_clflush);
444 int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
445 				  unsigned int *needs_clflush);
446 #define CLFLUSH_BEFORE	BIT(0)
447 #define CLFLUSH_AFTER	BIT(1)
448 #define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
449 
450 static inline void
451 i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
452 {
453 	i915_gem_object_unpin_pages(obj);
454 }
455 
456 static inline struct intel_engine_cs *
457 i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
458 {
459 	struct intel_engine_cs *engine = NULL;
460 	struct dma_fence *fence;
461 
462 	rcu_read_lock();
463 	fence = dma_resv_get_excl_rcu(obj->base.resv);
464 	rcu_read_unlock();
465 
466 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
467 		engine = to_request(fence)->engine;
468 	dma_fence_put(fence);
469 
470 	return engine;
471 }
472 
473 void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
474 					 unsigned int cache_level);
475 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
476 void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
477 
478 int __must_check
479 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
480 int __must_check
481 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
482 int __must_check
483 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
484 struct i915_vma * __must_check
485 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
486 				     u32 alignment,
487 				     const struct i915_ggtt_view *view,
488 				     unsigned int flags);
489 
490 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
491 void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
492 void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
493 
494 static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
495 {
496 	if (obj->cache_dirty)
497 		return false;
498 
499 	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
500 		return true;
501 
502 	/* Currently in use by HW (display engine)? Keep flushed. */
503 	return i915_gem_object_is_framebuffer(obj);
504 }
505 
506 static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
507 {
508 	obj->read_domains = I915_GEM_DOMAIN_CPU;
509 	obj->write_domain = I915_GEM_DOMAIN_CPU;
510 	if (cpu_write_needs_clflush(obj))
511 		obj->cache_dirty = true;
512 }
513 
514 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
515 			 unsigned int flags,
516 			 long timeout);
517 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
518 				  unsigned int flags,
519 				  const struct i915_sched_attr *attr);
520 
521 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
522 					 enum fb_op_origin origin);
523 void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
524 					      enum fb_op_origin origin);
525 
526 static inline void
527 i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
528 				  enum fb_op_origin origin)
529 {
530 	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
531 		__i915_gem_object_flush_frontbuffer(obj, origin);
532 }
533 
534 static inline void
535 i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
536 				       enum fb_op_origin origin)
537 {
538 	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
539 		__i915_gem_object_invalidate_frontbuffer(obj, origin);
540 }
541 
542 #endif
543