1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6 
7 #ifndef __I915_GEM_OBJECT_H__
8 #define __I915_GEM_OBJECT_H__
9 
10 #include <drm/drm_gem.h>
11 #include <drm/drm_file.h>
12 #include <drm/drm_device.h>
13 
14 #include "display/intel_frontbuffer.h"
15 #include "i915_gem_object_types.h"
16 #include "i915_gem_gtt.h"
17 #include "i915_vma_types.h"
18 
19 /*
20  * XXX: There is a prevalence of the assumption that we fit the
21  * object's page count inside a 32bit _signed_ variable. Let's document
22  * this and catch if we ever need to fix it. In the meantime, if you do
23  * spot such a local variable, please consider fixing!
24  *
25  * Aside from our own locals (for which we have no excuse!):
26  * - sg_table embeds unsigned int for num_pages
 * - get_user_pages*() mixes ints with longs
28  */
29 #define GEM_CHECK_SIZE_OVERFLOW(sz) \
30 	GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)
31 
32 static inline bool i915_gem_object_size_2big(u64 size)
33 {
34 	struct drm_i915_gem_object *obj;
35 
36 	if (GEM_CHECK_SIZE_OVERFLOW(size))
37 		return true;
38 
39 	if (overflows_type(size, obj->base.size))
40 		return true;
41 
42 	return false;
43 }
44 
45 void i915_gem_init__objects(struct drm_i915_private *i915);
46 
47 struct drm_i915_gem_object *i915_gem_object_alloc(void);
48 void i915_gem_object_free(struct drm_i915_gem_object *obj);
49 
50 void i915_gem_object_init(struct drm_i915_gem_object *obj,
51 			  const struct drm_i915_gem_object_ops *ops,
52 			  struct lock_class_key *key,
53 			  unsigned alloc_flags);
54 struct drm_i915_gem_object *
55 i915_gem_object_create_shmem(struct drm_i915_private *i915,
56 			     resource_size_t size);
57 struct drm_i915_gem_object *
58 i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
59 				       const void *data, resource_size_t size);
60 
61 extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
62 
63 void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
64 				     struct sg_table *pages,
65 				     bool needs_clflush);
66 
67 int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
68 				const struct drm_i915_gem_pwrite *args);
69 int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
70 			       const struct drm_i915_gem_pread *args);
71 
72 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
73 void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
74 				     struct sg_table *pages);
75 void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
76 				    struct sg_table *pages);
77 
78 void i915_gem_flush_free_objects(struct drm_i915_private *i915);
79 
80 struct sg_table *
81 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
82 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
83 
84 /**
85  * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
86  * @filp: DRM file private date
87  * @handle: userspace handle
88  *
89  * Returns:
90  *
91  * A pointer to the object named by the handle if such exists on @filp, NULL
92  * otherwise. This object is only valid whilst under the RCU read lock, and
93  * note carefully the object may be in the process of being destroyed.
94  */
95 static inline struct drm_i915_gem_object *
96 i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
97 {
98 #ifdef CONFIG_LOCKDEP
99 	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
100 #endif
101 	return idr_find(&file->object_idr, handle);
102 }
103 
104 static inline struct drm_i915_gem_object *
105 i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
106 {
107 	if (obj && !kref_get_unless_zero(&obj->base.refcount))
108 		obj = NULL;
109 
110 	return obj;
111 }
112 
113 static inline struct drm_i915_gem_object *
114 i915_gem_object_lookup(struct drm_file *file, u32 handle)
115 {
116 	struct drm_i915_gem_object *obj;
117 
118 	rcu_read_lock();
119 	obj = i915_gem_object_lookup_rcu(file, handle);
120 	obj = i915_gem_object_get_rcu(obj);
121 	rcu_read_unlock();
122 
123 	return obj;
124 }
125 
126 __deprecated
127 struct drm_gem_object *
128 drm_gem_object_lookup(struct drm_file *file, u32 handle);
129 
130 __attribute__((nonnull))
131 static inline struct drm_i915_gem_object *
132 i915_gem_object_get(struct drm_i915_gem_object *obj)
133 {
134 	drm_gem_object_get(&obj->base);
135 	return obj;
136 }
137 
138 __attribute__((nonnull))
139 static inline void
140 i915_gem_object_put(struct drm_i915_gem_object *obj)
141 {
142 	__drm_gem_object_put(&obj->base);
143 }
144 
145 #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
146 
147 /*
148  * If more than one potential simultaneous locker, assert held.
149  */
150 static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
151 {
152 	/*
153 	 * Note mm list lookup is protected by
154 	 * kref_get_unless_zero().
155 	 */
156 	if (IS_ENABLED(CONFIG_LOCKDEP) &&
157 	    kref_read(&obj->base.refcount) > 0)
158 		assert_object_held(obj);
159 }
160 
161 static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
162 					 struct i915_gem_ww_ctx *ww,
163 					 bool intr)
164 {
165 	int ret;
166 
167 	if (intr)
168 		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
169 	else
170 		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
171 
172 	if (!ret && ww)
173 		list_add_tail(&obj->obj_link, &ww->obj_list);
174 	if (ret == -EALREADY)
175 		ret = 0;
176 
177 	if (ret == -EDEADLK)
178 		ww->contended = obj;
179 
180 	return ret;
181 }
182 
183 static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
184 				       struct i915_gem_ww_ctx *ww)
185 {
186 	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
187 }
188 
189 static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
190 						     struct i915_gem_ww_ctx *ww)
191 {
192 	WARN_ON(ww && !ww->intr);
193 	return __i915_gem_object_lock(obj, ww, true);
194 }
195 
196 static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
197 {
198 	return dma_resv_trylock(obj->base.resv);
199 }
200 
201 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
202 {
203 	if (obj->ops->adjust_lru)
204 		obj->ops->adjust_lru(obj);
205 
206 	dma_resv_unlock(obj->base.resv);
207 }
208 
209 static inline void
210 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
211 {
212 	obj->flags |= I915_BO_READONLY;
213 }
214 
215 static inline bool
216 i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
217 {
218 	return obj->flags & I915_BO_READONLY;
219 }
220 
221 static inline bool
222 i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
223 {
224 	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
225 }
226 
227 static inline bool
228 i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
229 {
230 	return obj->flags & I915_BO_ALLOC_VOLATILE;
231 }
232 
233 static inline void
234 i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
235 {
236 	obj->flags |= I915_BO_ALLOC_VOLATILE;
237 }
238 
239 static inline bool
240 i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
241 {
242 	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
243 }
244 
245 static inline void
246 i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
247 {
248 	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
249 }
250 
251 static inline void
252 i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
253 {
254 	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
255 }
256 
257 static inline bool
258 i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
259 			 unsigned long flags)
260 {
261 	return obj->ops->flags & flags;
262 }
263 
264 static inline bool
265 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
266 {
267 	return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
268 }
269 
270 static inline bool
271 i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
272 {
273 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
274 }
275 
276 static inline bool
277 i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
278 {
279 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
280 }
281 
282 static inline bool
283 i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
284 {
285 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
286 }
287 
288 static inline bool
289 i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
290 {
291 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
292 }
293 
294 static inline bool
295 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
296 {
297 	return READ_ONCE(obj->frontbuffer);
298 }
299 
300 static inline unsigned int
301 i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
302 {
303 	return obj->tiling_and_stride & TILING_MASK;
304 }
305 
306 static inline bool
307 i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
308 {
309 	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
310 }
311 
312 static inline unsigned int
313 i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
314 {
315 	return obj->tiling_and_stride & STRIDE_MASK;
316 }
317 
318 static inline unsigned int
319 i915_gem_tile_height(unsigned int tiling)
320 {
321 	GEM_BUG_ON(!tiling);
322 	return tiling == I915_TILING_Y ? 32 : 8;
323 }
324 
325 static inline unsigned int
326 i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
327 {
328 	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
329 }
330 
331 static inline unsigned int
332 i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
333 {
334 	return (i915_gem_object_get_stride(obj) *
335 		i915_gem_object_get_tile_height(obj));
336 }
337 
338 int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
339 			       unsigned int tiling, unsigned int stride);
340 
341 struct scatterlist *
342 __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
343 			 struct i915_gem_object_page_iter *iter,
344 			 unsigned int n,
345 			 unsigned int *offset, bool allow_alloc, bool dma);
346 
347 static inline struct scatterlist *
348 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
349 		       unsigned int n,
350 		       unsigned int *offset, bool allow_alloc)
351 {
352 	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc, false);
353 }
354 
355 static inline struct scatterlist *
356 i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
357 			   unsigned int n,
358 			   unsigned int *offset, bool allow_alloc)
359 {
360 	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc, true);
361 }
362 
363 struct page *
364 i915_gem_object_get_page(struct drm_i915_gem_object *obj,
365 			 unsigned int n);
366 
367 struct page *
368 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
369 			       unsigned int n);
370 
371 dma_addr_t
372 i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
373 				    unsigned long n,
374 				    unsigned int *len);
375 
376 dma_addr_t
377 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
378 				unsigned long n);
379 
380 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
381 				 struct sg_table *pages,
382 				 unsigned int sg_page_sizes);
383 
384 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
385 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
386 
387 static inline int __must_check
388 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
389 {
390 	assert_object_held(obj);
391 
392 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
393 		return 0;
394 
395 	return __i915_gem_object_get_pages(obj);
396 }
397 
398 int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
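
/*
 * Example (a minimal sketch): pin the backing store without the caller
 * holding the object lock, peek at the first backing page, then drop the
 * pin again.
 *
 *	struct page *page;
 *	int err;
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *
 *	page = i915_gem_object_get_page(obj, 0);
 *	... access page while the pin is held ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */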
399 
400 static inline bool
401 i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
402 {
403 	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
404 }
405 
406 static inline void
407 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
408 {
409 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
410 
411 	atomic_inc(&obj->mm.pages_pin_count);
412 }
413 
414 static inline bool
415 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
416 {
417 	return atomic_read(&obj->mm.pages_pin_count);
418 }
419 
420 static inline void
421 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
422 {
423 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
424 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
425 
426 	atomic_dec(&obj->mm.pages_pin_count);
427 }
428 
429 static inline void
430 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
431 {
432 	__i915_gem_object_unpin_pages(obj);
433 }
434 
435 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
437 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
438 
439 /**
440  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
441  * @obj: the object to map into kernel address space
442  * @type: the type of mapping, used to select pgprot_t
443  *
444  * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
445  * pages and then returns a contiguous mapping of the backing storage into
446  * the kernel address space. Based on the @type of mapping, the PTE will be
447  * set to either WriteBack or WriteCombine (via pgprot_t).
448  *
449  * The caller is responsible for calling i915_gem_object_unpin_map() when the
450  * mapping is no longer required.
451  *
452  * Returns the pointer through which to access the mapped object, or an
453  * ERR_PTR() on error.
454  */
455 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
456 					   enum i915_map_type type);
457 
458 void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
459 						    enum i915_map_type type);
460 
461 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
462 				 unsigned long offset,
463 				 unsigned long size);
464 static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
465 {
466 	__i915_gem_object_flush_map(obj, 0, obj->base.size);
467 }
468 
469 /**
470  * i915_gem_object_unpin_map - releases an earlier mapping
471  * @obj: the object to unmap
472  *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
477  */
478 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
479 {
480 	i915_gem_object_unpin_pages(obj);
481 }
482 
483 void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
484 
485 int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
486 				 unsigned int *needs_clflush);
487 int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
488 				  unsigned int *needs_clflush);
489 #define CLFLUSH_BEFORE	BIT(0)
490 #define CLFLUSH_AFTER	BIT(1)
491 #define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
492 
493 static inline void
494 i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
495 {
496 	i915_gem_object_unpin_pages(obj);
497 }
498 
499 static inline struct intel_engine_cs *
500 i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
501 {
502 	struct intel_engine_cs *engine = NULL;
503 	struct dma_fence *fence;
504 
505 	rcu_read_lock();
506 	fence = dma_resv_get_excl_unlocked(obj->base.resv);
507 	rcu_read_unlock();
508 
509 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
510 		engine = to_request(fence)->engine;
511 	dma_fence_put(fence);
512 
513 	return engine;
514 }
515 
516 void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
517 					 unsigned int cache_level);
518 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
519 void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
520 
521 int __must_check
522 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
523 int __must_check
524 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
525 int __must_check
526 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
527 struct i915_vma * __must_check
528 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
529 				     struct i915_gem_ww_ctx *ww,
530 				     u32 alignment,
531 				     const struct i915_ggtt_view *view,
532 				     unsigned int flags);
533 
534 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
535 void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
536 void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
537 
538 static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
539 {
540 	if (obj->cache_dirty)
541 		return false;
542 
543 	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
544 		return true;
545 
546 	/* Currently in use by HW (display engine)? Keep flushed. */
547 	return i915_gem_object_is_framebuffer(obj);
548 }
549 
550 static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
551 {
552 	obj->read_domains = I915_GEM_DOMAIN_CPU;
553 	obj->write_domain = I915_GEM_DOMAIN_CPU;
554 	if (cpu_write_needs_clflush(obj))
555 		obj->cache_dirty = true;
556 }
557 
558 void i915_gem_fence_wait_priority(struct dma_fence *fence,
559 				  const struct i915_sched_attr *attr);
560 
561 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
562 			 unsigned int flags,
563 			 long timeout);
564 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
565 				  unsigned int flags,
566 				  const struct i915_sched_attr *attr);
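
/*
 * Example (a minimal sketch; the I915_WAIT_* flags are declared elsewhere in
 * the driver): wait for all outstanding work on the object with no time
 * limit, allowing the wait to be interrupted by a signal.
 *
 *	int err;
 *
 *	err = i915_gem_object_wait(obj,
 *				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
 *				   MAX_SCHEDULE_TIMEOUT);
 *	if (err)
 *		return err;
 */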
567 
568 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
569 					 enum fb_op_origin origin);
570 void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
571 					      enum fb_op_origin origin);
572 
573 static inline void
574 i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
575 				  enum fb_op_origin origin)
576 {
577 	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
578 		__i915_gem_object_flush_frontbuffer(obj, origin);
579 }
580 
581 static inline void
582 i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
583 				       enum fb_op_origin origin)
584 {
585 	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
586 		__i915_gem_object_invalidate_frontbuffer(obj, origin);
587 }
588 
589 int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
590 
591 bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
592 
593 void __i915_gem_free_object_rcu(struct rcu_head *head);
594 
595 void __i915_gem_free_object(struct drm_i915_gem_object *obj);
596 
597 bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);
598 
599 bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
600 
601 bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj);
602 
603 #ifdef CONFIG_MMU_NOTIFIER
604 static inline bool
605 i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
606 {
607 	return obj->userptr.notifier.mm;
608 }
609 
610 int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
611 int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
612 void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj);
613 int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
614 #else
615 static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }
616 
617 static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
618 static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
619 static inline void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); }
620 static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
621 
622 #endif
623 
624 #endif
625