/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

/*
 * XXX: There is a prevalence of the assumption that we fit the
 * object's page count inside a 32bit _signed_ variable. Let's document
 * this and catch if we ever need to fix it. In the meantime, if you do
 * spot such a local variable, please consider fixing!
 *
 * Aside from our own locals (for which we have no excuse!):
 * - sg_table embeds unsigned int for num_pages
 * - get_user_pages*() mixes ints with longs
 */
#define GEM_CHECK_SIZE_OVERFLOW(sz) \
	GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (GEM_CHECK_SIZE_OVERFLOW(size))
		return true;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}
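/*
 * Illustrative sketch (not lifted from any particular caller): creation
 * paths can use the check above to reject oversized requests up front, the
 * error code here being purely an example:
 *
 *	if (i915_gem_object_size_2big(size))
 *		return ERR_PTR(-E2BIG);
 */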

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully that the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
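/*
 * Illustrative sketch (args->handle is a stand-in for whatever carries the
 * userspace handle): the usual ioctl pattern resolves the handle to a full
 * reference with the helper above and drops it again when done.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */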

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If there is more than one potential simultaneous locker, assert that the
 * object lock is held.
 */
static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww)
		list_add_tail(&obj->obj_link, &ww->obj_list);
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK)
		ww->contended = obj;

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}
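/*
 * Illustrative sketch of the ww backoff loop callers are expected to wrap
 * around the locking helpers above (the ctx helpers live in i915_gem_ww.h):
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... operate on obj while the lock is held ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */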

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
}

static inline bool
i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
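/*
 * Worked example (values purely illustrative): an X-tiled surface with a
 * 512-byte stride has 8-row tiles, so one tile row spans 512 * 8 = 4096
 * bytes; a Y-tiled surface with the same stride spans 512 * 32 = 16384.
 */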

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset, bool allow_alloc);

static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset, bool allow_alloc)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc);
}

static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
			   unsigned int *offset, bool allow_alloc)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc);
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
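/*
 * Illustrative sketch (n and the surrounding error handling are stand-ins):
 * pin the backing store around direct page access, with the object lock
 * already held as i915_gem_object_pin_pages() asserts above.
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	page = i915_gem_object_get_page(obj, n);
 *	... access the page ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */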

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);
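/*
 * Illustrative sketch (assumes the object lock is held and uses I915_MAP_WB
 * purely as an example of an i915_map_type): map, touch, then unpin.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, obj->base.size);
 *
 *	i915_gem_object_unpin_map(obj);
 */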

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
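/*
 * Illustrative sketch (vaddr/len stand in for however the caller maps the
 * pages, and the object lock is assumed held where required): CPU writes
 * bracketed by the prepare/finish helpers above, applying the CLFLUSH_*
 * hints via drm_clflush_virt_range().
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, len);
 *
 *	... write through vaddr ...
 *
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *
 *	i915_gem_object_finish_access(obj);
 */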

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_unlocked(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

#endif