/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
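
/*
 * Example (hypothetical ioctl-style caller, for illustration only):
 * translate a userspace handle into a reference-counted object pointer,
 * use the object, then drop the reference. "args" is an assumed
 * caller-provided structure, not part of this header.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *	...access obj...
 *	i915_gem_object_put(obj);
 */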

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	dma_resv_lock(obj->base.resv, NULL);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}
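
/*
 * Typical locking sketch (assumed caller context; illustration only):
 * take the reservation lock around any state protected by
 * obj->base.resv, preferring the interruptible variant on
 * user-triggered paths.
 *
 *	int err;
 *
 *	err = i915_gem_object_lock_interruptible(obj);
 *	if (err)
 *		return err;
 *	assert_object_held(obj);
 *	...manipulate locked state...
 *	i915_gem_object_unlock(obj);
 */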

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
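
/*
 * Worked example (numbers illustrative): X-tiling uses a tile height of
 * 8 rows and Y-tiling 32 rows, so an X-tiled object with a 512 byte
 * stride has a tile row size of 512 * 8 = 4096 bytes, while the same
 * stride with Y-tiling covers 512 * 32 = 16384 bytes.
 */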

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	/*
	 * Only used by struct_mutex, when called "recursively" from
	 * direct-reclaim-esque contexts. Safe because there is only ever one
	 * struct_mutex in the entire system.
	 */
	I915_MM_SHRINKER = 1,
	/*
	 * Used for obj->mm.lock when allocating pages. Safe because the object
	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
	 * it. As soon as the object has pages, obj->mm.lock nests within
	 * fs_reclaim. Note this deliberately shares the subclass value with
	 * I915_MM_SHRINKER: the two names annotate different locks.
	 */
	I915_MM_GET_PAGES = 1,
};
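
/*
 * Usage sketch (hypothetical, not part of this header's API): the
 * subclass is passed to the _nested() locking primitives so lockdep can
 * distinguish the page-allocation acquisition of obj->mm.lock from the
 * normal one.
 *
 *	err = mutex_lock_interruptible_nested(&obj->mm.lock,
 *					      I915_MM_GET_PAGES);
 *	if (err)
 *		return err;
 *	...allocate and set pages...
 *	mutex_unlock(&obj->mm.lock);
 */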

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}
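
/*
 * Usage sketch (hypothetical caller): pin the backing store before
 * touching individual pages, and drop the pin when done so the shrinker
 * may reclaim them again.
 *
 *	struct page *page;
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	page = i915_gem_object_get_page(obj, 0);
 *	...access the page...
 *	i915_gem_object_unpin_pages(obj);
 */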

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);
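
/*
 * Example pairing with i915_gem_object_unpin_map() (hypothetical buffer
 * fill, shown for illustration; "data" and "size" are assumed to be
 * caller-provided):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */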

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
}
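
/*
 * CPU access sketch (assumed shmem-backed object; illustration only):
 * prepare_write() pins the pages, takes the object lock and reports via
 * @needs_clflush whether cachelines must be flushed around the access;
 * finish_access() drops both. "vaddr" and "len" stand for the caller's
 * kernel mapping of the accessed range.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, len);
 *	...write through the CPU mapping...
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *	i915_gem_object_finish_access(obj);
 */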

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_rcu(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif /* __I915_GEM_OBJECT_H__ */