/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "i915_gem_object_types.h"

#include "i915_gem_gtt.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully that the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
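
/*
 * Example (illustrative sketch, not a definitive usage contract): an
 * ioctl-style caller resolves a userspace handle to a full reference
 * with i915_gem_object_lookup() and drops that reference with
 * i915_gem_object_put() (declared below) when done. The surrounding
 * error policy and @args are hypothetical.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */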

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	dma_resv_lock(obj->base.resv, NULL);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	dma_resv_unlock(obj->base.resv);
}
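
/*
 * Example (illustrative sketch): object state guarded by the dma_resv
 * lock is accessed under a lock/unlock pair; the interruptible variant
 * is the usual choice on user-triggered paths. do_something_locked()
 * is a hypothetical helper, not part of this header.
 *
 *	int err;
 *
 *	err = i915_gem_object_lock_interruptible(obj);
 *	if (err)
 *		return err;
 *
 *	err = do_something_locked(obj);
 *
 *	i915_gem_object_unlock(obj);
 *	return err;
 */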

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->base.vma_node.readonly = true;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->base.vma_node.readonly;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
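
/*
 * Worked example (illustrative): with a 512 byte stride, a Y-tiled
 * object has a tile height of 32 rows, so one tile row spans
 * 512 * 32 = 16384 bytes, while an X-tiled object at the same stride
 * spans 512 * 8 = 4096 bytes per tile row.
 */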

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock(&obj->mm.lock);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
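
/*
 * Example (illustrative sketch): callers that need the backing store
 * resident bracket their access with a pin/unpin pair; the pin keeps
 * the shrinker from reaping the pages in between. The error handling
 * shown is hypothetical.
 *
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... access the backing store via obj->mm.pages ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */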

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);
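
/*
 * Example (illustrative sketch): copying a buffer into an object through
 * a temporary kernel mapping. @data and @size are hypothetical;
 * i915_gem_object_flush_map() and i915_gem_object_unpin_map() are
 * declared below.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */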

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
}
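
/*
 * Example (illustrative sketch): CPU writes bracketed by
 * i915_gem_object_prepare_write() and i915_gem_object_finish_access(),
 * honouring the CLFLUSH_* hints returned. vaddr/len and the use of
 * drm_clflush_virt_range() as the flushing primitive are assumptions
 * made for this sketch.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, len);
 *
 *	... write through vaddr ...
 *
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *
 *	i915_gem_object_finish_access(obj);
 */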

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_rcu(obj->base.resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);
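
/*
 * Example (illustrative sketch): waiting for outstanding work before a
 * CPU access path; the flag combination and the use of
 * MAX_SCHEDULE_TIMEOUT are assumptions made for this sketch.
 *
 *	int err;
 *
 *	err = i915_gem_object_wait(obj,
 *				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
 *				   MAX_SCHEDULE_TIMEOUT);
 *	if (err)
 *		return err;
 */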

#endif