/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_VMA_H__
#define __I915_VMA_H__

#include <linux/io-mapping.h>
#include <linux/rbtree.h>

#include <drm/drm_mm.h>

#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"

#include "i915_gem_gtt.h"

#include "i915_active.h"
#include "i915_request.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"

struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_gtt_view *view);
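
/*
 * Example (illustrative sketch, not part of the API): looking up, or
 * creating, the GGTT binding of an object. The &ggtt->vm pointer below is
 * an assumed caller context; i915_vma_instance() returns an ERR_PTR() on
 * failure.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */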

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return !i915_active_is_idle(&vma->active);
}

/* do not reserve memory to prevent deadlocks */
#define __EXEC_OBJECT_NO_RESERVE BIT(31)
#define __EXEC_OBJECT_NO_REQUEST_AWAIT BIT(30)

int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
					  struct i915_request *rq,
					  struct dma_fence *fence,
					  unsigned int flags);
static inline int __must_check
i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
			unsigned int flags)
{
	return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
}
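
/*
 * Example (illustrative sketch only): publishing a request's use of a vma.
 * The caller is assumed to hold the vma lock (see i915_vma_lock() below)
 * and a pin on the vma; EXEC_OBJECT_WRITE marks the access as a write.
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 */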

#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)

static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_is_dpt(const struct i915_vma *vma)
{
	return i915_is_dpt(vma->vm);
}

static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
	return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
				  __i915_vma_flags(vma));
}

void i915_vma_flush_writes(struct i915_vma *vma);

static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
	clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
	return !list_empty(&vma->closed_link);
}

/* Internal use only. */
static inline u64 __i915_vma_size(const struct i915_vma *vma)
{
	return vma->node.size;
}

/**
 * i915_vma_size - Obtain the va range size of the vma
 * @vma: The vma
 *
 * GPU virtual address space may be allocated with padding. This
 * function returns the effective virtual address range size
 * with padding subtracted.
 *
 * Return: The effective virtual address range size.
 */
static inline u64 i915_vma_size(const struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	return __i915_vma_size(vma);
}

/* Internal use only. */
static inline u64 __i915_vma_offset(const struct i915_vma *vma)
{
	return vma->node.start;
}

/**
 * i915_vma_offset - Obtain the va offset of the vma
 * @vma: The vma
 *
 * GPU virtual address space may be allocated with padding. This
 * function returns the effective virtual address offset the GPU
 * should use to access the bound data.
 *
 * Return: The effective virtual address offset.
 */
static inline u64 i915_vma_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	return __i915_vma_offset(vma);
}

static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma)));
	GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma) +
				 i915_vma_size(vma) - 1));
	return lower_32_bits(i915_vma_offset(vma));
}
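
/*
 * Example (illustrative sketch only): once pinned in the GGTT, the 32-bit
 * offset is the address the GPU uses, e.g. when emitting commands. The cs
 * ring pointer and the exact MI_BATCH_BUFFER_START encoding (which varies
 * per gen) are assumed caller context:
 *
 *	u32 offset = i915_ggtt_offset(vma);
 *
 *	*cs++ = MI_BATCH_BUFFER_START;
 *	*cs++ = offset;
 */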

static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
{
	return i915_vm_to_ggtt(vma->vm)->pin_bias;
}

static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
	i915_gem_object_get(vma->obj);
	return vma;
}

static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
{
	if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
		return vma;

	return NULL;
}

static inline void i915_vma_put(struct i915_vma *vma)
{
	i915_gem_object_put(vma->obj);
}

static inline long
i915_vma_compare(struct i915_vma *vma,
		 struct i915_address_space *vm,
		 const struct i915_gtt_view *view)
{
	ptrdiff_t cmp;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));

	cmp = ptrdiff(vma->vm, vm);
	if (cmp)
		return cmp;

	BUILD_BUG_ON(I915_GTT_VIEW_NORMAL != 0);
	cmp = vma->gtt_view.type;
	if (!view)
		return cmp;

	cmp -= view->type;
	if (cmp)
		return cmp;

	assert_i915_gem_gtt_types();

	/* gtt_view.type also encodes its size so that we both distinguish
	 * different views using it as a "type" and also use a compact (no
	 * accessing of uninitialised padding bytes) memcmp without storing
	 * an extra parameter or adding more code.
	 *
	 * To ensure that the memcmp is valid for all branches of the union,
	 * even though the code looks like it is just comparing one branch,
	 * we assert below that all branches have the same address, and that
	 * each branch has a unique type/size.
	 */
	BUILD_BUG_ON(I915_GTT_VIEW_NORMAL >= I915_GTT_VIEW_PARTIAL);
	BUILD_BUG_ON(I915_GTT_VIEW_PARTIAL >= I915_GTT_VIEW_ROTATED);
	BUILD_BUG_ON(I915_GTT_VIEW_ROTATED >= I915_GTT_VIEW_REMAPPED);
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), partial));
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), remapped));
	return memcmp(&vma->gtt_view.partial, &view->partial, view->type);
}
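
/*
 * Example (illustrative sketch only): i915_vma_compare() is a tri-state
 * comparator, suitable for walking an object's rbtree of vma keyed on
 * (vm, view). The rb and pos names are assumed caller locals:
 *
 *	cmp = i915_vma_compare(pos, vm, view);
 *	if (cmp < 0)
 *		rb = rb->rb_right;
 *	else if (cmp > 0)
 *		rb = rb->rb_left;
 *	else
 *		return pos;
 */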

struct i915_vma_work *i915_vma_work(void);
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res);

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm);
int __must_check i915_vma_unbind_unlocked(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);

void i915_vma_destroy_locked(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);

#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)

static inline void i915_vma_lock(struct i915_vma *vma)
{
	dma_resv_lock(vma->obj->base.resv, NULL);
}

static inline void i915_vma_unlock(struct i915_vma *vma)
{
	dma_resv_unlock(vma->obj->base.resv);
}
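
/*
 * Example (illustrative sketch only): the vma lock is the object's
 * dma_resv, so it must be held while updating the vma's active tracking,
 * e.g. around i915_vma_move_to_active():
 *
 *	i915_vma_lock(vma);
 *	err = i915_vma_move_to_active(vma, rq, 0);
 *	i915_vma_unlock(vma);
 */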

int __must_check
i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags);

static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}
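
/*
 * Example (illustrative sketch only): binding an object into the GGTT and
 * releasing the pin when done. PIN_GLOBAL requests a GGTT binding; a size
 * and alignment of 0 accept the defaults:
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	... access the bound range ...
 *
 *	i915_vma_unpin(vma);
 */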

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags);

static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
	return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
}

static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
{
	return i915_vma_pin_count(vma);
}

static inline void __i915_vma_pin(struct i915_vma *vma)
{
	atomic_inc(&vma->flags);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
}

static inline void __i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	atomic_dec(&vma->flags);
}

static inline void i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	__i915_vma_unpin(vma);
}

static inline bool i915_vma_is_bound(const struct i915_vma *vma,
				     unsigned int where)
{
	return atomic_read(&vma->flags) & where;
}

static inline bool i915_node_color_differs(const struct drm_mm_node *node,
					   unsigned long color)
{
	return drm_mm_node_allocated(node) && node->color != color;
}

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed in VMA has to be pinned in the global GTT mappable region.
 * An extra pin is acquired for the returned iomapping; the caller must
 * call i915_vma_unpin_iomap() to relinquish it once the iomapping is no
 * longer required.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
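
/*
 * Example (illustrative sketch only): CPU writes through the mappable
 * aperture. The vma is assumed to already be pinned with PIN_MAPPABLE;
 * val and offset are hypothetical caller values:
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(val, map + offset);
 *	i915_vma_unpin_iomap(vma);
 */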

/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the mapping previously acquired with i915_vma_pin_iomap().
 *
 * This function is only valid to be called on a VMA previously
 * iomapped by the caller with i915_vma_pin_iomap().
 */
void i915_vma_unpin_iomap(struct i915_vma *vma);

/**
 * i915_vma_pin_fence - pin fencing state
 * @vma: vma to pin fencing for
 *
 * This pins the fencing state (whether tiled or untiled) to make sure the
 * vma (and its object) is ready to be used as a scanout target. Fencing
 * status must be synchronized first by calling i915_vma_get_fence().
 *
 * The resulting fence pin reference must be released again with
 * i915_vma_unpin_fence().
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
void i915_vma_revoke_fence(struct i915_vma *vma);

int __i915_vma_pin_fence(struct i915_vma *vma);

static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
	GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
	atomic_dec(&vma->fence->pin_count);
}

/**
 * i915_vma_unpin_fence - unpin fencing state
 * @vma: vma to unpin fencing for
 *
 * This releases the fence pin reference acquired through
 * i915_vma_pin_fence(). It will handle both objects with and without an
 * attached fence correctly; callers do not need to distinguish the two.
 */
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
	if (vma->fence)
		__i915_vma_unpin_fence(vma);
}
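
/*
 * Example (illustrative sketch only): holding fence state across a tiled
 * access through the aperture, on top of an already pinned vma:
 *
 *	err = i915_vma_pin_fence(vma);
 *	if (err)
 *		return err;
 *
 *	... perform the fenced access ...
 *
 *	i915_vma_unpin_fence(vma);
 */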

void i915_vma_parked(struct intel_gt *gt);

static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_mark_scanout(struct i915_vma *vma)
{
	set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_clear_scanout(struct i915_vma *vma)
{
	clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

#define for_each_until(cond) if (cond) break; else

/**
 * for_each_ggtt_vma - Iterate over the GGTT VMA belonging to an object.
 * @V: the #i915_vma iterator
 * @OBJ: the #drm_i915_gem_object
 *
 * GGTT VMA are placed at the beginning of the object's vma_list, see
 * vma_create(), so we can stop our walk as soon as we see a ppgtt VMA,
 * or the list is empty, of course.
 */
#define for_each_ggtt_vma(V, OBJ) \
	list_for_each_entry(V, &(OBJ)->vma.list, obj_link)		\
		for_each_until(!i915_vma_is_ggtt(V))
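
/*
 * Example (illustrative sketch only): walking just the GGTT bindings of an
 * object, e.g. to revoke any userspace mmaps through the aperture:
 *
 *	struct i915_vma *vma;
 *
 *	for_each_ggtt_vma(vma, obj)
 *		i915_vma_revoke_mmap(vma);
 */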

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);

int i915_vma_wait_for_bind(struct i915_vma *vma);

static inline int i915_vma_sync(struct i915_vma *vma)
{
	/* Wait for the asynchronous bindings and pending GPU reads */
	return i915_active_wait(&vma->active);
}

/**
 * i915_vma_get_current_resource - Get the current resource of the vma
 * @vma: The vma to get the current resource from.
 *
 * It's illegal to call this function if the vma is not bound.
 *
 * Return: A refcounted pointer to the current vma resource
 * of the vma, assuming the vma is bound.
 */
static inline struct i915_vma_resource *
i915_vma_get_current_resource(struct i915_vma *vma)
{
	return i915_vma_resource_get(vma->resource);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
void i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				     struct i915_vma *vma);
#endif

void i915_vma_module_exit(void);
int i915_vma_module_init(void);

I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma));
I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma));

#endif /* __I915_VMA_H__ */