/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_VMA_H__
#define __I915_VMA_H__

#include <linux/io-mapping.h>
#include <linux/rbtree.h>

#include <drm/drm_mm.h>

#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"

#include "i915_gem_gtt.h"

#include "i915_active.h"
#include "i915_request.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"

struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_gtt_view *view);

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return !i915_active_is_idle(&vma->active);
}

/* do not reserve memory to prevent deadlocks */
#define __EXEC_OBJECT_NO_RESERVE BIT(31)
#define __EXEC_OBJECT_NO_REQUEST_AWAIT BIT(30)

int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
					  struct i915_request *rq,
					  struct dma_fence *fence,
					  unsigned int flags);
static inline int __must_check
i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
			unsigned int flags)
{
	return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
}

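/*
 * Illustrative sketch (not part of this header): tracking a vma's use by a
 * request. Callers typically hold the object's dma_resv lock across the call;
 * EXEC_OBJECT_WRITE is the usual uapi flag used to mark a GPU write. Names
 * such as example_track_write() are hypothetical.
 *
 *	static int example_track_write(struct i915_vma *vma,
 *				       struct i915_request *rq)
 *	{
 *		int err;
 *
 *		i915_vma_lock(vma);
 *		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *		i915_vma_unlock(vma);
 *
 *		return err;
 *	}
 */
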
#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)

static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_is_dpt(const struct i915_vma *vma)
{
	return i915_is_dpt(vma->vm);
}

static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
	return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
				  __i915_vma_flags(vma));
}

void i915_vma_flush_writes(struct i915_vma *vma);

static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
	return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
	return !list_empty(&vma->closed_link);
}

static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(upper_32_bits(vma->node.start));
	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
	return lower_32_bits(vma->node.start);
}

static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
{
	return i915_vm_to_ggtt(vma->vm)->pin_bias;
}

static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
	i915_gem_object_get(vma->obj);
	return vma;
}

static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
{
	if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
		return vma;

	return NULL;
}

static inline void i915_vma_put(struct i915_vma *vma)
{
	i915_gem_object_put(vma->obj);
}

static inline long
i915_vma_compare(struct i915_vma *vma,
		 struct i915_address_space *vm,
		 const struct i915_gtt_view *view)
{
	ptrdiff_t cmp;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));

	cmp = ptrdiff(vma->vm, vm);
	if (cmp)
		return cmp;

	BUILD_BUG_ON(I915_GTT_VIEW_NORMAL != 0);
	cmp = vma->gtt_view.type;
	if (!view)
		return cmp;

	cmp -= view->type;
	if (cmp)
		return cmp;

	assert_i915_gem_gtt_types();

	/* gtt_view.type also encodes its size so that we both distinguish
	 * different views using it as a "type" and also use a compact (no
	 * accessing of uninitialised padding bytes) memcmp without storing
	 * an extra parameter or adding more code.
	 *
	 * To ensure that the memcmp is valid for all branches of the union,
	 * even though the code looks like it is just comparing one branch,
	 * we assert below that all branches have the same address, and that
	 * each branch has a unique type/size.
	 */
	BUILD_BUG_ON(I915_GTT_VIEW_NORMAL >= I915_GTT_VIEW_PARTIAL);
	BUILD_BUG_ON(I915_GTT_VIEW_PARTIAL >= I915_GTT_VIEW_ROTATED);
	BUILD_BUG_ON(I915_GTT_VIEW_ROTATED >= I915_GTT_VIEW_REMAPPED);
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), partial));
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), remapped));
	return memcmp(&vma->gtt_view.partial, &view->partial, view->type);
}

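/*
 * Illustrative sketch (not part of this header): i915_vma_compare() is meant
 * as the comparator when walking an object's vma rbtree. Assuming the
 * obj->vma.tree / vma->obj_node layout set up by vma_create(), a lookup is
 * roughly:
 *
 *	struct rb_node *rb = obj->vma.tree.rb_node;
 *
 *	while (rb) {
 *		struct i915_vma *pos = rb_entry(rb, struct i915_vma, obj_node);
 *		long cmp = i915_vma_compare(pos, vm, view);
 *
 *		if (cmp == 0)
 *			return pos;
 *
 *		rb = cmp < 0 ? rb->rb_right : rb->rb_left;
 *	}
 *	return NULL;
 */
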
struct i915_vma_work *i915_vma_work(void);
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res);

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm);
int __must_check i915_vma_unbind_unlocked(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);

void i915_vma_destroy_locked(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);

#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)

static inline void i915_vma_lock(struct i915_vma *vma)
{
	dma_resv_lock(vma->obj->base.resv, NULL);
}

static inline void i915_vma_unlock(struct i915_vma *vma)
{
	dma_resv_unlock(vma->obj->base.resv);
}

int __must_check
i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags);

static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}

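/*
 * Illustrative sketch (not part of this header): looking up and pinning an
 * object into the GGTT. PIN_GLOBAL/PIN_MAPPABLE are the usual pin flags from
 * i915_gem_gtt.h; example_pin_ggtt() is a hypothetical name and error
 * handling is abbreviated.
 *
 *	static int example_pin_ggtt(struct i915_ggtt *ggtt,
 *				    struct drm_i915_gem_object *obj)
 *	{
 *		struct i915_vma *vma;
 *		int err;
 *
 *		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *		if (IS_ERR(vma))
 *			return PTR_ERR(vma);
 *
 *		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 *		if (err)
 *			return err;
 *
 *		... use i915_ggtt_offset(vma) while pinned ...
 *
 *		i915_vma_unpin(vma);
 *		return 0;
 *	}
 */
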
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags);

static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
	return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
}

static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
{
	return i915_vma_pin_count(vma);
}

static inline void __i915_vma_pin(struct i915_vma *vma)
{
	atomic_inc(&vma->flags);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
}

static inline void __i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	atomic_dec(&vma->flags);
}

static inline void i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	__i915_vma_unpin(vma);
}

static inline bool i915_vma_is_bound(const struct i915_vma *vma,
				     unsigned int where)
{
	return atomic_read(&vma->flags) & where;
}

static inline bool i915_node_color_differs(const struct drm_mm_node *node,
					   unsigned long color)
{
	return drm_mm_node_allocated(node) && node->color != color;
}

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed-in VMA has to be pinned in the global GTT mappable region.
 * An extra pin on the VMA is acquired for the returned iomapping; the
 * caller must call i915_vma_unpin_iomap() to relinquish it once the
 * iomapping is no longer required.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);

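/*
 * Illustrative sketch (not part of this header): CPU access through the
 * mappable aperture, assuming the vma is already pinned with PIN_MAPPABLE.
 * The extra pin taken by i915_vma_pin_iomap() is dropped again by
 * i915_vma_unpin_iomap().
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	memset_io(map, 0, PAGE_SIZE);
 *
 *	i915_vma_unpin_iomap(vma);
 */
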
/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the mapping previously obtained with i915_vma_pin_iomap().
 *
 * This function is only valid to be called on a VMA previously
 * iomapped by the caller with i915_vma_pin_iomap().
 */
void i915_vma_unpin_iomap(struct i915_vma *vma);

/**
 * i915_vma_pin_fence - pin fencing state
 * @vma: vma to pin fencing for
 *
 * This pins the fencing state (whether tiled or untiled) to make sure the
 * vma (and its object) is ready to be used as a scanout target. Fencing
 * status must be synchronized first by calling i915_vma_get_fence().
 *
 * The resulting fence pin reference must be released again with
 * i915_vma_unpin_fence().
 *
 * Returns:
 *
 * 0 on success, a negative error code on failure.
 */
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
void i915_vma_revoke_fence(struct i915_vma *vma);

int __i915_vma_pin_fence(struct i915_vma *vma);

static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
	GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
	atomic_dec(&vma->fence->pin_count);
}

/**
 * i915_vma_unpin_fence - unpin fencing state
 * @vma: vma to unpin fencing for
 *
 * This releases the fence pin reference acquired through
 * i915_vma_pin_fence. It handles both objects with and without an
 * attached fence correctly; callers do not need to distinguish the two.
 */
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
	if (vma->fence)
		__i915_vma_unpin_fence(vma);
}

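/*
 * Illustrative sketch (not part of this header): taking the fence pin around
 * a tiled access through the aperture; the pin is dropped again regardless of
 * whether a fence register was actually assigned (vma->fence may be NULL).
 *
 *	err = i915_vma_pin_fence(vma);
 *	if (err)
 *		return err;
 *
 *	... access the object through the GTT ...
 *
 *	i915_vma_unpin_fence(vma);
 */
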
void i915_vma_parked(struct intel_gt *gt);

static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_mark_scanout(struct i915_vma *vma)
{
	set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_clear_scanout(struct i915_vma *vma)
{
	clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

#define for_each_until(cond) if (cond) break; else

/**
 * for_each_ggtt_vma - Iterate over the GGTT VMAs belonging to an object.
 * @V: the #i915_vma iterator
 * @OBJ: the #drm_i915_gem_object
 *
 * GGTT VMAs are placed at the beginning of the object's vma.list, see
 * vma_create(), so we can stop our walk as soon as we see a ppgtt VMA,
 * or when the list is empty.
 */
#define for_each_ggtt_vma(V, OBJ) \
	list_for_each_entry(V, &(OBJ)->vma.list, obj_link)		\
		for_each_until(!i915_vma_is_ggtt(V))

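/*
 * Illustrative sketch (not part of this header): walking only the GGTT
 * bindings of an object, e.g. to flush pending GGTT writes; the walk stops
 * at the first ppgtt vma thanks to for_each_until().
 *
 *	struct i915_vma *vma;
 *
 *	for_each_ggtt_vma(vma, obj)
 *		i915_vma_flush_writes(vma);
 */
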
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);

int i915_vma_wait_for_bind(struct i915_vma *vma);

static inline int i915_vma_sync(struct i915_vma *vma)
{
	/* Wait for the asynchronous bindings and pending GPU reads */
	return i915_active_wait(&vma->active);
}

/**
 * i915_vma_get_current_resource - Get the current resource of the vma
 * @vma: The vma to get the current resource from.
 *
 * It's illegal to call this function if the vma is not bound.
 *
 * Return: A refcounted pointer to the current vma resource
 * of the vma, assuming the vma is bound.
 */
static inline struct i915_vma_resource *
i915_vma_get_current_resource(struct i915_vma *vma)
{
	return i915_vma_resource_get(vma->resource);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
void i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				     struct i915_vma *vma);
#endif

void i915_vma_module_exit(void);
int i915_vma_module_init(void);

I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma));
I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma));

#endif