/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_VMA_H__
#define __I915_VMA_H__

#include <linux/io-mapping.h>
#include <linux/rbtree.h>

#include <drm/drm_mm.h>

#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
#include "gem/i915_gem_object.h"

#include "i915_active.h"
#include "i915_request.h"

enum i915_cache_level;

/**
 * DOC: Virtual Memory Address
 *
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before the object is bound into the
 * address space, nor after it has been unbound from it.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= the object's lifetime, so object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	const struct i915_vma_ops *ops;
	struct i915_fence_reg *fence;
	struct reservation_object *resv; /** Alias of obj->resv */
	struct sg_table *pages;
	void __iomem *iomap;
	void *private; /* owned by creator */
	u64 size;
	u64 display_alignment;
	struct i915_page_sizes page_sizes;

	u32 fence_size;
	u32 fence_alignment;

	/**
	 * Count of the number of times this vma has been opened by different
	 * handles (but same file) for execbuf, i.e. the number of aliases
	 * that exist in the ctx->handle_vmas LUT for this vma.
	 */
	atomic_t open_count;
	unsigned long flags;
	/**
	 * How many users have pinned this object in GTT space.
	 *
	 * This is a tightly bound, fairly small number of users, so we
	 * stuff it inside the flags field so that we can both check for
	 * overflow and detect a no-op i915_vma_pin() in a single check,
	 * while also pinning the vma.
	 *
	 * The worst case display setup would have the same vma pinned for
	 * use on each plane on each crtc, while also building the next atomic
	 * state and holding a pin for the length of the cleanup queue. In the
	 * future, the flip queue may be increased from 1.
	 * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
	 *
	 * For GEM, the number of concurrent users for pwrite/pread is
	 * unbounded. For execbuffer, it is currently one but will in future
	 * be extended to allow multiple clients to pin vmas concurrently.
	 *
	 * We also use suballocated pages, with each suballocation claiming
	 * its own pin on the shared vma. At present, this is limited to
	 * exclusive cachelines of a single page, so a maximum of 64 possible
	 * users.
	 */
#define I915_VMA_PIN_MASK 0xff
#define I915_VMA_PIN_OVERFLOW	BIT(8)

	/** Flags and address space this VMA is bound to */
#define I915_VMA_GLOBAL_BIND	BIT(9)
#define I915_VMA_LOCAL_BIND	BIT(10)
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)

#define I915_VMA_GGTT		BIT(11)
#define I915_VMA_CAN_FENCE	BIT(12)
#define I915_VMA_USERFAULT_BIT	13
#define I915_VMA_USERFAULT	BIT(I915_VMA_USERFAULT_BIT)
#define I915_VMA_GGTT_WRITE	BIT(14)

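	/*
	 * Summary of the flags layout defined above: bits 0-7 hold the pin
	 * count (I915_VMA_PIN_MASK), bit 8 catches pin-count overflow,
	 * bits 9-10 record whether the vma is bound into the global and/or
	 * per-process GTT, and bits 11-14 carry the GGTT / CAN_FENCE /
	 * USERFAULT / GGTT_WRITE status bits.
	 */
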
	struct i915_active active;
	struct i915_active_request last_fence;

	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed by GEM
	 * functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head vm_link;

	struct list_head obj_link; /* Link in the object's VMA list */
	struct rb_node obj_node;
	struct hlist_node obj_hash;

	/** This vma's place in the execbuf reservation list */
	struct list_head exec_link;
	struct list_head reloc_link;

	/** This vma's place in the eviction list */
	struct list_head evict_link;

	struct list_head closed_link;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	unsigned int *exec_flags;
	struct hlist_node exec_node;
	u32 exec_handle;
};

struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view);

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)

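/*
 * A minimal usage sketch (illustrative only, not lifted from the driver):
 * look up the default GGTT view of an object and, once the elided code has
 * pinned the vma and taken its own reference on the object, drop both in one
 * call. "ggtt" here is an assumed pointer to the global GTT.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	...
 *	i915_vma_unpin_and_release(&vma, 0);
 */
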
static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return !i915_active_is_idle(&vma->active);
}

int __must_check i915_vma_move_to_active(struct i915_vma *vma,
					 struct i915_request *rq,
					 unsigned int flags);

static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_GGTT;
}

static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_GGTT_WRITE;
}

static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	vma->flags |= I915_VMA_GGTT_WRITE;
}

static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
	vma->flags &= ~I915_VMA_GGTT_WRITE;
}

void i915_vma_flush_writes(struct i915_vma *vma);
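
/*
 * Illustrative sketch only (external callers rarely drive these directly;
 * the write hint is typically set by i915_vma_pin_iomap() itself): after
 * writing through the GGTT, mark the vma dirty and flush it before the
 * backing pages can be reused.
 *
 *	i915_vma_set_ggtt_write(vma);
 *	...
 *	i915_vma_flush_writes(vma);	(a no-op unless GGTT_WRITE is set)
 */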

static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_CAN_FENCE;
}

static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}

static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
	return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}

static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}

static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
	return !list_empty(&vma->closed_link);
}

static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->node.allocated);
	GEM_BUG_ON(upper_32_bits(vma->node.start));
	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
	return lower_32_bits(vma->node.start);
}

static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
{
	return i915_vm_to_ggtt(vma->vm)->pin_bias;
}

static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
	i915_gem_object_get(vma->obj);
	return vma;
}

static inline void i915_vma_put(struct i915_vma *vma)
{
	i915_gem_object_put(vma->obj);
}

static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
{
	return a - b;
}

static inline long
i915_vma_compare(struct i915_vma *vma,
		 struct i915_address_space *vm,
		 const struct i915_ggtt_view *view)
{
	ptrdiff_t cmp;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));

	cmp = ptrdiff(vma->vm, vm);
	if (cmp)
		return cmp;

	BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
	cmp = vma->ggtt_view.type;
	if (!view)
		return cmp;

	cmp -= view->type;
	if (cmp)
		return cmp;

	assert_i915_gem_gtt_types();

	/* ggtt_view.type also encodes its size so that we both distinguish
	 * different views using it as a "type" and also use a compact (no
	 * accessing of uninitialised padding bytes) memcmp without storing
	 * an extra parameter or adding more code.
	 *
	 * To ensure that the memcmp is valid for all branches of the union,
	 * even though the code looks like it is just comparing one branch,
	 * we assert above that all branches have the same address, and that
	 * each branch has a unique type/size.
	 */
	BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
	BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
	BUILD_BUG_ON(I915_GGTT_VIEW_ROTATED >= I915_GGTT_VIEW_REMAPPED);
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), partial));
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), remapped));
	return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
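
/*
 * Usage sketch (illustrative): i915_vma_compare() is meant as the comparator
 * when searching an object's VMA tree for an existing (vm, view) binding, so
 * an exact match is simply a zero return; the surrounding tree walk is
 * assumed here.
 *
 *	if (i915_vma_compare(vma, vm, view) == 0)
 *		return vma;
 */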

int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags);
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);

#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)

static inline void i915_vma_lock(struct i915_vma *vma)
{
	reservation_object_lock(vma->resv, NULL);
}

static inline void i915_vma_unlock(struct i915_vma *vma)
{
	reservation_object_unlock(vma->resv);
}
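
/*
 * Usage sketch (illustrative; "rq" and the write flag are assumptions): the
 * vma lock protects the reservation object shared with the backing object,
 * e.g. while publishing a fence via i915_vma_move_to_active().
 *
 *	i915_vma_lock(vma);
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	i915_vma_unlock(vma);
 *	if (err)
 *		return err;
 */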

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags);
static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	/* Pin early to prevent the shrinker/eviction logic from destroying
	 * our vma as we insert and bind.
	 */
	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
		return 0;
	}

	return __i915_vma_do_pin(vma, size, alignment, flags);
}
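
/*
 * Usage sketch (illustrative only): a transient pin into the global GTT.
 * Passing 0 for size and alignment lets the implementation fall back to the
 * vma's own size and minimum alignment; the error handling is an assumption.
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_unpin(vma);
 */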

static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
	return vma->flags & I915_VMA_PIN_MASK;
}

static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
{
	return i915_vma_pin_count(vma);
}

static inline void __i915_vma_pin(struct i915_vma *vma)
{
	vma->flags++;
	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
}

static inline void __i915_vma_unpin(struct i915_vma *vma)
{
	vma->flags--;
}

static inline void i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	__i915_vma_unpin(vma);
}

static inline bool i915_vma_is_bound(const struct i915_vma *vma,
				     unsigned int where)
{
	return vma->flags & where;
}

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed in VMA has to be pinned in the global GTT mappable region.
 * An extra pinning of the VMA is acquired for the returned iomapping; the
 * caller must call i915_vma_unpin_iomap() to relinquish that pinning once
 * the iomapping is no longer required.
 *
 * Callers must hold the struct_mutex.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))

/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the mapping previously acquired with i915_vma_pin_iomap().
 *
 * Callers must hold the struct_mutex. This function is only valid to be
 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
 */
void i915_vma_unpin_iomap(struct i915_vma *vma);

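/*
 * Usage sketch (illustrative; "offset", "data" and "len" are assumptions):
 * a short CPU write through the aperture, dropping the extra pin as soon as
 * the access is done.
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	memcpy_toio(map + offset, data, len);
 *	i915_vma_unpin_iomap(vma);
 */
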
static inline struct page *i915_vma_first_page(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);
	return sg_page(vma->pages->sgl);
}

/**
 * i915_vma_pin_fence - pin fencing state
 * @vma: vma to pin fencing for
 *
 * This pins the fencing state (whether tiled or untiled) to make sure the
 * vma (and its object) is ready to be used as a scanout target. Fencing
 * status must be synchronized first by calling i915_vma_get_fence().
 *
 * The resulting fence pin reference must be released again with
 * i915_vma_unpin_fence().
 *
 * Returns:
 *
 * 0 on success, a negative error code on failure.
 */
int i915_vma_pin_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);

static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->fence->pin_count <= 0);
	vma->fence->pin_count--;
}

/**
 * i915_vma_unpin_fence - unpin fencing state
 * @vma: vma to unpin fencing for
 *
 * This releases the fence pin reference acquired through
 * i915_vma_pin_fence. It will handle both objects with and without an
 * attached fence correctly; callers do not need to distinguish between the two.
 */
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
	/* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
	if (vma->fence)
		__i915_vma_unpin_fence(vma);
}
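
/*
 * Usage sketch (illustrative only): keep the fence state pinned for the
 * duration of a scanout; the vma itself is assumed to already hold a GGTT
 * pin acquired elsewhere.
 *
 *	ret = i915_vma_pin_fence(vma);
 *	if (ret)
 *		return ret;
 *	...
 *	i915_vma_unpin_fence(vma);
 */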

void i915_vma_parked(struct drm_i915_private *i915);

#define for_each_until(cond) if (cond) break; else

/**
 * for_each_ggtt_vma - Iterate over the GGTT VMAs belonging to an object.
 * @V: the #i915_vma iterator
 * @OBJ: the #drm_i915_gem_object
 *
 * GGTT VMAs are placed at the beginning of the object's vma_list, see
 * vma_create(), so we can stop our walk as soon as we see a ppgtt VMA,
 * or as soon as the list is empty.
 */
#define for_each_ggtt_vma(V, OBJ) \
	list_for_each_entry(V, &(OBJ)->vma.list, obj_link)		\
		for_each_until(!i915_vma_is_ggtt(V))
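
/*
 * Usage sketch (illustrative; whether a caller does exactly this is an
 * assumption): revoke the CPU mmaps on every GGTT binding of an object.
 * The walk stops at the first ppgtt vma thanks to for_each_until().
 *
 *	for_each_ggtt_vma(vma, obj)
 *		i915_vma_revoke_mmap(vma);
 */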

struct i915_vma *i915_vma_alloc(void);
void i915_vma_free(struct i915_vma *vma);

#endif