xref: /openbmc/linux/drivers/gpu/drm/i915/i915_vma.c (revision e6bb1d7f)
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

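/*
 * Debug aid: report who originally inserted this vma into the drm_mm.
 * The insertion stack is recorded by CONFIG_DRM_DEBUG_MM, so if we later
 * find a vma that we cannot unbind (e.g. because it is still pinned), we
 * can print the original allocator to help track down the leak.
 */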
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long entries[12];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
	};
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	depot_fetch_stack(vma->node.stack, &trace);
	snprint_stack_trace(buf, sizeof(buf), &trace, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

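/*
 * Retirement callback, invoked when the last request using this vma on a
 * given engine is completed. Once the vma is idle on all engines it is
 * moved to the inactive list, and once the whole object is idle we drop
 * the active reference that kept it alive.
 */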
static void
i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	spin_lock(&rq->i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
	spin_unlock(&rq->i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

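/*
 * Allocate and initialise a new vma for (obj, vm, view): compute the size
 * and fence constraints implied by the view, link the vma into the object's
 * list and rb-tree, and place it on the vm's unbound list. Returns
 * ERR_PTR(-E2BIG) if the mapping cannot fit within the address space.
 */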
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}

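/*
 * Search the object's rb-tree of vmas, ordered by i915_vma_compare(), for
 * an existing vma matching (vm, view). Returns NULL if there is none.
 */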
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (GEM_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

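/*
 * Map the GGTT aperture backing this vma into kernel space as a
 * write-combining mapping, pinning the vma (and its fence) for as long as
 * the mapping is in use. The mapping is cached in vma->iomap and released
 * via i915_vma_unpin_iomap(); the device must be awake throughout.
 */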
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

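/*
 * Flush any writes made through a GGTT mmap of this vma so that they are
 * visible before a subsequent unbind (or to a third party). A no-op unless
 * the vma has been written through the GGTT since the last flush.
 */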
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	i915_gem_flush_ggtt_writes(vma->vm->i915);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

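/*
 * Check whether the vma's current binding satisfies the given size,
 * alignment and placement constraints; returns true if the vma would have
 * to be rebound elsewhere before it can be used.
 */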
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

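/*
 * Recompute whether this GGTT vma lies inside the mappable aperture and is
 * sized/aligned suitably for use with a fence register, caching the result
 * in the I915_VMA_CAN_FENCE flag.
 */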
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, we evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		cache_level = vma->obj->cache_level;
	} else {
		cache_level = 0;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32 bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&dev_priv->mm.obj_lock);
		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
		obj->bind_count++;
		spin_unlock(&dev_priv->mm.obj_lock);

		assert_bind_count(obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

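/*
 * Undo i915_vma_insert(): release the vma's pages and its drm_mm node,
 * move it back onto the unbound list, and drop the object's pages pin
 * once the object has no bindings left.
 */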
static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&i915->mm.obj_lock);
		if (--obj->bind_count == 0)
			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
		spin_unlock(&i915->mm.obj_lock);

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

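/*
 * Slow path of the i915_vma_pin() inline: insert the vma into its address
 * space if it is not already bound, then update the PTEs for the requested
 * binding. The caller has already raised the pin count, so the error paths
 * must unwind it (and remove a freshly inserted node).
 */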
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
}

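/* Resurrect a closed vma before it is reaped at the next idle point. */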
void i915_vma_reopen(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (vma->flags & I915_VMA_CLOSED) {
		vma->flags &= ~I915_VMA_CLOSED;
		list_del(&vma->closed_link);
	}
}

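/*
 * Final teardown of an idle, unbound vma: unlink it from its object and
 * address space and return it to the slab cache.
 */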
static void __i915_vma_destroy(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	int i;

	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

	list_del(&vma->obj_link);
	list_del(&vma->vm_link);
	if (vma->obj)
		rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(i915->vmas, vma);
}

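/* Unbind the vma (it must be neither pinned nor active) and free it now. */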
void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_closed(vma))
		list_del(&vma->closed_link);

	WARN_ON(i915_vma_unbind(vma));
	__i915_vma_destroy(vma);
}

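/*
 * Called when the GPU is parked (idle): reap all the vmas that were closed
 * while the GPU was busy, see i915_vma_close().
 */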
void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		GEM_BUG_ON(!i915_vma_is_closed(vma));
		i915_vma_destroy(vma);
	}

	GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

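/*
 * Zap any userspace CPU mappings of this vma made through a GGTT mmap,
 * forcing a fresh pagefault (and hence revalidation) on the next user
 * access.
 */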
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
	reservation_object_unlock(resv);
}

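/*
 * Mark the vma and its object as in use by the given request: track the
 * request for retirement, update the object's GPU read/write domains and
 * frontbuffer tracking, and export the request's fence to the object's
 * reservation object for external synchronisation.
 */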
int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = rq->engine->id;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], rq);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);
	return 0;
}

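/*
 * Wait for the vma to become idle and then tear down its binding: flush
 * pending GGTT writes, release the fence and any user mmaps, clear the
 * PTEs and remove the vma from its address space. Fails with -EBUSY if
 * the vma is still pinned.
 */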
int i915_vma_unbind(struct i915_vma *vma)
{
	unsigned long active;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		if (!ret) {
			ret = i915_gem_active_retire(&vma->last_fence,
						     &vma->vm->i915->drm.struct_mutex);
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif