/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->exec_list);
	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}
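
/*
 * Illustrative usage (a sketch, not taken from this file): a typical
 * caller fetches the GGTT VMA for an object and then pins it. The
 * "i915" pointer and the PIN_GLOBAL flag below are assumed caller
 * context, not prescribed by i915_vma_instance() itself:
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 */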

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and the PTE entries are set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (vma_flags == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		if (ret)
			return ret;
	}

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}
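
/*
 * A hedged sketch of the flag translation above: PIN_GLOBAL requests a
 * GGTT binding and PIN_USER a per-process (ppGTT) binding, so a
 * hypothetical caller needing only the local mapping would do
 *
 *	err = i915_vma_bind(vma, vma->obj->cache_level, PIN_USER);
 *
 * which becomes I915_VMA_LOCAL_BIND; bits already set in vma->flags are
 * skipped unless PIN_UPDATE forces a rebind.
 */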

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}
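
/*
 * Illustrative pairing (a sketch; assumes struct_mutex and a runtime-pm
 * wakeref are already held, and "offset"/"value" are caller-supplied):
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 */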

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_unpin;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, obj->cache_level,
					   flags);
		if (ret)
			goto err_unpin;
	} else {
		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, obj->cache_level,
					  start, end, flags);
		if (ret)
			goto err_unpin;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}
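
/*
 * Pin/unpin pairing sketch: callers reach __i915_vma_do_pin() through
 * the i915_vma_pin() inline. The flags below are an assumed example,
 * not prescribed by this file:
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 *	if (err)
 *		return err;
 *
 *	... access the mappable aperture binding ...
 *
 *	i915_vma_unpin(vma);
 */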

void i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif