xref: /openbmc/linux/drivers/gpu/drm/i915/i915_vma.c (revision 5e3eb862)
1b42fe9caSJoonas Lahtinen /*
2b42fe9caSJoonas Lahtinen  * Copyright © 2016 Intel Corporation
3b42fe9caSJoonas Lahtinen  *
4b42fe9caSJoonas Lahtinen  * Permission is hereby granted, free of charge, to any person obtaining a
5b42fe9caSJoonas Lahtinen  * copy of this software and associated documentation files (the "Software"),
6b42fe9caSJoonas Lahtinen  * to deal in the Software without restriction, including without limitation
7b42fe9caSJoonas Lahtinen  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8b42fe9caSJoonas Lahtinen  * and/or sell copies of the Software, and to permit persons to whom the
9b42fe9caSJoonas Lahtinen  * Software is furnished to do so, subject to the following conditions:
10b42fe9caSJoonas Lahtinen  *
11b42fe9caSJoonas Lahtinen  * The above copyright notice and this permission notice (including the next
12b42fe9caSJoonas Lahtinen  * paragraph) shall be included in all copies or substantial portions of the
13b42fe9caSJoonas Lahtinen  * Software.
14b42fe9caSJoonas Lahtinen  *
15b42fe9caSJoonas Lahtinen  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16b42fe9caSJoonas Lahtinen  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17b42fe9caSJoonas Lahtinen  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18b42fe9caSJoonas Lahtinen  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19b42fe9caSJoonas Lahtinen  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20b42fe9caSJoonas Lahtinen  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21b42fe9caSJoonas Lahtinen  * IN THE SOFTWARE.
22b42fe9caSJoonas Lahtinen  *
23b42fe9caSJoonas Lahtinen  */
24b42fe9caSJoonas Lahtinen 
2509480072SChris Wilson #include <linux/sched/mm.h>
26420a07b8SNirmoy Das #include <linux/dma-fence-array.h>
27df0566a6SJani Nikula #include <drm/drm_gem.h>
28112ed2d3SChris Wilson 
29acc855d3SJani Nikula #include "display/intel_display.h"
30df0566a6SJani Nikula #include "display/intel_frontbuffer.h"
314bc91dbdSAnusha Srivatsa #include "gem/i915_gem_lmem.h"
32386e75a4SJani Nikula #include "gem/i915_gem_tiling.h"
33df0566a6SJani Nikula #include "gt/intel_engine.h"
34ccd20945SChris Wilson #include "gt/intel_engine_heartbeat.h"
35a1c8a09eSTvrtko Ursulin #include "gt/intel_gt.h"
365e3eb862SJanusz Krzysztofik #include "gt/intel_gt_pm.h"
37ccd20945SChris Wilson #include "gt/intel_gt_requests.h"
38568a2e6fSChris Wilson #include "gt/intel_tlb.h"
39b42fe9caSJoonas Lahtinen 
40b42fe9caSJoonas Lahtinen #include "i915_drv.h"
412ef97818SJani Nikula #include "i915_gem_evict.h"
422850748eSChris Wilson #include "i915_sw_fence_work.h"
43a09d9a80SJani Nikula #include "i915_trace.h"
44df0566a6SJani Nikula #include "i915_vma.h"
45e1a4bbb6SThomas Hellström #include "i915_vma_resource.h"
46b42fe9caSJoonas Lahtinen 
47a594525cSMaarten Lankhorst static inline void assert_vma_held_evict(const struct i915_vma *vma)
48a594525cSMaarten Lankhorst {
49a594525cSMaarten Lankhorst 	/*
50a594525cSMaarten Lankhorst 	 * We may be forced to unbind when the vm is dead, to clean it up.
51a594525cSMaarten Lankhorst 	 * This is the only exception to the requirement of the object lock
52a594525cSMaarten Lankhorst 	 * being held.
53a594525cSMaarten Lankhorst 	 */
54e1a7ab4fSThomas Hellström 	if (kref_read(&vma->vm->ref))
55a594525cSMaarten Lankhorst 		assert_object_held_shared(vma->obj);
56a594525cSMaarten Lankhorst }
57a594525cSMaarten Lankhorst 
5864fc7cc7SDaniel Vetter static struct kmem_cache *slab_vmas;
5913f1bfd3SChris Wilson 
60e6e1a304SMaarten Lankhorst static struct i915_vma *i915_vma_alloc(void)
6113f1bfd3SChris Wilson {
6264fc7cc7SDaniel Vetter 	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
6313f1bfd3SChris Wilson }
6413f1bfd3SChris Wilson 
65e6e1a304SMaarten Lankhorst static void i915_vma_free(struct i915_vma *vma)
6613f1bfd3SChris Wilson {
6764fc7cc7SDaniel Vetter 	return kmem_cache_free(slab_vmas, vma);
6813f1bfd3SChris Wilson }
6913f1bfd3SChris Wilson 
701eca65d9SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
7110195b1eSChris Wilson 
7210195b1eSChris Wilson #include <linux/stackdepot.h>
7310195b1eSChris Wilson 
7410195b1eSChris Wilson static void vma_print_allocator(struct i915_vma *vma, const char *reason)
7510195b1eSChris Wilson {
7610195b1eSChris Wilson 	char buf[512];
7710195b1eSChris Wilson 
7810195b1eSChris Wilson 	if (!vma->node.stack) {
79b364f3cdSUwe Kleine-König 		drm_dbg(vma->obj->base.dev,
80a10234fdSTvrtko Ursulin 			"vma.node [%08llx + %08llx] %s: unknown owner\n",
8110195b1eSChris Wilson 			vma->node.start, vma->node.size, reason);
8210195b1eSChris Wilson 		return;
8310195b1eSChris Wilson 	}
8410195b1eSChris Wilson 
850f68d45eSImran Khan 	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
86b364f3cdSUwe Kleine-König 	drm_dbg(vma->obj->base.dev,
87a10234fdSTvrtko Ursulin 		"vma.node [%08llx + %08llx] %s: inserted at %s\n",
8810195b1eSChris Wilson 		vma->node.start, vma->node.size, reason, buf);
8910195b1eSChris Wilson }
9010195b1eSChris Wilson 
9110195b1eSChris Wilson #else
9210195b1eSChris Wilson 
9310195b1eSChris Wilson static void vma_print_allocator(struct i915_vma *vma, const char *reason)
9410195b1eSChris Wilson {
9510195b1eSChris Wilson }
9610195b1eSChris Wilson 
9710195b1eSChris Wilson #endif
9810195b1eSChris Wilson 
9912c255b5SChris Wilson static inline struct i915_vma *active_to_vma(struct i915_active *ref)
10012c255b5SChris Wilson {
10112c255b5SChris Wilson 	return container_of(ref, typeof(struct i915_vma), active);
10212c255b5SChris Wilson }
10312c255b5SChris Wilson 
10412c255b5SChris Wilson static int __i915_vma_active(struct i915_active *ref)
10512c255b5SChris Wilson {
1065e3eb862SJanusz Krzysztofik 	struct i915_vma *vma = active_to_vma(ref);
1075e3eb862SJanusz Krzysztofik 
1085e3eb862SJanusz Krzysztofik 	if (!i915_vma_tryget(vma))
1095e3eb862SJanusz Krzysztofik 		return -ENOENT;
1105e3eb862SJanusz Krzysztofik 
1115e3eb862SJanusz Krzysztofik 	/*
1125e3eb862SJanusz Krzysztofik 	 * Exclude global GTT VMA from holding a GT wakeref
1135e3eb862SJanusz Krzysztofik 	 * while active, otherwise the GPU never goes idle.
1145e3eb862SJanusz Krzysztofik 	 */
1155e3eb862SJanusz Krzysztofik 	if (!i915_vma_is_ggtt(vma))
1165e3eb862SJanusz Krzysztofik 		intel_gt_pm_get(vma->vm->gt);
1175e3eb862SJanusz Krzysztofik 
1185e3eb862SJanusz Krzysztofik 	return 0;
11912c255b5SChris Wilson }
12012c255b5SChris Wilson 
12164d6c500SChris Wilson static void __i915_vma_retire(struct i915_active *ref)
12264d6c500SChris Wilson {
1235e3eb862SJanusz Krzysztofik 	struct i915_vma *vma = active_to_vma(ref);
1245e3eb862SJanusz Krzysztofik 
1255e3eb862SJanusz Krzysztofik 	if (!i915_vma_is_ggtt(vma)) {
1265e3eb862SJanusz Krzysztofik 		/*
1275e3eb862SJanusz Krzysztofik 		 * Since we can be called from atomic contexts,
1285e3eb862SJanusz Krzysztofik 		 * use an async variant of intel_gt_pm_put().
1295e3eb862SJanusz Krzysztofik 		 */
1305e3eb862SJanusz Krzysztofik 		intel_gt_pm_put_async(vma->vm->gt);
1315e3eb862SJanusz Krzysztofik 	}
1325e3eb862SJanusz Krzysztofik 
1335e3eb862SJanusz Krzysztofik 	i915_vma_put(vma);
134b42fe9caSJoonas Lahtinen }
135b42fe9caSJoonas Lahtinen 
136b42fe9caSJoonas Lahtinen static struct i915_vma *
137a01cb37aSChris Wilson vma_create(struct drm_i915_gem_object *obj,
138b42fe9caSJoonas Lahtinen 	   struct i915_address_space *vm,
1393bb6a442SNiranjana Vishwanathapura 	   const struct i915_gtt_view *view)
140b42fe9caSJoonas Lahtinen {
14103fca66bSChris Wilson 	struct i915_vma *pos = ERR_PTR(-E2BIG);
142b42fe9caSJoonas Lahtinen 	struct i915_vma *vma;
143b42fe9caSJoonas Lahtinen 	struct rb_node *rb, **p;
144e1a7ab4fSThomas Hellström 	int err;
145b42fe9caSJoonas Lahtinen 
146e1cc3db0SChris Wilson 	/* The aliasing_ppgtt should never be used directly! */
14771e51ca8SChris Wilson 	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
148e1cc3db0SChris Wilson 
14913f1bfd3SChris Wilson 	vma = i915_vma_alloc();
150b42fe9caSJoonas Lahtinen 	if (vma == NULL)
151b42fe9caSJoonas Lahtinen 		return ERR_PTR(-ENOMEM);
152b42fe9caSJoonas Lahtinen 
15393f2cde2SChris Wilson 	vma->ops = &vm->vma_ops;
154b42fe9caSJoonas Lahtinen 	vma->obj = obj;
155b42fe9caSJoonas Lahtinen 	vma->size = obj->base.size;
156f51455d4SChris Wilson 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
157b42fe9caSJoonas Lahtinen 
158c3b14760SMatthew Auld 	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
159155ab883SChris Wilson 
16009480072SChris Wilson 	/* Declare ourselves safe for use inside shrinkers */
16109480072SChris Wilson 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
16209480072SChris Wilson 		fs_reclaim_acquire(GFP_KERNEL);
16309480072SChris Wilson 		might_lock(&vma->active.mutex);
16409480072SChris Wilson 		fs_reclaim_release(GFP_KERNEL);
16509480072SChris Wilson 	}
16609480072SChris Wilson 
167155ab883SChris Wilson 	INIT_LIST_HEAD(&vma->closed_link);
168e1a7ab4fSThomas Hellström 	INIT_LIST_HEAD(&vma->obj_link);
169e1a7ab4fSThomas Hellström 	RB_CLEAR_NODE(&vma->obj_node);
170155ab883SChris Wilson 
1713bb6a442SNiranjana Vishwanathapura 	if (view && view->type != I915_GTT_VIEW_NORMAL) {
1723bb6a442SNiranjana Vishwanathapura 		vma->gtt_view = *view;
1733bb6a442SNiranjana Vishwanathapura 		if (view->type == I915_GTT_VIEW_PARTIAL) {
17407e19ea4SChris Wilson 			GEM_BUG_ON(range_overflows_t(u64,
1758bab1193SChris Wilson 						     view->partial.offset,
1768bab1193SChris Wilson 						     view->partial.size,
17707e19ea4SChris Wilson 						     obj->base.size >> PAGE_SHIFT));
1788bab1193SChris Wilson 			vma->size = view->partial.size;
179b42fe9caSJoonas Lahtinen 			vma->size <<= PAGE_SHIFT;
1807e7367d3SChris Wilson 			GEM_BUG_ON(vma->size > obj->base.size);
1813bb6a442SNiranjana Vishwanathapura 		} else if (view->type == I915_GTT_VIEW_ROTATED) {
1828bab1193SChris Wilson 			vma->size = intel_rotation_info_size(&view->rotated);
183b42fe9caSJoonas Lahtinen 			vma->size <<= PAGE_SHIFT;
1843bb6a442SNiranjana Vishwanathapura 		} else if (view->type == I915_GTT_VIEW_REMAPPED) {
1851a74fc0bSVille Syrjälä 			vma->size = intel_remapped_info_size(&view->remapped);
1861a74fc0bSVille Syrjälä 			vma->size <<= PAGE_SHIFT;
187b42fe9caSJoonas Lahtinen 		}
188b42fe9caSJoonas Lahtinen 	}
189b42fe9caSJoonas Lahtinen 
1901fcdaa7eSChris Wilson 	if (unlikely(vma->size > vm->total))
1911fcdaa7eSChris Wilson 		goto err_vma;
1921fcdaa7eSChris Wilson 
193b00ddb27SChris Wilson 	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
194b00ddb27SChris Wilson 
195e1a7ab4fSThomas Hellström 	err = mutex_lock_interruptible(&vm->mutex);
196e1a7ab4fSThomas Hellström 	if (err) {
197e1a7ab4fSThomas Hellström 		pos = ERR_PTR(err);
198e1a7ab4fSThomas Hellström 		goto err_vma;
199e1a7ab4fSThomas Hellström 	}
200cb593e5dSChris Wilson 
201e1a7ab4fSThomas Hellström 	vma->vm = vm;
202e1a7ab4fSThomas Hellström 	list_add_tail(&vma->vm_link, &vm->unbound_list);
203e1a7ab4fSThomas Hellström 
204e1a7ab4fSThomas Hellström 	spin_lock(&obj->vma.lock);
205b42fe9caSJoonas Lahtinen 	if (i915_is_ggtt(vm)) {
2061fcdaa7eSChris Wilson 		if (unlikely(overflows_type(vma->size, u32)))
207cb593e5dSChris Wilson 			goto err_unlock;
2081fcdaa7eSChris Wilson 
20991d4e0aaSChris Wilson 		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
210944397f0SChris Wilson 						      i915_gem_object_get_tiling(obj),
211944397f0SChris Wilson 						      i915_gem_object_get_stride(obj));
2121fcdaa7eSChris Wilson 		if (unlikely(vma->fence_size < vma->size || /* overflow */
2131fcdaa7eSChris Wilson 			     vma->fence_size > vm->total))
214cb593e5dSChris Wilson 			goto err_unlock;
2151fcdaa7eSChris Wilson 
216f51455d4SChris Wilson 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
217944397f0SChris Wilson 
21891d4e0aaSChris Wilson 		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
219944397f0SChris Wilson 								i915_gem_object_get_tiling(obj),
220944397f0SChris Wilson 								i915_gem_object_get_stride(obj));
221944397f0SChris Wilson 		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
222944397f0SChris Wilson 
2234dd2fbbfSChris Wilson 		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
224528cbd17SChris Wilson 	}
225528cbd17SChris Wilson 
226528cbd17SChris Wilson 	rb = NULL;
227528cbd17SChris Wilson 	p = &obj->vma.tree.rb_node;
228528cbd17SChris Wilson 	while (*p) {
229528cbd17SChris Wilson 		long cmp;
230528cbd17SChris Wilson 
231528cbd17SChris Wilson 		rb = *p;
232528cbd17SChris Wilson 		pos = rb_entry(rb, struct i915_vma, obj_node);
233528cbd17SChris Wilson 
234528cbd17SChris Wilson 		/*
235528cbd17SChris Wilson 		 * If the view already exists in the tree, another thread
236528cbd17SChris Wilson 		 * already created a matching vma, so return the older instance
237528cbd17SChris Wilson 		 * and dispose of ours.
238528cbd17SChris Wilson 		 */
239528cbd17SChris Wilson 		cmp = i915_vma_compare(pos, vm, view);
240528cbd17SChris Wilson 		if (cmp < 0)
241528cbd17SChris Wilson 			p = &rb->rb_right;
24203fca66bSChris Wilson 		else if (cmp > 0)
243528cbd17SChris Wilson 			p = &rb->rb_left;
24403fca66bSChris Wilson 		else
24503fca66bSChris Wilson 			goto err_unlock;
246528cbd17SChris Wilson 	}
247528cbd17SChris Wilson 	rb_link_node(&vma->obj_node, rb, p);
248528cbd17SChris Wilson 	rb_insert_color(&vma->obj_node, &obj->vma.tree);
249528cbd17SChris Wilson 
250528cbd17SChris Wilson 	if (i915_vma_is_ggtt(vma))
251e2189dd0SChris Wilson 		/*
252e2189dd0SChris Wilson 		 * We put the GGTT vma at the start of the vma-list, followed
253e2189dd0SChris Wilson 		 * by the ppGTT vma. This allows us to break early when
254e2189dd0SChris Wilson 		 * iterating over only the GGTT vma for an object, see
255e2189dd0SChris Wilson 		 * for_each_ggtt_vma()
256e2189dd0SChris Wilson 		 */
257528cbd17SChris Wilson 		list_add(&vma->obj_link, &obj->vma.list);
258b42fe9caSJoonas Lahtinen 	else
259528cbd17SChris Wilson 		list_add_tail(&vma->obj_link, &obj->vma.list);
260528cbd17SChris Wilson 
261528cbd17SChris Wilson 	spin_unlock(&obj->vma.lock);
262e1a7ab4fSThomas Hellström 	mutex_unlock(&vm->mutex);
26309d7e46bSChris Wilson 
264b42fe9caSJoonas Lahtinen 	return vma;
2651fcdaa7eSChris Wilson 
266cb593e5dSChris Wilson err_unlock:
267cb593e5dSChris Wilson 	spin_unlock(&obj->vma.lock);
268e1a7ab4fSThomas Hellström 	list_del_init(&vma->vm_link);
269e1a7ab4fSThomas Hellström 	mutex_unlock(&vm->mutex);
2701fcdaa7eSChris Wilson err_vma:
27113f1bfd3SChris Wilson 	i915_vma_free(vma);
27203fca66bSChris Wilson 	return pos;
273b42fe9caSJoonas Lahtinen }
274b42fe9caSJoonas Lahtinen 
275481a6f7dSChris Wilson static struct i915_vma *
276547be6a4SLiam Howlett i915_vma_lookup(struct drm_i915_gem_object *obj,
277718659a6SChris Wilson 	   struct i915_address_space *vm,
2783bb6a442SNiranjana Vishwanathapura 	   const struct i915_gtt_view *view)
279718659a6SChris Wilson {
280718659a6SChris Wilson 	struct rb_node *rb;
281718659a6SChris Wilson 
282528cbd17SChris Wilson 	rb = obj->vma.tree.rb_node;
283718659a6SChris Wilson 	while (rb) {
284718659a6SChris Wilson 		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
285718659a6SChris Wilson 		long cmp;
286718659a6SChris Wilson 
287718659a6SChris Wilson 		cmp = i915_vma_compare(vma, vm, view);
288718659a6SChris Wilson 		if (cmp == 0)
289718659a6SChris Wilson 			return vma;
290718659a6SChris Wilson 
291718659a6SChris Wilson 		if (cmp < 0)
292718659a6SChris Wilson 			rb = rb->rb_right;
293718659a6SChris Wilson 		else
294718659a6SChris Wilson 			rb = rb->rb_left;
295718659a6SChris Wilson 	}
296718659a6SChris Wilson 
297718659a6SChris Wilson 	return NULL;
298718659a6SChris Wilson }
299718659a6SChris Wilson 
300718659a6SChris Wilson /**
301718659a6SChris Wilson  * i915_vma_instance - return the singleton instance of the VMA
302718659a6SChris Wilson  * @obj: parent &struct drm_i915_gem_object to be mapped
303718659a6SChris Wilson  * @vm: address space in which the mapping is located
304718659a6SChris Wilson  * @view: additional mapping requirements
305718659a6SChris Wilson  *
306718659a6SChris Wilson  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
307718659a6SChris Wilson  * the same @view characteristics. If a match is not found, one is created.
308718659a6SChris Wilson  * Once created, the VMA is kept until either the object is freed, or the
309718659a6SChris Wilson  * address space is closed.
310718659a6SChris Wilson  *
311718659a6SChris Wilson  * Returns the vma, or an error pointer.
312718659a6SChris Wilson  */
313718659a6SChris Wilson struct i915_vma *
314718659a6SChris Wilson i915_vma_instance(struct drm_i915_gem_object *obj,
315718659a6SChris Wilson 		  struct i915_address_space *vm,
3163bb6a442SNiranjana Vishwanathapura 		  const struct i915_gtt_view *view)
317718659a6SChris Wilson {
318718659a6SChris Wilson 	struct i915_vma *vma;
319718659a6SChris Wilson 
32074862d4cSImre Deak 	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
321e1a7ab4fSThomas Hellström 	GEM_BUG_ON(!kref_read(&vm->ref));
322718659a6SChris Wilson 
323528cbd17SChris Wilson 	spin_lock(&obj->vma.lock);
324547be6a4SLiam Howlett 	vma = i915_vma_lookup(obj, vm, view);
325528cbd17SChris Wilson 	spin_unlock(&obj->vma.lock);
326528cbd17SChris Wilson 
327528cbd17SChris Wilson 	/* vma_create() will resolve the race if another creates the vma */
328528cbd17SChris Wilson 	if (unlikely(!vma))
329a01cb37aSChris Wilson 		vma = vma_create(obj, vm, view);
330718659a6SChris Wilson 
3314ea9527cSChris Wilson 	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
332718659a6SChris Wilson 	return vma;
333718659a6SChris Wilson }
334718659a6SChris Wilson 
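/*
 * Usage sketch (illustrative): a typical caller resolves the singleton VMA
 * for an object in a given address space and then pins it. The GGTT pointer,
 * locking and cleanup are assumed caller context; the pin flags shown are
 * only an example.
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_unpin(vma);
 */
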
3352850748eSChris Wilson struct i915_vma_work {
3362850748eSChris Wilson 	struct dma_fence_work base;
337cd0452aaSChris Wilson 	struct i915_address_space *vm;
338cd0452aaSChris Wilson 	struct i915_vm_pt_stash stash;
3392f6b90daSThomas Hellström 	struct i915_vma_resource *vma_res;
340bfe53be2SMatthew Auld 	struct drm_i915_gem_object *obj;
341e3793468SChris Wilson 	struct i915_sw_dma_fence_cb cb;
3429275277dSFei Yang 	unsigned int pat_index;
3432850748eSChris Wilson 	unsigned int flags;
3442850748eSChris Wilson };
3452850748eSChris Wilson 
346dc194184SJason Ekstrand static void __vma_bind(struct dma_fence_work *work)
3472850748eSChris Wilson {
3482850748eSChris Wilson 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
3492f6b90daSThomas Hellström 	struct i915_vma_resource *vma_res = vw->vma_res;
3502850748eSChris Wilson 
351bfe53be2SMatthew Auld 	/*
352bfe53be2SMatthew Auld 	 * We are about to bind the object, which must mean we have already
353bfe53be2SMatthew Auld 	 * signaled the work to potentially clear/move the pages underneath. If
354bfe53be2SMatthew Auld 	 * something went wrong at that stage then the object should have
355bfe53be2SMatthew Auld 	 * unknown_state set, in which case we need to skip the bind.
356bfe53be2SMatthew Auld 	 */
357bfe53be2SMatthew Auld 	if (i915_gem_object_has_unknown_state(vw->obj))
358bfe53be2SMatthew Auld 		return;
359bfe53be2SMatthew Auld 
3602f6b90daSThomas Hellström 	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
3619275277dSFei Yang 			       vma_res, vw->pat_index, vw->flags);
3622850748eSChris Wilson }
3632850748eSChris Wilson 
36454d7195fSChris Wilson static void __vma_release(struct dma_fence_work *work)
36554d7195fSChris Wilson {
36654d7195fSChris Wilson 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
36754d7195fSChris Wilson 
368bfe53be2SMatthew Auld 	if (vw->obj)
369bfe53be2SMatthew Auld 		i915_gem_object_put(vw->obj);
370cd0452aaSChris Wilson 
371cd0452aaSChris Wilson 	i915_vm_free_pt_stash(vw->vm, &vw->stash);
3722f6b90daSThomas Hellström 	if (vw->vma_res)
3732f6b90daSThomas Hellström 		i915_vma_resource_put(vw->vma_res);
37454d7195fSChris Wilson }
37554d7195fSChris Wilson 
3762850748eSChris Wilson static const struct dma_fence_work_ops bind_ops = {
3772850748eSChris Wilson 	.name = "bind",
3782850748eSChris Wilson 	.work = __vma_bind,
37954d7195fSChris Wilson 	.release = __vma_release,
3802850748eSChris Wilson };
3812850748eSChris Wilson 
3822850748eSChris Wilson struct i915_vma_work *i915_vma_work(void)
3832850748eSChris Wilson {
3842850748eSChris Wilson 	struct i915_vma_work *vw;
3852850748eSChris Wilson 
3862850748eSChris Wilson 	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
3872850748eSChris Wilson 	if (!vw)
3882850748eSChris Wilson 		return NULL;
3892850748eSChris Wilson 
3902850748eSChris Wilson 	dma_fence_work_init(&vw->base, &bind_ops);
3912850748eSChris Wilson 	vw->base.dma.error = -EAGAIN; /* disable the worker by default */
3922850748eSChris Wilson 
3932850748eSChris Wilson 	return vw;
3942850748eSChris Wilson }
3952850748eSChris Wilson 
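/*
 * Usage sketch (illustrative): i915_vma_work() returns a dma_fence_work
 * whose error is preset to -EAGAIN, so the worker stays disabled until the
 * caller enables it. In the pinning path the worker is handed to
 * i915_vma_bind() and then committed; error handling and the surrounding
 * vm->mutex/ww locking are elided here.
 *
 *	struct i915_vma_work *work;
 *
 *	work = i915_vma_work();
 *	if (!work)
 *		return -ENOMEM;
 *	...
 *	err = i915_vma_bind(vma, obj->pat_index, bind_flags, work, vma_res);
 *	...
 *	dma_fence_work_commit_imm(&work->base);
 */
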
396e3793468SChris Wilson int i915_vma_wait_for_bind(struct i915_vma *vma)
397e3793468SChris Wilson {
398e3793468SChris Wilson 	int err = 0;
399e3793468SChris Wilson 
400e3793468SChris Wilson 	if (rcu_access_pointer(vma->active.excl.fence)) {
401e3793468SChris Wilson 		struct dma_fence *fence;
402e3793468SChris Wilson 
403e3793468SChris Wilson 		rcu_read_lock();
404e3793468SChris Wilson 		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
405e3793468SChris Wilson 		rcu_read_unlock();
406e3793468SChris Wilson 		if (fence) {
407fbd4cf3bSMatthew Auld 			err = dma_fence_wait(fence, true);
408e3793468SChris Wilson 			dma_fence_put(fence);
409e3793468SChris Wilson 		}
410e3793468SChris Wilson 	}
411e3793468SChris Wilson 
412e3793468SChris Wilson 	return err;
413e3793468SChris Wilson }
414e3793468SChris Wilson 
415f6c466b8SMaarten Lankhorst #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
416f6c466b8SMaarten Lankhorst static int i915_vma_verify_bind_complete(struct i915_vma *vma)
417f6c466b8SMaarten Lankhorst {
418ad5c99e0SMaarten Lankhorst 	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
419ad5c99e0SMaarten Lankhorst 	int err;
420f6c466b8SMaarten Lankhorst 
421f6c466b8SMaarten Lankhorst 	if (!fence)
422f6c466b8SMaarten Lankhorst 		return 0;
423f6c466b8SMaarten Lankhorst 
424f6c466b8SMaarten Lankhorst 	if (dma_fence_is_signaled(fence))
425f6c466b8SMaarten Lankhorst 		err = fence->error;
426f6c466b8SMaarten Lankhorst 	else
427f6c466b8SMaarten Lankhorst 		err = -EBUSY;
428f6c466b8SMaarten Lankhorst 
429f6c466b8SMaarten Lankhorst 	dma_fence_put(fence);
430f6c466b8SMaarten Lankhorst 
431f6c466b8SMaarten Lankhorst 	return err;
432f6c466b8SMaarten Lankhorst }
433f6c466b8SMaarten Lankhorst #else
434f6c466b8SMaarten Lankhorst #define i915_vma_verify_bind_complete(_vma) 0
435f6c466b8SMaarten Lankhorst #endif
436f6c466b8SMaarten Lankhorst 
43739a2bd34SThomas Hellström I915_SELFTEST_EXPORT void
43839a2bd34SThomas Hellström i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
43939a2bd34SThomas Hellström 				struct i915_vma *vma)
44039a2bd34SThomas Hellström {
44139a2bd34SThomas Hellström 	struct drm_i915_gem_object *obj = vma->obj;
44239a2bd34SThomas Hellström 
4432f6b90daSThomas Hellström 	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
44460dc43d1SThomas Hellström 			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
44560dc43d1SThomas Hellström 			       i915_gem_object_is_lmem(obj), obj->mm.region,
4468e4ee5e8SChris Wilson 			       vma->ops, vma->private, __i915_vma_offset(vma),
44761102251SChris Wilson 			       __i915_vma_size(vma), vma->size, vma->guard);
44839a2bd34SThomas Hellström }
44939a2bd34SThomas Hellström 
450718659a6SChris Wilson /**
451b42fe9caSJoonas Lahtinen  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
452b42fe9caSJoonas Lahtinen  * @vma: VMA to map
4539275277dSFei Yang  * @pat_index: PAT index to set in PTE
454b42fe9caSJoonas Lahtinen  * @flags: flags like global or local mapping
4552850748eSChris Wilson  * @work: preallocated worker for allocating and binding the PTE
456e1a4bbb6SThomas Hellström  * @vma_res: pointer to a preallocated vma resource. The resource is either
457e1a4bbb6SThomas Hellström  * consumed or freed.
458b42fe9caSJoonas Lahtinen  *
459b42fe9caSJoonas Lahtinen  * DMA addresses are taken from the scatter-gather table of this object (or of
460b42fe9caSJoonas Lahtinen  * this VMA in case of non-default GGTT views) and PTE entries set up.
461b42fe9caSJoonas Lahtinen  * Note that DMA addresses are also the only part of the SG table we care about.
462b42fe9caSJoonas Lahtinen  */
4632850748eSChris Wilson int i915_vma_bind(struct i915_vma *vma,
4649275277dSFei Yang 		  unsigned int pat_index,
4652850748eSChris Wilson 		  u32 flags,
466e1a4bbb6SThomas Hellström 		  struct i915_vma_work *work,
467e1a4bbb6SThomas Hellström 		  struct i915_vma_resource *vma_res)
468b42fe9caSJoonas Lahtinen {
469b42fe9caSJoonas Lahtinen 	u32 bind_flags;
470b42fe9caSJoonas Lahtinen 	u32 vma_flags;
4712f6b90daSThomas Hellström 	int ret;
472b42fe9caSJoonas Lahtinen 
473c2ea703dSThomas Hellström 	lockdep_assert_held(&vma->vm->mutex);
474aa149431SChris Wilson 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
4758e4ee5e8SChris Wilson 	GEM_BUG_ON(vma->size > i915_vma_size(vma));
476aa149431SChris Wilson 
477bbb8a9d7STvrtko Ursulin 	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
478aa149431SChris Wilson 					      vma->node.size,
479e1a4bbb6SThomas Hellström 					      vma->vm->total))) {
4802f6b90daSThomas Hellström 		i915_vma_resource_free(vma_res);
481aa149431SChris Wilson 		return -ENODEV;
482e1a4bbb6SThomas Hellström 	}
483aa149431SChris Wilson 
484e1a4bbb6SThomas Hellström 	if (GEM_DEBUG_WARN_ON(!flags)) {
4852f6b90daSThomas Hellström 		i915_vma_resource_free(vma_res);
486b42fe9caSJoonas Lahtinen 		return -EINVAL;
487e1a4bbb6SThomas Hellström 	}
488b42fe9caSJoonas Lahtinen 
4892850748eSChris Wilson 	bind_flags = flags;
4902850748eSChris Wilson 	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
491b42fe9caSJoonas Lahtinen 
4924dd2fbbfSChris Wilson 	vma_flags = atomic_read(&vma->flags);
4934dd2fbbfSChris Wilson 	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
494aedbe0a1SChris Wilson 
495b42fe9caSJoonas Lahtinen 	bind_flags &= ~vma_flags;
496e1a4bbb6SThomas Hellström 	if (bind_flags == 0) {
4972f6b90daSThomas Hellström 		i915_vma_resource_free(vma_res);
498b42fe9caSJoonas Lahtinen 		return 0;
499e1a4bbb6SThomas Hellström 	}
500b42fe9caSJoonas Lahtinen 
5010b4d1f0eSMaarten Lankhorst 	GEM_BUG_ON(!atomic_read(&vma->pages_count));
502fa3f46afSMatthew Auld 
5032f6b90daSThomas Hellström 	/* Wait for or await async unbinds touching our range */
5042f6b90daSThomas Hellström 	if (work && bind_flags & vma->vm->bind_async_flags)
5052f6b90daSThomas Hellström 		ret = i915_vma_resource_bind_dep_await(vma->vm,
5062f6b90daSThomas Hellström 						       &work->base.chain,
5072f6b90daSThomas Hellström 						       vma->node.start,
5082f6b90daSThomas Hellström 						       vma->node.size,
5092f6b90daSThomas Hellström 						       true,
5102f6b90daSThomas Hellström 						       GFP_NOWAIT |
5112f6b90daSThomas Hellström 						       __GFP_RETRY_MAYFAIL |
5122f6b90daSThomas Hellström 						       __GFP_NOWARN);
5132f6b90daSThomas Hellström 	else
5142f6b90daSThomas Hellström 		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
5152f6b90daSThomas Hellström 						      vma->node.size, true);
5162f6b90daSThomas Hellström 	if (ret) {
5172f6b90daSThomas Hellström 		i915_vma_resource_free(vma_res);
5182f6b90daSThomas Hellström 		return ret;
5192f6b90daSThomas Hellström 	}
5202f6b90daSThomas Hellström 
521e1a4bbb6SThomas Hellström 	if (vma->resource || !vma_res) {
522e1a4bbb6SThomas Hellström 		/* Rebinding with an additional I915_VMA_*_BIND */
523e1a4bbb6SThomas Hellström 		GEM_WARN_ON(!vma_flags);
5248f4f9a3bSThomas Hellström 		i915_vma_resource_free(vma_res);
525e1a4bbb6SThomas Hellström 	} else {
52639a2bd34SThomas Hellström 		i915_vma_resource_init_from_vma(vma_res, vma);
527e1a4bbb6SThomas Hellström 		vma->resource = vma_res;
528e1a4bbb6SThomas Hellström 	}
5296146e6daSDaniele Ceraolo Spurio 	trace_i915_vma_bind(vma, bind_flags);
530aedbe0a1SChris Wilson 	if (work && bind_flags & vma->vm->bind_async_flags) {
531e3793468SChris Wilson 		struct dma_fence *prev;
532e3793468SChris Wilson 
5332f6b90daSThomas Hellström 		work->vma_res = i915_vma_resource_get(vma->resource);
5349275277dSFei Yang 		work->pat_index = pat_index;
53512b07256SChris Wilson 		work->flags = bind_flags;
5362850748eSChris Wilson 
5372850748eSChris Wilson 		/*
5382850748eSChris Wilson 		 * Note we only want to chain up to the migration fence on
5392850748eSChris Wilson 		 * the pages (not the object itself). As we don't track that,
5402850748eSChris Wilson 		 * yet, we have to use the exclusive fence instead.
5412850748eSChris Wilson 		 *
5422850748eSChris Wilson 		 * Also note that we do not want to track the async vma as
5432850748eSChris Wilson 		 * part of the obj->resv->excl_fence as it only affects
5442850748eSChris Wilson 		 * execution and not content or object's backing store lifetime.
5452850748eSChris Wilson 		 */
546e3793468SChris Wilson 		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
54730ca04e1SChris Wilson 		if (prev) {
548e3793468SChris Wilson 			__i915_sw_fence_await_dma_fence(&work->base.chain,
549e3793468SChris Wilson 							prev,
550e3793468SChris Wilson 							&work->cb);
55130ca04e1SChris Wilson 			dma_fence_put(prev);
55230ca04e1SChris Wilson 		}
553e3793468SChris Wilson 
5542850748eSChris Wilson 		work->base.dma.error = 0; /* enable the queue_work() */
555bfe53be2SMatthew Auld 		work->obj = i915_gem_object_get(vma->obj);
5562850748eSChris Wilson 	} else {
557f6c466b8SMaarten Lankhorst 		ret = i915_gem_object_wait_moving_fence(vma->obj, true);
5582f6b90daSThomas Hellström 		if (ret) {
5592f6b90daSThomas Hellström 			i915_vma_resource_free(vma->resource);
5602f6b90daSThomas Hellström 			vma->resource = NULL;
5612f6b90daSThomas Hellström 
562f6c466b8SMaarten Lankhorst 			return ret;
563f6c466b8SMaarten Lankhorst 		}
5649275277dSFei Yang 		vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
56539a2bd34SThomas Hellström 				   bind_flags);
5662850748eSChris Wilson 	}
567b42fe9caSJoonas Lahtinen 
5684dd2fbbfSChris Wilson 	atomic_or(bind_flags, &vma->flags);
569b42fe9caSJoonas Lahtinen 	return 0;
570b42fe9caSJoonas Lahtinen }
571b42fe9caSJoonas Lahtinen 
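/*
 * Worked example (illustrative): if the vma is already bound with
 * I915_VMA_LOCAL_BIND and a caller requests GLOBAL | LOCAL, the code above
 * masks off the already-present LOCAL bit, so only the GGTT (GLOBAL)
 * binding is performed and vma->flags ends up with both bits set.
 */
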
572b42fe9caSJoonas Lahtinen void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
573b42fe9caSJoonas Lahtinen {
574b42fe9caSJoonas Lahtinen 	void __iomem *ptr;
575b4563f59SChris Wilson 	int err;
576b42fe9caSJoonas Lahtinen 
57730b9d1b3SMatthew Auld 	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
578ea3ce08cSKefeng Wang 		return IOMEM_ERR_PTR(-EINVAL);
57930b9d1b3SMatthew Auld 
580b42fe9caSJoonas Lahtinen 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
5814dd2fbbfSChris Wilson 	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
582f6c466b8SMaarten Lankhorst 	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
583b42fe9caSJoonas Lahtinen 
5842850748eSChris Wilson 	ptr = READ_ONCE(vma->iomap);
585b42fe9caSJoonas Lahtinen 	if (ptr == NULL) {
5864bc91dbdSAnusha Srivatsa 		/*
5874bc91dbdSAnusha Srivatsa 		 * TODO: consider just using i915_gem_object_pin_map() for lmem
5884bc91dbdSAnusha Srivatsa 		 * instead, which already supports mapping non-contiguous chunks
5894bc91dbdSAnusha Srivatsa 		 * of pages, that way we can also drop the
5904bc91dbdSAnusha Srivatsa 		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
5914bc91dbdSAnusha Srivatsa 		 */
592d976521aSCQ Tang 		if (i915_gem_object_is_lmem(vma->obj)) {
5934bc91dbdSAnusha Srivatsa 			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
5944bc91dbdSAnusha Srivatsa 							  vma->obj->base.size);
595d976521aSCQ Tang 		} else if (i915_vma_is_map_and_fenceable(vma)) {
59673ebd503SMatthew Auld 			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
5978e4ee5e8SChris Wilson 						i915_vma_offset(vma),
5988e4ee5e8SChris Wilson 						i915_vma_size(vma));
599d976521aSCQ Tang 		} else {
600d976521aSCQ Tang 			ptr = (void __iomem *)
601d976521aSCQ Tang 				i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
602d976521aSCQ Tang 			if (IS_ERR(ptr)) {
603d976521aSCQ Tang 				err = PTR_ERR(ptr);
604d976521aSCQ Tang 				goto err;
605d976521aSCQ Tang 			}
606d976521aSCQ Tang 			ptr = page_pack_bits(ptr, 1);
607d976521aSCQ Tang 		}
608d976521aSCQ Tang 
609b4563f59SChris Wilson 		if (ptr == NULL) {
610b4563f59SChris Wilson 			err = -ENOMEM;
611b4563f59SChris Wilson 			goto err;
612b4563f59SChris Wilson 		}
613b42fe9caSJoonas Lahtinen 
6142850748eSChris Wilson 		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
615d976521aSCQ Tang 			if (page_unmask_bits(ptr))
616d976521aSCQ Tang 				__i915_gem_object_release_map(vma->obj);
617d976521aSCQ Tang 			else
6182850748eSChris Wilson 				io_mapping_unmap(ptr);
6192850748eSChris Wilson 			ptr = vma->iomap;
6202850748eSChris Wilson 		}
621b42fe9caSJoonas Lahtinen 	}
622b42fe9caSJoonas Lahtinen 
623b42fe9caSJoonas Lahtinen 	__i915_vma_pin(vma);
624b4563f59SChris Wilson 
6253bd40735SChris Wilson 	err = i915_vma_pin_fence(vma);
626b4563f59SChris Wilson 	if (err)
627b4563f59SChris Wilson 		goto err_unpin;
628b4563f59SChris Wilson 
6297125397bSChris Wilson 	i915_vma_set_ggtt_write(vma);
630a5972e93SChris Wilson 
631a5972e93SChris Wilson 	/* NB Access through the GTT requires the device to be awake. */
632d976521aSCQ Tang 	return page_mask_bits(ptr);
633b4563f59SChris Wilson 
634b4563f59SChris Wilson err_unpin:
635b4563f59SChris Wilson 	__i915_vma_unpin(vma);
636b4563f59SChris Wilson err:
637ea3ce08cSKefeng Wang 	return IOMEM_ERR_PTR(err);
638b4563f59SChris Wilson }
639b4563f59SChris Wilson 
6407125397bSChris Wilson void i915_vma_flush_writes(struct i915_vma *vma)
6417125397bSChris Wilson {
6422850748eSChris Wilson 	if (i915_vma_unset_ggtt_write(vma))
643a1c8a09eSTvrtko Ursulin 		intel_gt_flush_ggtt_writes(vma->vm->gt);
6447125397bSChris Wilson }
6457125397bSChris Wilson 
646b4563f59SChris Wilson void i915_vma_unpin_iomap(struct i915_vma *vma)
647b4563f59SChris Wilson {
648b4563f59SChris Wilson 	GEM_BUG_ON(vma->iomap == NULL);
649b4563f59SChris Wilson 
650d976521aSCQ Tang 	/* XXX We keep the mapping until __i915_vma_unbind()/evict() */
651d976521aSCQ Tang 
6527125397bSChris Wilson 	i915_vma_flush_writes(vma);
6537125397bSChris Wilson 
654b4563f59SChris Wilson 	i915_vma_unpin_fence(vma);
655b4563f59SChris Wilson 	i915_vma_unpin(vma);
656b42fe9caSJoonas Lahtinen }
657b42fe9caSJoonas Lahtinen 
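/*
 * Usage sketch (illustrative): mapping a GGTT-bound vma through the
 * aperture, writing to it and dropping the mapping again. The vma is
 * assumed to already be pinned with a GLOBAL binding; error handling is
 * minimal.
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	memset_io(ptr, 0, vma->size);
 *	i915_vma_unpin_iomap(vma);
 */
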
6586a2f59e4SChris Wilson void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
659b42fe9caSJoonas Lahtinen {
660b42fe9caSJoonas Lahtinen 	struct i915_vma *vma;
661b42fe9caSJoonas Lahtinen 	struct drm_i915_gem_object *obj;
662b42fe9caSJoonas Lahtinen 
663b42fe9caSJoonas Lahtinen 	vma = fetch_and_zero(p_vma);
664b42fe9caSJoonas Lahtinen 	if (!vma)
665b42fe9caSJoonas Lahtinen 		return;
666b42fe9caSJoonas Lahtinen 
667b42fe9caSJoonas Lahtinen 	obj = vma->obj;
668520ea7c5SChris Wilson 	GEM_BUG_ON(!obj);
669b42fe9caSJoonas Lahtinen 
670b42fe9caSJoonas Lahtinen 	i915_vma_unpin(vma);
671b42fe9caSJoonas Lahtinen 
6726a2f59e4SChris Wilson 	if (flags & I915_VMA_RELEASE_MAP)
6736a2f59e4SChris Wilson 		i915_gem_object_unpin_map(obj);
6746a2f59e4SChris Wilson 
675c017cf6bSChris Wilson 	i915_gem_object_put(obj);
676b42fe9caSJoonas Lahtinen }
677b42fe9caSJoonas Lahtinen 
678782a3e9eSChris Wilson bool i915_vma_misplaced(const struct i915_vma *vma,
679782a3e9eSChris Wilson 			u64 size, u64 alignment, u64 flags)
680b42fe9caSJoonas Lahtinen {
681b42fe9caSJoonas Lahtinen 	if (!drm_mm_node_allocated(&vma->node))
682b42fe9caSJoonas Lahtinen 		return false;
683b42fe9caSJoonas Lahtinen 
6842850748eSChris Wilson 	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
6852850748eSChris Wilson 		return true;
6862850748eSChris Wilson 
6878e4ee5e8SChris Wilson 	if (i915_vma_size(vma) < size)
688b42fe9caSJoonas Lahtinen 		return true;
689b42fe9caSJoonas Lahtinen 
690f51455d4SChris Wilson 	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
6918e4ee5e8SChris Wilson 	if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
692b42fe9caSJoonas Lahtinen 		return true;
693b42fe9caSJoonas Lahtinen 
694b42fe9caSJoonas Lahtinen 	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
695b42fe9caSJoonas Lahtinen 		return true;
696b42fe9caSJoonas Lahtinen 
697b42fe9caSJoonas Lahtinen 	if (flags & PIN_OFFSET_BIAS &&
6988e4ee5e8SChris Wilson 	    i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
699b42fe9caSJoonas Lahtinen 		return true;
700b42fe9caSJoonas Lahtinen 
701b42fe9caSJoonas Lahtinen 	if (flags & PIN_OFFSET_FIXED &&
7028e4ee5e8SChris Wilson 	    i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
703b42fe9caSJoonas Lahtinen 		return true;
704b42fe9caSJoonas Lahtinen 
70561102251SChris Wilson 	if (flags & PIN_OFFSET_GUARD &&
70661102251SChris Wilson 	    vma->guard < (flags & PIN_OFFSET_MASK))
70761102251SChris Wilson 		return true;
70861102251SChris Wilson 
709b42fe9caSJoonas Lahtinen 	return false;
710b42fe9caSJoonas Lahtinen }
711b42fe9caSJoonas Lahtinen 
712b42fe9caSJoonas Lahtinen void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
713b42fe9caSJoonas Lahtinen {
714b42fe9caSJoonas Lahtinen 	bool mappable, fenceable;
715b42fe9caSJoonas Lahtinen 
716944397f0SChris Wilson 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
717944397f0SChris Wilson 	GEM_BUG_ON(!vma->fence_size);
718b42fe9caSJoonas Lahtinen 
7198e4ee5e8SChris Wilson 	fenceable = (i915_vma_size(vma) >= vma->fence_size &&
7208e4ee5e8SChris Wilson 		     IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
721944397f0SChris Wilson 
7228e4ee5e8SChris Wilson 	mappable = i915_ggtt_offset(vma) + vma->fence_size <=
7238e4ee5e8SChris Wilson 		   i915_vm_to_ggtt(vma->vm)->mappable_end;
724944397f0SChris Wilson 
725944397f0SChris Wilson 	if (mappable && fenceable)
7264dd2fbbfSChris Wilson 		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
727b42fe9caSJoonas Lahtinen 	else
7284dd2fbbfSChris Wilson 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
729b42fe9caSJoonas Lahtinen }
730b42fe9caSJoonas Lahtinen 
73133dd8899SMatthew Auld bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
7327d1d9aeaSChris Wilson {
7337d1d9aeaSChris Wilson 	struct drm_mm_node *node = &vma->node;
734b42fe9caSJoonas Lahtinen 	struct drm_mm_node *other;
735b42fe9caSJoonas Lahtinen 
736b42fe9caSJoonas Lahtinen 	/*
737b42fe9caSJoonas Lahtinen 	 * On some machines we have to be careful when putting differing types
738b42fe9caSJoonas Lahtinen 	 * of snoopable memory together to avoid the prefetcher crossing memory
739b42fe9caSJoonas Lahtinen 	 * domains and dying. During vm initialisation, we decide whether or not
740b42fe9caSJoonas Lahtinen 	 * these constraints apply and set the drm_mm.color_adjust
741b42fe9caSJoonas Lahtinen 	 * appropriately.
742b42fe9caSJoonas Lahtinen 	 */
74333dd8899SMatthew Auld 	if (!i915_vm_has_cache_coloring(vma->vm))
744b42fe9caSJoonas Lahtinen 		return true;
745b42fe9caSJoonas Lahtinen 
7467d1d9aeaSChris Wilson 	/* Only valid to be called on an already inserted vma */
7477d1d9aeaSChris Wilson 	GEM_BUG_ON(!drm_mm_node_allocated(node));
7487d1d9aeaSChris Wilson 	GEM_BUG_ON(list_empty(&node->node_list));
749b42fe9caSJoonas Lahtinen 
7507d1d9aeaSChris Wilson 	other = list_prev_entry(node, node_list);
75133dd8899SMatthew Auld 	if (i915_node_color_differs(other, color) &&
7521e0a96e5SMatthew Auld 	    !drm_mm_hole_follows(other))
753b42fe9caSJoonas Lahtinen 		return false;
754b42fe9caSJoonas Lahtinen 
7557d1d9aeaSChris Wilson 	other = list_next_entry(node, node_list);
75633dd8899SMatthew Auld 	if (i915_node_color_differs(other, color) &&
7571e0a96e5SMatthew Auld 	    !drm_mm_hole_follows(node))
758b42fe9caSJoonas Lahtinen 		return false;
759b42fe9caSJoonas Lahtinen 
760b42fe9caSJoonas Lahtinen 	return true;
761b42fe9caSJoonas Lahtinen }
762b42fe9caSJoonas Lahtinen 
763b42fe9caSJoonas Lahtinen /**
764b42fe9caSJoonas Lahtinen  * i915_vma_insert - finds a slot for the vma in its address space
765b42fe9caSJoonas Lahtinen  * @vma: the vma
766a915450eSLee Jones  * @ww: An optional struct i915_gem_ww_ctx
767b42fe9caSJoonas Lahtinen  * @size: requested size in bytes (can be larger than the VMA)
768b42fe9caSJoonas Lahtinen  * @alignment: required alignment
769b42fe9caSJoonas Lahtinen  * @flags: mask of PIN_* flags to use
770b42fe9caSJoonas Lahtinen  *
771b42fe9caSJoonas Lahtinen  * First we try to allocate some free space that meets the requirements for
772b42fe9caSJoonas Lahtinen  * the VMA. Failing that, if the flags permit, it will evict an old VMA,
773b42fe9caSJoonas Lahtinen  * preferably the oldest idle entry to make room for the new VMA.
774b42fe9caSJoonas Lahtinen  *
775b42fe9caSJoonas Lahtinen  * Returns:
776b42fe9caSJoonas Lahtinen  * 0 on success, negative error code otherwise.
777b42fe9caSJoonas Lahtinen  */
778b42fe9caSJoonas Lahtinen static int
7797e00897bSMaarten Lankhorst i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
7807e00897bSMaarten Lankhorst 		u64 size, u64 alignment, u64 flags)
781b42fe9caSJoonas Lahtinen {
78261102251SChris Wilson 	unsigned long color, guard;
783b42fe9caSJoonas Lahtinen 	u64 start, end;
784b42fe9caSJoonas Lahtinen 	int ret;
785b42fe9caSJoonas Lahtinen 
7864dd2fbbfSChris Wilson 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
787b42fe9caSJoonas Lahtinen 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
78861102251SChris Wilson 	GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);
789b42fe9caSJoonas Lahtinen 
790b42fe9caSJoonas Lahtinen 	size = max(size, vma->size);
79161102251SChris Wilson 	alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
792944397f0SChris Wilson 	if (flags & PIN_MAPPABLE) {
793944397f0SChris Wilson 		size = max_t(typeof(size), size, vma->fence_size);
794944397f0SChris Wilson 		alignment = max_t(typeof(alignment),
795944397f0SChris Wilson 				  alignment, vma->fence_alignment);
796944397f0SChris Wilson 	}
797b42fe9caSJoonas Lahtinen 
798f51455d4SChris Wilson 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
799f51455d4SChris Wilson 	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
800f51455d4SChris Wilson 	GEM_BUG_ON(!is_power_of_2(alignment));
801f51455d4SChris Wilson 
80261102251SChris Wilson 	guard = vma->guard; /* retain guard across rebinds */
80361102251SChris Wilson 	if (flags & PIN_OFFSET_GUARD) {
80461102251SChris Wilson 		GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
80561102251SChris Wilson 		guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
80661102251SChris Wilson 	}
80761102251SChris Wilson 	/*
80861102251SChris Wilson 	 * As we align the node upon insertion, but the hardware gets
80961102251SChris Wilson 	 * node.start + guard, the easiest way to make that work is
81061102251SChris Wilson 	 * to make the guard a multiple of the alignment size.
81161102251SChris Wilson 	 */
81261102251SChris Wilson 	guard = ALIGN(guard, alignment);
81361102251SChris Wilson 
814b42fe9caSJoonas Lahtinen 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
815f51455d4SChris Wilson 	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
816b42fe9caSJoonas Lahtinen 
817b42fe9caSJoonas Lahtinen 	end = vma->vm->total;
818b42fe9caSJoonas Lahtinen 	if (flags & PIN_MAPPABLE)
8192850748eSChris Wilson 		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
820b42fe9caSJoonas Lahtinen 	if (flags & PIN_ZONE_4G)
821f51455d4SChris Wilson 		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
822f51455d4SChris Wilson 	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
823b42fe9caSJoonas Lahtinen 
82487bd701eSMatthew Auld 	alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
82587bd701eSMatthew Auld 
82661102251SChris Wilson 	/*
82761102251SChris Wilson 	 * If binding the object/GGTT view requires more space than the entire
828b42fe9caSJoonas Lahtinen 	 * aperture has, reject it early before evicting everything in a vain
829b42fe9caSJoonas Lahtinen 	 * attempt to find space.
830b42fe9caSJoonas Lahtinen 	 */
83161102251SChris Wilson 	if (size > end - 2 * guard) {
832b364f3cdSUwe Kleine-König 		drm_dbg(vma->obj->base.dev,
833a10234fdSTvrtko Ursulin 			"Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
834a10234fdSTvrtko Ursulin 			size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
8352889caa9SChris Wilson 		return -ENOSPC;
836b42fe9caSJoonas Lahtinen 	}
837b42fe9caSJoonas Lahtinen 
83833dd8899SMatthew Auld 	color = 0;
83987bd701eSMatthew Auld 
840e6e1a304SMaarten Lankhorst 	if (i915_vm_has_cache_coloring(vma->vm))
8419275277dSFei Yang 		color = vma->obj->pat_index;
842fa3f46afSMatthew Auld 
843b42fe9caSJoonas Lahtinen 	if (flags & PIN_OFFSET_FIXED) {
844b42fe9caSJoonas Lahtinen 		u64 offset = flags & PIN_OFFSET_MASK;
845f51455d4SChris Wilson 		if (!IS_ALIGNED(offset, alignment) ||
8462850748eSChris Wilson 		    range_overflows(offset, size, end))
8472850748eSChris Wilson 			return -EINVAL;
84861102251SChris Wilson 		/*
84961102251SChris Wilson 		 * The caller is unaware of the guard added by others and
85061102251SChris Wilson 		 * requests that the offset of the start of its buffer
85161102251SChris Wilson 		 * be fixed, which may not be the same as the position
85261102251SChris Wilson 		 * of the vma->node due to the guard pages.
85361102251SChris Wilson 		 */
85461102251SChris Wilson 		if (offset < guard || offset + size > end - guard)
85561102251SChris Wilson 			return -ENOSPC;
856b42fe9caSJoonas Lahtinen 
8577e00897bSMaarten Lankhorst 		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
85861102251SChris Wilson 					   size + 2 * guard,
85961102251SChris Wilson 					   offset - guard,
86061102251SChris Wilson 					   color, flags);
861b42fe9caSJoonas Lahtinen 		if (ret)
8622850748eSChris Wilson 			return ret;
863b42fe9caSJoonas Lahtinen 	} else {
86461102251SChris Wilson 		size += 2 * guard;
8657464284bSMatthew Auld 		/*
8667464284bSMatthew Auld 		 * We only support huge gtt pages through the 48b PPGTT,
8677464284bSMatthew Auld 		 * however we also don't want to force any alignment for
8687464284bSMatthew Auld 		 * objects which need to be tightly packed into the low 32bits.
8697464284bSMatthew Auld 		 *
8707464284bSMatthew Auld 		 * Note that we assume that GGTT are limited to 4GiB for the
8717464284bSMatthew Auld 		 * foreseeable future. See also i915_ggtt_offset().
8727464284bSMatthew Auld 		 */
8737464284bSMatthew Auld 		if (upper_32_bits(end - 1) &&
8748133a6daSMatthew Auld 		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
8758133a6daSMatthew Auld 		    !HAS_64K_PAGES(vma->vm->i915)) {
876855822beSMatthew Auld 			/*
877855822beSMatthew Auld 			 * We can't mix 64K and 4K PTEs in the same page-table
878855822beSMatthew Auld 			 * (2M block), and so to avoid the ugliness and
879855822beSMatthew Auld 			 * complexity of coloring we opt for just aligning 64K
880855822beSMatthew Auld 			 * objects to 2M.
881855822beSMatthew Auld 			 */
8827464284bSMatthew Auld 			u64 page_alignment =
883855822beSMatthew Auld 				rounddown_pow_of_two(vma->page_sizes.sg |
884855822beSMatthew Auld 						     I915_GTT_PAGE_SIZE_2M);
8857464284bSMatthew Auld 
886bef27bdbSChris Wilson 			/*
887bef27bdbSChris Wilson 			 * Check we don't expand for the limited Global GTT
888bef27bdbSChris Wilson 			 * (mappable aperture is even more precious!). This
889bef27bdbSChris Wilson 			 * also checks that we exclude the aliasing-ppgtt.
890bef27bdbSChris Wilson 			 */
891bef27bdbSChris Wilson 			GEM_BUG_ON(i915_vma_is_ggtt(vma));
892bef27bdbSChris Wilson 
8937464284bSMatthew Auld 			alignment = max(alignment, page_alignment);
894855822beSMatthew Auld 
895855822beSMatthew Auld 			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
896855822beSMatthew Auld 				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
8977464284bSMatthew Auld 		}
8987464284bSMatthew Auld 
8997e00897bSMaarten Lankhorst 		ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
90033dd8899SMatthew Auld 					  size, alignment, color,
901e007b19dSChris Wilson 					  start, end, flags);
902e007b19dSChris Wilson 		if (ret)
9032850748eSChris Wilson 			return ret;
904b42fe9caSJoonas Lahtinen 
905b42fe9caSJoonas Lahtinen 		GEM_BUG_ON(vma->node.start < start);
906b42fe9caSJoonas Lahtinen 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
907b42fe9caSJoonas Lahtinen 	}
90844a0ec0dSChris Wilson 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
90933dd8899SMatthew Auld 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
910b42fe9caSJoonas Lahtinen 
911e1a7ab4fSThomas Hellström 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
91261102251SChris Wilson 	vma->guard = guard;
913b42fe9caSJoonas Lahtinen 
914b42fe9caSJoonas Lahtinen 	return 0;
915b42fe9caSJoonas Lahtinen }
916b42fe9caSJoonas Lahtinen 
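/*
 * Worked example (illustrative): with PIN_OFFSET_GUARD requesting 4K of
 * guard and an effective alignment of 64K, the guard above is rounded up
 * to 64K, so the node reserved from the drm_mm covers size + 2 * 64K and
 * the object's pages start at node.start + 64K.
 */
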
91731c7effaSChris Wilson static void
918dde01d94SChris Wilson i915_vma_detach(struct i915_vma *vma)
91931c7effaSChris Wilson {
92031c7effaSChris Wilson 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
9214dd2fbbfSChris Wilson 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
92231c7effaSChris Wilson 
923520ea7c5SChris Wilson 	/*
924520ea7c5SChris Wilson 	 * And finally now the object is completely decoupled from this
925520ea7c5SChris Wilson 	 * vma, we can drop its hold on the backing storage and allow
926520ea7c5SChris Wilson 	 * it to be reaped by the shrinker.
92731c7effaSChris Wilson 	 */
928e1a7ab4fSThomas Hellström 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
929520ea7c5SChris Wilson }
93031c7effaSChris Wilson 
9312850748eSChris Wilson static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
932b42fe9caSJoonas Lahtinen {
9332850748eSChris Wilson 	unsigned int bound;
934b42fe9caSJoonas Lahtinen 
9352850748eSChris Wilson 	bound = atomic_read(&vma->flags);
936b5cfe6f7SMaarten Lankhorst 
937b5cfe6f7SMaarten Lankhorst 	if (flags & PIN_VALIDATE) {
938b5cfe6f7SMaarten Lankhorst 		flags &= I915_VMA_BIND_MASK;
939b5cfe6f7SMaarten Lankhorst 
940b5cfe6f7SMaarten Lankhorst 		return (flags & bound) == flags;
941b5cfe6f7SMaarten Lankhorst 	}
942b5cfe6f7SMaarten Lankhorst 
943b5cfe6f7SMaarten Lankhorst 	/* with the lock mandatory for unbind, we don't race here */
944b5cfe6f7SMaarten Lankhorst 	flags &= I915_VMA_BIND_MASK;
9452850748eSChris Wilson 	do {
9462850748eSChris Wilson 		if (unlikely(flags & ~bound))
9472850748eSChris Wilson 			return false;
948b42fe9caSJoonas Lahtinen 
9492850748eSChris Wilson 		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
9502850748eSChris Wilson 			return false;
9512850748eSChris Wilson 
9522850748eSChris Wilson 		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
9532850748eSChris Wilson 	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
9542850748eSChris Wilson 
9552850748eSChris Wilson 	return true;
9562850748eSChris Wilson }
957b42fe9caSJoonas Lahtinen 
9580b4d1f0eSMaarten Lankhorst static struct scatterlist *
9590b4d1f0eSMaarten Lankhorst rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
9600b4d1f0eSMaarten Lankhorst 	     unsigned int width, unsigned int height,
9610b4d1f0eSMaarten Lankhorst 	     unsigned int src_stride, unsigned int dst_stride,
9620b4d1f0eSMaarten Lankhorst 	     struct sg_table *st, struct scatterlist *sg)
9632850748eSChris Wilson {
9640b4d1f0eSMaarten Lankhorst 	unsigned int column, row;
965f47e6306SChris Wilson 	pgoff_t src_idx;
9660b4d1f0eSMaarten Lankhorst 
9670b4d1f0eSMaarten Lankhorst 	for (column = 0; column < width; column++) {
9680b4d1f0eSMaarten Lankhorst 		unsigned int left;
9690b4d1f0eSMaarten Lankhorst 
9700b4d1f0eSMaarten Lankhorst 		src_idx = src_stride * (height - 1) + column + offset;
9710b4d1f0eSMaarten Lankhorst 		for (row = 0; row < height; row++) {
9720b4d1f0eSMaarten Lankhorst 			st->nents++;
9730b4d1f0eSMaarten Lankhorst 			/*
9740b4d1f0eSMaarten Lankhorst 			 * We don't need the pages, but need to initialize
9750b4d1f0eSMaarten Lankhorst 			 * the entries so the sg list can be happily traversed.
9760b4d1f0eSMaarten Lankhorst 			 * The only thing we need are DMA addresses.
9770b4d1f0eSMaarten Lankhorst 			 * The only thing we need is the DMA addresses.
9780b4d1f0eSMaarten Lankhorst 			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
9790b4d1f0eSMaarten Lankhorst 			sg_dma_address(sg) =
9800b4d1f0eSMaarten Lankhorst 				i915_gem_object_get_dma_address(obj, src_idx);
9810b4d1f0eSMaarten Lankhorst 			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
9820b4d1f0eSMaarten Lankhorst 			sg = sg_next(sg);
9830b4d1f0eSMaarten Lankhorst 			src_idx -= src_stride;
9840b4d1f0eSMaarten Lankhorst 		}
9850b4d1f0eSMaarten Lankhorst 
9860b4d1f0eSMaarten Lankhorst 		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
9870b4d1f0eSMaarten Lankhorst 
9880b4d1f0eSMaarten Lankhorst 		if (!left)
9890b4d1f0eSMaarten Lankhorst 			continue;
9900b4d1f0eSMaarten Lankhorst 
9910b4d1f0eSMaarten Lankhorst 		st->nents++;
9920b4d1f0eSMaarten Lankhorst 
9930b4d1f0eSMaarten Lankhorst 		/*
9940b4d1f0eSMaarten Lankhorst 		 * The DE ignores the PTEs for the padding tiles, the sg entry
9950b4d1f0eSMaarten Lankhorst 		 * here is just a convenience to indicate how many padding PTEs
9960b4d1f0eSMaarten Lankhorst 		 * to insert at this spot.
9970b4d1f0eSMaarten Lankhorst 		 */
9980b4d1f0eSMaarten Lankhorst 		sg_set_page(sg, NULL, left, 0);
9990b4d1f0eSMaarten Lankhorst 		sg_dma_address(sg) = 0;
10000b4d1f0eSMaarten Lankhorst 		sg_dma_len(sg) = left;
10010b4d1f0eSMaarten Lankhorst 		sg = sg_next(sg);
10020b4d1f0eSMaarten Lankhorst 	}
10030b4d1f0eSMaarten Lankhorst 
10040b4d1f0eSMaarten Lankhorst 	return sg;
10050b4d1f0eSMaarten Lankhorst }
10060b4d1f0eSMaarten Lankhorst 
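/*
 * Worked example (illustrative): for a plane with width=2, height=3,
 * src_stride=4, dst_stride=4 and offset=0, column 0 walks source pages
 * 8, 4, 0 (bottom to top) followed by one padding entry of
 * (dst_stride - height) = 1 page, and column 1 walks pages 9, 5, 1
 * followed by another such padding entry, yielding the 90-degree
 * rotated, column-major layout expected by the display engine.
 */
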
10070b4d1f0eSMaarten Lankhorst static noinline struct sg_table *
10080b4d1f0eSMaarten Lankhorst intel_rotate_pages(struct intel_rotation_info *rot_info,
10090b4d1f0eSMaarten Lankhorst 		   struct drm_i915_gem_object *obj)
10100b4d1f0eSMaarten Lankhorst {
10110b4d1f0eSMaarten Lankhorst 	unsigned int size = intel_rotation_info_size(rot_info);
10120b4d1f0eSMaarten Lankhorst 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
10130b4d1f0eSMaarten Lankhorst 	struct sg_table *st;
10140b4d1f0eSMaarten Lankhorst 	struct scatterlist *sg;
10150b4d1f0eSMaarten Lankhorst 	int ret = -ENOMEM;
10160b4d1f0eSMaarten Lankhorst 	int i;
10170b4d1f0eSMaarten Lankhorst 
10180b4d1f0eSMaarten Lankhorst 	/* Allocate target SG list. */
10190b4d1f0eSMaarten Lankhorst 	st = kmalloc(sizeof(*st), GFP_KERNEL);
10200b4d1f0eSMaarten Lankhorst 	if (!st)
10210b4d1f0eSMaarten Lankhorst 		goto err_st_alloc;
10220b4d1f0eSMaarten Lankhorst 
10230b4d1f0eSMaarten Lankhorst 	ret = sg_alloc_table(st, size, GFP_KERNEL);
10240b4d1f0eSMaarten Lankhorst 	if (ret)
10250b4d1f0eSMaarten Lankhorst 		goto err_sg_alloc;
10260b4d1f0eSMaarten Lankhorst 
10270b4d1f0eSMaarten Lankhorst 	st->nents = 0;
10280b4d1f0eSMaarten Lankhorst 	sg = st->sgl;
10290b4d1f0eSMaarten Lankhorst 
10300b4d1f0eSMaarten Lankhorst 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
10310b4d1f0eSMaarten Lankhorst 		sg = rotate_pages(obj, rot_info->plane[i].offset,
10320b4d1f0eSMaarten Lankhorst 				  rot_info->plane[i].width, rot_info->plane[i].height,
10330b4d1f0eSMaarten Lankhorst 				  rot_info->plane[i].src_stride,
10340b4d1f0eSMaarten Lankhorst 				  rot_info->plane[i].dst_stride,
10350b4d1f0eSMaarten Lankhorst 				  st, sg);
10360b4d1f0eSMaarten Lankhorst 
10370b4d1f0eSMaarten Lankhorst 	return st;
10380b4d1f0eSMaarten Lankhorst 
10390b4d1f0eSMaarten Lankhorst err_sg_alloc:
10400b4d1f0eSMaarten Lankhorst 	kfree(st);
10410b4d1f0eSMaarten Lankhorst err_st_alloc:
10420b4d1f0eSMaarten Lankhorst 
10430b4d1f0eSMaarten Lankhorst 	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
10440b4d1f0eSMaarten Lankhorst 		obj->base.size, rot_info->plane[0].width,
10450b4d1f0eSMaarten Lankhorst 		rot_info->plane[0].height, size);
10460b4d1f0eSMaarten Lankhorst 
10470b4d1f0eSMaarten Lankhorst 	return ERR_PTR(ret);
10480b4d1f0eSMaarten Lankhorst }
10490b4d1f0eSMaarten Lankhorst 
10500b4d1f0eSMaarten Lankhorst static struct scatterlist *
add_padding_pages(unsigned int count,struct sg_table * st,struct scatterlist * sg)105130424ebaSRodrigo Vivi add_padding_pages(unsigned int count,
10520b4d1f0eSMaarten Lankhorst 		  struct sg_table *st, struct scatterlist *sg)
10530b4d1f0eSMaarten Lankhorst {
10540b4d1f0eSMaarten Lankhorst 	st->nents++;
10550b4d1f0eSMaarten Lankhorst 
10560b4d1f0eSMaarten Lankhorst 	/*
10570b4d1f0eSMaarten Lankhorst 	 * The DE ignores the PTEs for the padding tiles, the sg entry
10580b4d1f0eSMaarten Lankhorst 	 * here is just a convenience to indicate how many padding PTEs
10590b4d1f0eSMaarten Lankhorst 	 * to insert at this spot.
10600b4d1f0eSMaarten Lankhorst 	 */
106130424ebaSRodrigo Vivi 	sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
10620b4d1f0eSMaarten Lankhorst 	sg_dma_address(sg) = 0;
106330424ebaSRodrigo Vivi 	sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
10640b4d1f0eSMaarten Lankhorst 	sg = sg_next(sg);
106530424ebaSRodrigo Vivi 
106630424ebaSRodrigo Vivi 	return sg;
10670b4d1f0eSMaarten Lankhorst }
10680b4d1f0eSMaarten Lankhorst 
106930424ebaSRodrigo Vivi static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object * obj,unsigned long offset,unsigned int alignment_pad,unsigned int width,unsigned int height,unsigned int src_stride,unsigned int dst_stride,struct sg_table * st,struct scatterlist * sg,unsigned int * gtt_offset)107030424ebaSRodrigo Vivi remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
1071f47e6306SChris Wilson 			      unsigned long offset, unsigned int alignment_pad,
107230424ebaSRodrigo Vivi 			      unsigned int width, unsigned int height,
107330424ebaSRodrigo Vivi 			      unsigned int src_stride, unsigned int dst_stride,
107430424ebaSRodrigo Vivi 			      struct sg_table *st, struct scatterlist *sg,
107530424ebaSRodrigo Vivi 			      unsigned int *gtt_offset)
107630424ebaSRodrigo Vivi {
107730424ebaSRodrigo Vivi 	unsigned int row;
107830424ebaSRodrigo Vivi 
107930424ebaSRodrigo Vivi 	if (!width || !height)
108030424ebaSRodrigo Vivi 		return sg;
108130424ebaSRodrigo Vivi 
108230424ebaSRodrigo Vivi 	if (alignment_pad)
108330424ebaSRodrigo Vivi 		sg = add_padding_pages(alignment_pad, st, sg);
108430424ebaSRodrigo Vivi 
10850b4d1f0eSMaarten Lankhorst 	for (row = 0; row < height; row++) {
10860b4d1f0eSMaarten Lankhorst 		unsigned int left = width * I915_GTT_PAGE_SIZE;
10870b4d1f0eSMaarten Lankhorst 
10880b4d1f0eSMaarten Lankhorst 		while (left) {
10890b4d1f0eSMaarten Lankhorst 			dma_addr_t addr;
10900b4d1f0eSMaarten Lankhorst 			unsigned int length;
10910b4d1f0eSMaarten Lankhorst 
10920b4d1f0eSMaarten Lankhorst 			/*
10930b4d1f0eSMaarten Lankhorst 			 * We don't need the pages, but need to initialize
10940b4d1f0eSMaarten Lankhorst 			 * the entries so the sg list can be happily traversed.
10950b4d1f0eSMaarten Lankhorst 			 * All we need are the DMA addresses.
10960b4d1f0eSMaarten Lankhorst 			 */
10970b4d1f0eSMaarten Lankhorst 
10980b4d1f0eSMaarten Lankhorst 			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
10990b4d1f0eSMaarten Lankhorst 
11000b4d1f0eSMaarten Lankhorst 			length = min(left, length);
11010b4d1f0eSMaarten Lankhorst 
11020b4d1f0eSMaarten Lankhorst 			st->nents++;
11030b4d1f0eSMaarten Lankhorst 
11040b4d1f0eSMaarten Lankhorst 			sg_set_page(sg, NULL, length, 0);
11050b4d1f0eSMaarten Lankhorst 			sg_dma_address(sg) = addr;
11060b4d1f0eSMaarten Lankhorst 			sg_dma_len(sg) = length;
11070b4d1f0eSMaarten Lankhorst 			sg = sg_next(sg);
11080b4d1f0eSMaarten Lankhorst 
11090b4d1f0eSMaarten Lankhorst 			offset += length / I915_GTT_PAGE_SIZE;
11100b4d1f0eSMaarten Lankhorst 			left -= length;
11110b4d1f0eSMaarten Lankhorst 		}
11120b4d1f0eSMaarten Lankhorst 
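		/* Skip the source pages between the end of this row and the start of the next. */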
11130b4d1f0eSMaarten Lankhorst 		offset += src_stride - width;
11140b4d1f0eSMaarten Lankhorst 
11150b4d1f0eSMaarten Lankhorst 		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
11160b4d1f0eSMaarten Lankhorst 
11170b4d1f0eSMaarten Lankhorst 		if (!left)
11180b4d1f0eSMaarten Lankhorst 			continue;
11190b4d1f0eSMaarten Lankhorst 
112030424ebaSRodrigo Vivi 		sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
11210b4d1f0eSMaarten Lankhorst 	}
11220b4d1f0eSMaarten Lankhorst 
112330424ebaSRodrigo Vivi 	*gtt_offset += alignment_pad + dst_stride * height;
112430424ebaSRodrigo Vivi 
112530424ebaSRodrigo Vivi 	return sg;
112630424ebaSRodrigo Vivi }
112730424ebaSRodrigo Vivi 
112830424ebaSRodrigo Vivi static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object * obj,pgoff_t obj_offset,unsigned int count,struct sg_table * st,struct scatterlist * sg)112930424ebaSRodrigo Vivi remap_contiguous_pages(struct drm_i915_gem_object *obj,
1130f47e6306SChris Wilson 		       pgoff_t obj_offset,
113130424ebaSRodrigo Vivi 		       unsigned int count,
113230424ebaSRodrigo Vivi 		       struct sg_table *st, struct scatterlist *sg)
113330424ebaSRodrigo Vivi {
113430424ebaSRodrigo Vivi 	struct scatterlist *iter;
113530424ebaSRodrigo Vivi 	unsigned int offset;
113630424ebaSRodrigo Vivi 
113730424ebaSRodrigo Vivi 	iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
113830424ebaSRodrigo Vivi 	GEM_BUG_ON(!iter);
113930424ebaSRodrigo Vivi 
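	/*
	 * Copy DMA ranges from the object's sg list, starting at obj_offset
	 * (in pages) and splitting across destination entries until count
	 * pages have been described.
	 */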
114030424ebaSRodrigo Vivi 	do {
114130424ebaSRodrigo Vivi 		unsigned int len;
114230424ebaSRodrigo Vivi 
114330424ebaSRodrigo Vivi 		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
114430424ebaSRodrigo Vivi 			  count << PAGE_SHIFT);
114530424ebaSRodrigo Vivi 		sg_set_page(sg, NULL, len, 0);
114630424ebaSRodrigo Vivi 		sg_dma_address(sg) =
114730424ebaSRodrigo Vivi 			sg_dma_address(iter) + (offset << PAGE_SHIFT);
114830424ebaSRodrigo Vivi 		sg_dma_len(sg) = len;
114930424ebaSRodrigo Vivi 
115030424ebaSRodrigo Vivi 		st->nents++;
115130424ebaSRodrigo Vivi 		count -= len >> PAGE_SHIFT;
115230424ebaSRodrigo Vivi 		if (count == 0)
115330424ebaSRodrigo Vivi 			return sg;
115430424ebaSRodrigo Vivi 
115530424ebaSRodrigo Vivi 		sg = __sg_next(sg);
115630424ebaSRodrigo Vivi 		iter = __sg_next(iter);
115730424ebaSRodrigo Vivi 		offset = 0;
115830424ebaSRodrigo Vivi 	} while (1);
115930424ebaSRodrigo Vivi }
116030424ebaSRodrigo Vivi 
116130424ebaSRodrigo Vivi static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object * obj,pgoff_t obj_offset,unsigned int alignment_pad,unsigned int size,struct sg_table * st,struct scatterlist * sg,unsigned int * gtt_offset)116230424ebaSRodrigo Vivi remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
1163f47e6306SChris Wilson 			       pgoff_t obj_offset, unsigned int alignment_pad,
116430424ebaSRodrigo Vivi 			       unsigned int size,
116530424ebaSRodrigo Vivi 			       struct sg_table *st, struct scatterlist *sg,
116630424ebaSRodrigo Vivi 			       unsigned int *gtt_offset)
116730424ebaSRodrigo Vivi {
116830424ebaSRodrigo Vivi 	if (!size)
116930424ebaSRodrigo Vivi 		return sg;
117030424ebaSRodrigo Vivi 
117130424ebaSRodrigo Vivi 	if (alignment_pad)
117230424ebaSRodrigo Vivi 		sg = add_padding_pages(alignment_pad, st, sg);
117330424ebaSRodrigo Vivi 
117430424ebaSRodrigo Vivi 	sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
117530424ebaSRodrigo Vivi 	sg = sg_next(sg);
117630424ebaSRodrigo Vivi 
117730424ebaSRodrigo Vivi 	*gtt_offset += alignment_pad + size;
117830424ebaSRodrigo Vivi 
117930424ebaSRodrigo Vivi 	return sg;
118030424ebaSRodrigo Vivi }
118130424ebaSRodrigo Vivi 
118230424ebaSRodrigo Vivi static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info * rem_info,struct drm_i915_gem_object * obj,int color_plane,struct sg_table * st,struct scatterlist * sg,unsigned int * gtt_offset)118330424ebaSRodrigo Vivi remap_color_plane_pages(const struct intel_remapped_info *rem_info,
118430424ebaSRodrigo Vivi 			struct drm_i915_gem_object *obj,
118530424ebaSRodrigo Vivi 			int color_plane,
118630424ebaSRodrigo Vivi 			struct sg_table *st, struct scatterlist *sg,
118730424ebaSRodrigo Vivi 			unsigned int *gtt_offset)
118830424ebaSRodrigo Vivi {
118930424ebaSRodrigo Vivi 	unsigned int alignment_pad = 0;
119030424ebaSRodrigo Vivi 
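	/*
	 * Pad the running GTT offset up to the requested plane alignment;
	 * alignment_pad is the number of filler pages inserted before this
	 * color plane starts.
	 */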
119130424ebaSRodrigo Vivi 	if (rem_info->plane_alignment)
119230424ebaSRodrigo Vivi 		alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
119330424ebaSRodrigo Vivi 
119430424ebaSRodrigo Vivi 	if (rem_info->plane[color_plane].linear)
119530424ebaSRodrigo Vivi 		sg = remap_linear_color_plane_pages(obj,
119630424ebaSRodrigo Vivi 						    rem_info->plane[color_plane].offset,
119730424ebaSRodrigo Vivi 						    alignment_pad,
119830424ebaSRodrigo Vivi 						    rem_info->plane[color_plane].size,
119930424ebaSRodrigo Vivi 						    st, sg,
120030424ebaSRodrigo Vivi 						    gtt_offset);
120130424ebaSRodrigo Vivi 
120230424ebaSRodrigo Vivi 	else
120330424ebaSRodrigo Vivi 		sg = remap_tiled_color_plane_pages(obj,
120430424ebaSRodrigo Vivi 						   rem_info->plane[color_plane].offset,
120530424ebaSRodrigo Vivi 						   alignment_pad,
120630424ebaSRodrigo Vivi 						   rem_info->plane[color_plane].width,
120730424ebaSRodrigo Vivi 						   rem_info->plane[color_plane].height,
120830424ebaSRodrigo Vivi 						   rem_info->plane[color_plane].src_stride,
120930424ebaSRodrigo Vivi 						   rem_info->plane[color_plane].dst_stride,
121030424ebaSRodrigo Vivi 						   st, sg,
121130424ebaSRodrigo Vivi 						   gtt_offset);
121230424ebaSRodrigo Vivi 
12130b4d1f0eSMaarten Lankhorst 	return sg;
12140b4d1f0eSMaarten Lankhorst }
12150b4d1f0eSMaarten Lankhorst 
12160b4d1f0eSMaarten Lankhorst static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info * rem_info,struct drm_i915_gem_object * obj)12170b4d1f0eSMaarten Lankhorst intel_remap_pages(struct intel_remapped_info *rem_info,
12180b4d1f0eSMaarten Lankhorst 		  struct drm_i915_gem_object *obj)
12190b4d1f0eSMaarten Lankhorst {
12200b4d1f0eSMaarten Lankhorst 	unsigned int size = intel_remapped_info_size(rem_info);
12210b4d1f0eSMaarten Lankhorst 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
12220b4d1f0eSMaarten Lankhorst 	struct sg_table *st;
12230b4d1f0eSMaarten Lankhorst 	struct scatterlist *sg;
12240b4d1f0eSMaarten Lankhorst 	unsigned int gtt_offset = 0;
12250b4d1f0eSMaarten Lankhorst 	int ret = -ENOMEM;
12260b4d1f0eSMaarten Lankhorst 	int i;
12270b4d1f0eSMaarten Lankhorst 
12280b4d1f0eSMaarten Lankhorst 	/* Allocate target SG list. */
12290b4d1f0eSMaarten Lankhorst 	st = kmalloc(sizeof(*st), GFP_KERNEL);
12300b4d1f0eSMaarten Lankhorst 	if (!st)
12310b4d1f0eSMaarten Lankhorst 		goto err_st_alloc;
12320b4d1f0eSMaarten Lankhorst 
12330b4d1f0eSMaarten Lankhorst 	ret = sg_alloc_table(st, size, GFP_KERNEL);
12340b4d1f0eSMaarten Lankhorst 	if (ret)
12350b4d1f0eSMaarten Lankhorst 		goto err_sg_alloc;
12360b4d1f0eSMaarten Lankhorst 
12370b4d1f0eSMaarten Lankhorst 	st->nents = 0;
12380b4d1f0eSMaarten Lankhorst 	sg = st->sgl;
12390b4d1f0eSMaarten Lankhorst 
124030424ebaSRodrigo Vivi 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
124130424ebaSRodrigo Vivi 		sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
12420b4d1f0eSMaarten Lankhorst 
12430b4d1f0eSMaarten Lankhorst 	i915_sg_trim(st);
12440b4d1f0eSMaarten Lankhorst 
12450b4d1f0eSMaarten Lankhorst 	return st;
12460b4d1f0eSMaarten Lankhorst 
12470b4d1f0eSMaarten Lankhorst err_sg_alloc:
12480b4d1f0eSMaarten Lankhorst 	kfree(st);
12490b4d1f0eSMaarten Lankhorst err_st_alloc:
12500b4d1f0eSMaarten Lankhorst 
12510b4d1f0eSMaarten Lankhorst 	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
12520b4d1f0eSMaarten Lankhorst 		obj->base.size, rem_info->plane[0].width,
12530b4d1f0eSMaarten Lankhorst 		rem_info->plane[0].height, size);
12540b4d1f0eSMaarten Lankhorst 
12550b4d1f0eSMaarten Lankhorst 	return ERR_PTR(ret);
12560b4d1f0eSMaarten Lankhorst }
12570b4d1f0eSMaarten Lankhorst 
12580b4d1f0eSMaarten Lankhorst static noinline struct sg_table *
intel_partial_pages(const struct i915_gtt_view * view,struct drm_i915_gem_object * obj)12593bb6a442SNiranjana Vishwanathapura intel_partial_pages(const struct i915_gtt_view *view,
12600b4d1f0eSMaarten Lankhorst 		    struct drm_i915_gem_object *obj)
12610b4d1f0eSMaarten Lankhorst {
12620b4d1f0eSMaarten Lankhorst 	struct sg_table *st;
126330424ebaSRodrigo Vivi 	struct scatterlist *sg;
12640b4d1f0eSMaarten Lankhorst 	unsigned int count = view->partial.size;
12650b4d1f0eSMaarten Lankhorst 	int ret = -ENOMEM;
12660b4d1f0eSMaarten Lankhorst 
12670b4d1f0eSMaarten Lankhorst 	st = kmalloc(sizeof(*st), GFP_KERNEL);
12680b4d1f0eSMaarten Lankhorst 	if (!st)
12690b4d1f0eSMaarten Lankhorst 		goto err_st_alloc;
12700b4d1f0eSMaarten Lankhorst 
12710b4d1f0eSMaarten Lankhorst 	ret = sg_alloc_table(st, count, GFP_KERNEL);
12720b4d1f0eSMaarten Lankhorst 	if (ret)
12730b4d1f0eSMaarten Lankhorst 		goto err_sg_alloc;
12740b4d1f0eSMaarten Lankhorst 
12750b4d1f0eSMaarten Lankhorst 	st->nents = 0;
12760b4d1f0eSMaarten Lankhorst 
127730424ebaSRodrigo Vivi 	sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
12780b4d1f0eSMaarten Lankhorst 
12790b4d1f0eSMaarten Lankhorst 	sg_mark_end(sg);
12800b4d1f0eSMaarten Lankhorst 	i915_sg_trim(st); /* Drop any unused tail entries. */
12810b4d1f0eSMaarten Lankhorst 
12820b4d1f0eSMaarten Lankhorst 	return st;
12830b4d1f0eSMaarten Lankhorst 
12840b4d1f0eSMaarten Lankhorst err_sg_alloc:
12850b4d1f0eSMaarten Lankhorst 	kfree(st);
12860b4d1f0eSMaarten Lankhorst err_st_alloc:
12870b4d1f0eSMaarten Lankhorst 	return ERR_PTR(ret);
12880b4d1f0eSMaarten Lankhorst }
12890b4d1f0eSMaarten Lankhorst 
12900b4d1f0eSMaarten Lankhorst static int
__i915_vma_get_pages(struct i915_vma * vma)12910b4d1f0eSMaarten Lankhorst __i915_vma_get_pages(struct i915_vma *vma)
12920b4d1f0eSMaarten Lankhorst {
12930b4d1f0eSMaarten Lankhorst 	struct sg_table *pages;
12940b4d1f0eSMaarten Lankhorst 
12950b4d1f0eSMaarten Lankhorst 	/*
12960b4d1f0eSMaarten Lankhorst 	 * The vma->pages are only valid within the lifespan of the borrowed
12970b4d1f0eSMaarten Lankhorst 	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
12980b4d1f0eSMaarten Lankhorst 	 * must be the vma->pages. A simple rule is that vma->pages must only
12990b4d1f0eSMaarten Lankhorst 	 * be accessed when the obj->mm.pages are pinned.
13000b4d1f0eSMaarten Lankhorst 	 */
13010b4d1f0eSMaarten Lankhorst 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
13020b4d1f0eSMaarten Lankhorst 
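	/* Build (or, for the normal view, borrow) the sg list describing this GTT view. */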
13033bb6a442SNiranjana Vishwanathapura 	switch (vma->gtt_view.type) {
13040b4d1f0eSMaarten Lankhorst 	default:
13053bb6a442SNiranjana Vishwanathapura 		GEM_BUG_ON(vma->gtt_view.type);
13060b4d1f0eSMaarten Lankhorst 		fallthrough;
13073bb6a442SNiranjana Vishwanathapura 	case I915_GTT_VIEW_NORMAL:
13080b4d1f0eSMaarten Lankhorst 		pages = vma->obj->mm.pages;
13090b4d1f0eSMaarten Lankhorst 		break;
13100b4d1f0eSMaarten Lankhorst 
13113bb6a442SNiranjana Vishwanathapura 	case I915_GTT_VIEW_ROTATED:
13120b4d1f0eSMaarten Lankhorst 		pages =
13133bb6a442SNiranjana Vishwanathapura 			intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
13140b4d1f0eSMaarten Lankhorst 		break;
13150b4d1f0eSMaarten Lankhorst 
13163bb6a442SNiranjana Vishwanathapura 	case I915_GTT_VIEW_REMAPPED:
13170b4d1f0eSMaarten Lankhorst 		pages =
13183bb6a442SNiranjana Vishwanathapura 			intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
13190b4d1f0eSMaarten Lankhorst 		break;
13200b4d1f0eSMaarten Lankhorst 
13213bb6a442SNiranjana Vishwanathapura 	case I915_GTT_VIEW_PARTIAL:
13223bb6a442SNiranjana Vishwanathapura 		pages = intel_partial_pages(&vma->gtt_view, vma->obj);
13230b4d1f0eSMaarten Lankhorst 		break;
13240b4d1f0eSMaarten Lankhorst 	}
13250b4d1f0eSMaarten Lankhorst 
13260b4d1f0eSMaarten Lankhorst 	if (IS_ERR(pages)) {
13270b4d1f0eSMaarten Lankhorst 		drm_err(&vma->vm->i915->drm,
1328294996a9SMaarten Lankhorst 			"Failed to get pages for VMA view type %u (%ld)!\n",
13293bb6a442SNiranjana Vishwanathapura 			vma->gtt_view.type, PTR_ERR(pages));
1330294996a9SMaarten Lankhorst 		return PTR_ERR(pages);
13310b4d1f0eSMaarten Lankhorst 	}
13320b4d1f0eSMaarten Lankhorst 
13330b4d1f0eSMaarten Lankhorst 	vma->pages = pages;
13340b4d1f0eSMaarten Lankhorst 
1335294996a9SMaarten Lankhorst 	return 0;
13360b4d1f0eSMaarten Lankhorst }
13370b4d1f0eSMaarten Lankhorst 
i915_vma_get_pages(struct i915_vma * vma)13380b4d1f0eSMaarten Lankhorst I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
13390b4d1f0eSMaarten Lankhorst {
13400b4d1f0eSMaarten Lankhorst 	int err;
1341d36caeeaSChris Wilson 
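	/* If the pages are already held, just take another reference. */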
13422850748eSChris Wilson 	if (atomic_add_unless(&vma->pages_count, 1, 0))
1343b42fe9caSJoonas Lahtinen 		return 0;
1344b42fe9caSJoonas Lahtinen 
13452850748eSChris Wilson 	err = i915_gem_object_pin_pages(vma->obj);
13462850748eSChris Wilson 	if (err)
13470f4308d5SThomas Hellström 		return err;
13482850748eSChris Wilson 
13490b4d1f0eSMaarten Lankhorst 	err = __i915_vma_get_pages(vma);
13500f4308d5SThomas Hellström 	if (err)
13510b4d1f0eSMaarten Lankhorst 		goto err_unpin;
13520b4d1f0eSMaarten Lankhorst 
13530b4d1f0eSMaarten Lankhorst 	vma->page_sizes = vma->obj->mm.page_sizes;
13542850748eSChris Wilson 	atomic_inc(&vma->pages_count);
13552850748eSChris Wilson 
13560b4d1f0eSMaarten Lankhorst 	return 0;
13570b4d1f0eSMaarten Lankhorst 
13580b4d1f0eSMaarten Lankhorst err_unpin:
13590f4308d5SThomas Hellström 	__i915_gem_object_unpin_pages(vma->obj);
13602850748eSChris Wilson 
13612850748eSChris Wilson 	return err;
13622850748eSChris Wilson }
13632850748eSChris Wilson 
vma_invalidate_tlb(struct i915_address_space * vm,u32 * tlb)13643d037d99SMauro Carvalho Chehab void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
13655d36acb7SChris Wilson {
1366d6c531abSChris Wilson 	struct intel_gt *gt;
1367d6c531abSChris Wilson 	int id;
1368d6c531abSChris Wilson 
1369d6c531abSChris Wilson 	if (!tlb)
1370d6c531abSChris Wilson 		return;
1371d6c531abSChris Wilson 
13725d36acb7SChris Wilson 	/*
13735d36acb7SChris Wilson 	 * Before we release the pages that were bound by this vma, we
13745d36acb7SChris Wilson 	 * must invalidate all the TLBs that may still have a reference
13755d36acb7SChris Wilson 	 * back to our physical address. It only needs to be done once,
13765d36acb7SChris Wilson 	 * so after updating the PTE to point away from the pages, record
13775d36acb7SChris Wilson 	 * the most recent TLB invalidation seqno, and if we have not yet
13785d36acb7SChris Wilson 	 * flushed the TLBs upon release, perform a full invalidation.
13795d36acb7SChris Wilson 	 */
1380d6c531abSChris Wilson 	for_each_gt(gt, vm->i915, id)
1381d6c531abSChris Wilson 		WRITE_ONCE(tlb[id],
1382f2ac6402SAlan Previn 			   intel_gt_next_invalidate_tlb_full(gt));
13835d36acb7SChris Wilson }
13845d36acb7SChris Wilson 
__vma_put_pages(struct i915_vma * vma,unsigned int count)13852850748eSChris Wilson static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
13862850748eSChris Wilson {
13872850748eSChris Wilson 	/* We allocate under vma_get_pages, so beware the shrinker */
13882850748eSChris Wilson 	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
13890b4d1f0eSMaarten Lankhorst 
13902850748eSChris Wilson 	if (atomic_sub_return(count, &vma->pages_count) == 0) {
1391294996a9SMaarten Lankhorst 		if (vma->pages != vma->obj->mm.pages) {
1392294996a9SMaarten Lankhorst 			sg_free_table(vma->pages);
1393294996a9SMaarten Lankhorst 			kfree(vma->pages);
13940b4d1f0eSMaarten Lankhorst 		}
1395294996a9SMaarten Lankhorst 		vma->pages = NULL;
1396e6e1a304SMaarten Lankhorst 
13972850748eSChris Wilson 		i915_gem_object_unpin_pages(vma->obj);
13982850748eSChris Wilson 	}
13992850748eSChris Wilson }
14002850748eSChris Wilson 
i915_vma_put_pages(struct i915_vma * vma)14010b4d1f0eSMaarten Lankhorst I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
14022850748eSChris Wilson {
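	/* Drop a reference; the final one is released via __vma_put_pages(). */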
14032850748eSChris Wilson 	if (atomic_add_unless(&vma->pages_count, -1, 1))
14042850748eSChris Wilson 		return;
14052850748eSChris Wilson 
14062850748eSChris Wilson 	__vma_put_pages(vma, 1);
14072850748eSChris Wilson }
14082850748eSChris Wilson 
vma_unbind_pages(struct i915_vma * vma)14092850748eSChris Wilson static void vma_unbind_pages(struct i915_vma *vma)
14102850748eSChris Wilson {
14112850748eSChris Wilson 	unsigned int count;
14122850748eSChris Wilson 
14132850748eSChris Wilson 	lockdep_assert_held(&vma->vm->mutex);
14142850748eSChris Wilson 
14152850748eSChris Wilson 	/* The upper portion of pages_count is the number of bindings */
14162850748eSChris Wilson 	count = atomic_read(&vma->pages_count);
14172850748eSChris Wilson 	count >>= I915_VMA_PAGES_BIAS;
14182850748eSChris Wilson 	GEM_BUG_ON(!count);
14192850748eSChris Wilson 
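	/*
	 * Drop the binding references (upper bits) together with the page
	 * reference each binding holds (lower bits).
	 */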
14202850748eSChris Wilson 	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
14212850748eSChris Wilson }
14222850748eSChris Wilson 
i915_vma_pin_ww(struct i915_vma * vma,struct i915_gem_ww_ctx * ww,u64 size,u64 alignment,u64 flags)142347b08693SMaarten Lankhorst int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
142447b08693SMaarten Lankhorst 		    u64 size, u64 alignment, u64 flags)
14252850748eSChris Wilson {
14262850748eSChris Wilson 	struct i915_vma_work *work = NULL;
1427f6c466b8SMaarten Lankhorst 	struct dma_fence *moving = NULL;
1428e1a4bbb6SThomas Hellström 	struct i915_vma_resource *vma_res = NULL;
14295e3eb862SJanusz Krzysztofik 	intel_wakeref_t wakeref;
14302850748eSChris Wilson 	unsigned int bound;
14312850748eSChris Wilson 	int err;
14322850748eSChris Wilson 
14331eef0de1SMaarten Lankhorst 	assert_vma_held(vma);
14340b4d1f0eSMaarten Lankhorst 	GEM_BUG_ON(!ww);
143547b08693SMaarten Lankhorst 
14362850748eSChris Wilson 	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
14372850748eSChris Wilson 	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
14382850748eSChris Wilson 
14392850748eSChris Wilson 	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
14402850748eSChris Wilson 
14412850748eSChris Wilson 	/* First try and grab the pin without rebinding the vma */
1442b5cfe6f7SMaarten Lankhorst 	if (try_qad_pin(vma, flags))
14432850748eSChris Wilson 		return 0;
14442850748eSChris Wilson 
14450b4d1f0eSMaarten Lankhorst 	err = i915_vma_get_pages(vma);
14462850748eSChris Wilson 	if (err)
14472850748eSChris Wilson 		return err;
14482850748eSChris Wilson 
14495e3eb862SJanusz Krzysztofik 	/*
14505e3eb862SJanusz Krzysztofik 	 * In case of a global GTT, we must hold a runtime-pm wakeref
14515e3eb862SJanusz Krzysztofik 	 * while global PTEs are updated.  In other cases, we hold
14525e3eb862SJanusz Krzysztofik 	 * the rpm reference while the VMA is active.  Since runtime
14535e3eb862SJanusz Krzysztofik 	 * resume may require allocations, which are forbidden inside
14545e3eb862SJanusz Krzysztofik 	 * vm->mutex, get the first rpm wakeref outside of the mutex.
14555e3eb862SJanusz Krzysztofik 	 */
145689351925SChris Wilson 	wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
145789351925SChris Wilson 
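	/*
	 * For an asynchronous bind, set up the fence-driven worker and, if
	 * needed, pre-allocate the page-directory stash here, as allocations
	 * are no longer allowed once vm->mutex is taken below.
	 */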
1458833124a0SMatthew Auld 	if (flags & vma->vm->bind_async_flags) {
145926ad4f8bSMaarten Lankhorst 		/* lock VM */
146026ad4f8bSMaarten Lankhorst 		err = i915_vm_lock_objects(vma->vm, ww);
146126ad4f8bSMaarten Lankhorst 		if (err)
146226ad4f8bSMaarten Lankhorst 			goto err_rpm;
146326ad4f8bSMaarten Lankhorst 
14642850748eSChris Wilson 		work = i915_vma_work();
14652850748eSChris Wilson 		if (!work) {
14662850748eSChris Wilson 			err = -ENOMEM;
146789351925SChris Wilson 			goto err_rpm;
14682850748eSChris Wilson 		}
1469cd0452aaSChris Wilson 
1470e1a7ab4fSThomas Hellström 		work->vm = vma->vm;
1471833124a0SMatthew Auld 
14728ec5c000STvrtko Ursulin 		err = i915_gem_object_get_moving_fence(vma->obj, &moving);
14738ec5c000STvrtko Ursulin 		if (err)
14748ec5c000STvrtko Ursulin 			goto err_rpm;
1475cd0452aaSChris Wilson 
1476f6c466b8SMaarten Lankhorst 		dma_fence_work_chain(&work->base, moving);
1477f6c466b8SMaarten Lankhorst 
1478cd0452aaSChris Wilson 		/* Allocate enough page directories to cover the PTEs used */
147989351925SChris Wilson 		if (vma->vm->allocate_va_range) {
1480cef8ce55SMatthew Auld 			err = i915_vm_alloc_pt_stash(vma->vm,
1481cd0452aaSChris Wilson 						     &work->stash,
1482cd0452aaSChris Wilson 						     vma->size);
1483cef8ce55SMatthew Auld 			if (err)
1484cef8ce55SMatthew Auld 				goto err_fence;
14852850748eSChris Wilson 
1486529b9ec8SMatthew Auld 			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
148789351925SChris Wilson 			if (err)
148889351925SChris Wilson 				goto err_fence;
148989351925SChris Wilson 		}
149089351925SChris Wilson 	}
1491c0e60347SChris Wilson 
1492e1a4bbb6SThomas Hellström 	vma_res = i915_vma_resource_alloc();
1493e1a4bbb6SThomas Hellström 	if (IS_ERR(vma_res)) {
1494e1a4bbb6SThomas Hellström 		err = PTR_ERR(vma_res);
1495e1a4bbb6SThomas Hellström 		goto err_fence;
1496e1a4bbb6SThomas Hellström 	}
1497e1a4bbb6SThomas Hellström 
1498d0024911SChris Wilson 	/*
1499d0024911SChris Wilson 	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
1500d0024911SChris Wilson 	 *
1501d0024911SChris Wilson 	 * We conflate the Global GTT with the user's vma when using the
1502d0024911SChris Wilson 	 * aliasing-ppgtt, but it is still vitally important to try and
1503d0024911SChris Wilson 	 * keep the use cases distinct. For example, userptr objects are
1504d0024911SChris Wilson 	 * not allowed inside the Global GTT as that will cause lock
1505d0024911SChris Wilson 	 * inversions when we have to evict them via the mmu_notifier callbacks -
1506d0024911SChris Wilson 	 * but they are allowed to be part of the user ppGTT which can never
1507d0024911SChris Wilson 	 * be mapped. As such we try to give the distinct users of the same
1508d0024911SChris Wilson 	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
1509d0024911SChris Wilson 	 * and i915_ppgtt separate].
1510d0024911SChris Wilson 	 *
1511d0024911SChris Wilson 	 * NB this may cause us to mask real lock inversions -- while the
1512d0024911SChris Wilson 	 * code is safe today, lockdep may not be able to spot future
1513d0024911SChris Wilson 	 * transgressions.
1514d0024911SChris Wilson 	 */
1515d0024911SChris Wilson 	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1516d0024911SChris Wilson 					      !(flags & PIN_GLOBAL));
15172850748eSChris Wilson 	if (err)
1518e1a4bbb6SThomas Hellström 		goto err_vma_res;
15192850748eSChris Wilson 
1520d0024911SChris Wilson 	/* No more allocations allowed now we hold vm->mutex */
1521d0024911SChris Wilson 
152200de702cSChris Wilson 	if (unlikely(i915_vma_is_closed(vma))) {
152300de702cSChris Wilson 		err = -ENOENT;
152400de702cSChris Wilson 		goto err_unlock;
152500de702cSChris Wilson 	}
152600de702cSChris Wilson 
15272850748eSChris Wilson 	bound = atomic_read(&vma->flags);
15282850748eSChris Wilson 	if (unlikely(bound & I915_VMA_ERROR)) {
15292850748eSChris Wilson 		err = -ENOMEM;
15302850748eSChris Wilson 		goto err_unlock;
15312850748eSChris Wilson 	}
15322850748eSChris Wilson 
15332850748eSChris Wilson 	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
15342850748eSChris Wilson 		err = -EAGAIN; /* pins are meant to be fairly temporary */
15352850748eSChris Wilson 		goto err_unlock;
15362850748eSChris Wilson 	}
15372850748eSChris Wilson 
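	/* Nothing new to bind: the vma already has all the requested bind flags. */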
15382850748eSChris Wilson 	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
1539b5cfe6f7SMaarten Lankhorst 		if (!(flags & PIN_VALIDATE))
15402850748eSChris Wilson 			__i915_vma_pin(vma);
15412850748eSChris Wilson 		goto err_unlock;
15422850748eSChris Wilson 	}
15432850748eSChris Wilson 
15442850748eSChris Wilson 	err = i915_active_acquire(&vma->active);
15452850748eSChris Wilson 	if (err)
15462850748eSChris Wilson 		goto err_unlock;
15472850748eSChris Wilson 
15482850748eSChris Wilson 	if (!(bound & I915_VMA_BIND_MASK)) {
15497e00897bSMaarten Lankhorst 		err = i915_vma_insert(vma, ww, size, alignment, flags);
15502850748eSChris Wilson 		if (err)
15512850748eSChris Wilson 			goto err_active;
15522850748eSChris Wilson 
15532850748eSChris Wilson 		if (i915_is_ggtt(vma->vm))
15542850748eSChris Wilson 			__i915_vma_set_map_and_fenceable(vma);
15552850748eSChris Wilson 	}
15562850748eSChris Wilson 
15572850748eSChris Wilson 	GEM_BUG_ON(!vma->pages);
15582850748eSChris Wilson 	err = i915_vma_bind(vma,
15599275277dSFei Yang 			    vma->obj->pat_index,
1560e1a4bbb6SThomas Hellström 			    flags, work, vma_res);
1561e1a4bbb6SThomas Hellström 	vma_res = NULL;
15622850748eSChris Wilson 	if (err)
15632850748eSChris Wilson 		goto err_remove;
15642850748eSChris Wilson 
15652850748eSChris Wilson 	/* There should only be at most 2 active bindings (user, global) */
15662850748eSChris Wilson 	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
15672850748eSChris Wilson 	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
15682850748eSChris Wilson 	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
15692850748eSChris Wilson 
1570b5cfe6f7SMaarten Lankhorst 	if (!(flags & PIN_VALIDATE)) {
15712850748eSChris Wilson 		__i915_vma_pin(vma);
15722850748eSChris Wilson 		GEM_BUG_ON(!i915_vma_is_pinned(vma));
1573b5cfe6f7SMaarten Lankhorst 	}
15742850748eSChris Wilson 	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
15752850748eSChris Wilson 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
15762850748eSChris Wilson 
15772850748eSChris Wilson err_remove:
1578dde01d94SChris Wilson 	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1579dde01d94SChris Wilson 		i915_vma_detach(vma);
1580dde01d94SChris Wilson 		drm_mm_remove_node(&vma->node);
1581dde01d94SChris Wilson 	}
15822850748eSChris Wilson err_active:
15832850748eSChris Wilson 	i915_active_release(&vma->active);
15842850748eSChris Wilson err_unlock:
15852850748eSChris Wilson 	mutex_unlock(&vma->vm->mutex);
1586e1a4bbb6SThomas Hellström err_vma_res:
15878f4f9a3bSThomas Hellström 	i915_vma_resource_free(vma_res);
15882850748eSChris Wilson err_fence:
15892850748eSChris Wilson 	if (work)
159092581f9fSChris Wilson 		dma_fence_work_commit_imm(&work->base);
159189351925SChris Wilson err_rpm:
1592c0e60347SChris Wilson 	intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
15930b4d1f0eSMaarten Lankhorst 
1594f6c466b8SMaarten Lankhorst 	if (moving)
1595f6c466b8SMaarten Lankhorst 		dma_fence_put(moving);
1596f6c466b8SMaarten Lankhorst 
15970b4d1f0eSMaarten Lankhorst 	i915_vma_put_pages(vma);
15982850748eSChris Wilson 	return err;
1599b42fe9caSJoonas Lahtinen }
1600b42fe9caSJoonas Lahtinen 
flush_idle_contexts(struct intel_gt * gt)1601ccd20945SChris Wilson static void flush_idle_contexts(struct intel_gt *gt)
1602ccd20945SChris Wilson {
1603ccd20945SChris Wilson 	struct intel_engine_cs *engine;
1604ccd20945SChris Wilson 	enum intel_engine_id id;
1605ccd20945SChris Wilson 
1606ccd20945SChris Wilson 	for_each_engine(engine, gt, id)
1607ccd20945SChris Wilson 		intel_engine_flush_barriers(engine);
1608ccd20945SChris Wilson 
1609ccd20945SChris Wilson 	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1610ccd20945SChris Wilson }
1611ccd20945SChris Wilson 
__i915_ggtt_pin(struct i915_vma * vma,struct i915_gem_ww_ctx * ww,u32 align,unsigned int flags)16122abb6195SMaarten Lankhorst static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
161347b08693SMaarten Lankhorst 			   u32 align, unsigned int flags)
1614ccd20945SChris Wilson {
1615ccd20945SChris Wilson 	struct i915_address_space *vm = vma->vm;
16160f857158SAravind Iddamsetty 	struct intel_gt *gt;
16170f857158SAravind Iddamsetty 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
1618ccd20945SChris Wilson 	int err;
1619ccd20945SChris Wilson 
1620ccd20945SChris Wilson 	do {
162147b08693SMaarten Lankhorst 		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
16222abb6195SMaarten Lankhorst 
1623e3793468SChris Wilson 		if (err != -ENOSPC) {
1624e3793468SChris Wilson 			if (!err) {
1625e3793468SChris Wilson 				err = i915_vma_wait_for_bind(vma);
1626e3793468SChris Wilson 				if (err)
1627e3793468SChris Wilson 					i915_vma_unpin(vma);
1628e3793468SChris Wilson 			}
1629ccd20945SChris Wilson 			return err;
1630e3793468SChris Wilson 		}
1631ccd20945SChris Wilson 
1632ccd20945SChris Wilson 		/* Unlike i915_vma_pin, we don't take no for an answer! */
16330f857158SAravind Iddamsetty 		list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
16340f857158SAravind Iddamsetty 			flush_idle_contexts(gt);
1635ccd20945SChris Wilson 		if (mutex_lock_interruptible(&vm->mutex) == 0) {
16366945c53bSMaarten Lankhorst 			/*
16376945c53bSMaarten Lankhorst 			 * We pass NULL ww here, as we don't want to unbind
16386945c53bSMaarten Lankhorst 			 * locked objects when called from execbuf when pinning
16396945c53bSMaarten Lankhorst 			 * is removed. This would probably regress badly.
16406945c53bSMaarten Lankhorst 			 */
1641801fa7a8SMatthew Auld 			i915_gem_evict_vm(vm, NULL, NULL);
1642ccd20945SChris Wilson 			mutex_unlock(&vm->mutex);
1643ccd20945SChris Wilson 		}
1644ccd20945SChris Wilson 	} while (1);
1645ccd20945SChris Wilson }
1646ccd20945SChris Wilson 
i915_ggtt_pin(struct i915_vma * vma,struct i915_gem_ww_ctx * ww,u32 align,unsigned int flags)16472abb6195SMaarten Lankhorst int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
16482abb6195SMaarten Lankhorst 		  u32 align, unsigned int flags)
16492abb6195SMaarten Lankhorst {
16502abb6195SMaarten Lankhorst 	struct i915_gem_ww_ctx _ww;
16512abb6195SMaarten Lankhorst 	int err;
16522abb6195SMaarten Lankhorst 
16532abb6195SMaarten Lankhorst 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
16542abb6195SMaarten Lankhorst 
16552abb6195SMaarten Lankhorst 	if (ww)
16562abb6195SMaarten Lankhorst 		return __i915_ggtt_pin(vma, ww, align, flags);
16572abb6195SMaarten Lankhorst 
16580de2cc0eSTvrtko Ursulin 	lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
16592abb6195SMaarten Lankhorst 
16602abb6195SMaarten Lankhorst 	for_i915_gem_ww(&_ww, err, true) {
16612abb6195SMaarten Lankhorst 		err = i915_gem_object_lock(vma->obj, &_ww);
16622abb6195SMaarten Lankhorst 		if (!err)
16632abb6195SMaarten Lankhorst 			err = __i915_ggtt_pin(vma, &_ww, align, flags);
16642abb6195SMaarten Lankhorst 	}
16652abb6195SMaarten Lankhorst 
16662abb6195SMaarten Lankhorst 	return err;
16672abb6195SMaarten Lankhorst }
16682abb6195SMaarten Lankhorst 
1669ddd33ff1SJouni Högander /**
1670ddd33ff1SJouni Högander  * i915_ggtt_clear_scanout - Clear the scanout flag for all of the object's GGTT VMAs
1671ddd33ff1SJouni Högander  * @obj: i915 GEM object
1672ddd33ff1SJouni Högander  * This function clears the scanout flags for all of the object's GGTT VMAs. These
1673ddd33ff1SJouni Högander  * flags are set when the object is pinned for display use, and this function,
1674ddd33ff1SJouni Högander  * which clears them all, is intended to be called by the frontbuffer tracking
1675ddd33ff1SJouni Högander  * code when the frontbuffer is about to be released.
1676ddd33ff1SJouni Högander  */
i915_ggtt_clear_scanout(struct drm_i915_gem_object * obj)1677ddd33ff1SJouni Högander void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
1678ddd33ff1SJouni Högander {
1679ddd33ff1SJouni Högander 	struct i915_vma *vma;
1680ddd33ff1SJouni Högander 
1681ddd33ff1SJouni Högander 	spin_lock(&obj->vma.lock);
1682ddd33ff1SJouni Högander 	for_each_ggtt_vma(vma, obj) {
1683ddd33ff1SJouni Högander 		i915_vma_clear_scanout(vma);
1684ddd33ff1SJouni Högander 		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
1685ddd33ff1SJouni Högander 	}
1686ddd33ff1SJouni Högander 	spin_unlock(&obj->vma.lock);
1687ddd33ff1SJouni Högander }
1688ddd33ff1SJouni Högander 
__vma_close(struct i915_vma * vma,struct intel_gt * gt)168950689771SChris Wilson static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
16903365e226SChris Wilson {
16913365e226SChris Wilson 	/*
16923365e226SChris Wilson 	 * We defer actually closing, unbinding and destroying the VMA until
16933365e226SChris Wilson 	 * the next idle point, or if the object is freed in the meantime. By
16943365e226SChris Wilson 	 * postponing the unbind, we allow for it to be resurrected by the
16953365e226SChris Wilson 	 * client, avoiding the work required to rebind the VMA. This is
16963365e226SChris Wilson 	 * advantageous for DRI, where the client/server pass objects
16973365e226SChris Wilson 	 * between themselves, temporarily opening a local VMA to the
16983365e226SChris Wilson 	 * object, and then closing it again. The same object is then reused
16993365e226SChris Wilson 	 * on the next frame (or two, depending on the depth of the swap queue)
17003365e226SChris Wilson 	 * causing us to rebind the VMA once more. This ends up being a lot
17013365e226SChris Wilson 	 * of wasted work for the steady state.
17023365e226SChris Wilson 	 */
170350689771SChris Wilson 	GEM_BUG_ON(i915_vma_is_closed(vma));
170471e51ca8SChris Wilson 	list_add(&vma->closed_link, &gt->closed_vma);
170550689771SChris Wilson }
170650689771SChris Wilson 
i915_vma_close(struct i915_vma * vma)170750689771SChris Wilson void i915_vma_close(struct i915_vma *vma)
170850689771SChris Wilson {
170950689771SChris Wilson 	struct intel_gt *gt = vma->vm->gt;
171050689771SChris Wilson 	unsigned long flags;
171150689771SChris Wilson 
171250689771SChris Wilson 	if (i915_vma_is_ggtt(vma))
171350689771SChris Wilson 		return;
171450689771SChris Wilson 
171550689771SChris Wilson 	GEM_BUG_ON(!atomic_read(&vma->open_count));
171650689771SChris Wilson 	if (atomic_dec_and_lock_irqsave(&vma->open_count,
171750689771SChris Wilson 					&gt->closed_lock,
171850689771SChris Wilson 					flags)) {
171950689771SChris Wilson 		__vma_close(vma, gt);
172071e51ca8SChris Wilson 		spin_unlock_irqrestore(&gt->closed_lock, flags);
1721155ab883SChris Wilson 	}
172250689771SChris Wilson }
1723155ab883SChris Wilson 
__i915_vma_remove_closed(struct i915_vma * vma)1724155ab883SChris Wilson static void __i915_vma_remove_closed(struct i915_vma *vma)
1725155ab883SChris Wilson {
1726155ab883SChris Wilson 	list_del_init(&vma->closed_link);
17273365e226SChris Wilson }
17283365e226SChris Wilson 
i915_vma_reopen(struct i915_vma * vma)17293365e226SChris Wilson void i915_vma_reopen(struct i915_vma *vma)
17303365e226SChris Wilson {
17311df1c79cSKarol Herbst 	struct intel_gt *gt = vma->vm->gt;
17321df1c79cSKarol Herbst 
17331df1c79cSKarol Herbst 	spin_lock_irq(&gt->closed_lock);
17342850748eSChris Wilson 	if (i915_vma_is_closed(vma))
1735155ab883SChris Wilson 		__i915_vma_remove_closed(vma);
17361df1c79cSKarol Herbst 	spin_unlock_irq(&gt->closed_lock);
17373365e226SChris Wilson }
17383365e226SChris Wilson 
force_unbind(struct i915_vma * vma)1739c03d9826SThomas Hellström static void force_unbind(struct i915_vma *vma)
1740c03d9826SThomas Hellström {
1741c03d9826SThomas Hellström 	if (!drm_mm_node_allocated(&vma->node))
1742c03d9826SThomas Hellström 		return;
1743c03d9826SThomas Hellström 
17442850748eSChris Wilson 	atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
17452850748eSChris Wilson 	WARN_ON(__i915_vma_unbind(vma));
1746b290a78bSChris Wilson 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
17472850748eSChris Wilson }
1748c03d9826SThomas Hellström 
release_references(struct i915_vma * vma,struct intel_gt * gt,bool vm_ddestroy)17491926a6b7SThomas Hellström static void release_references(struct i915_vma *vma, struct intel_gt *gt,
17501926a6b7SThomas Hellström 			       bool vm_ddestroy)
1751c03d9826SThomas Hellström {
1752c03d9826SThomas Hellström 	struct drm_i915_gem_object *obj = vma->obj;
1753c03d9826SThomas Hellström 
17542850748eSChris Wilson 	GEM_BUG_ON(i915_vma_is_active(vma));
1755b42fe9caSJoonas Lahtinen 
1756528cbd17SChris Wilson 	spin_lock(&obj->vma.lock);
1757528cbd17SChris Wilson 	list_del(&vma->obj_link);
175851dc276dSChris Wilson 	if (!RB_EMPTY_NODE(&vma->obj_node))
17592850748eSChris Wilson 		rb_erase(&vma->obj_node, &obj->vma.tree);
1760e1a7ab4fSThomas Hellström 
1761528cbd17SChris Wilson 	spin_unlock(&obj->vma.lock);
1762010e3e68SChris Wilson 
17631df1c79cSKarol Herbst 	spin_lock_irq(&gt->closed_lock);
1764155ab883SChris Wilson 	__i915_vma_remove_closed(vma);
17651df1c79cSKarol Herbst 	spin_unlock_irq(&gt->closed_lock);
17663365e226SChris Wilson 
1767e1a7ab4fSThomas Hellström 	if (vm_ddestroy)
1768e1a7ab4fSThomas Hellström 		i915_vm_resv_put(vma->vm);
1769e1a7ab4fSThomas Hellström 
17707a2280e8SNirmoy Das 	/* Wait for async active retire */
17717a2280e8SNirmoy Das 	i915_active_wait(&vma->active);
1772d9393973SThomas Hellström 	i915_active_fini(&vma->active);
1773d9393973SThomas Hellström 	GEM_WARN_ON(vma->resource);
1774d9393973SThomas Hellström 	i915_vma_free(vma);
1775c03d9826SThomas Hellström }
1776c03d9826SThomas Hellström 
1777a915450eSLee Jones /*
1778c03d9826SThomas Hellström  * i915_vma_destroy_locked - Remove all weak reference to the vma and put
1779c03d9826SThomas Hellström  * the initial reference.
1780c03d9826SThomas Hellström  *
1781c03d9826SThomas Hellström  * This function should be called when it's decided the vma isn't needed
1782c03d9826SThomas Hellström  * anymore. The caller must assure that it doesn't race with another lookup
1783c03d9826SThomas Hellström  * plus destroy, typically by taking an appropriate reference.
1784c03d9826SThomas Hellström  *
1785c03d9826SThomas Hellström  * Current callsites are
1786c03d9826SThomas Hellström  * - __i915_gem_object_pages_fini()
1787c03d9826SThomas Hellström  * - __i915_vm_close() - Blocks the above function by taking a reference on
1788c03d9826SThomas Hellström  * the object.
1789e1a7ab4fSThomas Hellström  * - __i915_vma_parked() - Blocks the above functions by taking a reference
1790e1a7ab4fSThomas Hellström  * on the vm and a reference on the object. Also takes the object lock so
1791e1a7ab4fSThomas Hellström  * destruction from __i915_vma_parked() can be blocked by holding the
1792e1a7ab4fSThomas Hellström  * object lock. Since the object lock is only allowed from within i915 with
1793e1a7ab4fSThomas Hellström  * an object refcount, holding the object lock also implicitly blocks the
1794e1a7ab4fSThomas Hellström  * vma freeing from __i915_gem_object_pages_fini().
1795c03d9826SThomas Hellström  *
1796c03d9826SThomas Hellström  * Because of locks taken during destruction, a vma is also guaranteed to
1797c03d9826SThomas Hellström  * stay alive while the following locks are held if it was looked up while
1798c03d9826SThomas Hellström  * holding one of the locks:
1799c03d9826SThomas Hellström  * - vm->mutex
1800c03d9826SThomas Hellström  * - obj->vma.lock
1801c03d9826SThomas Hellström  * - gt->closed_lock
1802c03d9826SThomas Hellström  */
i915_vma_destroy_locked(struct i915_vma * vma)1803c03d9826SThomas Hellström void i915_vma_destroy_locked(struct i915_vma *vma)
1804c03d9826SThomas Hellström {
1805c03d9826SThomas Hellström 	lockdep_assert_held(&vma->vm->mutex);
1806c03d9826SThomas Hellström 
1807c03d9826SThomas Hellström 	force_unbind(vma);
1808e1a7ab4fSThomas Hellström 	list_del_init(&vma->vm_link);
18091926a6b7SThomas Hellström 	release_references(vma, vma->vm->gt, false);
1810c03d9826SThomas Hellström }
1811c03d9826SThomas Hellström 
i915_vma_destroy(struct i915_vma * vma)1812c03d9826SThomas Hellström void i915_vma_destroy(struct i915_vma *vma)
1813c03d9826SThomas Hellström {
18141926a6b7SThomas Hellström 	struct intel_gt *gt;
1815e1a7ab4fSThomas Hellström 	bool vm_ddestroy;
1816e1a7ab4fSThomas Hellström 
1817c03d9826SThomas Hellström 	mutex_lock(&vma->vm->mutex);
1818c03d9826SThomas Hellström 	force_unbind(vma);
1819e1a7ab4fSThomas Hellström 	list_del_init(&vma->vm_link);
1820e1a7ab4fSThomas Hellström 	vm_ddestroy = vma->vm_ddestroy;
1821e1a7ab4fSThomas Hellström 	vma->vm_ddestroy = false;
18221926a6b7SThomas Hellström 
18231926a6b7SThomas Hellström 	/* vma->vm may be freed when releasing vma->vm->mutex. */
18241926a6b7SThomas Hellström 	gt = vma->vm->gt;
1825c03d9826SThomas Hellström 	mutex_unlock(&vma->vm->mutex);
18261926a6b7SThomas Hellström 	release_references(vma, gt, vm_ddestroy);
18273365e226SChris Wilson }
18283365e226SChris Wilson 
i915_vma_parked(struct intel_gt * gt)182971e51ca8SChris Wilson void i915_vma_parked(struct intel_gt *gt)
18303365e226SChris Wilson {
18313365e226SChris Wilson 	struct i915_vma *vma, *next;
18323447c4c5SChris Wilson 	LIST_HEAD(closed);
18333365e226SChris Wilson 
183471e51ca8SChris Wilson 	spin_lock_irq(&gt->closed_lock);
183571e51ca8SChris Wilson 	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
18362850748eSChris Wilson 		struct drm_i915_gem_object *obj = vma->obj;
18372850748eSChris Wilson 		struct i915_address_space *vm = vma->vm;
18382850748eSChris Wilson 
18392850748eSChris Wilson 		/* XXX All to avoid keeping a reference on i915_vma itself */
18402850748eSChris Wilson 
18412850748eSChris Wilson 		if (!kref_get_unless_zero(&obj->base.refcount))
18422850748eSChris Wilson 			continue;
18432850748eSChris Wilson 
1844e1a7ab4fSThomas Hellström 		if (!i915_vm_tryget(vm)) {
18452850748eSChris Wilson 			i915_gem_object_put(obj);
18463447c4c5SChris Wilson 			continue;
18472850748eSChris Wilson 		}
18482850748eSChris Wilson 
18493447c4c5SChris Wilson 		list_move(&vma->closed_link, &closed);
18503447c4c5SChris Wilson 	}
185171e51ca8SChris Wilson 	spin_unlock_irq(&gt->closed_lock);
18523365e226SChris Wilson 
18533447c4c5SChris Wilson 	/* As the GT is held idle, no vma can be reopened as we destroy them */
18543447c4c5SChris Wilson 	list_for_each_entry_safe(vma, next, &closed, closed_link) {
18553447c4c5SChris Wilson 		struct drm_i915_gem_object *obj = vma->obj;
18563447c4c5SChris Wilson 		struct i915_address_space *vm = vma->vm;
18573447c4c5SChris Wilson 
18580f341974SMaarten Lankhorst 		if (i915_gem_object_trylock(obj, NULL)) {
18593447c4c5SChris Wilson 			INIT_LIST_HEAD(&vma->closed_link);
1860c03d9826SThomas Hellström 			i915_vma_destroy(vma);
18610f341974SMaarten Lankhorst 			i915_gem_object_unlock(obj);
18620f341974SMaarten Lankhorst 		} else {
18630f341974SMaarten Lankhorst 			/* back you go.. */
18640f341974SMaarten Lankhorst 			spin_lock_irq(&gt->closed_lock);
18650f341974SMaarten Lankhorst 			list_add(&vma->closed_link, &gt->closed_vma);
18660f341974SMaarten Lankhorst 			spin_unlock_irq(&gt->closed_lock);
18670f341974SMaarten Lankhorst 		}
18683447c4c5SChris Wilson 
18692850748eSChris Wilson 		i915_gem_object_put(obj);
1870e1a7ab4fSThomas Hellström 		i915_vm_put(vm);
1871155ab883SChris Wilson 	}
1872b42fe9caSJoonas Lahtinen }
1873b42fe9caSJoonas Lahtinen 
__i915_vma_iounmap(struct i915_vma * vma)1874b42fe9caSJoonas Lahtinen static void __i915_vma_iounmap(struct i915_vma *vma)
1875b42fe9caSJoonas Lahtinen {
1876b42fe9caSJoonas Lahtinen 	GEM_BUG_ON(i915_vma_is_pinned(vma));
1877b42fe9caSJoonas Lahtinen 
1878b42fe9caSJoonas Lahtinen 	if (vma->iomap == NULL)
1879b42fe9caSJoonas Lahtinen 		return;
1880b42fe9caSJoonas Lahtinen 
1881d976521aSCQ Tang 	if (page_unmask_bits(vma->iomap))
1882d976521aSCQ Tang 		__i915_gem_object_release_map(vma->obj);
1883d976521aSCQ Tang 	else
1884b42fe9caSJoonas Lahtinen 		io_mapping_unmap(vma->iomap);
1885b42fe9caSJoonas Lahtinen 	vma->iomap = NULL;
1886b42fe9caSJoonas Lahtinen }
1887b42fe9caSJoonas Lahtinen 
i915_vma_revoke_mmap(struct i915_vma * vma)1888a65adaf8SChris Wilson void i915_vma_revoke_mmap(struct i915_vma *vma)
1889a65adaf8SChris Wilson {
1890cc662126SAbdiel Janulgue 	struct drm_vma_offset_node *node;
1891a65adaf8SChris Wilson 	u64 vma_offset;
1892a65adaf8SChris Wilson 
1893a65adaf8SChris Wilson 	if (!i915_vma_has_userfault(vma))
1894a65adaf8SChris Wilson 		return;
1895a65adaf8SChris Wilson 
1896a65adaf8SChris Wilson 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1897a65adaf8SChris Wilson 	GEM_BUG_ON(!vma->obj->userfault_count);
1898a65adaf8SChris Wilson 
1899cc662126SAbdiel Janulgue 	node = &vma->mmo->vma_node;
19003bb6a442SNiranjana Vishwanathapura 	vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
1901a65adaf8SChris Wilson 	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1902a65adaf8SChris Wilson 			    drm_vma_node_offset_addr(node) + vma_offset,
1903a65adaf8SChris Wilson 			    vma->size,
1904a65adaf8SChris Wilson 			    1);
1905a65adaf8SChris Wilson 
1906a65adaf8SChris Wilson 	i915_vma_unset_userfault(vma);
1907a65adaf8SChris Wilson 	if (!--vma->obj->userfault_count)
1908a65adaf8SChris Wilson 		list_del(&vma->obj->userfault_link);
1909a65adaf8SChris Wilson }
1910a65adaf8SChris Wilson 
1911af5c6fcfSChris Wilson static int
__i915_request_await_bind(struct i915_request * rq,struct i915_vma * vma)1912af5c6fcfSChris Wilson __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1913af5c6fcfSChris Wilson {
1914af5c6fcfSChris Wilson 	return __i915_request_await_exclusive(rq, &vma->active);
1915af5c6fcfSChris Wilson }
1916af5c6fcfSChris Wilson 
__i915_vma_move_to_active(struct i915_vma * vma,struct i915_request * rq)1917ad5c99e0SMaarten Lankhorst static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
19182850748eSChris Wilson {
19192850748eSChris Wilson 	int err;
19202850748eSChris Wilson 
19212850748eSChris Wilson 	/* Wait for the vma to be bound before we start! */
1922af5c6fcfSChris Wilson 	err = __i915_request_await_bind(rq, vma);
19232850748eSChris Wilson 	if (err)
19242850748eSChris Wilson 		return err;
19252850748eSChris Wilson 
19262850748eSChris Wilson 	return i915_active_add_request(&vma->active, rq);
19272850748eSChris Wilson }
19282850748eSChris Wilson 
_i915_vma_move_to_active(struct i915_vma * vma,struct i915_request * rq,struct dma_fence * fence,unsigned int flags)1929544460c3SMatthew Brost int _i915_vma_move_to_active(struct i915_vma *vma,
1930e6bb1d7fSChris Wilson 			     struct i915_request *rq,
1931544460c3SMatthew Brost 			     struct dma_fence *fence,
1932e6bb1d7fSChris Wilson 			     unsigned int flags)
1933e6bb1d7fSChris Wilson {
1934e6bb1d7fSChris Wilson 	struct drm_i915_gem_object *obj = vma->obj;
1935a93615f9SChris Wilson 	int err;
1936e6bb1d7fSChris Wilson 
19376951e589SChris Wilson 	assert_object_held(obj);
1938e6bb1d7fSChris Wilson 
1939b5cfe6f7SMaarten Lankhorst 	GEM_BUG_ON(!vma->pages);
1940b5cfe6f7SMaarten Lankhorst 
19412a76fc89SAndrzej Hajda 	if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
19422a76fc89SAndrzej Hajda 		err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
19432a76fc89SAndrzej Hajda 		if (unlikely(err))
19442a76fc89SAndrzej Hajda 			return err;
19452a76fc89SAndrzej Hajda 	}
19462850748eSChris Wilson 	err = __i915_vma_move_to_active(vma, rq);
1947a93615f9SChris Wilson 	if (unlikely(err))
1948a93615f9SChris Wilson 		return err;
1949e6bb1d7fSChris Wilson 
1950420a07b8SNirmoy Das 	/*
1951420a07b8SNirmoy Das 	 * Reserve fences slot early to prevent an allocation after preparing
1952420a07b8SNirmoy Das 	 * the workload and associating fences with dma_resv.
1953420a07b8SNirmoy Das 	 */
1954420a07b8SNirmoy Das 	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
1955420a07b8SNirmoy Das 		struct dma_fence *curr;
1956420a07b8SNirmoy Das 		int idx;
1957420a07b8SNirmoy Das 
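		/* Walk the (possibly composite) fence only to count its entries. */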
1958420a07b8SNirmoy Das 		dma_fence_array_for_each(curr, idx, fence)
1959420a07b8SNirmoy Das 			;
1960420a07b8SNirmoy Das 		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
1961420a07b8SNirmoy Das 		if (unlikely(err))
1962420a07b8SNirmoy Das 			return err;
1963420a07b8SNirmoy Das 	}
1964420a07b8SNirmoy Das 
1965e6bb1d7fSChris Wilson 	if (flags & EXEC_OBJECT_WRITE) {
1966da42104fSChris Wilson 		struct intel_frontbuffer *front;
1967da42104fSChris Wilson 
19687b574550SJouni Högander 		front = i915_gem_object_get_frontbuffer(obj);
1969da42104fSChris Wilson 		if (unlikely(front)) {
1970da42104fSChris Wilson 			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1971da42104fSChris Wilson 				i915_active_add_request(&front->write, rq);
1972da42104fSChris Wilson 			intel_frontbuffer_put(front);
1973da42104fSChris Wilson 		}
1974c8d4c18bSChristian König 	}
1975c8d4c18bSChristian König 
1976544460c3SMatthew Brost 	if (fence) {
1977420a07b8SNirmoy Das 		struct dma_fence *curr;
1978420a07b8SNirmoy Das 		enum dma_resv_usage usage;
1979420a07b8SNirmoy Das 		int idx;
1980420a07b8SNirmoy Das 
1981420a07b8SNirmoy Das 		if (flags & EXEC_OBJECT_WRITE) {
1982420a07b8SNirmoy Das 			usage = DMA_RESV_USAGE_WRITE;
1983420a07b8SNirmoy Das 			obj->write_domain = I915_GEM_DOMAIN_RENDER;
198404f7eb3dSNirmoy Das 			obj->read_domains = 0;
1985cd2a4eafSChris Wilson 		} else {
1986420a07b8SNirmoy Das 			usage = DMA_RESV_USAGE_READ;
198704f7eb3dSNirmoy Das 			obj->write_domain = 0;
1988bfaae47dSMaarten Lankhorst 		}
1989cd2a4eafSChris Wilson 
1990420a07b8SNirmoy Das 		dma_fence_array_for_each(curr, idx, fence)
1991420a07b8SNirmoy Das 			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
1992544460c3SMatthew Brost 	}
199363baf4f3SChris Wilson 
199463baf4f3SChris Wilson 	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
199563baf4f3SChris Wilson 		i915_active_add_request(&vma->fence->active, rq);
199663baf4f3SChris Wilson 
1997e6bb1d7fSChris Wilson 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
1998a93615f9SChris Wilson 	obj->mm.dirty = true;
1999e6bb1d7fSChris Wilson 
2000a93615f9SChris Wilson 	GEM_BUG_ON(!i915_vma_is_active(vma));
2001e6bb1d7fSChris Wilson 	return 0;
2002e6bb1d7fSChris Wilson }
2003e6bb1d7fSChris Wilson 
__i915_vma_evict(struct i915_vma * vma,bool async)20042f6b90daSThomas Hellström struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
2005b42fe9caSJoonas Lahtinen {
20062f6b90daSThomas Hellström 	struct i915_vma_resource *vma_res = vma->resource;
2007e1a4bbb6SThomas Hellström 	struct dma_fence *unbind_fence;
2008e1a4bbb6SThomas Hellström 
200960e94557SChris Wilson 	GEM_BUG_ON(i915_vma_is_pinned(vma));
2010a594525cSMaarten Lankhorst 	assert_vma_held_evict(vma);
201160e94557SChris Wilson 
2012b42fe9caSJoonas Lahtinen 	if (i915_vma_is_map_and_fenceable(vma)) {
20139657aaa2SChris Wilson 		/* Force a pagefault for domain tracking on next user access */
20149657aaa2SChris Wilson 		i915_vma_revoke_mmap(vma);
20159657aaa2SChris Wilson 
20167125397bSChris Wilson 		/*
20177125397bSChris Wilson 		 * Check that we have flushed all writes through the GGTT
20187125397bSChris Wilson 		 * before the unbind; otherwise, due to the non-strict nature of
20197125397bSChris Wilson 		 * those indirect writes, they may end up referencing the GGTT PTE
20207125397bSChris Wilson 		 * after the unbind.
20215424f5d7SChris Wilson 		 *
20225424f5d7SChris Wilson 		 * Note that we may be concurrently poking at the GGTT_WRITE
20235424f5d7SChris Wilson 		 * bit from set-domain, as we mark all GGTT vma associated
20245424f5d7SChris Wilson 		 * with an object. We know this is for another vma, as we
20255424f5d7SChris Wilson 		 * are currently unbinding this one -- so if this vma will be
20265424f5d7SChris Wilson 		 * reused, it will be refaulted and have its dirty bit set
20275424f5d7SChris Wilson 		 * before the next write.
20287125397bSChris Wilson 		 */
20297125397bSChris Wilson 		i915_vma_flush_writes(vma);
20307125397bSChris Wilson 
2031b42fe9caSJoonas Lahtinen 		/* release the fence reg _after_ flushing */
20320d86ee35SChris Wilson 		i915_vma_revoke_fence(vma);
2033b42fe9caSJoonas Lahtinen 
20344dd2fbbfSChris Wilson 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
2035b42fe9caSJoonas Lahtinen 	}
2036afd5cb39SJuha-Pekka Heikkila 
2037afd5cb39SJuha-Pekka Heikkila 	__i915_vma_iounmap(vma);
2038afd5cb39SJuha-Pekka Heikkila 
2039a65adaf8SChris Wilson 	GEM_BUG_ON(vma->fence);
2040a65adaf8SChris Wilson 	GEM_BUG_ON(i915_vma_has_userfault(vma));
2041b42fe9caSJoonas Lahtinen 
20422f6b90daSThomas Hellström 	/* Object backend must be async capable. */
204360dc43d1SThomas Hellström 	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
20442f6b90daSThomas Hellström 
20452f6b90daSThomas Hellström 	/* If vm is not open, unbind is a nop. */
20462f6b90daSThomas Hellström 	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
2047e1a7ab4fSThomas Hellström 		kref_read(&vma->vm->ref);
2048e1a7ab4fSThomas Hellström 	vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
2049e1a7ab4fSThomas Hellström 		vma->vm->skip_pte_rewrite;
2050b42fe9caSJoonas Lahtinen 	trace_i915_vma_unbind(vma);
20512f6b90daSThomas Hellström 
20525d36acb7SChris Wilson 	if (async)
20535d36acb7SChris Wilson 		unbind_fence = i915_vma_resource_unbind(vma_res,
2054d6c531abSChris Wilson 							vma->obj->mm.tlb);
20555d36acb7SChris Wilson 	else
20565d36acb7SChris Wilson 		unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
20575d36acb7SChris Wilson 
20582f6b90daSThomas Hellström 	vma->resource = NULL;
20592f6b90daSThomas Hellström 
20605424f5d7SChris Wilson 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
20615424f5d7SChris Wilson 		   &vma->flags);
2062b42fe9caSJoonas Lahtinen 
2063dde01d94SChris Wilson 	i915_vma_detach(vma);
2064e1a4bbb6SThomas Hellström 
20655d36acb7SChris Wilson 	if (!async) {
20665d36acb7SChris Wilson 		if (unbind_fence) {
2067e1a4bbb6SThomas Hellström 			dma_fence_wait(unbind_fence, false);
2068e1a4bbb6SThomas Hellström 			dma_fence_put(unbind_fence);
20692f6b90daSThomas Hellström 			unbind_fence = NULL;
20702f6b90daSThomas Hellström 		}
2071d6c531abSChris Wilson 		vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
20725d36acb7SChris Wilson 	}
20732f6b90daSThomas Hellström 
20742f6b90daSThomas Hellström 	/*
20752f6b90daSThomas Hellström 	 * Binding itself may not have completed until the unbind fence signals,
20762f6b90daSThomas Hellström 	 * so don't drop the pages until that happens, unless the resource is
20772f6b90daSThomas Hellström 	 * async_capable.
20782f6b90daSThomas Hellström 	 */
20792f6b90daSThomas Hellström 
20802f6b90daSThomas Hellström 	vma_unbind_pages(vma);
20812f6b90daSThomas Hellström 	return unbind_fence;
2082bffa18ddSChris Wilson }
2083bffa18ddSChris Wilson 
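/*
 * Unbind the vma from its address space with vm->mutex already held.
 * Refuses with -EAGAIN while the vma is pinned, waits for any residual
 * activity, then evicts synchronously and releases the drm_mm node.
 * Returns 0 on success or a negative error code.
 */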
2084bffa18ddSChris Wilson int __i915_vma_unbind(struct i915_vma *vma)
2085bffa18ddSChris Wilson {
2086bffa18ddSChris Wilson 	int ret;
2087bffa18ddSChris Wilson 
2088bffa18ddSChris Wilson 	lockdep_assert_held(&vma->vm->mutex);
2089a594525cSMaarten Lankhorst 	assert_vma_held_evict(vma);
2090bffa18ddSChris Wilson 
2091bffa18ddSChris Wilson 	if (!drm_mm_node_allocated(&vma->node))
2092bffa18ddSChris Wilson 		return 0;
2093bffa18ddSChris Wilson 
2094bffa18ddSChris Wilson 	if (i915_vma_is_pinned(vma)) {
2095bffa18ddSChris Wilson 		vma_print_allocator(vma, "is pinned");
2096bffa18ddSChris Wilson 		return -EAGAIN;
2097bffa18ddSChris Wilson 	}
2098bffa18ddSChris Wilson 
2099bffa18ddSChris Wilson 	/*
2100bffa18ddSChris Wilson 	 * After confirming that no one else is pinning this vma, wait for
2101bffa18ddSChris Wilson 	 * any laggards who may have crept in during the wait (through
2102bffa18ddSChris Wilson 	 * a residual pin skipping the vm->mutex) to complete.
2103bffa18ddSChris Wilson 	 */
2104bffa18ddSChris Wilson 	ret = i915_vma_sync(vma);
2105bffa18ddSChris Wilson 	if (ret)
2106bffa18ddSChris Wilson 		return ret;
2107bffa18ddSChris Wilson 
2108bffa18ddSChris Wilson 	GEM_BUG_ON(i915_vma_is_active(vma));
21092f6b90daSThomas Hellström 	__i915_vma_evict(vma, false);
2110b42fe9caSJoonas Lahtinen 
211176f9764cSChris Wilson 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2112b42fe9caSJoonas Lahtinen 	return 0;
2113b42fe9caSJoonas Lahtinen }
2114b42fe9caSJoonas Lahtinen 
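/*
 * Asynchronous counterpart of __i915_vma_unbind(): instead of waiting here,
 * the vma_resource unbind is made to await the vma's outstanding activity
 * and the resulting unbind fence is handed back to the caller.  Returns
 * NULL when nothing is bound, or an ERR_PTR if the vma is pinned, its bound
 * pages no longer match the object's current backing store, or the await
 * cannot be set up.
 */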
21152f6b90daSThomas Hellström static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
21162f6b90daSThomas Hellström {
21172f6b90daSThomas Hellström 	struct dma_fence *fence;
21182f6b90daSThomas Hellström 
21192f6b90daSThomas Hellström 	lockdep_assert_held(&vma->vm->mutex);
21202f6b90daSThomas Hellström 
21212f6b90daSThomas Hellström 	if (!drm_mm_node_allocated(&vma->node))
21222f6b90daSThomas Hellström 		return NULL;
21232f6b90daSThomas Hellström 
21242f6b90daSThomas Hellström 	if (i915_vma_is_pinned(vma) ||
21252f6b90daSThomas Hellström 	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
21262f6b90daSThomas Hellström 		return ERR_PTR(-EAGAIN);
21272f6b90daSThomas Hellström 
21282f6b90daSThomas Hellström 	/*
21292f6b90daSThomas Hellström 	 * We probably need to replace this with awaiting the fences of the
21302f6b90daSThomas Hellström 	 * object's dma_resv when the vma active goes away. When doing that
21312f6b90daSThomas Hellström 	 * we need to be careful to not add the vma_resource unbind fence
21322f6b90daSThomas Hellström 	 * immediately to the object's dma_resv, because then unbinding
21332f6b90daSThomas Hellström 	 * the next vma from the object, in case there are many, will
21342f6b90daSThomas Hellström 	 * actually await the unbinding of the previous vmas, which is
21352f6b90daSThomas Hellström 	 * undesirable.
21362f6b90daSThomas Hellström 	 */
21372f6b90daSThomas Hellström 	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
21382f6b90daSThomas Hellström 				       I915_ACTIVE_AWAIT_EXCL |
21392f6b90daSThomas Hellström 				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
21402f6b90daSThomas Hellström 		return ERR_PTR(-EBUSY);
21412f6b90daSThomas Hellström 	}
21422f6b90daSThomas Hellström 
21432f6b90daSThomas Hellström 	fence = __i915_vma_evict(vma, true);
21442f6b90daSThomas Hellström 
21452f6b90daSThomas Hellström 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
21462f6b90daSThomas Hellström 
21472f6b90daSThomas Hellström 	return fence;
21482f6b90daSThomas Hellström }
21492f6b90daSThomas Hellström 
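/*
 * Top-level synchronous unbind.  Called with the object lock (at least
 * shared) held; takes a runtime-pm wakeref for GGTT bindings, acquires
 * vm->mutex and performs __i915_vma_unbind().  Returns 0 on success,
 * -EAGAIN if the vma is pinned, or another negative error code.
 */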
21502850748eSChris Wilson int i915_vma_unbind(struct i915_vma *vma)
21512850748eSChris Wilson {
21522850748eSChris Wilson 	struct i915_address_space *vm = vma->vm;
2153c0e60347SChris Wilson 	intel_wakeref_t wakeref = 0;
21542850748eSChris Wilson 	int err;
21552850748eSChris Wilson 
21560f341974SMaarten Lankhorst 	assert_object_held_shared(vma->obj);
21570f341974SMaarten Lankhorst 
2158d62f416fSChris Wilson 	/* Optimistic wait before taking the mutex */
2159d62f416fSChris Wilson 	err = i915_vma_sync(vma);
2160d62f416fSChris Wilson 	if (err)
2161bffa18ddSChris Wilson 		return err;
2162bffa18ddSChris Wilson 
2163bffa18ddSChris Wilson 	if (!drm_mm_node_allocated(&vma->node))
2164bffa18ddSChris Wilson 		return 0;
2165d62f416fSChris Wilson 
2166614654abSChris Wilson 	if (i915_vma_is_pinned(vma)) {
2167614654abSChris Wilson 		vma_print_allocator(vma, "is pinned");
2168614654abSChris Wilson 		return -EAGAIN;
2169614654abSChris Wilson 	}
2170614654abSChris Wilson 
2171614654abSChris Wilson 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2172614654abSChris Wilson 		/* XXX not always required: nop_clear_range */
2173614654abSChris Wilson 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2174614654abSChris Wilson 
2175d0024911SChris Wilson 	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
21762850748eSChris Wilson 	if (err)
2177d62f416fSChris Wilson 		goto out_rpm;
21782850748eSChris Wilson 
21792850748eSChris Wilson 	err = __i915_vma_unbind(vma);
21802850748eSChris Wilson 	mutex_unlock(&vm->mutex);
21812850748eSChris Wilson 
2182d62f416fSChris Wilson out_rpm:
2183c0e60347SChris Wilson 	if (wakeref)
2184c0e60347SChris Wilson 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
21852850748eSChris Wilson 	return err;
21862850748eSChris Wilson }
21872850748eSChris Wilson 
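/*
 * Unbind without blocking on outstanding work.  The object's dma-resv lock
 * must be held so that the unbind fence can be installed in the reservation
 * object; on success that fence is added with DMA_RESV_USAGE_READ and the
 * actual PTE teardown completes in the background.  With @trylock_vm the
 * vm->mutex is only tried, returning -EBUSY instead of sleeping on it.
 */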
21882f6b90daSThomas Hellström int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
21892f6b90daSThomas Hellström {
21902f6b90daSThomas Hellström 	struct drm_i915_gem_object *obj = vma->obj;
21912f6b90daSThomas Hellström 	struct i915_address_space *vm = vma->vm;
21922f6b90daSThomas Hellström 	intel_wakeref_t wakeref = 0;
21932f6b90daSThomas Hellström 	struct dma_fence *fence;
21942f6b90daSThomas Hellström 	int err;
21952f6b90daSThomas Hellström 
21962f6b90daSThomas Hellström 	/*
21972f6b90daSThomas Hellström 	 * We need the dma-resv lock since we add the
21982f6b90daSThomas Hellström 	 * unbind fence to the dma-resv object.
21992f6b90daSThomas Hellström 	 */
22002f6b90daSThomas Hellström 	assert_object_held(obj);
22012f6b90daSThomas Hellström 
22022f6b90daSThomas Hellström 	if (!drm_mm_node_allocated(&vma->node))
22032f6b90daSThomas Hellström 		return 0;
22042f6b90daSThomas Hellström 
22052f6b90daSThomas Hellström 	if (i915_vma_is_pinned(vma)) {
22062f6b90daSThomas Hellström 		vma_print_allocator(vma, "is pinned");
22072f6b90daSThomas Hellström 		return -EAGAIN;
22082f6b90daSThomas Hellström 	}
22092f6b90daSThomas Hellström 
22102f6b90daSThomas Hellström 	if (!obj->mm.rsgt)
22112f6b90daSThomas Hellström 		return -EBUSY;
22122f6b90daSThomas Hellström 
22134f0755c2SNirmoy Das 	err = dma_resv_reserve_fences(obj->base.resv, 2);
22142f6b90daSThomas Hellström 	if (err)
22152f6b90daSThomas Hellström 		return -EBUSY;
22162f6b90daSThomas Hellström 
22172f6b90daSThomas Hellström 	/*
22182f6b90daSThomas Hellström 	 * It would be great if we could grab this wakeref from the
22192f6b90daSThomas Hellström 	 * async unbind work if needed, but we can't because it uses
22202f6b90daSThomas Hellström 	 * kmalloc and it's in the dma-fence signalling critical path.
22212f6b90daSThomas Hellström 	 */
22222f6b90daSThomas Hellström 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
22232f6b90daSThomas Hellström 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
22242f6b90daSThomas Hellström 
22252f6b90daSThomas Hellström 	if (trylock_vm && !mutex_trylock(&vm->mutex)) {
22262f6b90daSThomas Hellström 		err = -EBUSY;
22272f6b90daSThomas Hellström 		goto out_rpm;
22282f6b90daSThomas Hellström 	} else if (!trylock_vm) {
22292f6b90daSThomas Hellström 		err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
22302f6b90daSThomas Hellström 		if (err)
22312f6b90daSThomas Hellström 			goto out_rpm;
22322f6b90daSThomas Hellström 	}
22332f6b90daSThomas Hellström 
22342f6b90daSThomas Hellström 	fence = __i915_vma_unbind_async(vma);
22352f6b90daSThomas Hellström 	mutex_unlock(&vm->mutex);
22362f6b90daSThomas Hellström 	if (IS_ERR_OR_NULL(fence)) {
22372f6b90daSThomas Hellström 		err = PTR_ERR_OR_ZERO(fence);
22382f6b90daSThomas Hellström 		goto out_rpm;
22392f6b90daSThomas Hellström 	}
22402f6b90daSThomas Hellström 
224173511edfSChristian König 	dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
22422f6b90daSThomas Hellström 	dma_fence_put(fence);
22432f6b90daSThomas Hellström 
22442f6b90daSThomas Hellström out_rpm:
22452f6b90daSThomas Hellström 	if (wakeref)
22462f6b90daSThomas Hellström 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
22472f6b90daSThomas Hellström 	return err;
22482f6b90daSThomas Hellström }
22492f6b90daSThomas Hellström 
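/*
 * Convenience wrapper for callers that do not already hold the object
 * lock: takes the lock, unbinds synchronously and drops the lock again.
 */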
22500f341974SMaarten Lankhorst int i915_vma_unbind_unlocked(struct i915_vma *vma)
22510f341974SMaarten Lankhorst {
22520f341974SMaarten Lankhorst 	int err;
22530f341974SMaarten Lankhorst 
22540f341974SMaarten Lankhorst 	i915_gem_object_lock(vma->obj, NULL);
22550f341974SMaarten Lankhorst 	err = i915_vma_unbind(vma);
22560f341974SMaarten Lankhorst 	i915_gem_object_unlock(vma->obj);
22570f341974SMaarten Lankhorst 
22580f341974SMaarten Lankhorst 	return err;
22590f341974SMaarten Lankhorst }
22600f341974SMaarten Lankhorst 
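/*
 * The three helpers below simply forward the shrinker visibility hint
 * (unshrinkable, shrinkable, purgeable) to the vma's backing object.
 */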
22611aff1903SChris Wilson struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
22621aff1903SChris Wilson {
22631aff1903SChris Wilson 	i915_gem_object_make_unshrinkable(vma->obj);
22641aff1903SChris Wilson 	return vma;
22651aff1903SChris Wilson }
22661aff1903SChris Wilson 
22671aff1903SChris Wilson void i915_vma_make_shrinkable(struct i915_vma *vma)
22681aff1903SChris Wilson {
22691aff1903SChris Wilson 	i915_gem_object_make_shrinkable(vma->obj);
22701aff1903SChris Wilson }
22711aff1903SChris Wilson 
22721aff1903SChris Wilson void i915_vma_make_purgeable(struct i915_vma *vma)
22731aff1903SChris Wilson {
22741aff1903SChris Wilson 	i915_gem_object_make_purgeable(vma->obj);
22751aff1903SChris Wilson }
22761aff1903SChris Wilson 
2277e3c7a1c5SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2278e3c7a1c5SChris Wilson #include "selftests/i915_vma.c"
2279e3c7a1c5SChris Wilson #endif
228013f1bfd3SChris Wilson 
228164fc7cc7SDaniel Vetter void i915_vma_module_exit(void)
2282103b76eeSChris Wilson {
228364fc7cc7SDaniel Vetter 	kmem_cache_destroy(slab_vmas);
2284103b76eeSChris Wilson }
2285103b76eeSChris Wilson 
228664fc7cc7SDaniel Vetter int __init i915_vma_module_init(void)
228713f1bfd3SChris Wilson {
228864fc7cc7SDaniel Vetter 	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
228964fc7cc7SDaniel Vetter 	if (!slab_vmas)
229013f1bfd3SChris Wilson 		return -ENOMEM;
229113f1bfd3SChris Wilson 
229213f1bfd3SChris Wilson 	return 0;
229313f1bfd3SChris Wilson }
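/*
 * i915_vma_module_init() creates the slab cache backing all i915_vma
 * allocations and must therefore run before the first vma is created;
 * i915_vma_module_exit() destroys it again on driver unload.  A rough
 * usage sketch (the actual hook-up lives in the driver's module setup
 * code, not here):
 *
 *	int err = i915_vma_module_init();
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_module_exit();
 */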
2294