/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"

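/*
 * mark_free - add a vma as a candidate to the current drm_mm eviction scan.
 *
 * Pinned objects can never be evicted, and a vma already sitting on an
 * exec_list is being tracked elsewhere, so both are skipped. Otherwise the
 * vma is remembered on the caller's unwind list (so the scan can be rolled
 * back later) and its node is handed to the scanner; the return value is
 * true once the scanner reports it has collected enough blocks to form a
 * suitable hole.
 */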
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->obj->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

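/**
 * i915_gem_evict_something - scan @vm for a hole, evicting vmas as needed
 * @dev: drm device
 * @vm: address space to make room in
 * @min_size: size of the hole required
 * @alignment: required alignment of the hole
 * @cache_level: cache level of the prospective occupant; passed to drm_mm
 *		 as the colour for the scan
 * @mappable: if true, restrict the search to the mappable part of the GGTT
 * @nonblocking: if true, only consider already-idle (inactive) objects
 *
 * Uses the drm_mm scanner to find, in LRU order, the cheapest set of
 * neighbouring vmas whose eviction would produce a hole matching the
 * request. Returns 0 once the hole has been freed by unbinding those vmas,
 * -ENOSPC if no suitable candidates were found, or the first error raised
 * while unbinding.
 */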
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

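	/*
	 * Build up the scan in two passes, mirroring the retirement order
	 * above: idle (inactive) vmas first, then soon-to-be-idle active
	 * vmas, stopping as soon as the scanner has assembled a hole.
	 */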
	INIT_LIST_HEAD(&unwind_list);
	if (mappable) {
		BUG_ON(!i915_is_ggtt(vm));
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level, 0,
					    dev_priv->gtt.mappable_end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_vm() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, so store the objects to be evicted on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}
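
/*
 * Sketch of the expected caller pattern for i915_gem_evict_something(),
 * matching the -ENOSPC contract noted above: try to reserve space, evict
 * on failure, then retry the reservation. This is illustration only (hence
 * not compiled); the helper names example_bind and example_reserve are
 * assumptions for the sketch, not part of this file.
 */
#if 0
static int example_bind(struct drm_device *dev, struct i915_address_space *vm,
			struct drm_mm_node *node, int size, unsigned alignment,
			unsigned cache_level, bool mappable, bool nonblocking)
{
	int ret;

	do {
		/* Hypothetical reservation step standing in for the real
		 * drm_mm allocation performed by the binding code. */
		ret = example_reserve(vm, node, size, alignment, cache_level);
		if (ret != -ENOSPC)
			return ret;

		/* Make room and try again; give up once eviction itself
		 * reports that nothing more can be freed. */
		ret = i915_gem_evict_something(dev, vm, size, alignment,
					       cache_level, mappable,
					       nonblocking);
	} while (ret == 0);

	return ret;
}
#endif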

/**
 * i915_gem_evict_vm - Try to free up VM space
 *
 * @vm: Address space to evict from
 * @do_idle: Boolean directing whether to idle first.
 *
 * VM eviction is about freeing up virtual address space. If one wants
 * fine-grained eviction, see i915_gem_evict_something() instead. In terms
 * of freeing up actual system memory, this function may not accomplish the
 * desired result. An object may be shared across multiple address spaces,
 * and this function does not guarantee that such objects are freed.
 *
 * Using do_idle results in a more complete eviction because it retires and
 * inactivates the current BOs.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->obj->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}

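/**
 * i915_gem_evict_everything - evict all bound objects from all address spaces
 * @dev: drm device
 *
 * Idles the GPU, retires all outstanding requests and then unbinds every
 * unpinned object from every address space. Returns -ENOSPC if nothing was
 * bound to begin with, or the error raised while idling the GPU.
 */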
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm;
	bool lists_empty = true;
	int ret;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		if (!lists_empty)
			break;
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
}