/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

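/*
 * Check every engine's GGTT timeline for an outstanding request; only when
 * all of them are idle can callers assume it is safe to purge the remaining
 * global GTT entries (contexts, ringbuffers, scanouts).
 */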
static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_timeline *tl;

		tl = &ggtt->base.timeline.engine[engine->id];
		if (i915_gem_active_isset(&tl->last_request))
			return false;
	}

	return true;
}

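/*
 * Offer a vma to the drm_mm eviction scanner. Pinned vmas, vmas already
 * tracked on an eviction list, and (under PIN_NONFAULT) vmas with a live
 * userspace fault mapping are skipped; everything else is added to the
 * unwind list so it can be taken back out of the scan later.
 */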
static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code; an illustrative
 * caller sketch follows the function body.
 *
 * Since this function is only used to free up virtual address space, it
 * skips only pinned vmas, not objects whose backing storage is pinned.
 * Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
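
	/*
	 * Pick the scan direction to match the final insertion: PIN_HIGH
	 * allocations pack downwards from the top of the address space,
	 * while PIN_MAPPABLE ones must sit low enough to be reachable
	 * through the mappable aperture.
	 */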
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, cache_level,
				    start, end, mode);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(dev_priv);
	else
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(&scan, vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);

		INIT_LIST_HEAD(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
	}

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev_priv);
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = i915_vma_unbind(vma);
	}

	return ret;
}
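
/*
 * Illustrative caller sketch only (not part of the driver): probe for a
 * free hole first, falling back to eviction on -ENOSPC and looping until
 * insertion succeeds or eviction gives up. bind_sketch() and its fixed
 * 4096-byte alignment are hypothetical; struct_mutex must be held.
 *
 *	static int bind_sketch(struct i915_address_space *vm,
 *			       struct i915_vma *vma, u64 start, u64 end)
 *	{
 *		int err;
 *
 *		do {
 *			err = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
 *							  vma->size, 4096,
 *							  vma->obj->cache_level,
 *							  start, end,
 *							  DRM_MM_INSERT_BEST);
 *			if (err != -ENOSPC)
 *				return err;
 *
 *			err = i915_gem_evict_something(vm, vma->size, 4096,
 *						       vma->obj->cache_level,
 *						       start, end, 0);
 *		} while (err == 0);
 *
 *		return err;
 *	}
 */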

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node. An
 * illustrative caller sketch follows the function body.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_node(vm, target, flags);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(vm->i915);

	check_color = vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start > vm->start)
			start -= I915_GTT_PAGE_SIZE;
		if (end < vm->start + vm->total)
			end += I915_GTT_PAGE_SIZE;
	}

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		vma = container_of(node, typeof(*vma), node);

		/* If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
			ret = -ENOSPC;
			if (vma->exec_entry &&
			    vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/* Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->exec_list, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}
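
/*
 * Illustrative caller sketch only (hypothetical, mirroring how a fixed
 * offset reservation might be handled): try to reserve the node, and on
 * -ENOSPC evict whatever overlaps it before trying once more.
 *
 *	node->start = offset;
 *	node->size = size;
 *	node->color = cache_level;
 *	err = drm_mm_reserve_node(&vm->mm, node);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_for_node(vm, node, 0);
 *		if (err == 0)
 *			err = drm_mm_reserve_node(&vm->mm, node);
 *	}
 */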

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas
 * should be evicted, @do_idle must be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space; an illustrative invocation follows the function body.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = vm->i915;

		if (i915_is_ggtt(vm)) {
			ret = i915_gem_switch_to_kernel_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		WARN_ON(!list_empty(&vm->active_list));
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (!i915_vma_is_pinned(vma))
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
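
/*
 * Illustrative invocation only (hypothetical execbuf-style fallback):
 * when binding repeatedly fails with -ENOSPC, empty the whole address
 * space and retry from scratch.
 *
 *	err = i915_gem_evict_vm(vm, true);
 *	if (err)
 *		return err;
 *	(now retry binding into the emptied address space)
 */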