1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uuk>
26  *
27  */
28 
29 #include "gem/i915_gem_context.h"
30 #include "gt/intel_gt_requests.h"
31 
32 #include "i915_drv.h"
33 #include "i915_trace.h"
34 
35 I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
36 	bool fail_if_busy:1;
37 } igt_evict_ctl;)
38 
39 static int ggtt_flush(struct intel_gt *gt)
40 {
41 	/*
42 	 * Not everything in the GGTT is tracked via vma (otherwise we
43 	 * could evict as required with minimal stalling) so we are forced
44 	 * to idle the GPU and explicitly retire outstanding requests in
45 	 * the hopes that we can then remove contexts and the like only
46 	 * bound by their active reference.
47 	 */
48 	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
49 }
50 
51 static bool
52 mark_free(struct drm_mm_scan *scan,
53 	  struct i915_vma *vma,
54 	  unsigned int flags,
55 	  struct list_head *unwind)
56 {
57 	if (i915_vma_is_pinned(vma))
58 		return false;
59 
60 	list_add(&vma->evict_link, unwind);
61 	return drm_mm_scan_add_block(scan, &vma->node);
62 }
63 
64 static bool defer_evict(struct i915_vma *vma)
65 {
66 	if (i915_vma_is_active(vma))
67 		return true;
68 
69 	if (i915_vma_is_scanout(vma))
70 		return true;
71 
72 	return false;
73 }
74 
75 /**
76  * i915_gem_evict_something - Evict vmas to make room for binding a new one
77  * @vm: address space to evict from
78  * @min_size: size of the desired free space
79  * @alignment: alignment constraint of the desired free space
80  * @color: color for the desired space
81  * @start: start (inclusive) of the range from which to evict objects
82  * @end: end (exclusive) of the range from which to evict objects
83  * @flags: additional flags to control the eviction algorithm
84  *
85  * This function will try to evict vmas until a free space satisfying the
86  * requirements is found. Callers must check first whether any such hole exists
87  * already before calling this function.
88  *
89  * This function is used by the object/vma binding code.
90  *
91  * Since this function is only used to free up virtual address space it only
92  * ignores pinned vmas, and not object where the backing storage itself is
93  * pinned. Hence obj->pages_pin_count does not protect against eviction.
94  *
95  * To clarify: This is for freeing up virtual address space, not for freeing
96  * memory in e.g. the shrinker.
97  */
98 int
99 i915_gem_evict_something(struct i915_address_space *vm,
100 			 u64 min_size, u64 alignment,
101 			 unsigned long color,
102 			 u64 start, u64 end,
103 			 unsigned flags)
104 {
105 	struct drm_mm_scan scan;
106 	struct list_head eviction_list;
107 	struct i915_vma *vma, *next;
108 	struct drm_mm_node *node;
109 	enum drm_mm_insert_mode mode;
110 	struct i915_vma *active;
111 	int ret;
112 
113 	lockdep_assert_held(&vm->mutex);
114 	trace_i915_gem_evict(vm, min_size, alignment, flags);
115 
116 	/*
117 	 * The goal is to evict objects and amalgamate space in rough LRU order.
118 	 * Since both active and inactive objects reside on the same list,
119 	 * in a mix of creation and last scanned order, as we process the list
120 	 * we sort it into inactive/active, which keeps the active portion
121 	 * in a rough MRU order.
122 	 *
123 	 * The retirement sequence is thus:
124 	 *   1. Inactive objects (already retired, random order)
125 	 *   2. Active objects (will stall on unbinding, oldest scanned first)
126 	 */
127 	mode = DRM_MM_INSERT_BEST;
128 	if (flags & PIN_HIGH)
129 		mode = DRM_MM_INSERT_HIGH;
130 	if (flags & PIN_MAPPABLE)
131 		mode = DRM_MM_INSERT_LOW;
132 	drm_mm_scan_init_with_range(&scan, &vm->mm,
133 				    min_size, alignment, color,
134 				    start, end, mode);
135 
136 	intel_gt_retire_requests(vm->gt);
137 
138 search_again:
139 	active = NULL;
140 	INIT_LIST_HEAD(&eviction_list);
141 	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
142 		if (vma == active) { /* now seen this vma twice */
143 			if (flags & PIN_NONBLOCK)
144 				break;
145 
146 			active = ERR_PTR(-EAGAIN);
147 		}
148 
149 		/*
150 		 * We keep this list in a rough least-recently scanned order
151 		 * of active elements (inactive elements are cheap to reap).
152 		 * New entries are added to the end, and we move anything we
153 		 * scan to the end. The assumption is that the working set
154 		 * of applications is either steady state (and thanks to the
155 		 * userspace bo cache it almost always is) or volatile and
156 		 * frequently replaced after a frame, which are self-evicting!
157 		 * Given that assumption, the MRU order of the scan list is
158 		 * fairly static, and keeping it in least-recently scan order
159 		 * is suitable.
160 		 *
161 		 * To notice when we complete one full cycle, we record the
162 		 * first active element seen, before moving it to the tail.
163 		 */
164 		if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
165 			if (!active)
166 				active = vma;
167 
168 			list_move_tail(&vma->vm_link, &vm->bound_list);
169 			continue;
170 		}
171 
172 		if (mark_free(&scan, vma, flags, &eviction_list))
173 			goto found;
174 	}
175 
176 	/* Nothing found, clean up and bail out! */
177 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
178 		ret = drm_mm_scan_remove_block(&scan, &vma->node);
179 		BUG_ON(ret);
180 	}
181 
182 	/*
183 	 * Can we unpin some objects such as idle hw contents,
184 	 * or pending flips? But since only the GGTT has global entries
185 	 * such as scanouts, rinbuffers and contexts, we can skip the
186 	 * purge when inspecting per-process local address spaces.
187 	 */
188 	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
189 		return -ENOSPC;
190 
191 	/*
192 	 * Not everything in the GGTT is tracked via VMA using
193 	 * i915_vma_move_to_active(), otherwise we could evict as required
194 	 * with minimal stalling. Instead we are forced to idle the GPU and
195 	 * explicitly retire outstanding requests which will then remove
196 	 * the pinning for active objects such as contexts and ring,
197 	 * enabling us to evict them on the next iteration.
198 	 *
199 	 * To ensure that all user contexts are evictable, we perform
200 	 * a switch to the perma-pinned kernel context. This all also gives
201 	 * us a termination condition, when the last retired context is
202 	 * the kernel's there is no more we can evict.
203 	 */
204 	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
205 		return -EBUSY;
206 
207 	ret = ggtt_flush(vm->gt);
208 	if (ret)
209 		return ret;
210 
211 	cond_resched();
212 
213 	flags |= PIN_NONBLOCK;
214 	goto search_again;
215 
216 found:
217 	/* drm_mm doesn't allow any other other operations while
218 	 * scanning, therefore store to-be-evicted objects on a
219 	 * temporary list and take a reference for all before
220 	 * calling unbind (which may remove the active reference
221 	 * of any of our objects, thus corrupting the list).
222 	 */
223 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
224 		if (drm_mm_scan_remove_block(&scan, &vma->node))
225 			__i915_vma_pin(vma);
226 		else
227 			list_del(&vma->evict_link);
228 	}
229 
230 	/* Unbinding will emit any required flushes */
231 	ret = 0;
232 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
233 		__i915_vma_unpin(vma);
234 		if (ret == 0)
235 			ret = __i915_vma_unbind(vma);
236 	}
237 
238 	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
239 		vma = container_of(node, struct i915_vma, node);
240 
241 		/* If we find any non-objects (!vma), we cannot evict them */
242 		if (vma->node.color != I915_COLOR_UNEVICTABLE)
243 			ret = __i915_vma_unbind(vma);
244 		else
245 			ret = -ENOSPC; /* XXX search failed, try again? */
246 	}
247 
248 	return ret;
249 }
250 
251 /**
252  * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
253  * @vm: address space to evict from
254  * @target: range (and color) to evict for
255  * @flags: additional flags to control the eviction algorithm
256  *
257  * This function will try to evict vmas that overlap the target node.
258  *
259  * To clarify: This is for freeing up virtual address space, not for freeing
260  * memory in e.g. the shrinker.
261  */
262 int i915_gem_evict_for_node(struct i915_address_space *vm,
263 			    struct drm_mm_node *target,
264 			    unsigned int flags)
265 {
266 	LIST_HEAD(eviction_list);
267 	struct drm_mm_node *node;
268 	u64 start = target->start;
269 	u64 end = start + target->size;
270 	struct i915_vma *vma, *next;
271 	int ret = 0;
272 
273 	lockdep_assert_held(&vm->mutex);
274 	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
275 	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
276 
277 	trace_i915_gem_evict_node(vm, target, flags);
278 
279 	/*
280 	 * Retire before we search the active list. Although we have
281 	 * reasonable accuracy in our retirement lists, we may have
282 	 * a stray pin (preventing eviction) that can only be resolved by
283 	 * retiring.
284 	 */
285 	intel_gt_retire_requests(vm->gt);
286 
287 	if (i915_vm_has_cache_coloring(vm)) {
288 		/* Expand search to cover neighbouring guard pages (or lack!) */
289 		if (start)
290 			start -= I915_GTT_PAGE_SIZE;
291 
292 		/* Always look at the page afterwards to avoid the end-of-GTT */
293 		end += I915_GTT_PAGE_SIZE;
294 	}
295 	GEM_BUG_ON(start >= end);
296 
297 	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
298 		/* If we find any non-objects (!vma), we cannot evict them */
299 		if (node->color == I915_COLOR_UNEVICTABLE) {
300 			ret = -ENOSPC;
301 			break;
302 		}
303 
304 		GEM_BUG_ON(!drm_mm_node_allocated(node));
305 		vma = container_of(node, typeof(*vma), node);
306 
307 		/*
308 		 * If we are using coloring to insert guard pages between
309 		 * different cache domains within the address space, we have
310 		 * to check whether the objects on either side of our range
311 		 * abutt and conflict. If they are in conflict, then we evict
312 		 * those as well to make room for our guard pages.
313 		 */
314 		if (i915_vm_has_cache_coloring(vm)) {
315 			if (node->start + node->size == target->start) {
316 				if (node->color == target->color)
317 					continue;
318 			}
319 			if (node->start == target->start + target->size) {
320 				if (node->color == target->color)
321 					continue;
322 			}
323 		}
324 
325 		if (i915_vma_is_pinned(vma)) {
326 			ret = -ENOSPC;
327 			break;
328 		}
329 
330 		if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
331 			ret = -ENOSPC;
332 			break;
333 		}
334 
335 		/*
336 		 * Never show fear in the face of dragons!
337 		 *
338 		 * We cannot directly remove this node from within this
339 		 * iterator and as with i915_gem_evict_something() we employ
340 		 * the vma pin_count in order to prevent the action of
341 		 * unbinding one vma from freeing (by dropping its active
342 		 * reference) another in our eviction list.
343 		 */
344 		__i915_vma_pin(vma);
345 		list_add(&vma->evict_link, &eviction_list);
346 	}
347 
348 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
349 		__i915_vma_unpin(vma);
350 		if (ret == 0)
351 			ret = __i915_vma_unbind(vma);
352 	}
353 
354 	return ret;
355 }
356 
357 /**
358  * i915_gem_evict_vm - Evict all idle vmas from a vm
359  * @vm: Address space to cleanse
360  *
361  * This function evicts all vmas from a vm.
362  *
363  * This is used by the execbuf code as a last-ditch effort to defragment the
364  * address space.
365  *
366  * To clarify: This is for freeing up virtual address space, not for freeing
367  * memory in e.g. the shrinker.
368  */
369 int i915_gem_evict_vm(struct i915_address_space *vm)
370 {
371 	int ret = 0;
372 
373 	lockdep_assert_held(&vm->mutex);
374 	trace_i915_gem_evict_vm(vm);
375 
376 	/* Switch back to the default context in order to unpin
377 	 * the existing context objects. However, such objects only
378 	 * pin themselves inside the global GTT and performing the
379 	 * switch otherwise is ineffective.
380 	 */
381 	if (i915_is_ggtt(vm)) {
382 		ret = ggtt_flush(vm->gt);
383 		if (ret)
384 			return ret;
385 	}
386 
387 	do {
388 		struct i915_vma *vma, *vn;
389 		LIST_HEAD(eviction_list);
390 
391 		list_for_each_entry(vma, &vm->bound_list, vm_link) {
392 			if (i915_vma_is_pinned(vma))
393 				continue;
394 
395 			__i915_vma_pin(vma);
396 			list_add(&vma->evict_link, &eviction_list);
397 		}
398 		if (list_empty(&eviction_list))
399 			break;
400 
401 		ret = 0;
402 		list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
403 			__i915_vma_unpin(vma);
404 			if (ret == 0)
405 				ret = __i915_vma_unbind(vma);
406 			if (ret != -EINTR) /* "Get me out of here!" */
407 				ret = 0;
408 		}
409 	} while (ret == 0);
410 
411 	return ret;
412 }
413 
414 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
415 #include "selftests/i915_gem_evict.c"
416 #endif
417