/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/i915_drm.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_trace.h"

I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)

static int ggtt_flush(struct intel_gt *gt)
{
	/*
	 * Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}
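
/*
 * For orientation, a minimal sketch of the drm_mm scan protocol that
 * mark_free() and i915_gem_evict_something() follow (illustrative only,
 * simplified and not code from this file): every node added to a scan
 * via drm_mm_scan_add_block() must be handed back with
 * drm_mm_scan_remove_block() before any other drm_mm operation, which
 * is why eviction candidates are parked on a temporary unwind list:
 *
 *	struct drm_mm_scan scan;
 *	LIST_HEAD(unwind);
 *	bool found = false;
 *
 *	drm_mm_scan_init_with_range(&scan, &vm->mm, size, align, color,
 *				    start, end, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(vma, &vm->bound_list, vm_link) {
 *		list_add(&vma->evict_link, &unwind);
 *		found = drm_mm_scan_add_block(&scan, &vma->node);
 *		if (found)
 *			break;
 *	}
 *	list_for_each_entry_safe(vma, next, &unwind, evict_link)
 *		drm_mm_scan_remove_block(&scan, &vma->node);
 */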
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @color: color for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned long color,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	struct i915_vma *active;
	int ret;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in rough LRU order.
	 * Since both active and inactive objects reside on the same list,
	 * in a mix of creation and last scanned order, as we process the list
	 * we sort it into inactive/active, which keeps the active portion
	 * in a rough MRU order.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired, random order)
	 * 2. Active objects (will stall on unbinding, oldest scanned first)
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, color,
				    start, end, mode);

	intel_gt_retire_requests(vm->gt);

search_again:
	active = NULL;
	INIT_LIST_HEAD(&eviction_list);
	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
		/*
		 * We keep this list in a rough least-recently scanned order
		 * of active elements (inactive elements are cheap to reap).
		 * New entries are added to the end, and we move anything we
		 * scan to the end. The assumption is that the working set
		 * of applications is either steady state (and thanks to the
		 * userspace bo cache it almost always is) or volatile and
		 * frequently replaced after a frame, which are self-evicting!
		 * Given that assumption, the MRU order of the scan list is
		 * fairly static, and keeping it in least-recently scan order
		 * is suitable.
		 *
		 * To notice when we complete one full cycle, we record the
		 * first active element seen, before moving it to the tail.
		 */
		if (i915_vma_is_active(vma)) {
			if (vma == active) {
				if (flags & PIN_NONBLOCK)
					break;

				active = ERR_PTR(-EAGAIN);
			}

			if (active != ERR_PTR(-EAGAIN)) {
				if (!active)
					active = vma;

				list_move_tail(&vma->vm_link, &vm->bound_list);
				continue;
			}
		}

		if (mark_free(&scan, vma, flags, &eviction_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/*
	 * Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	/*
	 * Not everything in the GGTT is tracked via VMA using
	 * i915_vma_move_to_active(), otherwise we could evict as required
	 * with minimal stalling. Instead we are forced to idle the GPU and
	 * explicitly retire outstanding requests which will then remove
	 * the pinning for active objects such as contexts and rings,
	 * enabling us to evict them on the next iteration.
	 *
	 * To ensure that all user contexts are evictable, we perform
	 * a switch to the perma-pinned kernel context. This also gives
	 * us a termination condition: when the last retired context is
	 * the kernel's, there is nothing more we can evict.
	 */
	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
		return -EBUSY;

	ret = ggtt_flush(vm->gt);
	if (ret)
		return ret;

	cond_resched();

	flags |= PIN_NONBLOCK;
	goto search_again;

found:
	/*
	 * drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->evict_link);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = __i915_vma_unbind(vma);
	}

	return ret;
}
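
/*
 * A hypothetical caller sketch (the error handling here is an assumption
 * for illustration, not this file's API): per the kernel-doc above, the
 * binding code probes for a free hole first and only falls back to
 * eviction on -ENOSPC, retrying the insertion once space has been made:
 *
 *	err = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
 *					  size, align, color,
 *					  start, end, mode);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_something(vm, size, align, color,
 *					       start, end, flags);
 *		if (err == 0)
 *			err = drm_mm_insert_node_in_range(&vm->mm,
 *							  &vma->node,
 *							  size, align, color,
 *							  start, end, mode);
 *	}
 */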
/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/*
	 * Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	intel_gt_retire_requests(vm->gt);

	if (i915_vm_has_cache_coloring(vm)) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = container_of(node, typeof(*vma), node);

		/*
		 * If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (i915_vm_has_cache_coloring(vm)) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_flags &&
			    *vma->exec_flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/*
		 * Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
	}

	return ret;
}
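
/*
 * A hypothetical caller sketch (illustrative, with assumed error
 * handling): when reserving a node at a fixed offset, a caller can fall
 * back to evicting whatever overlaps the target range and then retry
 * the reservation:
 *
 *	err = drm_mm_reserve_node(&vm->mm, node);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_for_node(vm, node, flags);
 *		if (err == 0)
 *			err = drm_mm_reserve_node(&vm->mm, node);
 *	}
 */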
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict_vm(vm);

	/*
	 * Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT and performing the
	 * switch otherwise is ineffective.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm->gt);
		if (ret)
			return ret;
	}

	INIT_LIST_HEAD(&eviction_list);
	list_for_each_entry(vma, &vm->bound_list, vm_link) {
		if (i915_vma_is_pinned(vma))
			continue;

		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
	}
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif
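
/*
 * A sketch of how the igt_evict_ctl knob above is intended to be used by
 * the selftests (pattern only, not the exact test code): with
 * fail_if_busy set, i915_gem_evict_something() reports -EBUSY instead of
 * stalling on the GPU, letting a test assert that eviction would have
 * blocked:
 *
 *	igt_evict_ctl.fail_if_busy = true;
 *	err = i915_gem_evict_something(&ggtt->vm, size, align, color,
 *				       start, end, flags);
 *	igt_evict_ctl.fail_if_busy = false;
 */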