/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/i915_drm.h>

#include "i915_trace.h"

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
			      unsigned long shrink)
{
	unsigned long flags;

	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags = I915_GEM_OBJECT_UNBIND_ACTIVE;

	if (i915_gem_object_unbind(obj, flags) == 0)
		__i915_gem_object_put_pages(obj);

	return !i915_gem_object_has_pages(obj);
}

static void try_to_writeback(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
		/* fall through */
	case __I915_MADV_PURGED:
		return;
	}

	if (flags & I915_SHRINK_WRITEBACK)
		i915_gem_object_writeback(obj);
}
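
/*
 * Illustrative sketch (not part of the driver): the madv states consumed
 * by try_to_writeback() above are driven from userspace. An object is
 * marked purgeable via the madvise ioctl; once the shrinker has truncated
 * it, the kernel flips it to __I915_MADV_PURGED and a later madvise call
 * reports the loss:
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	// later, arg.retained == 0 => backing storage was already purged
 */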

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g.
 * useful when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;

	/*
	 * When shrinking the active list, we should also consider active
	 * contexts. Active contexts are pinned until they are retired, and
	 * so can not be simply unbound to retire and unpin their pages. To
	 * shrink the contexts, we must wait until the gpu is idle and
	 * completed its switch to the kernel context. In short, we do
	 * not have a good mechanism for idling a specific context.
	 */

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!(shrink & I915_SHRINK_BOUND) &&
			    atomic_read(&obj->bind_count))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			if (unsafe_drop_pages(obj, shrink)) {
				/* May arrive from get_pages on another bo */
				mutex_lock(&obj->mm.lock);
				if (!i915_gem_object_has_pages(obj)) {
					try_to_writeback(obj, shrink);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}

			scanned += obj->base.size >> PAGE_SHIFT;
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}
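
/*
 * Illustrative sketch (not part of the driver): a caller might reap in
 * two passes, first restricting itself to unbound objects (cheap, no
 * device wakeup required) and only then escalating to bound objects.
 * Note the purge_list phase matches any request (its mask is ~0u), so
 * purgeable objects are always considered first:
 *
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(i915, target, NULL, I915_SHRINK_UNBOUND);
 *	if (freed < target)
 *		freed += i915_gem_shrink(i915, target - freed, NULL,
 *					 I915_SHRINK_BOUND |
 *					 I915_SHRINK_UNBOUND);
 */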

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_ACTIVE);
	}

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long num_objects;
	unsigned long count;

	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects worth of pages. That is, we don't want
	 * the shrinker to fire until it is worth the cost of freeing an
	 * entire GEM object.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker.batch =
			max((i915->mm.shrinker.batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}
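
/*
 * Worked example (illustrative, assuming 4 KiB pages): with 1024
 * shrinkable objects totalling 2 GiB, count is 524288 pages and
 * avg = 2 * 524288 / 1024 = 1024 pages. Starting from the batch of
 * 4096 set at registration, the running average converges on avg:
 *
 *	batch = max((4096 + 1024) >> 1, 128) = 2560
 *	batch = max((2560 + 1024) >> 1, 128) = 1792
 *	...
 *
 * settling at 1024, i.e. two average objects' worth of pages, so
 * vmscan only invokes the shrinker in object-sized units.
 */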

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;

	sc->nr_scanned = 0;

	freed = i915_gem_shrink(i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND);
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	return sc->nr_scanned ? freed : SHRINK_STOP;
}
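
/*
 * Illustrative sketch (not part of the driver): the shrinker core in
 * mm/vmscan.c drives the two callbacks above roughly like this (heavily
 * simplified). count_objects() sizes the workload, and scan_objects()
 * is then called in units of shrinker->batch until either the quota is
 * met or we return SHRINK_STOP:
 *
 *	freeable = shrinker->count_objects(shrinker, sc);
 *	while (total_scan >= batch_size) {
 *		sc->nr_to_scan = batch_size;
 *		ret = shrinker->scan_objects(shrinker, sc);
 *		if (ret == SHRINK_STOP)
 *			break;
 *		freed += ret;
 *		total_scan -= sc->nr_scanned;
 *	}
 */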

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
					       I915_SHRINK_ACTIVE |
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/*
	 * Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, %lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* We also want to clear any cached iomaps as they wrap vmap */
	mutex_lock(&i915->ggtt.vm.mutex);
	list_for_each_entry_safe(vma, next,
				 &i915->ggtt.vm.bound_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;

		if (!vma->iomap || i915_vma_is_active(vma))
			continue;

		if (__i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}
	mutex_unlock(&i915->ggtt.vm.mutex);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
	i915->mm.shrinker.batch = 4096;
	WARN_ON(register_shrinker(&i915->mm.shrinker));

	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
	WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
	unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
	bool unlock = false;

	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
		mutex_acquire(&i915->drm.struct_mutex.dep_map,
			      I915_MM_NORMAL, 0, _RET_IP_);
		unlock = true;
	}

	fs_reclaim_acquire(GFP_KERNEL);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);

	if (unlock)
		mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
}
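
/*
 * Illustrative sketch (not part of the driver): any lock that may be
 * taken from our shrinker callbacks can be primed at init time, so
 * lockdep learns the mutex-inside-reclaim ordering immediately instead
 * of on the first real memory-pressure event. The pool below is a
 * hypothetical example:
 *
 *	mutex_init(&pool->lock);
 *	i915_gem_shrinker_taints_mutex(i915, &pool->lock);
 */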

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * We can only be called while the pages are pinned or when
	 * the pages are released. If pinned, we should only be called
	 * from a single caller under controlled conditions; and on release
	 * only one caller may release us. The two paths may never overlap.
	 */
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					      struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	if (!i915_gem_object_is_shrinkable(obj))
		return;

	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.shrink_list);
}

void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.purge_list);
}
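
/*
 * Illustrative sketch (not part of the driver): the shrink_pin counter
 * pairs i915_gem_object_make_unshrinkable() with one of the make_shrinkable
 * variants, bracketing a span during which the pages must stay resident:
 *
 *	i915_gem_object_make_unshrinkable(obj);
 *	// ... obj's pages cannot be reaped by the shrinker here ...
 *	i915_gem_object_make_shrinkable(obj);	// or make_purgeable()
 */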