/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include "gt/intel_gt_requests.h"

#include "i915_trace.h"

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

static bool drop_pages(struct drm_i915_gem_object *obj,
		       unsigned long shrink, bool trylock_vm)
{
	unsigned long flags;

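	/*
	 * Translate the shrinker control flags into unbind behaviour: allow
	 * unbinding of active VMAs only when shrinking the active set, merely
	 * test for existing bindings when bound objects are excluded, and
	 * take vm->mutex via trylock when the caller cannot block on it.
	 */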
	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags |= I915_GEM_OBJECT_UNBIND_ACTIVE;
	if (!(shrink & I915_SHRINK_BOUND))
		flags |= I915_GEM_OBJECT_UNBIND_TEST;
	if (trylock_vm)
		flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK;

	if (i915_gem_object_unbind(obj, flags) == 0)
		return true;

	return false;
}

static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
	if (obj->ops->shrink) {
		unsigned int shrink_flags = 0;

		if (!(flags & I915_SHRINK_ACTIVE))
			shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;

		if (flags & I915_SHRINK_WRITEBACK)
			shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;

		return obj->ops->shrink(obj, shrink_flags);
	}

	return 0;
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @ww: i915 gem ww acquire ctx, or NULL
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker code
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct i915_gem_ww_ctx *ww,
		struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
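	/*
	 * Scan in two phases: purgeable objects first (they match any control
	 * flags and are the cheapest to drop), then the general shrink list
	 * when bound and/or unbound objects are requested.
	 */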
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;
	int err = 0;

	/* CHV + VTD workaround uses stop_machine(); need to trylock vm->mutex */
	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * When shrinking the active list, we should also consider active
	 * contexts. Active contexts are pinned until they are retired, and
	 * so cannot be simply unbound to retire and unpin their pages. To
	 * shrink the contexts, we must wait until the gpu is idle and
	 * completed its switch to the kernel context. In short, we do
	 * not have a good mechanism for idling a specific context, but
	 * what we can do is give them a kick so that we do not keep idle
	 * contexts around longer than is necessary.
	 */
	if (shrink & I915_SHRINK_ACTIVE)
		/* Retire requests to unpin all idle contexts */
		intel_gt_retire_requests(to_gt(i915));

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at the time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

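			/*
			 * Skip candidates this pass cannot help: objects
			 * without a vmap when I915_SHRINK_VMAPS is set, live
			 * framebuffers unless actives are fair game, and
			 * anything whose pages cannot be released at all.
			 */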
			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!can_release_pages(obj))
				continue;

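			/*
			 * The object may already be on its way to being
			 * freed; only proceed if we can still take a
			 * reference on it.
			 */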
			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			/* May arrive from get_pages on another bo */
			if (!ww) {
				if (!i915_gem_object_trylock(obj, NULL))
					goto skip;
			} else {
				err = i915_gem_object_lock(obj, ww);
				if (err)
					goto skip;
			}

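			/*
			 * Release path: unbind the VMAs, drop the backing
			 * pages and then let the backend write them back or
			 * discard them; pages are only counted as freed when
			 * every step succeeds.
			 */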
			if (drop_pages(obj, shrink, trylock_vm) &&
			    !__i915_gem_object_put_pages(obj) &&
			    !try_to_writeback(obj, shrink))
				count += obj->base.size >> PAGE_SHIFT;

			if (!ww)
				i915_gem_object_unlock(obj);

			scanned += obj->base.size >> PAGE_SHIFT;
skip:
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
			if (err)
				break;
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		if (err)
			break;
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	if (err)
		return err;

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests so that backing storage for active objects can be released as well.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND);
	}

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long num_objects;
	unsigned long count;

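	/*
	 * Report how many pages are currently sitting on the shrink lists;
	 * this is what vmscan uses to decide how much work to hand to the
	 * scan callback.
	 */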
	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects' worth of pages. That is, we don't want
	 * the shrinker to fire until it is worth the cost of freeing an
	 * entire GEM object.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker.batch =
			max((i915->mm.shrinker.batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;

	sc->nr_scanned = 0;

	freed = i915_gem_shrink(NULL, i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND);
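	/*
	 * If the first pass fell short and we are running on behalf of
	 * kswapd, grab a runtime-pm wakeref and retry more aggressively,
	 * including active objects and writeback.
	 */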
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(NULL, i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* We also want to clear any cached iomaps as they wrap vmap */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	list_for_each_entry_safe(vma, next,
				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
		struct drm_i915_gem_object *obj = vma->obj;

		if (!vma->iomap || i915_vma_is_active(vma))
			continue;

		if (!i915_gem_object_trylock(obj, NULL))
			continue;

		if (__i915_vma_unbind(vma) == 0)
			freed_pages += count;

		i915_gem_object_unlock(obj);
	}
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
	i915->mm.shrinker.batch = 4096;
	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
						  "drm-i915_gem"));

	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	drm_WARN_ON(&i915->drm,
		    register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
	drm_WARN_ON(&i915->drm,
		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	drm_WARN_ON(&i915->drm,
		    unregister_oom_notifier(&i915->mm.oom_notifier));
	unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

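	/*
	 * Perform a dummy acquire/release of @mutex under fs_reclaim so that
	 * lockdep learns it may be taken from within the shrinker, and will
	 * flag any allocation under it that could recurse into reclaim.
	 */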
	fs_reclaim_acquire(GFP_KERNEL);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);
}

/**
 * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By
 * default all object types that support shrinking (see IS_SHRINKABLE) will also
 * make the object visible to the shrinker after allocating the system memory
 * pages.
 * @obj: The GEM object.
 *
 * This is typically used for special kernel internal objects that can't be
 * easily processed by the shrinker, like if they are perma-pinned.
 */
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * We can only be called while the pages are pinned or when
	 * the pages are released. If pinned, we should only be called
	 * from a single caller under controlled conditions; and on release
	 * only one caller may release us. The two cases must never overlap.
	 */
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

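	/*
	 * Slow path: the first pin also has to take the object off the
	 * shrink/purge lists and update the shrinker accounting, which
	 * requires the obj_lock.
	 */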
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					       struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	if (!i915_gem_object_is_shrinkable(obj))
		return;

	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

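	/*
	 * Last unpin: move the object onto the requested list and add its
	 * pages back to the shrinker accounting, all under the obj_lock.
	 */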
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

/**
 * __i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list. Objects on this list might be swapped out. Used with
 * WILLNEED objects.
 * @obj: The GEM object.
 *
 * DO NOT USE. This is intended to be called on very special objects that don't
 * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
 * underneath.
 */
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.shrink_list);
}

/**
 * __i915_gem_object_make_purgeable - Move the object to the tail of the
 * purgeable list. Objects on this list might have their backing pages
 * discarded. Used with DONTNEED objects.
 * @obj: The GEM object.
 *
 * DO NOT USE. This is intended to be called on very special objects that don't
 * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
 * underneath.
 */
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.purge_list);
}

/**
 * i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list. Objects on this list might be swapped out. Used with
 * WILLNEED objects.
 * @obj: The GEM object.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_shrinkable(obj);
}

/**
 * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable
 * list. Used with DONTNEED objects. Unlike with shrinkable objects, the
 * shrinker will attempt to discard the backing pages, instead of trying to swap
 * them out.
 * @obj: The GEM object.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_purgeable(obj);
}