// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Default disabled for now until it has some more testing on the different
 * iommu combinations that can be paired with the driver:
 */
static bool enable_eviction = false;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
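
/*
 * Note: with the driver built as the "msm" module, the knob can be
 * flipped at runtime through sysfs (path assumed from standard
 * module_param behavior):
 *
 *	echo Y > /sys/module/msm/parameters/enable_eviction
 */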

static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned count = priv->shrinkable_count;

	if (can_swap())
		count += priv->evictable_count;

	return count;
}
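
/*
 * The count returned above is what the shrinker core uses (together with
 * sc->nr_to_scan) to size its scan batches; if it is zero, the core skips
 * calling the scan callback entirely.
 */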

static bool
purge(struct msm_gem_object *msm_obj)
{
	if (!is_purgeable(msm_obj))
		return false;

	/*
	 * This will move the obj out of still_in_list to
	 * the purged list
	 */
	msm_gem_purge(&msm_obj->base);

	return true;
}

static bool
evict(struct msm_gem_object *msm_obj)
{
	if (is_unevictable(msm_obj))
		return false;

	msm_gem_evict(&msm_obj->base);

	return true;
}
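
/*
 * purge() discards the backing pages outright, while evict() merely
 * unpins them so they can be swapped out; the contents survive.
 * Objects become purgeable once userspace marks them MSM_MADV_DONTNEED
 * through the MSM_GEM_MADVISE ioctl.  A rough userspace sketch (the
 * fd and bo_handle here are hypothetical):
 *
 *	struct drm_msm_gem_madvise req = {
 *		.handle = bo_handle,
 *		.madv = MSM_MADV_DONTNEED,
 *	};
 *	drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
 */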

static unsigned long
scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
		bool (*shrink)(struct msm_gem_object *msm_obj))
{
	unsigned freed = 0;
	struct list_head still_in_list;

	INIT_LIST_HEAD(&still_in_list);

	mutex_lock(&priv->mm_lock);

	while (freed < nr_to_scan) {
		struct msm_gem_object *msm_obj = list_first_entry_or_null(
				list, typeof(*msm_obj), mm_list);

		if (!msm_obj)
			break;

		list_move_tail(&msm_obj->mm_list, &still_in_list);

		/*
		 * If it is in the process of being freed, msm_gem_free_object
		 * can be blocked on mm_lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&msm_obj->base.refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop mm_lock for the
		 * rest of the loop body, to reduce contention with the
		 * retire_submit path (which could make more objects purgeable)
		 */

		mutex_unlock(&priv->mm_lock);

		/*
		 * Note that this still needs to be a trylock, since we can
		 * hit the shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!msm_gem_trylock(&msm_obj->base))
			goto tail;

		if (shrink(msm_obj))
			freed += msm_obj->base.size >> PAGE_SHIFT;

		msm_gem_unlock(&msm_obj->base);

tail:
		drm_gem_object_put(&msm_obj->base);
		mutex_lock(&priv->mm_lock);
	}

	list_splice_tail(&still_in_list, list);
	mutex_unlock(&priv->mm_lock);

	return freed;
}
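
/*
 * Objects that were inspected (shrunk or not) accumulate on still_in_list
 * and are spliced back onto the tail afterwards, so an object that cannot
 * be shrunk is not revisited within this pass and the loop is guaranteed
 * to make forward progress.
 */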

static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned long freed;

	freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);

	if (freed > 0)
		trace_msm_gem_purge(freed << PAGE_SHIFT);

	if (can_swap() && freed < sc->nr_to_scan) {
		unsigned long evicted = scan(priv, sc->nr_to_scan - freed,
				&priv->inactive_willneed, evict);

		if (evicted > 0)
			trace_msm_gem_evict(evicted << PAGE_SHIFT);

		freed += evicted;
	}

	return (freed > 0) ? freed : SHRINK_STOP;
}
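
/*
 * Returning SHRINK_STOP (rather than zero) tells the shrinker core that
 * no progress can be made and it should stop calling back into this
 * shrinker for the current reclaim attempt.
 */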

/* Since we don't know any better, let's bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;

static bool
vmap_shrink(struct msm_gem_object *msm_obj)
{
	if (!is_vunmapable(msm_obj))
		return false;

	msm_gem_vunmap(&msm_obj->base);

	return true;
}

static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct list_head *mm_lists[] = {
		&priv->inactive_dontneed,
		&priv->inactive_willneed,
		priv->gpu ? &priv->gpu->active_list : NULL,
		NULL,
	};
	unsigned idx, unmapped = 0;

	for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += scan(priv, vmap_shrink_limit - unmapped,
				mm_lists[idx], vmap_shrink);
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}
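
/*
 * The vmap purge notifier chain is invoked from the vmalloc allocator
 * when it fails to find address space, giving drivers a chance to
 * release vmaps before the allocation is retried; ptr points at the
 * caller's "freed" counter, which is why it is accumulated above.
 */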

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
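
/*
 * msm_gem_shrinker_init() is expected to be called once from the
 * driver's load path (msm_drm_init() in msm_drv.c, as of this writing),
 * before userspace can create GEM objects.
 */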

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* nr_deferred is allocated by register_shrinker(), so a non-NULL
	 * value tells us registration actually succeeded and there is
	 * something to tear down.
	 */
	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}