// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Eviction (swappable GEM buffers) is enabled by default, but can be
 * disabled at runtime for testing on the different iommu combinations
 * that can be paired with the driver:
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);

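/* Eviction is only possible when the module param is enabled and there is
 * actually swap space available to move pages out to.
 */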
static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}

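/* Only block (wait for idle) when the reclaim context permits direct
 * reclaim; otherwise the shrinker must not sleep.
 */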
static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}

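/* Report how many objects are potentially reclaimable: the dontneed
 * (purgeable) LRU, plus the willneed (evictable) LRU when swap is
 * available.
 */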
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned count = priv->lru.dontneed.count;

	if (can_swap())
		count += priv->lru.willneed.count;

	return count;
}

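/* Drop the backing pages of an object that is purgeable and not active
 * on the GPU.
 */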
static bool
purge(struct drm_gem_object *obj)
{
	if (!is_purgeable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_purge(obj);

	return true;
}

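/* Evict the backing pages of an object that is evictable and not active
 * on the GPU, so they can be swapped out.
 */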
static bool
evict(struct drm_gem_object *obj)
{
	if (is_unevictable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_evict(obj);

	return true;
}

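/* Wait (with a bounded timeout) for any pending GPU access to the object
 * to complete, returning true if the object became idle.
 */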
static bool
wait_for_idle(struct drm_gem_object *obj)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(true);

	return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
}

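/* More aggressive variant of purge(): wait for an active object to become
 * idle before purging it.
 */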
static bool
active_purge(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return purge(obj);
}

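/* More aggressive variant of evict(): wait for an active object to become
 * idle before evicting it.
 */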
static bool
active_evict(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return evict(obj);
}

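/* Scan the LRUs in stages, cheapest first: purge idle dontneed objects,
 * then evict idle willneed objects (if swap is available), and only fall
 * back to waiting on active objects when the reclaim context allows
 * blocking.
 */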
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge,        true },
		{ &priv->lru.willneed, evict,        can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

	return (freed > 0) ? freed : SHRINK_STOP;
}

#ifdef CONFIG_DEBUG_FS
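/* Debugfs hook to force a shrinker scan of up to @nr_to_scan objects,
 * wrapped in fs_reclaim annotations so it is treated as a reclaim context.
 */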
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	int ret;

	fs_reclaim_acquire(GFP_KERNEL);
	ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif

/* Since we don't know any better, let's bail after unmapping a few
 * objects; if necessary the shrinker will be invoked again.  Seems
 * better than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;

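/* Unmap the kernel vmap of an object, if it can safely be unmapped. */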
static bool
vmap_shrink(struct drm_gem_object *obj)
{
	if (!is_vunmapable(to_msm_bo(obj)))
		return false;

	msm_gem_vunmap(obj);

	return true;
}

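/* vmap-purge notifier callback: walk all of the LRUs and unmap kernel
 * vmaps until the per-invocation limit is reached, reporting the number
 * unmapped back through @ptr.
 */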
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_gem_lru *lrus[] = {
		&priv->lru.dontneed,
		&priv->lru.willneed,
		&priv->lru.pinned,
		NULL,
	};
	unsigned idx, unmapped = 0;

	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += drm_gem_lru_scan(lrus[idx],
					     vmap_shrink_limit - unmapped,
					     vmap_shrink);
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}