// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Disabled by default for now, until it has had more testing on the
 * different iommu combinations that can be paired with the driver:
 */
static bool enable_eviction = false;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);

static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned count = priv->shrinkable_count;

	if (can_swap())
		count += priv->evictable_count;

	return count;
}

static bool
purge(struct msm_gem_object *msm_obj)
{
	if (!is_purgeable(msm_obj))
		return false;

	/*
	 * This will move the obj out of still_in_list to
	 * the purged list
	 */
	msm_gem_purge(&msm_obj->base);

	return true;
}

static bool
evict(struct msm_gem_object *msm_obj)
{
	if (is_unevictable(msm_obj))
		return false;

	msm_gem_evict(&msm_obj->base);

	return true;
}

/*
 * Walk @list, applying @shrink to each object until @nr_to_scan pages have
 * been reclaimed or the list is exhausted.  Objects that are visited but
 * not shrunk are kept (via still_in_list) so they can be revisited on a
 * later pass.
 */
static unsigned long
scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
		bool (*shrink)(struct msm_gem_object *msm_obj))
{
	unsigned freed = 0;
	struct list_head still_in_list;

	INIT_LIST_HEAD(&still_in_list);

	mutex_lock(&priv->mm_lock);

	while (freed < nr_to_scan) {
		struct msm_gem_object *msm_obj = list_first_entry_or_null(
				list, typeof(*msm_obj), mm_list);

		if (!msm_obj)
			break;

		list_move_tail(&msm_obj->mm_list, &still_in_list);

		/*
		 * If it is in the process of being freed, msm_gem_free_object
		 * can be blocked on mm_lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&msm_obj->base.refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop mm_lock for the
		 * rest of the loop body, to reduce contention with the
		 * retire_submit path (which could make more objects purgeable)
		 */

		mutex_unlock(&priv->mm_lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit the shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!msm_gem_trylock(&msm_obj->base))
			goto tail;

		if (shrink(msm_obj))
			freed += msm_obj->base.size >> PAGE_SHIFT;

		msm_gem_unlock(&msm_obj->base);

tail:
		drm_gem_object_put(&msm_obj->base);
		mutex_lock(&priv->mm_lock);
	}

	list_splice_tail(&still_in_list, list);
	mutex_unlock(&priv->mm_lock);

	return freed;
}
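/*
 * For context (illustrative, userspace side; not part of this file):
 * objects typically land on the dontneed list, and so become candidates
 * for purge() above, when userspace marks them purgeable through the MSM
 * madvise ioctl, roughly:
 *
 *	struct drm_msm_gem_madvise req = {
 *		.handle = bo_handle,		// some GEM handle
 *		.madv = MSM_MADV_DONTNEED,
 *	};
 *	drmCommandWriteRead(fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
 *
 * Marking the buffer MSM_MADV_WILLNEED again (and checking req.retained
 * to see whether the pages survived) is expected before reusing it.
 */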
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned long freed;

	freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);

	if (freed > 0)
		trace_msm_gem_purge(freed << PAGE_SHIFT);

	if (can_swap() && freed < sc->nr_to_scan) {
		unsigned long evicted = scan(priv, sc->nr_to_scan - freed,
				&priv->inactive_willneed, evict);

		if (evicted > 0)
			trace_msm_gem_evict(evicted << PAGE_SHIFT);

		freed += evicted;
	}

	return (freed > 0) ? freed : SHRINK_STOP;
}

#ifdef CONFIG_DEBUG_FS
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	/* unsigned long, so SHRINK_STOP (~0UL) is not truncated: */
	unsigned long ret;

	fs_reclaim_acquire(GFP_KERNEL);
	ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif
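/*
 * A convenient way to exercise the scan path during bring-up is the
 * debugfs "shrink" file that the driver exposes when CONFIG_DEBUG_FS is
 * set (the exact path depends on the DRM card number), e.g.:
 *
 *	echo 32 > /sys/kernel/debug/dri/0/shrink	# try to reclaim 32 pages
 */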
/* Since we don't know any better, let's bail after a few, and if
 * necessary the shrinker will be invoked again.  Seems better than
 * unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;

static bool
vmap_shrink(struct msm_gem_object *msm_obj)
{
	if (!is_vunmapable(msm_obj))
		return false;

	msm_gem_vunmap(&msm_obj->base);

	return true;
}

/*
 * Called via priv->vmap_notifier when vmalloc address space is running
 * low: unmap a limited number of kernel-side vmap'd buffers to free some
 * up, and report how many were unmapped through @ptr.
 */
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct list_head *mm_lists[] = {
		&priv->inactive_dontneed,
		&priv->inactive_willneed,
		priv->gpu ? &priv->gpu->active_list : NULL,
		NULL,
	};
	unsigned idx, unmapped = 0;

	for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += scan(priv, vmap_shrink_limit - unmapped,
				mm_lists[idx], vmap_shrink);
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* nr_deferred is only allocated if register_shrinker() succeeded: */
	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}
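/*
 * Usage sketch, for orientation (the actual call sites live elsewhere in
 * the driver, in msm_drv.c):
 *
 *	msm_gem_shrinker_init(ddev);	// on driver load, after priv setup
 *	...
 *	msm_gem_shrinker_cleanup(ddev);	// on driver teardown
 */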