/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 *		  Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. So the window is used as
 * a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. Too small a window
 * can cause a lot of false positives, but too big a window will delay the
 * notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiples of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;

/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percents: the higher the value, the higher the number
 * of unsuccessful reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise fast as the
 * reclaimer will try to scan the LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value of vmpressure_level_critical_prio was chosen
 * empirically, but the number, in essence, means that we consider the
 * level critical when the scanning depth is ~10% of the LRU size (vmscan
 * scans 'lru_size >> prio' pages, so it is actually 12.5%, or one
 * eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);
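
/*
 * A worked example of the thresholds above, using illustrative numbers:
 * with scanned = 512 and reclaimed = 128, vmpressure_calc_level() below
 * computes scale = 512 + 128 = 640 and
 *
 *	pressure = (640 - 128 * 640 / 512) * 100 / 640 = 75
 *
 * which is >= vmpressure_level_med (60) but < vmpressure_level_critical
 * (95), so the reported level is "medium". Note also that
 * ilog2(100 / 10) == ilog2(10) == 3, so the critical prio threshold
 * evaluates to 3, i.e. DEF_PRIORITY - 9.
 */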

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
	return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
	struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
	VMPRESSURE_LOW = 0,
	VMPRESSURE_MEDIUM,
	VMPRESSURE_CRITICAL,
	VMPRESSURE_NUM_LEVELS,
};

static const char * const vmpressure_str_levels[] = {
	[VMPRESSURE_LOW] = "low",
	[VMPRESSURE_MEDIUM] = "medium",
	[VMPRESSURE_CRITICAL] = "critical",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
	if (pressure >= vmpressure_level_critical)
		return VMPRESSURE_CRITICAL;
	else if (pressure >= vmpressure_level_med)
		return VMPRESSURE_MEDIUM;
	return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
						    unsigned long reclaimed)
{
	unsigned long scale = scanned + reclaimed;
	unsigned long pressure;

	/*
	 * We calculate the ratio (in percents) of how many pages were
	 * scanned vs. reclaimed in a given time frame (window). Note that
	 * the time is in the VM reclaimer's "ticks", i.e. the number of
	 * pages scanned. This makes it possible to set the desired
	 * reaction time, and serves as a ratelimit.
	 */
	pressure = scale - (reclaimed * scale / scanned);
	pressure = pressure * 100 / scale;

	pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
		 scanned, reclaimed);

	return vmpressure_level(pressure);
}

struct vmpressure_event {
	struct eventfd_ctx *efd;
	enum vmpressure_levels level;
	struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
			     enum vmpressure_levels level)
{
	struct vmpressure_event *ev;
	bool signalled = false;

	mutex_lock(&vmpr->events_lock);

	list_for_each_entry(ev, &vmpr->events, node) {
		if (level >= ev->level) {
			eventfd_signal(ev->efd, 1);
			signalled = true;
		}
	}

	mutex_unlock(&vmpr->events_lock);

	return signalled;
}

static void vmpressure_work_fn(struct work_struct *work)
{
	struct vmpressure *vmpr = work_to_vmpressure(work);
	unsigned long scanned;
	unsigned long reclaimed;
	enum vmpressure_levels level;

	spin_lock(&vmpr->sr_lock);
	/*
	 * Several contexts might be calling vmpressure(), so it is
	 * possible that the work was rescheduled again before the old
	 * work context cleared the counters. In that case we will run
	 * just after the old work returns, but then scanned might be
	 * zero here. The lock is only needed for a consistent read of
	 * the scanned/reclaimed pair; we do not care if vmpr->reclaimed
	 * is otherwise slightly out of sync.
	 */
	scanned = vmpr->tree_scanned;
	if (!scanned) {
		spin_unlock(&vmpr->sr_lock);
		return;
	}

	reclaimed = vmpr->tree_reclaimed;
	vmpr->tree_scanned = 0;
	vmpr->tree_reclaimed = 0;
	spin_unlock(&vmpr->sr_lock);

	level = vmpressure_calc_level(scanned, reclaimed);

	do {
		if (vmpressure_event(vmpr, level))
			break;
		/*
		 * If not handled, propagate the event upward into the
		 * hierarchy.
		 */
	} while ((vmpr = vmpressure_parent(vmpr)));
}
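
/*
 * Delivery semantics, by example (this follows from the
 * "level >= ev->level" check in vmpressure_event() above): an eventfd
 * registered for "low" is signalled on low, medium and critical events,
 * while one registered for "critical" is signalled only when the
 * computed level reaches "critical". An event handled at some level of
 * the hierarchy is not propagated further to the ancestors.
 */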

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @tree: legacy subtree mode
 * @scanned: number of pages scanned
 * @reclaimed: number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
		unsigned long scanned, unsigned long reclaimed)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);

	/*
	 * Here we only want to account pressure that userland is able to
	 * help us with. For example, suppose that the DMA zone is under
	 * pressure; if we notify userland about that kind of pressure,
	 * then it will be mostly a waste as it will trigger unnecessary
	 * freeing of memory by userland (since userland is more likely to
	 * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
	 * is why we include only movable, highmem and FS/IO pages.
	 * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
	 * we account it too.
	 */
	if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
		return;

	/*
	 * If we got here with no pages scanned, then that is an indicator
	 * that the reclaimer was unable to find any shrinkable LRUs at the
	 * current scanning depth. But it does not mean that we should
	 * report the critical pressure, yet. If the scanning priority
	 * (scanning depth) goes too high (deep), we will be notified
	 * through vmpressure_prio(). But so far, keep calm.
	 */
	if (!scanned)
		return;

	if (tree) {
		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->tree_scanned += scanned;
		vmpr->tree_reclaimed += reclaimed;
		spin_unlock(&vmpr->sr_lock);

		if (scanned < vmpressure_win)
			return;
		schedule_work(&vmpr->work);
	} else {
		enum vmpressure_levels level;

		/* For now, no users for root-level efficiency */
		if (!memcg || memcg == root_mem_cgroup)
			return;

		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->scanned += scanned;
		reclaimed = vmpr->reclaimed += reclaimed;
		if (scanned < vmpressure_win) {
			spin_unlock(&vmpr->sr_lock);
			return;
		}
		vmpr->scanned = vmpr->reclaimed = 0;
		spin_unlock(&vmpr->sr_lock);

		level = vmpressure_calc_level(scanned, reclaimed);

		if (level > VMPRESSURE_LOW) {
			/*
			 * Let the socket buffer allocator know that
			 * we are having trouble reclaiming LRU pages.
			 *
			 * For hysteresis, keep the pressure state
			 * asserted for a second, during which
			 * subsequent pressure events can occur.
			 */
			memcg->socket_pressure = jiffies + HZ;
		}
	}
}
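
/*
 * Accumulation example for the tree path above, with illustrative
 * numbers: four calls with scanned = 128 add up to vmpressure_win (512),
 * so the first three calls return early and the fourth schedules
 * vmpr->work, which then evaluates the whole window at once in
 * vmpressure_work_fn().
 */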

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @prio: reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan reclaiming priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
	/*
	 * We only use prio for accounting the critical level. For more
	 * info see the comment for the vmpressure_level_critical_prio
	 * variable above.
	 */
	if (prio > vmpressure_level_critical_prio)
		return;

	/*
	 * OK, the prio is below the threshold, so update the vmpressure
	 * information before the shrinker dives into a long and deep
	 * vmscan pass. Passing scanned = vmpressure_win, reclaimed = 0
	 * to vmpressure() basically means that we signal the 'critical'
	 * level.
	 */
	vmpressure(gfp, memcg, true, vmpressure_win, 0);
}

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg: memcg that is interested in vmpressure notifications
 * @eventfd: eventfd context to link notifications with
 * @args: event arguments (used to set up a pressure level threshold)
 *
 * This function associates the eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a string that denotes the pressure
 * level threshold (one of vmpressure_str_levels, i.e. "low", "medium",
 * or "critical").
 *
 * To be used as a memcg event method.
 */
int vmpressure_register_event(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;
	int level;

	for (level = 0; level < VMPRESSURE_NUM_LEVELS; level++) {
		if (!strcmp(vmpressure_str_levels[level], args))
			break;
	}

	if (level >= VMPRESSURE_NUM_LEVELS)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->efd = eventfd;
	ev->level = level;

	mutex_lock(&vmpr->events_lock);
	list_add(&ev->node, &vmpr->events);
	mutex_unlock(&vmpr->events_lock);

	return 0;
}
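
/*
 * Userspace side, as a minimal sketch based on the cgroup-v1 memory
 * controller interface (memory.pressure_level + cgroup.event_control);
 * the mount point and group path are illustrative, and error handling
 * and includes are omitted:
 *
 *	int efd = eventfd(0, 0);
 *	int pfd = open("/sys/fs/cgroup/memory/grp/memory.pressure_level",
 *		       O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *		       O_WRONLY);
 *	char buf[64];
 *	uint64_t cnt;
 *
 *	snprintf(buf, sizeof(buf), "%d %d medium", efd, pfd);
 *	write(cfd, buf, strlen(buf));
 *	read(efd, &cnt, sizeof(cnt));	   blocks until a "medium"+ event
 */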

/**
 * vmpressure_unregister_event() - Unbind an eventfd from vmpressure
 * @memcg: memcg handle
 * @eventfd: eventfd context that was used to link vmpressure with @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees the internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as a memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ev->efd != eventfd)
			continue;
		list_del(&ev->node);
		kfree(ev);
		break;
	}
	mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr: Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
	spin_lock_init(&vmpr->sr_lock);
	mutex_init(&vmpr->events_lock);
	INIT_LIST_HEAD(&vmpr->events);
	INIT_WORK(&vmpr->work, vmpressure_work_fn);
}

/**
 * vmpressure_cleanup() - Shut down vmpressure control structure
 * @vmpr: Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
	/*
	 * Make sure there is no pending work before the eventfd
	 * infrastructure goes away.
	 */
	flush_work(&vmpr->work);
}
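
/*
 * Lifecycle, as a minimal sketch (the embedding code, which holds the
 * vmpressure structure inside its memcg, is responsible for the pairing):
 *
 *	struct vmpressure vmpr;		   typically embedded in a memcg
 *
 *	vmpressure_init(&vmpr);		   before any vmpressure() calls
 *	...
 *	vmpressure_cleanup(&vmpr);	   before freeing the structure
 */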