/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 *		  Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. So the window is used as
 * a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. Using a small
 * window size can cause a lot of false positives, but too big a window
 * size will delay the notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiple of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;

/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percentages: the higher the value, the more
 * unsuccessful reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise quickly, as
 * the reclaimer will try to scan the LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value of vmpressure_level_critical_prio was chosen
 * empirically; in essence, it means that we consider the level critical
 * when the scanning depth is ~10% of the lru size (vmscan scans
 * 'lru_size >> prio' pages, so it is actually 12.5%, or one eighth).
 * Note that ilog2(100 / 10) = ilog2(10) = 3, so priorities of 3 and
 * below count as critical.
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
	return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
	struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
	VMPRESSURE_LOW = 0,
	VMPRESSURE_MEDIUM,
	VMPRESSURE_CRITICAL,
	VMPRESSURE_NUM_LEVELS,
};

static const char * const vmpressure_str_levels[] = {
	[VMPRESSURE_LOW] = "low",
	[VMPRESSURE_MEDIUM] = "medium",
	[VMPRESSURE_CRITICAL] = "critical",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
	if (pressure >= vmpressure_level_critical)
		return VMPRESSURE_CRITICAL;
	else if (pressure >= vmpressure_level_med)
		return VMPRESSURE_MEDIUM;
	return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
						    unsigned long reclaimed)
{
	unsigned long scale = scanned + reclaimed;
	unsigned long pressure;

	/*
	 * We calculate the ratio (in percent) of how many pages were
	 * scanned vs. reclaimed in a given time frame (window). Note that
	 * the time is in VM reclaimer's "ticks", i.e. the number of pages
	 * scanned. This makes it possible to set the desired reaction
	 * time and serves as a rate limit.
	 */
	pressure = scale - (reclaimed * scale / scanned);
	pressure = pressure * 100 / scale;

	pr_debug("%s: %3lu  (s: %lu  r: %lu)\n", __func__, pressure,
		 scanned, reclaimed);

	return vmpressure_level(pressure);
}
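
/*
 * For example: with scanned = 512 and reclaimed = 256, scale = 768, so
 * the first step yields 768 - (256 * 768 / 512) = 384 and the second
 * 384 * 100 / 768 = 50; 50 is below vmpressure_level_med (60), so this
 * window is reported as "low". Ignoring integer truncation, the two
 * steps reduce to 100 * (scanned - reclaimed) / scanned.
 */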

struct vmpressure_event {
	struct eventfd_ctx *efd;
	enum vmpressure_levels level;
	struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
			     enum vmpressure_levels level)
{
	struct vmpressure_event *ev;
	bool signalled = false;

	mutex_lock(&vmpr->events_lock);

	list_for_each_entry(ev, &vmpr->events, node) {
		if (level >= ev->level) {
			eventfd_signal(ev->efd, 1);
			signalled = true;
		}
	}

	mutex_unlock(&vmpr->events_lock);

	return signalled;
}

static void vmpressure_work_fn(struct work_struct *work)
{
	struct vmpressure *vmpr = work_to_vmpressure(work);
	unsigned long scanned;
	unsigned long reclaimed;
	enum vmpressure_levels level;

	spin_lock(&vmpr->sr_lock);
	/*
	 * Several contexts might be calling vmpressure(), so it is
	 * possible that the work was rescheduled again before the old
	 * work context cleared the counters. In that case we will run
	 * just after the old work returns, but then scanned might be zero
	 * here. No extra locking is needed, since we don't care if
	 * vmpr->tree_reclaimed is in sync.
	 */
	scanned = vmpr->tree_scanned;
	if (!scanned) {
		spin_unlock(&vmpr->sr_lock);
		return;
	}

	reclaimed = vmpr->tree_reclaimed;
	vmpr->tree_scanned = 0;
	vmpr->tree_reclaimed = 0;
	spin_unlock(&vmpr->sr_lock);

	level = vmpressure_calc_level(scanned, reclaimed);

	do {
		if (vmpressure_event(vmpr, level))
			break;
		/*
		 * If not handled, propagate the event upward into the
		 * hierarchy.
		 */
	} while ((vmpr = vmpressure_parent(vmpr)));
}

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @tree:	legacy subtree mode
 * @scanned:	number of pages scanned
 * @reclaimed:	number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
		unsigned long scanned, unsigned long reclaimed)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);

	/*
	 * Here we only want to account pressure that userland is able to
	 * help us with. For example, suppose that the DMA zone is under
	 * pressure; if we notify userland about that kind of pressure,
	 * then it will be mostly a waste as it will trigger unnecessary
	 * freeing of memory by userland (since userland is more likely to
	 * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
	 * is why we include only movable, highmem and FS/IO pages.
	 * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
	 * we account it too.
	 */
	if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
		return;

	/*
	 * If we got here with no pages scanned, then that is an indicator
	 * that the reclaimer was unable to find any shrinkable LRUs at the
	 * current scanning depth. That does not mean we should report
	 * critical pressure yet: if the scanning priority (scanning
	 * depth) goes too high (deep), we will be notified through
	 * vmpressure_prio(). But so far, keep calm.
	 */
	if (!scanned)
		return;

	if (tree) {
		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->tree_scanned += scanned;
		vmpr->tree_reclaimed += reclaimed;
		spin_unlock(&vmpr->sr_lock);

		if (scanned < vmpressure_win)
			return;
		schedule_work(&vmpr->work);
	} else {
		enum vmpressure_levels level;

		/* For now, no users for root-level efficiency */
		if (!memcg || memcg == root_mem_cgroup)
			return;

		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->scanned += scanned;
		reclaimed = vmpr->reclaimed += reclaimed;
		if (scanned < vmpressure_win) {
			spin_unlock(&vmpr->sr_lock);
			return;
		}
		vmpr->scanned = vmpr->reclaimed = 0;
		spin_unlock(&vmpr->sr_lock);

		level = vmpressure_calc_level(scanned, reclaimed);

		if (level > VMPRESSURE_LOW) {
			/*
			 * Let the socket buffer allocator know that
			 * we are having trouble reclaiming LRU pages.
			 *
			 * For hysteresis, keep the pressure state
			 * asserted for a second, during which subsequent
			 * pressure events can occur.
			 */
			memcg->socket_pressure = jiffies + HZ;
		}
	}
}
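
/*
 * For context, a sketch of how the reclaim path is expected to call the
 * function above (the exact call sites live in mm/vmscan.c and vary by
 * kernel version; the deltas are the pages scanned/reclaimed during one
 * reclaim pass):
 *
 *	vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
 *		   sc->nr_scanned - nr_scanned,
 *		   sc->nr_reclaimed - nr_reclaimed);
 */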

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @prio:	reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan reclaim priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
	/*
	 * We only use prio for accounting the critical level. For more
	 * info see the comment for the vmpressure_level_critical_prio
	 * variable above.
	 */
	if (prio > vmpressure_level_critical_prio)
		return;

	/*
	 * OK, the prio is below the threshold; update the vmpressure
	 * information before the reclaimer dives into a long, deep scan.
	 * Passing scanned = vmpressure_win, reclaimed = 0 to vmpressure()
	 * basically means that we signal the 'critical' level.
	 */
	vmpressure(gfp, memcg, true, vmpressure_win, 0);
}
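
/*
 * For example: assuming no other scan results were accumulated in the
 * current window, the call above feeds scanned = 512 (vmpressure_win)
 * and reclaimed = 0 into vmpressure_calc_level(), giving scale = 512
 * and pressure = (512 - 0) * 100 / 512 = 100, which is >=
 * vmpressure_level_critical (95), i.e. a "critical" event.
 */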

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg:	memcg that is interested in vmpressure notifications
 * @eventfd:	eventfd context to link notifications with
 * @args:	event arguments (used to set up a pressure level threshold)
 *
 * This function associates eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a string that denotes a pressure level
 * threshold (one of vmpressure_str_levels, i.e. "low", "medium", or
 * "critical").
 *
 * To be used as memcg event method.
 */
int vmpressure_register_event(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;
	int level;

	for (level = 0; level < VMPRESSURE_NUM_LEVELS; level++) {
		if (!strcmp(vmpressure_str_levels[level], args))
			break;
	}

	if (level >= VMPRESSURE_NUM_LEVELS)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->efd = eventfd;
	ev->level = level;

	mutex_lock(&vmpr->events_lock);
	list_add(&ev->node, &vmpr->events);
	mutex_unlock(&vmpr->events_lock);

	return 0;
}
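
/*
 * A minimal userspace sketch of the documented cgroup-v1 registration
 * sequence (error handling omitted; the cgroup path "mygroup" is just a
 * placeholder):
 *
 *	#include <sys/eventfd.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[64];
 *		uint64_t cnt;
 *		int efd = eventfd(0, 0);
 *		int pfd = open("/sys/fs/cgroup/memory/mygroup/"
 *			       "memory.pressure_level", O_RDONLY);
 *		int cfd = open("/sys/fs/cgroup/memory/mygroup/"
 *			       "cgroup.event_control", O_WRONLY);
 *
 *		snprintf(buf, sizeof(buf), "%d %d low", efd, pfd);
 *		write(cfd, buf, strlen(buf));	// registers the event
 *		read(efd, &cnt, sizeof(cnt));	// blocks until "low" or above
 *		return 0;
 *	}
 */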

/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @memcg:	memcg handle
 * @eventfd:	eventfd context that was used to link vmpressure with the @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ev->efd != eventfd)
			continue;
		list_del(&ev->node);
		kfree(ev);
		break;
	}
	mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr:	Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
	spin_lock_init(&vmpr->sr_lock);
	mutex_init(&vmpr->events_lock);
	INIT_LIST_HEAD(&vmpr->events);
	INIT_WORK(&vmpr->work, vmpressure_work_fn);
}
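
/*
 * For context, a sketch of the intended embedding (the memory controller
 * embeds one vmpressure structure per cgroup and initializes it when the
 * group is created; field layout abbreviated):
 *
 *	struct mem_cgroup {
 *		...
 *		struct vmpressure vmpressure;
 *		...
 *	};
 *
 *	vmpressure_init(&memcg->vmpressure);
 */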

/**
 * vmpressure_cleanup() - shuts down vmpressure control structure
 * @vmpr:	Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
	/*
	 * Make sure there is no pending work before eventfd infrastructure
	 * goes away.
	 */
	flush_work(&vmpr->work);
}
423