/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory, there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more frequently than the
 * thrashing pages, but hopefully are used less:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
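 *
 * As a worked example, with purely illustrative numbers: suppose the
 * inactive list holds NR_inactive = 100 pages and the active list
 * NR_active = 400.  A page is evicted while the eviction/activation
 * counter reads E = 1000, and it refaults when a later reading gives
 * R = 1250.  Its refault distance is R - E = 250 and its minimum
 * access distance is NR_inactive + (R - E) = 100 + 250 = 350.  Since
 * 350 <= 100 + 400, the page could have stayed in cache with all
 * slots available; equivalently, since (R - E) = 250 <= NR_active =
 * 400, the page is activated on refault.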
 *
 *
 *		Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *		Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (node->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache radix tree
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY + \
			 NODES_SHIFT +	\
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
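
/*
 * For a feel for the bit budget (the exact split is Kconfig
 * dependent, these numbers are illustrative): on 64-bit, with
 * MEM_CGROUP_ID_SHIFT = 16, an assumed NODES_SHIFT = 6 and two low
 * bits reserved for the radix tree exceptional-entry tag,
 * EVICTION_SHIFT works out to 24 and EVICTION_MASK leaves 40 bits of
 * eviction timestamp.  On 32-bit the same split would leave only 8
 * bits, which is what the bucketing below compensates for.
 */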

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the radix tree
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
{
	eviction >>= bucket_order;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
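
/*
 * A sketch of the entry built above, from most to least significant
 * bits: the bucketed eviction timestamp, MEM_CGROUP_ID_SHIFT bits of
 * memcg ID, NODES_SHIFT bits of node ID, and the radix tree
 * exceptional-entry tag (RADIX_TREE_EXCEPTIONAL_ENTRY) in the low
 * bits, so the page cache can tell shadow entries from page pointers:
 *
 *	+--------------------------+---------+-----+-----+
 *	| eviction >> bucket_order | memcgid | nid | tag |
 *	+--------------------------+---------+-----+-----+
 */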

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp)
{
	unsigned long entry = (unsigned long)shadow;
	int memcgid, nid;

	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
}
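
/*
 * Note that the round trip is exact for the memcg and node IDs, but
 * only bucket-granular for the timestamp: with bucket_order == 2, for
 * instance, an eviction value of 1023 is packed as 255 and unpacked
 * as 1020.
 */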

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(page);
	struct pglist_data *pgdat = page_pgdat(page);
	int memcgid = mem_cgroup_id(memcg);
	unsigned long eviction;
	struct lruvec *lruvec;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(pgdat, memcg);
	eviction = atomic_long_inc_return(&lruvec->inactive_age);
	return pack_shadow(memcgid, pgdat, eviction);
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
	unsigned long refault_distance;
	unsigned long active_file;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	struct pglist_data *pgdat;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !memcg) {
		rcu_read_unlock();
		return false;
	}
	lruvec = mem_cgroup_lruvec(pgdat, memcg);
	refault = atomic_long_read(&lruvec->inactive_age);
	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);

	/*
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases.
	 *
	 * There is a special case: usually, shadow entries have a
	 * short lifetime and are either refaulted or reclaimed along
	 * with the inode before they get too old.  But it is not
	 * impossible for the inactive_age to lap a shadow entry in
	 * the field, which can then result in a false small refault
	 * distance, leading to a false activation should this old
	 * entry actually refault again.  However, earlier kernels
	 * used to deactivate unconditionally with *every* reclaim
	 * invocation for the longest time, so the occasional
	 * inappropriate activation leading to pressure on the active
	 * list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
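
	/*
	 * An illustrative wrap: if EVICTION_MASK were only 8 bits
	 * wide, a page evicted at counter value 250 that refaults
	 * after the counter wrapped around to 20 would yield
	 * (20 - 250) & 0xff == 26, the true distance.
	 */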

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);

	if (refault_distance <= active_file) {
		inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
		rcu_read_unlock();
		return true;
	}
	rcu_read_unlock();
	return false;
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
	atomic_long_inc(&lruvec->inactive_age);
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct radix_tree_node *node, void *private)
{
	struct address_space *mapping = private;

	/* Only regular page cache has shadow entries */
	if (dax_mapping(mapping) || shmem_mapping(mapping))
		return;

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by &mapping->tree_lock.
	 */
	if (node->count && node->count == node->exceptional) {
		if (list_empty(&node->private_list))
			list_lru_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_lru_del(&shadow_nodes, &node->private_list);
	}
}
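
/*
 * For instance (illustrative counts): a node holding three shadow
 * entries and no pages has count == exceptional == 3 and is put on
 * shadow_nodes; once a page is inserted into one of its slots, count
 * becomes 4 while exceptional stays 3, and the node is unlinked
 * again.
 */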

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long cache;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	local_irq_enable();

	/*
	 * Approximate a reasonable limit for the radix tree nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
	 */
	if (sc->memcg) {
		cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
						     LRU_ALL_FILE);
	} else {
		cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
			node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
	}
	max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}
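
/*
 * Spelling out the arithmetic behind the limit above: with
 * RADIX_TREE_MAP_SHIFT == 6, max_nodes is cache / 8, i.e. one tracked
 * node per eight pages of cache.  At seven nodes per page, that is
 * one page of shadow nodes per 56 pages of cache - the ~1.8% of
 * memory mentioned in the comment above.
 */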

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg)
{
	struct address_space *mapping;
	struct radix_tree_node *node;
	unsigned int i;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the mapping->tree_lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
	 * We can then safely transition to the mapping->tree_lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
	mapping = container_of(node->root, struct address_space, page_tree);

	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->exceptional))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->exceptional))
		goto out_invalid;
	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[i]) {
			if (WARN_ON_ONCE(!radix_tree_exceptional_entry(node->slots[i])))
				goto out_invalid;
			if (WARN_ON_ONCE(!node->exceptional))
				goto out_invalid;
			if (WARN_ON_ONCE(!mapping->nrexceptional))
				goto out_invalid;
			node->slots[i] = NULL;
			node->exceptional--;
			node->count--;
			mapping->nrexceptional--;
		}
	}
	if (WARN_ON_ONCE(node->exceptional))
		goto out_invalid;
	inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
	__radix_tree_delete_node(&mapping->page_tree, node,
				 workingset_update_node, mapping);

out_invalid:
	spin_unlock(&mapping->tree_lock);
	ret = LRU_REMOVED_RETRY;
out:
	local_irq_enable();
	cond_resched();
	local_irq_disable();
	spin_lock(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	unsigned long ret;

	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
	local_irq_disable();
	ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
	local_irq_enable();
	return ret;
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
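	/*
	 * An illustrative 32-bit calculation (the exact numbers
	 * depend on the Kconfig-derived EVICTION_SHIFT): with
	 * EVICTION_SHIFT == 21, timestamp_bits is 11; 4G of 4k pages
	 * makes totalram_pages 2^20, so max_order is 20 and
	 * bucket_order becomes 20 - 11 == 9, grouping evictions into
	 * buckets of 512.  On 64-bit, timestamp_bits is large enough
	 * that bucket_order normally stays 0.
	 */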
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);

	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
	if (ret)
		goto err;
	ret = register_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	return 0;
err_list_lru:
	list_lru_destroy(&shadow_nodes);
err:
	return ret;
}
module_init(workingset_init);