// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */
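
/*
 * Illustrative sketch only, added for exposition and not used by the kernel;
 * the helper name and all numbers are invented. It restates the activation
 * test derived above: say a page is evicted while the eviction/activation
 * counter reads E = 1000 and it refaults when the counter reads R = 1040.
 * The refault distance is R - E = 40; with 50 pages on the active list,
 * 40 <= 50 holds, so the page would have stayed resident had the inactive
 * list been 40 slots larger, and it is activated on refault.
 */
static bool __maybe_unused refault_example_should_activate(unsigned long refault,
							   unsigned long eviction,
							   unsigned long nr_active)
{
	/* NR_inactive + (R - E) <= NR_inactive + NR_active  <=>  (R - E) <= NR_active */
	return refault - eviction <= nr_active;
}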

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}
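
/*
 * Illustrative sketch only, added for exposition and not called anywhere in
 * the kernel; the helper name and the values are invented. It shows that
 * pack_shadow() and unpack_shadow() round-trip, except that the low
 * bucket_order bits of the eviction counter are shaved off for storage.
 */
static void __maybe_unused shadow_pack_unpack_example(pg_data_t *pgdat)
{
	unsigned long eviction_out;
	pg_data_t *pgdat_out;
	bool workingset_out;
	int memcgid_out;
	void *shadow;

	shadow = pack_shadow(42, pgdat, 12345, true);
	unpack_shadow(shadow, &memcgid_out, &pgdat_out, &eviction_out,
		      &workingset_out);
	/*
	 * Now memcgid_out == 42, pgdat_out == pgdat, workingset_out == true,
	 * and eviction_out == 12345 with the low bucket_order bits cleared.
	 */
}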

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	workingset_age_nonresident(lruvec, hpage_nr_pages(page));
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the page cache. Whether
	 * cache can compete with anon or not depends on having swap.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
	}
	if (refault_distance > workingset_size)
		goto out;

	SetPageActive(page);
	workingset_age_nonresident(lruvec, hpage_nr_pages(page));
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		spin_lock_irq(&page_pgdat(page)->lru_lock);
		lru_note_cost_page(page);
		spin_unlock_irq(&page_pgdat(page)->lru_lock);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
	workingset_age_nonresident(lruvec, hpage_nr_pages(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE);
		pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE);
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (!nodes)
		return SHRINK_EMPTY;

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}
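
/*
 * Worked example for the limit above, with illustrative numbers only:
 * assuming 4K pages, XA_CHUNK_SHIFT == 6 (64 slots per node) and roughly
 * 7 xa_nodes per page as quoted in the comment, 1 GiB of memory is
 * 262144 pages, so max_nodes = 262144 >> 3 = 32768. Those nodes can hold
 * up to 32768 * 64 = ~2 million shadow entries while occupying about
 * 32768 * (4096 / 7) bytes = ~18 MiB, i.e. the ~1.8% of memory mentioned
 * in count_shadow_nodes().
 */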

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	XA_STATE(xas, node->array, 0);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_slab_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
	xas.xa_offset = node->offset;
	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
	xas_set_update(&xas, workingset_update_node);
	/*
	 * We could store a shadow entry here which was the minimum of the
	 * shadow entries we were tracking ...
	 */
	xas_store(&xas, NULL);
	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);
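
/*
 * Worked example for the bucket_order calculation above, with illustrative,
 * config-dependent numbers: on a 32-bit build with MEM_CGROUP_ID_SHIFT == 16
 * and NODES_SHIFT == 0, EVICTION_SHIFT is (32 - 31) + 1 + 0 + 16 = 18, which
 * leaves timestamp_bits = 32 - 18 = 14. With 4 GiB of RAM (2^20 4K pages),
 * max_order = 20, so bucket_order becomes 20 - 14 = 6 and evictions are
 * grouped into buckets of 64 counter ticks. On 64-bit, timestamp_bits is
 * large enough that bucket_order normally stays 0.
 */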