/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *              Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *              Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
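 *
 * As an illustration with made-up numbers: if the eviction/activation
 * counter read E = 1000 when a page was evicted and R = 1300 when it
 * refaults, the refault distance is R - E = 300. With NR_active = 500,
 * the page's minimum access distance of NR_inactive + 300 fits within
 * NR_inactive + NR_active, so the refaulting page is worth activating;
 * with NR_active = 200 it is not.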
 *
 *
 *              Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *              Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (lruvec->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache radix tree
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT  (RADIX_TREE_EXCEPTIONAL_ENTRY + \
                         NODES_SHIFT + \
                         MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK   (~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the radix tree
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
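 *
 * As a made-up example: if EVICTION_SHIFT leaves 36 bits for the
 * timestamp but the machine has 2^38 pages of memory, bucket_order
 * ends up as 2 (see workingset_init()) and evictions are grouped
 * into buckets of 2^2 = 4, trading timestamp resolution for covering
 * the full range of actionable refault distances.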
 */
static unsigned int bucket_order __read_mostly;

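/*
 * Sketch of the shadow entry layout produced by pack_shadow(), from
 * most to least significant bits (the exact field widths depend on
 * NODES_SHIFT and MEM_CGROUP_ID_SHIFT in the configuration):
 *
 *   | eviction >> bucket_order | memcg ID | node ID | exceptional tag |
 */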
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
{
        eviction >>= bucket_order;
        eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
        eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
        eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

        return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
                          unsigned long *evictionp)
{
        unsigned long entry = (unsigned long)shadow;
        int memcgid, nid;

        entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
        nid = entry & ((1UL << NODES_SHIFT) - 1);
        entry >>= NODES_SHIFT;
        memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
        entry >>= MEM_CGROUP_ID_SHIFT;

        *memcgidp = memcgid;
        *pgdat = NODE_DATA(nid);
        *evictionp = entry << bucket_order;
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
        struct mem_cgroup *memcg = page_memcg(page);
        struct pglist_data *pgdat = page_pgdat(page);
        int memcgid = mem_cgroup_id(memcg);
        unsigned long eviction;
        struct lruvec *lruvec;

        /* Page is fully exclusive and pins page->mem_cgroup */
        VM_BUG_ON_PAGE(PageLRU(page), page);
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);

        lruvec = mem_cgroup_lruvec(pgdat, memcg);
        eviction = atomic_long_inc_return(&lruvec->inactive_age);
        return pack_shadow(memcgid, pgdat, eviction);
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
        unsigned long refault_distance;
        unsigned long active_file;
        struct mem_cgroup *memcg;
        unsigned long eviction;
        struct lruvec *lruvec;
        unsigned long refault;
        struct pglist_data *pgdat;
        int memcgid;

        unpack_shadow(shadow, &memcgid, &pgdat, &eviction);

        rcu_read_lock();
        /*
         * Look up the memcg associated with the stored ID. It might
         * have been deleted since the page's eviction.
         *
         * Note that in rare events the ID could have been recycled
         * for a new cgroup that refaults a shared page. This is
         * impossible to tell from the available data. However, this
         * should be a rare and limited disturbance, and activations
         * are always speculative anyway. Ultimately, it's the aging
         * algorithm's job to shake out the minimum access frequency
         * for the active cache.
         *
         * XXX: On !CONFIG_MEMCG, this will always return NULL; it
         * would be better if the root_mem_cgroup existed in all
         * configurations instead.
         */
        memcg = mem_cgroup_from_id(memcgid);
        if (!mem_cgroup_disabled() && !memcg) {
                rcu_read_unlock();
                return false;
        }
        lruvec = mem_cgroup_lruvec(pgdat, memcg);
        refault = atomic_long_read(&lruvec->inactive_age);
        active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
        rcu_read_unlock();

        /*
         * The unsigned subtraction here gives an accurate distance
         * across inactive_age overflows in most cases.
         *
         * There is a special case: usually, shadow entries have a
         * short lifetime and are either refaulted or reclaimed along
         * with the inode before they get too old. But it is not
         * impossible for the inactive_age to lap a shadow entry in
         * the field, which can then result in a false small refault
         * distance, leading to a false activation should this old
         * entry actually refault again. However, earlier kernels
         * used to deactivate unconditionally with *every* reclaim
         * invocation for the longest time, so the occasional
         * inappropriate activation leading to pressure on the active
         * list is not a problem.
         */
        refault_distance = (refault - eviction) & EVICTION_MASK;

        inc_node_state(pgdat, WORKINGSET_REFAULT);

        if (refault_distance <= active_file) {
                inc_node_state(pgdat, WORKINGSET_ACTIVATE);
                return true;
        }
        return false;
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        /*
         * Filter non-memcg pages here, e.g. unmap can call
         * mark_page_accessed() on VDSO pages.
         *
         * XXX: See workingset_refault() - this should return
         * root_mem_cgroup even for !CONFIG_MEMCG.
         */
        memcg = page_memcg_rcu(page);
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
        lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
        atomic_long_inc(&lruvec->inactive_age);
out:
        rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct radix_tree_node *node, void *private)
{
        struct address_space *mapping = private;

        /* Only regular page cache has shadow entries */
        if (dax_mapping(mapping) || shmem_mapping(mapping))
                return;

        /*
         * Track non-empty nodes that contain only shadow entries;
         * unlink those that contain pages or are being freed.
         *
         * Avoid acquiring the list_lru lock when the nodes are
         * already where they should be. The list_empty() test is safe
         * as node->private_list is protected by &mapping->tree_lock.
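         *
         * (node->count is the number of populated slots in the node
         *  and node->exceptional the number of those holding
         *  exceptional, i.e. shadow, entries, so equal non-zero
         *  counts mean the node contains nothing but shadow entries.)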
         */
        if (node->count && node->count == node->exceptional) {
                if (list_empty(&node->private_list)) {
                        node->private_data = mapping;
                        list_lru_add(&shadow_nodes, &node->private_list);
                }
        } else {
                if (!list_empty(&node->private_list))
                        list_lru_del(&shadow_nodes, &node->private_list);
        }
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
                                        struct shrink_control *sc)
{
        unsigned long max_nodes;
        unsigned long nodes;
        unsigned long cache;

        /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
        local_irq_disable();
        nodes = list_lru_shrink_count(&shadow_nodes, sc);
        local_irq_enable();

        /*
         * Approximate a reasonable limit for the radix tree nodes
         * containing shadow entries. We don't need to keep more
         * shadow entries than possible pages on the active list,
         * since refault distances bigger than that are dismissed.
         *
         * The size of the active list converges toward 100% of
         * overall page cache as memory grows, with only a tiny
         * inactive list. Assume the total cache size for that.
         *
         * Nodes might be sparsely populated, with only one shadow
         * entry in the extreme case. Obviously, we cannot keep one
         * node for every eligible shadow entry, so compromise on a
         * worst-case density of 1/8th. Below that, not all eligible
         * refaults can be detected anymore.
         *
         * On 64-bit with 7 radix_tree_nodes per page and 64 slots
         * each, this will reclaim shadow entries when they consume
         * ~1.8% of available memory:
         *
         * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
         */
        if (sc->memcg) {
                cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
                                                     LRU_ALL_FILE);
        } else {
                cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
                        node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
        }
        max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);

        if (nodes <= max_nodes)
                return 0;
        return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
                                          struct list_lru_one *lru,
                                          spinlock_t *lru_lock,
                                          void *arg)
{
        struct address_space *mapping;
        struct radix_tree_node *node;
        unsigned int i;
        int ret;

        /*
         * Page cache insertions and deletions synchronously maintain
         * the shadow node LRU under the mapping->tree_lock and the
         * lru_lock. Because the page cache tree is emptied before
         * the inode can be destroyed, holding the lru_lock pins any
         * address_space that has radix tree nodes on the LRU.
         *
         * We can then safely transition to the mapping->tree_lock to
         * pin only the address_space of the particular node we want
         * to reclaim, take the node off-LRU, and drop the lru_lock.
         */

        node = container_of(item, struct radix_tree_node, private_list);
        mapping = node->private_data;

        /* Coming from the list, invert the lock order */
        if (!spin_trylock(&mapping->tree_lock)) {
                spin_unlock(lru_lock);
                ret = LRU_RETRY;
                goto out;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lru_lock);

        /*
         * The nodes should only contain one or more shadow entries,
         * no pages, so we expect to be able to remove them all and
         * delete and free the empty node afterwards.
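         *
         * Clearing the slots by hand keeps node->count,
         * node->exceptional and mapping->nrexceptional consistent up
         * to the point where the node is unlinked from the tree.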
         */
        if (WARN_ON_ONCE(!node->exceptional))
                goto out_invalid;
        if (WARN_ON_ONCE(node->count != node->exceptional))
                goto out_invalid;
        for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
                if (node->slots[i]) {
                        if (WARN_ON_ONCE(!radix_tree_exceptional_entry(node->slots[i])))
                                goto out_invalid;
                        if (WARN_ON_ONCE(!node->exceptional))
                                goto out_invalid;
                        if (WARN_ON_ONCE(!mapping->nrexceptional))
                                goto out_invalid;
                        node->slots[i] = NULL;
                        node->exceptional--;
                        node->count--;
                        mapping->nrexceptional--;
                }
        }
        if (WARN_ON_ONCE(node->exceptional))
                goto out_invalid;
        inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
        __radix_tree_delete_node(&mapping->page_tree, node);

out_invalid:
        spin_unlock(&mapping->tree_lock);
        ret = LRU_REMOVED_RETRY;
out:
        local_irq_enable();
        cond_resched();
        local_irq_disable();
        spin_lock(lru_lock);
        return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
                                       struct shrink_control *sc)
{
        unsigned long ret;

        /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
        local_irq_disable();
        ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
        local_irq_enable();
        return ret;
}

static struct shrinker workingset_shadow_shrinker = {
        .count_objects = count_shadow_nodes,
        .scan_objects = scan_shadow_nodes,
        .seeks = DEFAULT_SEEKS,
        .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
        unsigned int timestamp_bits;
        unsigned int max_order;
        int ret;

        BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
        /*
         * Calculate the eviction bucket size to cover the longest
         * actionable refault distance, which is currently half of
         * memory (totalram_pages/2). However, memory hotplug may add
         * some more pages at runtime, so keep working with up to
         * double the initial memory by using totalram_pages as-is.
         */
        timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
        max_order = fls_long(totalram_pages - 1);
        if (max_order > timestamp_bits)
                bucket_order = max_order - timestamp_bits;
        pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
                timestamp_bits, max_order, bucket_order);

        ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key);
        if (ret)
                goto err;
        ret = register_shrinker(&workingset_shadow_shrinker);
        if (ret)
                goto err_list_lru;
        return 0;
err_list_lru:
        list_lru_destroy(&shadow_nodes);
err:
        return ret;
}
module_init(workingset_init);