// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&pgdat->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	}
	__ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page);
	free_unref_page(page);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb.  A hugetlb page never has PageLRU set (it is never put
	 * on any LRU list) and no memcg routines should be called for
	 * hugetlb (it has a separate hugetlb_cgroup).
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to the page allocator.
		 */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = lru_to_page(pages);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
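
/*
 * Example (illustrative sketch, not a real caller): a user that has
 * threaded pages onto a private list via page->lru, holding one
 * reference per page, can drop them all in a single call.  "pages" and
 * "page" below are hypothetical.
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&page->lru, &pages);
 *	...
 *	put_pages_list(&pages);		// empties the list, put_page()s each
 */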

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns the number of pages pinned.  This may be fewer than the number
 * requested: the walk stops at the first kvec whose length is not
 * PAGE_SIZE.  If @nr_segs is 0 or negative, returns 0.  Each page
 * returned must be released with a put_page() call when it is finished
 * with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives a pointer to the page pinned.
 *		Must have space for one struct page pointer.
 *
 * Returns 1 if the page was pinned, 0 otherwise.  The page returned
 * must be released with a put_page() call when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
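
/*
 * Example (illustrative sketch): pinning a single page-aligned,
 * PAGE_SIZE kernel buffer; "buf" is hypothetical and the write
 * argument is currently ignored.
 *
 *	struct page *page;
 *
 *	if (get_kernel_page((unsigned long)buf, 0, &page) == 1) {
 *		...operate on page...
 *		put_page(page);
 *	}
 */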

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			pgdat = pagepgdat;
			spin_lock_irqsave(&pgdat->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	if (pgdat)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	page = compound_head(page);
	spin_lock_irq(&pgdat->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
	spin_unlock_irq(&pgdat->lru_lock);
}
#endif
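
/*
 * The per-cpu pagevec pattern used throughout this file, as a sketch
 * ("my_pvecs" and "my_move_fn" are hypothetical):
 *
 *	struct pagevec *pvec = &get_cpu_var(my_pvecs);
 *
 *	get_page(page);
 *	if (!pagevec_add(pvec, page) || PageCompound(page))
 *		pagevec_lru_move_fn(pvec, my_move_fn, NULL);
 *	put_cpu_var(my_pvecs);
 *
 * i.e. batch until the pagevec fills up (or immediately for compound
 * pages), and only then take the LRU lock to process the whole batch.
 */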

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list, causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible (and hence safe for
 * non-atomic ops), __SetPageReferenced(page) may be substituted for
 * mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);

	if (!PageReferenced(page)) {
		SetPageReferenced(page);
	} else if (PageUnevictable(page)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!PageActive(page)) {
		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	get_page(page);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}
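
/*
 * Example (illustrative sketch): a typical caller allocates a page,
 * makes it visible (e.g. maps it or inserts it into the page cache) and
 * then queues it for the LRU; lru_cache_add() takes its own reference.
 *
 *	page = alloc_page(GFP_HIGHUSER_MOVABLE);
 *	...
 *	lru_cache_add(page);	// batched; hits the LRU on the next drain
 */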

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list, it does NOT use a
 * per cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		SetPageActive(page);
	else if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	lru_cache_add(page);
}
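
/*
 * Example (illustrative sketch, modelled on the anonymous page fault
 * path; charging and pte setup are elided):
 *
 *	page_add_new_anon_rmap(page, vma, address, false);
 *	lru_cache_add_active_or_unevictable(page, vma);
 */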

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty or under writeback,
 * PG_reclaim is set so it can be reclaimed ASAP once writeback
 * completes.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by flusher threads, which is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
				   void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim can race with end_page_writeback(),
		 * which can confuse readahead.  But the race window is
		 * _really_ small, and it's a non-critical problem.
		 */
		add_page_to_lru_list(page, lruvec, lru);
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was in the pagevec;
		 * move the page to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec, lru);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
		update_page_reclaim_stat(lruvec, file, 0);
	}
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages.  They have
		 * the PG_swapbacked flag cleared, to distinguish them
		 * from normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
		count_memcg_page_event(page, PGLAZYFREE);
		update_page_reclaim_stat(lruvec, 1, 0);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as one making
	 * heavy use of mprotect), deactivating unevictable pages to
	 * accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}
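
/*
 * Example (illustrative sketch, modelled on invalidate_mapping_pages()
 * in mm/truncate.c): pages that could not be invalidated are demoted so
 * that reclaim finds them sooner:
 *
 *	if (!invalidate_inode_page(page))
 *		deactivate_file_page(page);
 */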

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		put_cpu_var(lru_lazyfree_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
void lru_add_drain_all(void)
{
	static seqcount_t seqcount = SEQCNT_ZERO(seqcount);
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu, seq;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	seq = raw_read_seqcount_latch(&seqcount);

	mutex_lock(&lock);

	/*
	 * Piggyback on a drain that started and finished while we waited
	 * for the lock: all pages that were pending when we entered have
	 * already been drained from the pagevecs.
	 */
	if (__read_seqcount_retry(&seqcount, seq))
		goto done;

	raw_write_seqcount_latch(&seqcount);

	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif
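
/*
 * Example (illustrative sketch): callers about to scan or isolate LRU
 * pages, e.g. migration or memory-offline paths, flush the per-cpu
 * pagevecs first so that every candidate page is actually on an LRU
 * list:
 *
 *	lru_add_drain_all();
 *	...
 *	if (!isolate_lru_page(page))
 *		...page is now off its LRU list, with a reference held...
 */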

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If a
 * refcount falls to zero, remove that page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same pgdat. The lock is held only if pgdat != NULL.
		 */
		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
			locked_pgdat = NULL;
		}

		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
						       flags);
				locked_pgdat = NULL;
			}
			/*
			 * ZONE_DEVICE pages that return 'false' from
			 * put_devmap_managed_page() do not require special
			 * processing, and instead, expect a call to
			 * put_page_testzero().
			 */
			if (put_devmap_managed_page(page))
				continue;
		}

		page = compound_head(page);
		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
				locked_pgdat = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			if (pgdat != locked_pgdat) {
				if (locked_pgdat)
					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
							       flags);
				lock_batch = 0;
				locked_pgdat = pgdat;
				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (locked_pgdat)
		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
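
/*
 * Example (illustrative sketch): dropping a batch of references taken
 * with get_user_pages(); "pages" and "nr" are hypothetical, and error
 * handling is elided.
 *
 *	nr = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
 *	...
 *	release_pages(pages, nr);	// instead of nr put_page() calls
 */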

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * calls release_pages() directly to avoid mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
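
/*
 * Example (illustrative sketch): the usual lookup/process/release
 * cycle; "mapping" and "index" are hypothetical.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup(&pvec, mapping, &index)) {
 *		...process pvec.pages[0..pagevec_count(&pvec) - 1]...
 *		pagevec_release(&pvec);	// wraps __pagevec_release()
 *	}
 */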

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		/*
		 * The head page has not yet been counted as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Put page_tail on the list at the correct position
		 * so the pages all end up in order.
		 */
		add_page_to_lru_list_tail(page_tail, lruvec,
					  page_lru(page_tail));
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	/*
	 * A page becomes evictable in two ways:
	 * 1) Within the LRU lock [munlock_vma_page() and __munlock_pagevec()].
	 * 2) Before acquiring the LRU lock to put the page on the correct
	 *    LRU, and then
	 *	a) do PageLRU check with lock [check_move_unevictable_pages]
	 *	b) do PageLRU check before lock [clear_page_mlock]
	 *
	 * (1) & (2a) are ok as the LRU lock will serialize them.  For (2b),
	 * we need the following strict ordering:
	 *
	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
	 *
	 * SetPageLRU()				TestClearPageMlocked()
	 * smp_mb() // explicit ordering	// above provides strict
	 *					// ordering
	 * PageMlocked()			PageLRU()
	 *
	 * If '#1' does not observe the setting of PG_lru by '#0' and fails
	 * isolation, the explicit barrier makes sure that the
	 * page_evictable() check puts the page on the correct LRU.  Without
	 * the smp_mb(), SetPageLRU() could be reordered after the
	 * PageMlocked() check, making '#1' fail the isolation of a page
	 * whose Mlocked bit was just cleared ('#0' is also looking at the
	 * same page), and the evictable page would be stranded on an
	 * unevictable LRU.
	 */
	smp_mb();

	if (page_evictable(page)) {
		lru = page_lru(page);
		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
					 PageActive(page));
		if (was_unevictable)
			count_vm_event(UNEVICTABLE_PGRESCUED);
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			count_vm_event(UNEVICTABLE_PGCULLED);
	}

	add_page_to_lru_list(page, lruvec, lru);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_entries,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}
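
/*
 * Example (illustrative sketch, modelled on the truncate/invalidate
 * loops in mm/truncate.c):
 *
 *	pgoff_t indices[PAGEVEC_SIZE];
 *	pgoff_t index = 0;
 *
 *	while (pagevec_lookup_entries(&pvec, mapping, index, PAGEVEC_SIZE,
 *				      indices)) {
 *		...consult xa_is_value() entries via indices[]...
 *		pagevec_remove_exceptionals(&pvec);
 *		...operate on the remaining real pages...
 *		pagevec_release(&pvec);
 *		...advance index...
 *	}
 */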

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (!xa_is_value(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
 * pages in the mapping starting from index @start and up to index @end
 * (inclusive).  The pages are placed in @pvec.  pagevec_lookup_range() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.  We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found.  If this
 * number is smaller than PAGEVEC_SIZE, the end of the specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
			      struct address_space *mapping, pgoff_t *start,
			      pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					    PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag, unsigned max_pages)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);

/*
 * Perform any setup for the swap system.
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
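
/*
 * Example (illustrative): swapin readahead consumes page_cluster as a
 * power of two, i.e. it works on up to 1 << page_cluster pages at a
 * time (8 with the default of 3, 4 on small-memory machines).  The
 * value is runtime-tunable via the vm.page-cluster sysctl.
 *
 *	unsigned long nr = 1UL << page_cluster;	// pages per swapin batch
 */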