/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&pgdat->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	}
	__ClearPageWaiters(page);
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_unref_page(page);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for THP, not for
	 * hugetlb: a hugetlb page never has PageLRU set (it is never put on
	 * any LRU list), and no memcg routines should be called for hugetlb
	 * (it has a separate hugetlb_cgroup).
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap.  Do
		 * not return it to the page allocator.
		 */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
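 *
 * A minimal usage sketch (hypothetical caller; the list is assumed to be
 * private to the caller, since no locking is done here):
 *
 *	LIST_HEAD(pages);
 *
 *	... collect pages with list_add(&page->lru, &pages) ...
 *	put_pages_list(&pages);	(empties the list, drops one ref per page)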
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = lru_to_page(pages);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns the number of pages pinned.  This may be fewer than the number
 * requested: pinning stops at the first segment whose iov_len is not
 * PAGE_SIZE.  If @nr_segs is 0 or negative, returns 0.  Each page returned
 * must be released with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives a pointer to the page pinned.
 *		Must have space for at least one page pointer.
 *
 * Returns 1 if the page is pinned.  The page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		/* Only drop/retake the LRU lock when the pgdat changes. */
		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			pgdat = pagepgdat;
			spin_lock_irqsave(&pgdat->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	if (pgdat)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
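 * (The lru_rotate_pvecs pagevec is also filled from interrupt context,
 * by rotate_reclaimable_page() when writeback completes, so touching it
 * with interrupts enabled could corrupt the per-cpu pagevec.)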
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it to
 * the tail of the inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	page = compound_head(page);
	spin_lock_irq(&pgdat->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
	spin_unlock_irq(&pgdat->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec.  Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained.  Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list, causing accounting errors and BUG_ON checks to trigger.
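	 *
	 * If the page is not found here, it simply misses this activation;
	 * it stays on the inactive list and can still be activated by
	 * later accesses once it is on the LRU.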
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs.  Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	get_page(page);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec.  The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained.  This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  The page goes through the per-cpu pagevec either way;
 * if it is not evictable, __pagevec_lru_add_fn() moves it onto the
 * unevictable list when the pagevec is drained.
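 *
 * (The vm_flags test below treats the page as evictable unless the vma
 * is VM_LOCKED and not VM_SPECIAL; pages in special mappings are not
 * accounted as mlocked even when VM_LOCKED is set.)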
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		SetPageActive(page);
	else if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the
		 * pte lock is held (a spinlock), which implies preemption
		 * is disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				      hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	lru_cache_add(page);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty or under writeback, it can be
 * reclaimed ASAP by setting PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the inactive head because the VM
 * expects the flusher threads to write it out, which is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
				   void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim can race with end_page_writeback(),
		 * which can confuse readahead.  But the race window is
		 * really small, and it is a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was on the pagevec:
		 * move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

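/*
 * Lazyfree pages typically come from MADV_FREE: the pages stay resident,
 * but once they are clean they may simply be dropped instead of being
 * swapped out.  lru_lazyfree_fn() below moves them to the inactive file
 * list so that reclaim treats them like clean file pages.
 */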
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages.  They have the
		 * SwapBacked flag cleared, to distinguish them from normal
		 * anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
		count_memcg_page_event(page, PGLAZYFREE);
		update_page_reclaim_stat(lruvec, 1, 0);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as with mprotect),
	 * deactivating unevictable pages to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		put_cpu_var(lru_lazyfree_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
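 *
 * Callers that only need the local CPU's pagevecs flushed should use
 * lru_add_drain() instead: this function schedules, and waits for, a
 * drain on every CPU that has pending pages, which is far more
 * expensive.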
 */
void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	mutex_lock(&lock);
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	mutex_unlock(&lock);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If a page's
 * count drops to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same pgdat.  The lock is held only if pgdat != NULL.
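		 * Dropping the lock every SWAP_CLUSTER_MAX pages bounds the
		 * IRQ-disabled latency while still amortising the lock
		 * acquisition over a batch of pages.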
		 */
		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
			locked_pgdat = NULL;
		}

		if (is_huge_zero_page(page))
			continue;

		/* A device public page cannot be a huge page. */
		if (is_device_public_page(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
						       flags);
				locked_pgdat = NULL;
			}
			put_devmap_managed_page(page);
			continue;
		}

		page = compound_head(page);
		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
				locked_pgdat = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			if (pgdat != locked_pgdat) {
				if (locked_pgdat)
					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
							       flags);
				lock_batch = 0;
				locked_pgdat = pgdat;
				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (locked_pgdat)
		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * calls release_pages() directly, via pagevec_lru_move_fn(), to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
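		 *
		 * (Going through add_page_to_lru_list() is what updates the
		 * LRU size accounting; the list_move_tail() below only
		 * corrects the position.)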
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	/*
	 * A page becomes evictable in two ways:
	 * 1) Within the LRU lock [munlock_vma_pages() and __munlock_pagevec()].
	 * 2) Before acquiring the LRU lock to put the page on the correct
	 *    LRU, and then
	 *    a) doing the PageLRU check with the lock [check_move_unevictable_pages]
	 *    b) doing the PageLRU check before the lock [clear_page_mlock]
	 *
	 * (1) & (2a) are ok as the LRU lock will serialize them.  For (2b),
	 * we need the following strict ordering:
	 *
	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
	 *
	 * SetPageLRU()				TestClearPageMlocked()
	 * smp_mb() // explicit ordering	// above provides strict
	 *					// ordering
	 * PageMlocked()			PageLRU()
	 *
	 * If '#1' does not observe the setting of PG_lru by '#0' and fails
	 * the isolation, the explicit barrier makes sure that the
	 * page_evictable() check puts the page on the correct LRU.  Without
	 * smp_mb(), SetPageLRU() could be reordered after the PageMlocked()
	 * check, making '#1' fail the isolation of the page whose Mlocked
	 * bit is cleared (#0 is also looking at the same page), and the
	 * evictable page would be stranded on an unevictable LRU.
	 */
	smp_mb();

	if (page_evictable(page)) {
		lru = page_lru(page);
		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
					 PageActive(page));
		if (was_unevictable)
			count_vm_event(UNEVICTABLE_PGRESCUED);
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			count_vm_event(UNEVICTABLE_PGCULLED);
	}

	add_page_to_lru_list(page, lruvec, lru);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
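 *
 * Note that the returned entries may include shadow/exceptional values
 * as well as page pointers; use pagevec_remove_exceptionals() to strip
 * those before handing @pvec to page-only operations.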
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_entries,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (!xa_is_value(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and up to
 * index @end (inclusive).  The pages are placed in @pvec.
 * pagevec_lookup_range() takes a reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 * We also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found.  If
 * this number is smaller than PAGEVEC_SIZE, the end of the specified range
 * has been reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					    PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag, unsigned max_pages)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);

/*
 * Perform any setup for the swap system.
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
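/*
 * Note: page_cluster is used as a shift count, so the values chosen above
 * cluster swap I/O in groups of up to 1 << 2 = 4 pages on small machines
 * and 1 << 3 = 8 pages otherwise.  It is tunable at runtime via
 * /proc/sys/vm/page-cluster.
 */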