/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, false);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

/**
 * Two special cases here: we could avoid taking compound_lock_irqsave
 * and could skip the tail refcounting (in _mapcount).
 *
 * 1. Hugetlbfs page:
 *
 *    PageHeadHuge will remain true until the compound page
 *    is released and enters the buddy allocator, and it cannot
 *    be split by __split_huge_page_refcount().
 *
 *    So if we see PageHeadHuge set, and we have the tail page pin,
 *    then we can safely put the head page.
 *
 * 2. Slab THP page:
 *
 *    PG_slab is cleared before the slab frees the head page, and
 *    tail pin cannot be the last reference left on the head page,
 *    because the slab code is free to reuse the compound page
 *    after a kfree/kmem_cache_free without having to check if
 *    there's any tail pin left.  In turn all tail pins must always be
 *    released while the head is still pinned by the slab code
 *    and so we know PG_slab will still be set too.
 *
 *    So if we see PageSlab set, and we have the tail page pin,
 *    then we can safely put the head page.
 */
static __always_inline
void put_unrefcounted_compound_page(struct page *page_head, struct page *page)
{
	/*
	 * If @page is a THP tail, we must read the tail page
	 * flags after the head page flags.
	 * The __split_huge_page_refcount side enforces write memory
	 * barriers between clearing PageTail and the point where the
	 * head page can be freed and reallocated.
	 */
	smp_rmb();
	if (likely(PageTail(page))) {
		/*
		 * __split_huge_page_refcount cannot race
		 * here, see the comment above this function.
		 */
		VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
		VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
		if (put_page_testzero(page_head)) {
			/*
			 * If this is the tail of a slab THP page,
			 * the tail pin must not be the last reference
			 * held on the page, because PG_slab cannot
			 * be cleared before all tail pins (which skip
			 * the _mapcount tail refcounting) have been
			 * released.
			 *
			 * If this is the tail of a hugetlbfs page,
			 * the tail pin may be the last reference on
			 * the page instead, because PageHeadHuge will
			 * not go away until the compound page enters
			 * the buddy allocator.
			 */
			VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
			__put_compound_page(page_head);
		}
	} else
		/*
		 * __split_huge_page_refcount ran before us,
		 * @page was a THP tail. The split @page_head
		 * has been freed and reallocated as slab or
		 * hugetlbfs page of smaller order (only
		 * possible if reallocated as slab on x86).
		 */
		if (put_page_testzero(page))
			__put_single_page(page);
}

static __always_inline
void put_refcounted_compound_page(struct page *page_head, struct page *page)
{
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		unsigned long flags;

		/*
		 * @page_head wasn't a dangling pointer but it may not
		 * be a head page anymore by the time we obtain the
		 * lock. That is ok as long as it can't be freed from
		 * under us.
		 */
		flags = compound_lock_irqsave(page_head);
		if (unlikely(!PageTail(page))) {
			/* __split_huge_page_refcount ran before us */
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				/*
				 * The @page_head may have been freed
				 * and reallocated as a compound page
				 * of smaller order and then freed
				 * again.  All we know is that it
				 * cannot have become: a THP page, a
				 * compound page of higher order, a
				 * tail page.  That is because we
				 * still hold the refcount of the
				 * split THP tail and page_head was
				 * the THP head before the split.
				 */
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
out_put_single:
			if (put_page_testzero(page))
				__put_single_page(page);
			return;
		}
		VM_BUG_ON_PAGE(page_head != page->first_page, page);
		/*
		 * We can release the refcount taken by
		 * get_page_unless_zero() now that
		 * __split_huge_page_refcount() is blocked on the
		 * compound_lock.
		 */
		if (put_page_testzero(page_head))
			VM_BUG_ON_PAGE(1, page_head);
		/* __split_huge_page_refcount will wait now */
		VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
		atomic_dec(&page->_mapcount);
		VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
		VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
		compound_unlock_irqrestore(page_head, flags);

		if (put_page_testzero(page_head)) {
			if (PageHead(page_head))
				__put_compound_page(page_head);
			else
				__put_single_page(page_head);
		}
	} else {
		/* @page_head is a dangling pointer */
		VM_BUG_ON_PAGE(PageTail(page), page);
		goto out_put_single;
	}
}

static void put_compound_page(struct page *page)
{
	struct page *page_head;

	/*
	 * We see PageCompound set and PageTail not set, so @page may be:
	 * 1. hugetlbfs head page, or
	 * 2. THP head page.
	 */
	if (likely(!PageTail(page))) {
		if (put_page_testzero(page)) {
			/*
			 * By the time all refcounts have been released,
			 * split_huge_page cannot run anymore from under us.
			 */
			if (PageHead(page))
				__put_compound_page(page);
			else
				__put_single_page(page);
		}
		return;
	}

	/*
	 * We see PageCompound set and PageTail set, so @page may be:
	 * 1. a tail hugetlbfs page, or
	 * 2. a tail THP page, or
	 * 3. a split THP page.
	 *
	 * Case 3 is possible, as we may race with
	 * __split_huge_page_refcount tearing down a THP page.
	 */
	page_head = compound_head_by_tail(page);
	if (!__compound_tail_refcounted(page_head))
		put_unrefcounted_compound_page(page_head, page);
	else
		put_refcounted_compound_page(page_head, page);
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got;
	struct page *page_head = compound_head(page);

	/* Refer to the put_compound_page() comment. */
	if (!__compound_tail_refcounted(page_head)) {
		smp_rmb();
		if (likely(PageTail(page))) {
			/*
			 * This is a hugetlbfs page or a slab
			 * page. __split_huge_page_refcount
			 * cannot race here.
			 */
			VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
			__get_page_tail_foll(page, true);
			return true;
		} else {
			/*
			 * __split_huge_page_refcount ran
			 * before us, "page" was a THP
			 * tail. The split page_head has been
			 * freed and reallocated as slab or
			 * hugetlbfs page of smaller order
			 * (only possible if reallocated as
			 * slab on x86).
			 */
			return false;
		}
	}

	got = false;
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0.  If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		page_cache_get(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must have space for at least one page pointer.
 *
 * Returns 1 if the page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
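 *
 * Illustrative sketch only (not taken from an in-tree caller; "buf" is a
 * hypothetical page-sized, page-aligned kernel buffer): pin the backing
 * page, use it, then drop the reference:
 *
 *	struct page *page;
 *
 *	if (get_kernel_page((unsigned long)buf, 0, &page) == 1) {
 *		... operate on the page ...
 *		put_page(page);
 *	}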
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);
		(*move_fn)(page, lruvec, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
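 *
 * rotate_reclaimable_page() is normally reached from the writeback
 * completion path (e.g. end_page_writeback()).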
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page, page_to_pfn(page));

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
	return false;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list, causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
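 *
 * Repeated calls walk a page up the ladder below; for example, a page
 * that is referenced twice in short succession ends up on the active
 * list.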
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);

/*
 * Used instead of mark_page_accessed() on a page that is not yet visible
 * to others, while it is still safe to use non-atomic ops.
 */
void init_page_accessed(struct page *page)
{
	if (!PageReferenced(page))
		__SetPageReferenced(page);
}
EXPORT_SYMBOL(init_page_accessed);

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	page_cache_get(page);
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec);
	pagevec_add(pvec, page);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page: the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	ClearPageActive(page);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.
 * It is moved to the head of the list, rather than the tail, to give the
 * flusher threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty or under writeback, it can be
 * reclaimed ASAP by using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list because
 * the VM expects the flusher threads to write it out, which is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim can race with end_page_writeback, which
		 * can confuse readahead.  But the race window is
		 * _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it sat in the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as one using
	 * mprotect), deactivating unevictable pages to accelerate reclaim
	 * is pointless.
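	 *
	 * deactivate_page() is typically reached when invalidation of a
	 * page cache page fails, e.g. from invalidate_mapping_pages().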
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it falls to zero, remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, bool cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
							       flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int file = page_is_file_cache(page);
	int active = PageActive(page);
	enum lru_list lru = page_lru(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, page_to_pfn(page), lru,
			       trace_pagemap_flags(page));
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
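 *
 * A sketch of a typical caller (illustrative only; process_page() stands
 * in for whatever the caller does with each real page):
 *
 *	pgoff_t indices[PAGEVEC_SIZE];
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	int i;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup_entries(&pvec, mapping, index,
 *				      PAGEVEC_SIZE, indices)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			index = indices[i];
 *			if (radix_tree_exceptional_entry(pvec.pages[i]))
 *				continue;
 *			process_page(pvec.pages[i]);
 *		}
 *		pagevec_remove_exceptionals(&pvec);
 *		pagevec_release(&pvec);
 *		index++;
 *	}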
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_pages,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_pages,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
				      nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system.
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	int i;

	if (bdi_init(swapper_spaces[0].backing_dev_info))
		panic("Failed to init swap bdi");
	for (i = 0; i < MAX_SWAPFILES; i++) {
		spin_lock_init(&swapper_spaces[i].tree_lock);
		INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
	}
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}