// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
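/*
 * vma->swap_readahead_info packs three fields into one atomic_long, using
 * the macros above: the page-aligned address of the previous swapin fault
 * in the PAGE_MASK bits, the previous readahead window size in the next
 * SWAP_RA_WIN_SHIFT bits, and the recent readahead hit count in the low
 * SWAP_RA_WIN_SHIFT bits.
 */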

#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;
	struct swap_info_struct *si;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		swp_entry_t entry = swp_entry(i, 1);

		/* Avoid get_swap_device() warning for a bad swap entry */
		if (!swp_swap_info(entry))
			continue;
		/* Prevent swapoff from freeing swapper_spaces */
		si = get_swap_device(entry);
		if (!si)
			continue;
		nr = nr_swapper_spaces[i];
		spaces = swapper_spaces[i];
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
		put_swap_device(si);
	}
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = find_get_entry(address_space, idx);
	if (xa_is_value(page))
		return page;
	if (page)
		put_page(page);
	return NULL;
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		unsigned long nr_shadows = 0;

		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				nr_shadows++;
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrexceptional -= nr_shadows;
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}
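/*
 * Shadow entries: when a page is deleted from the swap cache, an xarray
 * value entry (the "shadow") may be left behind in its slot and counted in
 * ->nrexceptional.  __read_swap_cache_async() passes such a shadow, if one
 * was found by add_to_swap_cache(), to workingset_refault() when the page
 * is read back in.
 */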
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	if (shadow)
		address_space->nrexceptional += nr;
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is MADV_FREE page. The page's pte could have
	 * dirty bit cleared but the page's SwapBacked bit is still set because
	 * clearing the dirty bit and SwapBacked bit is not protected by a lock.
	 * For such a page, unmap will not set the dirty bit, so page reclaim
	 * will not write the page out. This can cause data corruption when the
	 * page is swapped in later. Always setting the dirty bit for the page
	 * solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		unsigned long nr_shadows = 0;
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
			nr_shadows++;
		}
		address_space->nrexceptional -= nr_shadows;
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = find_get_entry(mapping, index);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		cond_resched();
	}

	/*
	 * The swap entry is ours to swap in.
	 * Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
		put_swap_page(page, entry);
		goto fail_unlock;
	}

	if (mem_cgroup_charge(page, NULL, gfp_mask)) {
		delete_from_swap_cache(page);
		goto fail_unlock;
	}

	if (shadow)
		workingset_refault(page, shadow);

	/* Caller will initiate read into locked page */
	SetPageWorkingset(page);
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.
 * We also make sure to queue the 'original' request together with the
 * readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Test swap type to make sure the dereference is safe */
	if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
		struct inode *inode = si->swap_file->f_mapping->host;
		if (inode_read_congested(inode))
			goto skip;
	}

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	kvfree(swapper_spaces[type]);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if ((unlikely(non_swap_entry(entry)))) {
		pte_unmap(orig_pte);
		return;
	}

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {0,};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. based on
 * physical disk offsets) or vma-based (i.e. based on virtual addresses
 * around the faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		enable_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		enable_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif