// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

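/*
 * Illustration of the swap-cache layout built by add_to_swap_cache() below
 * (an example, not taken from the original file): for an order-2 compound
 * page whose swap entry has offset 12, slots 12..15 of the swap address
 * space all point at the head page, and each subpage records its own swap
 * entry via page_private(page + i) == entry.val + i.
 */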
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is MADV_FREE page. The page's pte could have
	 * the dirty bit cleared while its SwapBacked bit is still set, because
	 * clearing the dirty bit and the SwapBacked bit is not done under a
	 * common lock. For such a page, unmap will not set the dirty bit, so
	 * page reclaim will not write the page out. This can cause data
	 * corruption when the page is swapped in later. Always setting the
	 * dirty bit for the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 				- Marcelo
 */
void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

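/*
 * lookup_swap_cache() below updates the per-VMA readahead state that the
 * SWAP_RA_* macros pack into a single long: the low SWAP_RA_WIN_SHIFT bits
 * hold the hit count, the bits above them up to PAGE_SHIFT hold the last
 * window size, and the page-aligned high bits hold the last fault address.
 * As an illustrative example (assuming PAGE_SHIFT == 12, so
 * SWAP_RA_WIN_SHIFT == 6), SWAP_RA_VAL(addr, 8, 3) is
 * (addr & PAGE_MASK) | (8 << 6) | 3.
 */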
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = pagecache_get_page(mapping, index,
						FGP_ENTRY | FGP_HEAD, 0);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(page_folio(page), shadow);

	/* Caller will initiate read into locked page */
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	put_swap_page(page, entry);
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr, bool do_poll,
				   struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll, plug);

	return retpage;
}

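/*
 * Worked example of the readahead window heuristic in __swapin_nr_pages()
 * below: with 5 recorded hits the raw estimate is 5 + 2 = 7 pages, which is
 * rounded up to the next power of two (8); the result is then capped at
 * max_pages and never allowed to drop below half of the previous window,
 * so the window shrinks by at most a factor of two per step.
 */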
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}

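/*
 * A swap device's cache is split across several address_spaces, one per
 * SWAP_ADDRESS_SPACE_PAGES slots, to spread lock contention on i_pages.
 * As an illustration (assuming SWAP_ADDRESS_SPACE_SHIFT == 14 and 4 KiB
 * pages), a 1 GiB swap device covers 262144 slots and therefore gets
 * DIV_ROUND_UP(262144, 16384) == 16 address_spaces.
 */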
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

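/*
 * Example of the PTE window chosen by swap_ra_info() above: with win == 8
 * and no sequential pattern detected, left = (8 - 1) / 2 = 3, so
 * swap_vma_readahead() below scans the PTEs for pages fpfn - 3 .. fpfn + 4,
 * further clamped to the VMA and to the PMD containing the faulting address.
 */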
/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it reads ahead either by cluster-based (i.e. physical
 * disk based) or vma-based (i.e. virtual-address based, around the
 * faulting address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
	       swap_vma_readahead(entry, gfp_mask, vmf) :
	       swap_cluster_readahead(entry, gfp_mask, vmf);
}

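/*
 * The vma_ra_enabled knob defined below is exposed via sysfs; for example,
 * writing "false" (or "0") to /sys/kernel/mm/swap/vma_ra_enabled makes
 * subsequent faults fall back to cluster-based readahead.
 */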
"true" : "false"); 872 } 873 static ssize_t vma_ra_enabled_store(struct kobject *kobj, 874 struct kobj_attribute *attr, 875 const char *buf, size_t count) 876 { 877 if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1)) 878 enable_vma_readahead = true; 879 else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1)) 880 enable_vma_readahead = false; 881 else 882 return -EINVAL; 883 884 return count; 885 } 886 static struct kobj_attribute vma_ra_enabled_attr = 887 __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, 888 vma_ra_enabled_store); 889 890 static struct attribute *swap_attrs[] = { 891 &vma_ra_enabled_attr.attr, 892 NULL, 893 }; 894 895 static const struct attribute_group swap_attr_group = { 896 .attrs = swap_attrs, 897 }; 898 899 static int __init swap_init_sysfs(void) 900 { 901 int err; 902 struct kobject *swap_kobj; 903 904 swap_kobj = kobject_create_and_add("swap", mm_kobj); 905 if (!swap_kobj) { 906 pr_err("failed to create swap kobject\n"); 907 return -ENOMEM; 908 } 909 err = sysfs_create_group(swap_kobj, &swap_attr_group); 910 if (err) { 911 pr_err("failed to register swap group\n"); 912 goto delete_obj; 913 } 914 return 0; 915 916 delete_obj: 917 kobject_put(swap_kobj); 918 return err; 919 } 920 subsys_initcall(swap_init_sysfs); 921 #endif 922