// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/swap_slots.h>
#include <linux/sort.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device**);

DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority = -1;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */
PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by get_swap_page() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but get_swap_page() doesn't need to look at full ones.
 * This uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
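 * There is one such plist head per NUMA node: see the
 * swap_avail_heads[nid] users below (add_to_avail_list() etc.) and
 * the node-indexed walk in get_swap_pages().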
 */
static struct plist_head *swap_avail_heads;
static DEFINE_SPINLOCK(swap_avail_lock);

struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);

atomic_t nr_rotate_swap = ATOMIC_INIT(0);

static struct swap_info_struct *swap_type_to_swap_info(int type)
{
	if (type >= READ_ONCE(nr_swapfiles))
		return NULL;

	smp_rmb();	/* Pairs with smp_wmb in alloc_swap_info. */
	return READ_ONCE(swap_info[type]);
}

static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
}

/* Reclaim the swap entry anyway if possible */
#define TTRS_ANYWAY		0x1
/*
 * Reclaim the swap entry if there are no more mappings of the
 * corresponding page
 */
#define TTRS_UNMAPPED		0x2
/* Reclaim the swap entry if swap is getting full */
#define TTRS_FULL		0x4

/* returns 1 if swap entry is freed */
static int __try_to_reclaim_swap(struct swap_info_struct *si,
				 unsigned long offset, unsigned long flags)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct page *page;
	int ret = 0;

	page = find_get_page(swap_address_space(entry), offset);
	if (!page)
		return 0;
	/*
	 * This function is called from scan_swap_map_slots() and from
	 * vmscan.c's page reclaim path, so the caller may already hold
	 * a page lock here.  We have to use trylock to avoid deadlock.
	 * This is a special case; in usual operations use
	 * try_to_free_swap() with an explicit lock_page() instead.
	 */
	if (trylock_page(page)) {
		if ((flags & TTRS_ANYWAY) ||
		    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
			ret = try_to_free_swap(page);
		unlock_page(page);
	}
	put_page(page);
	return ret;
}

static inline struct swap_extent *first_se(struct swap_info_struct *sis)
{
	struct rb_node *rb = rb_first(&sis->swap_extent_root);
	return rb_entry(rb, struct swap_extent, rb_node);
}

static inline struct swap_extent *next_se(struct swap_extent *se)
{
	struct rb_node *rb = rb_next(&se->rb_node);
	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
	struct swap_extent *se;
	sector_t start_block;
	sector_t nr_blocks;
	int err = 0;

	/* Do not discard the swap header page! */
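	/*
	 * The header lives in the first page of the first extent, which
	 * is why that extent is issued below with its first page skipped.
	 */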
	se = first_se(si);
	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
	if (nr_blocks) {
		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			return err;
		cond_resched();
	}

	for (se = next_se(se); se; se = next_se(se)) {
		start_block = se->start_block << (PAGE_SHIFT - 9);
		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			break;

		cond_resched();
	}
	return err;		/* That will often be -EOPNOTSUPP */
}

static struct swap_extent *
offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
{
	struct swap_extent *se;
	struct rb_node *rb;

	rb = sis->swap_extent_root.rb_node;
	while (rb) {
		se = rb_entry(rb, struct swap_extent, rb_node);
		if (offset < se->start_page)
			rb = rb->rb_left;
		else if (offset >= se->start_page + se->nr_pages)
			rb = rb->rb_right;
		else
			return se;
	}
	/* It *must* be present */
	BUG();
}

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
				 pgoff_t start_page, pgoff_t nr_pages)
{
	struct swap_extent *se = offset_to_swap_extent(si, start_page);

	while (nr_pages) {
		pgoff_t offset = start_page - se->start_page;
		sector_t start_block = se->start_block + offset;
		sector_t nr_blocks = se->nr_pages - offset;

		if (nr_blocks > nr_pages)
			nr_blocks = nr_pages;
		start_page += nr_blocks;
		nr_pages -= nr_blocks;

		start_block <<= PAGE_SHIFT - 9;
		nr_blocks <<= PAGE_SHIFT - 9;
		if (blkdev_issue_discard(si->bdev, start_block,
					nr_blocks, GFP_NOIO, 0))
			break;

		se = next_se(se);
	}
}

#ifdef CONFIG_THP_SWAP
#define SWAPFILE_CLUSTER	HPAGE_PMD_NR

#define swap_entry_size(size)	(size)
#else
#define SWAPFILE_CLUSTER	256

/*
 * Define swap_entry_size() as a constant to let the compiler optimize
 * out some code if !CONFIG_THP_SWAP
 */
#define swap_entry_size(size)	1
#endif
#define LATENCY_LIMIT		256

static inline void cluster_set_flag(struct swap_cluster_info *info,
				    unsigned int flag)
{
	info->flags = flag;
}

static inline unsigned int cluster_count(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_count(struct swap_cluster_info *info,
				     unsigned int c)
{
	info->data = c;
}

static inline void cluster_set_count_flag(struct swap_cluster_info *info,
					  unsigned int c, unsigned int f)
{
	info->flags = f;
	info->data = c;
}

static inline unsigned int cluster_next(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_next(struct swap_cluster_info *info,
				    unsigned int n)
{
	info->data = n;
}

static inline void cluster_set_next_flag(struct swap_cluster_info *info,
					 unsigned int n, unsigned int f)
{
	info->flags = f;
	info->data = n;
}

static inline bool cluster_is_free(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_FREE;
}

static inline bool cluster_is_null(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_NEXT_NULL;
}

static inline void cluster_set_null(struct swap_cluster_info *info)
{
	info->flags = CLUSTER_FLAG_NEXT_NULL;
	info->data = 0;
}

static inline bool cluster_is_huge(struct swap_cluster_info *info)
{
	if (IS_ENABLED(CONFIG_THP_SWAP))
		return info->flags & CLUSTER_FLAG_HUGE;
	return false;
}

static inline void cluster_clear_huge(struct swap_cluster_info *info)
{
	info->flags &= ~CLUSTER_FLAG_HUGE;
}

static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
						     unsigned long offset)
{
	struct swap_cluster_info *ci;

	ci = si->cluster_info;
	if (ci) {
		ci += offset / SWAPFILE_CLUSTER;
		spin_lock(&ci->lock);
	}
	return ci;
}

static inline void unlock_cluster(struct swap_cluster_info *ci)
{
	if (ci)
		spin_unlock(&ci->lock);
}

/*
 * Determine the locking method in use for this device.  Return
 * swap_cluster_info if SSD-style cluster-based locking is in place.
 */
static inline struct swap_cluster_info *lock_cluster_or_swap_info(
		struct swap_info_struct *si, unsigned long offset)
{
	struct swap_cluster_info *ci;

	/* Try to use fine-grained SSD-style locking if available: */
	ci = lock_cluster(si, offset);
	/* Otherwise, fall back to traditional, coarse locking: */
	if (!ci)
		spin_lock(&si->lock);

	return ci;
}

static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
					       struct swap_cluster_info *ci)
{
	if (ci)
		unlock_cluster(ci);
	else
		spin_unlock(&si->lock);
}

static inline bool cluster_list_empty(struct swap_cluster_list *list)
{
	return cluster_is_null(&list->head);
}

static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
{
	return cluster_next(&list->head);
}

static void cluster_list_init(struct swap_cluster_list *list)
{
	cluster_set_null(&list->head);
	cluster_set_null(&list->tail);
}

static void cluster_list_add_tail(struct swap_cluster_list *list,
				  struct swap_cluster_info *ci,
				  unsigned int idx)
{
	if (cluster_list_empty(list)) {
		cluster_set_next_flag(&list->head, idx, 0);
		cluster_set_next_flag(&list->tail, idx, 0);
	} else {
		struct swap_cluster_info *ci_tail;
		unsigned int tail = cluster_next(&list->tail);

		/*
		 * Nested cluster lock, but both cluster locks are
		 * only acquired when we hold swap_info_struct->lock
		 */
		ci_tail = ci + tail;
		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
		cluster_set_next(ci_tail, idx);
		spin_unlock(&ci_tail->lock);
		cluster_set_next_flag(&list->tail, idx, 0);
	}
}

static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
					   struct swap_cluster_info *ci)
{
	unsigned int idx;

	idx = cluster_next(&list->head);
	if (cluster_next(&list->tail) == idx) {
		cluster_set_null(&list->head);
		cluster_set_null(&list->tail);
	} else
		cluster_set_next_flag(&list->head,
				      cluster_next(&ci[idx]), 0);

	return idx;
}

/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
		unsigned int idx)
{
	/*
	 * If scan_swap_map() can't find a free cluster, it will check
	 * si->swap_map directly.
	 * To make sure the discarding cluster isn't taken by
	 * scan_swap_map(), mark the swap entries bad (occupied).  They
	 * will be cleared after discard.
	 */
	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
			SWAP_MAP_BAD, SWAPFILE_CLUSTER);

	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);

	schedule_work(&si->discard_work);
}

static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	struct swap_cluster_info *ci = si->cluster_info;

	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
	cluster_list_add_tail(&si->free_clusters, ci, idx);
}

/*
 * Actually do the discard.  After a cluster discard is finished, the
 * cluster will be added to the free cluster list.  The caller should
 * hold si->lock.
 */
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
	struct swap_cluster_info *info, *ci;
	unsigned int idx;

	info = si->cluster_info;

	while (!cluster_list_empty(&si->discard_clusters)) {
		idx = cluster_list_del_first(&si->discard_clusters, info);
		spin_unlock(&si->lock);

		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
				SWAPFILE_CLUSTER);

		spin_lock(&si->lock);
		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
		__free_cluster(si, idx);
		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
				0, SWAPFILE_CLUSTER);
		unlock_cluster(ci);
	}
}

static void swap_discard_work(struct work_struct *work)
{
	struct swap_info_struct *si;

	si = container_of(work, struct swap_info_struct, discard_work);

	spin_lock(&si->lock);
	swap_do_scheduled_discard(si);
	spin_unlock(&si->lock);
}

static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
{
	struct swap_cluster_info *ci = si->cluster_info;

	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
	cluster_list_del_first(&si->free_clusters, ci);
	cluster_set_count_flag(ci + idx, 0, 0);
}

static void free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	struct swap_cluster_info *ci = si->cluster_info + idx;

	VM_BUG_ON(cluster_count(ci) != 0);
	/*
	 * If the swap is discardable, prepare to discard the cluster
	 * instead of freeing it immediately.  The cluster will be freed
	 * after discard.
	 */
	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
		swap_cluster_schedule_discard(si, idx);
		return;
	}

	__free_cluster(si, idx);
}

/*
 * The cluster corresponding to page_nr will be used.  The cluster will
 * be removed from the free cluster list and its usage counter will be
 * increased.
 */
static void inc_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;
	if (cluster_is_free(&cluster_info[idx]))
		alloc_cluster(p, idx);

	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) + 1);
}

/*
 * The cluster corresponding to page_nr drops one usage.  If the usage
 * counter becomes 0, which means no page in the cluster is in use, we
 * can optionally discard the cluster and add it to the free cluster
 * list.
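 * This is the teardown counterpart of inc_cluster_info_page() above.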
 */
static void dec_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;

	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) - 1);

	if (cluster_count(&cluster_info[idx]) == 0)
		free_cluster(p, idx);
}

/*
 * It's possible that scan_swap_map() uses a free cluster in the middle
 * of the free cluster list.  Avoid such abuse to prevent list
 * corruption.
 */
static bool
scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
	unsigned long offset)
{
	struct percpu_cluster *percpu_cluster;
	bool conflict;

	offset /= SWAPFILE_CLUSTER;
	conflict = !cluster_list_empty(&si->free_clusters) &&
		offset != cluster_list_first(&si->free_clusters) &&
		cluster_is_free(&si->cluster_info[offset]);

	if (!conflict)
		return false;

	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
	cluster_set_null(&percpu_cluster->index);
	return true;
}

/*
 * Try to get a swap entry from the current cpu's swap entry pool (a
 * cluster).  This might involve allocating a new cluster for the
 * current CPU too.
 */
static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
	unsigned long *offset, unsigned long *scan_base)
{
	struct percpu_cluster *cluster;
	struct swap_cluster_info *ci;
	unsigned long tmp, max;

new_cluster:
	cluster = this_cpu_ptr(si->percpu_cluster);
	if (cluster_is_null(&cluster->index)) {
		if (!cluster_list_empty(&si->free_clusters)) {
			cluster->index = si->free_clusters.head;
			cluster->next = cluster_next(&cluster->index) *
					SWAPFILE_CLUSTER;
		} else if (!cluster_list_empty(&si->discard_clusters)) {
			/*
			 * we don't have a free cluster but have some
			 * clusters being discarded; do the discard now and
			 * reclaim them, then reread cluster_next_cpu since
			 * we dropped si->lock
			 */
			swap_do_scheduled_discard(si);
			*scan_base = this_cpu_read(*si->cluster_next_cpu);
			*offset = *scan_base;
			goto new_cluster;
		} else
			return false;
	}

	/*
	 * Other CPUs can use our cluster if they can't find a free cluster;
	 * check if there are still free entries in the cluster
	 */
	tmp = cluster->next;
	max = min_t(unsigned long, si->max,
		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
	if (tmp < max) {
		ci = lock_cluster(si, tmp);
		while (tmp < max) {
			if (!si->swap_map[tmp])
				break;
			tmp++;
		}
		unlock_cluster(ci);
	}
	if (tmp >= max) {
		cluster_set_null(&cluster->index);
		goto new_cluster;
	}
	cluster->next = tmp + 1;
	*offset = tmp;
	*scan_base = tmp;
	return true;
}

static void __del_from_avail_list(struct swap_info_struct *p)
{
	int nid;

	for_each_node(nid)
		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
}

static void del_from_avail_list(struct swap_info_struct *p)
{
	spin_lock(&swap_avail_lock);
	__del_from_avail_list(p);
	spin_unlock(&swap_avail_lock);
}

static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
			     unsigned int nr_entries)
{
	unsigned int end = offset + nr_entries - 1;

	if (offset == si->lowest_bit)
		si->lowest_bit += nr_entries;
	if (end == si->highest_bit)
		si->highest_bit -= nr_entries;
	si->inuse_pages += nr_entries;
	if (si->inuse_pages == si->pages) {
		si->lowest_bit = si->max;
		si->highest_bit = 0;
		del_from_avail_list(si);
	}
}

static void add_to_avail_list(struct swap_info_struct *p)
{
	int nid;

	spin_lock(&swap_avail_lock);
	for_each_node(nid) {
		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
	}
	spin_unlock(&swap_avail_lock);
}

static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
			    unsigned int nr_entries)
{
	unsigned long end = offset + nr_entries - 1;
	void (*swap_slot_free_notify)(struct block_device *, unsigned long);

	if (offset < si->lowest_bit)
		si->lowest_bit = offset;
	if (end > si->highest_bit) {
		bool was_full = !si->highest_bit;

		si->highest_bit = end;
		if (was_full && (si->flags & SWP_WRITEOK))
			add_to_avail_list(si);
	}
	atomic_long_add(nr_entries, &nr_swap_pages);
	si->inuse_pages -= nr_entries;
	if (si->flags & SWP_BLKDEV)
		swap_slot_free_notify =
			si->bdev->bd_disk->fops->swap_slot_free_notify;
	else
		swap_slot_free_notify = NULL;
	while (offset <= end) {
		frontswap_invalidate_page(si->type, offset);
		if (swap_slot_free_notify)
			swap_slot_free_notify(si->bdev, offset);
		offset++;
	}
}

static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
{
	unsigned long prev;

	if (!(si->flags & SWP_SOLIDSTATE)) {
		si->cluster_next = next;
		return;
	}

	prev = this_cpu_read(*si->cluster_next_cpu);
	/*
	 * When crossing a swap-address-space-size aligned chunk, choose
	 * another chunk randomly to avoid lock contention on the swap
	 * address space if possible.
	 */
	if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
	    (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
		/* No free swap slots available */
		if (si->highest_bit <= si->lowest_bit)
			return;
		next = si->lowest_bit +
			prandom_u32_max(si->highest_bit - si->lowest_bit + 1);
		next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
		next = max_t(unsigned int, next, si->lowest_bit);
	}
	this_cpu_write(*si->cluster_next_cpu, next);
}

static int scan_swap_map_slots(struct swap_info_struct *si,
			       unsigned char usage, int nr,
			       swp_entry_t slots[])
{
	struct swap_cluster_info *ci;
	unsigned long offset;
	unsigned long scan_base;
	unsigned long last_in_cluster = 0;
	int latency_ration = LATENCY_LIMIT;
	int n_ret = 0;
	bool scanned_many = false;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 * And we let swap pages go all over an SSD partition.  Hugh
	 */

	si->flags += SWP_SCANNING;
	/*
	 * Use percpu scan base for SSD to reduce lock contention on
	 * cluster and swap cache.  For HDD, sequential access is more
	 * important.
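	 * (si->cluster_next_cpu holds each CPU's next offset to try;
	 * it is updated in set_cluster_next() above.)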
	 */
	if (si->flags & SWP_SOLIDSTATE)
		scan_base = this_cpu_read(*si->cluster_next_cpu);
	else
		scan_base = si->cluster_next;
	offset = scan_base;

	/* SSD algorithm */
	if (si->cluster_info) {
		if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
			goto scan;
	} else if (unlikely(!si->cluster_nr--)) {
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;
			goto checks;
		}

		spin_unlock(&si->lock);

		/*
		 * If seek is expensive, start searching for new cluster from
		 * start of partition, to minimize the span of allocated swap.
		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
		 */
		scan_base = offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= si->highest_bit; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&si->lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = scan_base;
		spin_lock(&si->lock);
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
	}

checks:
	if (si->cluster_info) {
		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
			/* take a break if we already got some slots */
			if (n_ret)
				goto done;
			if (!scan_swap_map_try_ssd_cluster(si, &offset,
							   &scan_base))
				goto scan;
		}
	}
	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (offset > si->highest_bit)
		scan_base = offset = si->lowest_bit;

	ci = lock_cluster(si, offset);
	/* reuse swap entry of cache-only swap if not busy. */
	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
		int swap_was_freed;
		unlock_cluster(ci);
		spin_unlock(&si->lock);
		swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
		spin_lock(&si->lock);
		/* entry was freed successfully, try to use this again */
		if (swap_was_freed)
			goto checks;
		goto scan; /* check next one */
	}

	if (si->swap_map[offset]) {
		unlock_cluster(ci);
		if (!n_ret)
			goto scan;
		else
			goto done;
	}
	si->swap_map[offset] = usage;
	inc_cluster_info_page(si, si->cluster_info, offset);
	unlock_cluster(ci);

	swap_range_alloc(si, offset, 1);
	slots[n_ret++] = swp_entry(si->type, offset);

	/* got enough slots or reach max slots? */
	if ((n_ret == nr) || (offset >= si->highest_bit))
		goto done;

	/* search for next available slot */

	/* time to take a break? */
	if (unlikely(--latency_ration < 0)) {
		if (n_ret)
			goto done;
		spin_unlock(&si->lock);
		cond_resched();
		spin_lock(&si->lock);
		latency_ration = LATENCY_LIMIT;
	}

	/* try to get more slots in cluster */
	if (si->cluster_info) {
		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
			goto checks;
	} else if (si->cluster_nr && !si->swap_map[++offset]) {
		/* non-ssd case, still more slots in cluster? */
		--si->cluster_nr;
		goto checks;
	}

	/*
	 * Even if there are no free clusters available (fragmented),
	 * try to scan a little more quickly with lock held unless we
	 * have scanned too many slots already.
	 */
	if (!scanned_many) {
		unsigned long scan_limit;

		if (offset < scan_base)
			scan_limit = scan_base;
		else
			scan_limit = si->highest_bit;
		for (; offset <= scan_limit && --latency_ration > 0;
		     offset++) {
			if (!si->swap_map[offset])
				goto checks;
		}
	}

done:
	set_cluster_next(si, offset + 1);
	si->flags -= SWP_SCANNING;
	return n_ret;

scan:
	spin_unlock(&si->lock);
	while (++offset <= si->highest_bit) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
			scanned_many = true;
		}
	}
	offset = si->lowest_bit;
	while (offset < scan_base) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
			scanned_many = true;
		}
		offset++;
	}
	spin_lock(&si->lock);

no_page:
	si->flags -= SWP_SCANNING;
	return n_ret;
}

static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
{
	unsigned long idx;
	struct swap_cluster_info *ci;
	unsigned long offset, i;
	unsigned char *map;

	/*
	 * Should not even be attempting cluster allocations when huge
	 * page swap is disabled.  Warn and fail the allocation.
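	 * (With !CONFIG_THP_SWAP, swap_entry_size() folds to 1, so
	 * get_swap_pages() should never route a cluster-sized request
	 * here in the first place.)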
	 */
	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
		VM_WARN_ON_ONCE(1);
		return 0;
	}

	if (cluster_list_empty(&si->free_clusters))
		return 0;

	idx = cluster_list_first(&si->free_clusters);
	offset = idx * SWAPFILE_CLUSTER;
	ci = lock_cluster(si, offset);
	alloc_cluster(si, idx);
	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);

	map = si->swap_map + offset;
	for (i = 0; i < SWAPFILE_CLUSTER; i++)
		map[i] = SWAP_HAS_CACHE;
	unlock_cluster(ci);
	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
	*slot = swp_entry(si->type, offset);

	return 1;
}

static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	unsigned long offset = idx * SWAPFILE_CLUSTER;
	struct swap_cluster_info *ci;

	ci = lock_cluster(si, offset);
	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
	cluster_set_count_flag(ci, 0, 0);
	free_cluster(si, idx);
	unlock_cluster(ci);
	swap_range_free(si, offset, SWAPFILE_CLUSTER);
}

static unsigned long scan_swap_map(struct swap_info_struct *si,
				   unsigned char usage)
{
	swp_entry_t entry;
	int n_ret;

	n_ret = scan_swap_map_slots(si, usage, 1, &entry);

	if (n_ret)
		return swp_offset(entry);
	else
		return 0;

}

int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
{
	unsigned long size = swap_entry_size(entry_size);
	struct swap_info_struct *si, *next;
	long avail_pgs;
	int n_ret = 0;
	int node;

	/* Only single cluster request supported */
	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);

	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
	if (avail_pgs <= 0)
		goto noswap;

	n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);

	atomic_long_sub(n_goal * size, &nr_swap_pages);

	spin_lock(&swap_avail_lock);

start_over:
	node = numa_node_id();
	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
		/* requeue si to after same-priority siblings */
		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
		spin_unlock(&swap_avail_lock);
		spin_lock(&si->lock);
		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
			spin_lock(&swap_avail_lock);
			if (plist_node_empty(&si->avail_lists[node])) {
				spin_unlock(&si->lock);
				goto nextsi;
			}
			WARN(!si->highest_bit,
			     "swap_info %d in list but !highest_bit\n",
			     si->type);
			WARN(!(si->flags & SWP_WRITEOK),
			     "swap_info %d in list but !SWP_WRITEOK\n",
			     si->type);
			__del_from_avail_list(si);
			spin_unlock(&si->lock);
			goto nextsi;
		}
		if (size == SWAPFILE_CLUSTER) {
			if (!(si->flags & SWP_FS))
				n_ret = swap_alloc_cluster(si, swp_entries);
		} else
			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
						    n_goal, swp_entries);
		spin_unlock(&si->lock);
		if (n_ret || size == SWAPFILE_CLUSTER)
			goto check_out;
		pr_debug("scan_swap_map of si %d failed to find offset\n",
			si->type);

		spin_lock(&swap_avail_lock);
nextsi:
		/*
		 * if we got here, it's likely that si was almost full before,
		 * and since scan_swap_map() can drop the si->lock, multiple
		 * callers probably all tried to get a page from the same si
		 * and it filled up before we could get one; or, the si filled
		 * up between us dropping swap_avail_lock and taking si->lock.
		 * Since we dropped the swap_avail_lock, the swap_avail_head
		 * list may have been modified; so if next is still in the
		 * swap_avail_head list then try it, otherwise start over
		 * if we have not gotten any slots.
		 */
		if (plist_node_empty(&next->avail_lists[node]))
			goto start_over;
	}

	spin_unlock(&swap_avail_lock);

check_out:
	if (n_ret < n_goal)
		atomic_long_add((long)(n_goal - n_ret) * size,
				&nr_swap_pages);
noswap:
	return n_ret;
}

/* The only caller of this function is now suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si = swap_type_to_swap_info(type);
	pgoff_t offset;

	if (!si)
		goto fail;

	spin_lock(&si->lock);
	if (si->flags & SWP_WRITEOK) {
		atomic_long_dec(&nr_swap_pages);
		/* This is called for allocating swap entry, not cache */
		offset = scan_swap_map(si, 1);
		if (offset) {
			spin_unlock(&si->lock);
			return swp_entry(type, offset);
		}
		atomic_long_inc(&nr_swap_pages);
	}
	spin_unlock(&si->lock);
fail:
	return (swp_entry_t) {0};
}

static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned long offset;

	if (!entry.val)
		goto out;
	p = swp_swap_info(entry);
	if (!p)
		goto bad_nofile;
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	return p;

bad_offset:
	pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}

static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = __swap_info_get(entry);
	if (!p)
		goto out;
	if (!p->swap_map[swp_offset(entry)])
		goto bad_free;
	return p;

bad_free:
	pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);
	goto out;
out:
	return NULL;
}

static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);
	if (p)
		spin_lock(&p->lock);
	return p;
}

static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
					struct swap_info_struct *q)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);

	if (p != q) {
		if (q != NULL)
			spin_unlock(&q->lock);
		if (p != NULL)
			spin_lock(&p->lock);
	}
	return p;
}

static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
					      unsigned long offset,
					      unsigned char usage)
{
	unsigned char count;
	unsigned char has_cache;

	count = p->swap_map[offset];

	has_cache = count & SWAP_HAS_CACHE;
	count &= ~SWAP_HAS_CACHE;

	if (usage == SWAP_HAS_CACHE) {
		VM_BUG_ON(!has_cache);
		has_cache = 0;
	} else if (count == SWAP_MAP_SHMEM) {
		/*
		 * Or we could insist on shmem.c using a special
		 * swap_shmem_free() and free_shmem_swap_and_cache()...
		 */
		count = 0;
	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
		if (count == COUNT_CONTINUED) {
			if (swap_count_continued(p, offset, count))
				count = SWAP_MAP_MAX | COUNT_CONTINUED;
			else
				count = SWAP_MAP_MAX;
		} else
			count--;
	}

	usage = count | has_cache;
	p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;

	return usage;
}

/*
 * Check whether swap entry is valid in the swap device.  If so,
 * return pointer to swap_info_struct, and keep the swap entry valid
 * via preventing the swap device from being swapoff, until
 * put_swap_device() is called.  Otherwise return NULL.
 *
 * The entirety of the RCU read-side critical section must come before
 * the return from, or after the call to, synchronize_rcu() in
 * enable_swap_info() or swapoff().  So if "si->flags & SWP_VALID" is
 * true, the si->map, si->cluster_info, etc. must be valid in the
 * critical section.
 *
 * Notice that swapoff or swapoff+swapon can still happen before the
 * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock()
 * in put_swap_device() if there isn't any other way to prevent
 * swapoff, such as page lock, page table lock, etc.  The caller must
 * be prepared for that.  For example, the following situation is
 * possible.
 *
 *   CPU1				CPU2
 *   do_swap_page()
 *     ...				swapoff+swapon
 *     __read_swap_cache_async()
 *       swapcache_prepare()
 *         __swap_duplicate()
 *           // check swap_map
 *     // verify PTE not changed
 *
 * In __swap_duplicate(), the swap_map needs to be checked before
 * changing, partly because the specified swap entry may be for another
 * swap device which has been swapoff.  And in do_swap_page(), after
 * the page is read from the swap device, the PTE is verified not
 * changed with the page table locked to check whether the swap device
 * has been swapoff or swapoff+swapon.
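 *
 * A typical calling pattern is therefore (sketch):
 *
 *	si = get_swap_device(entry);
 *	if (si) {
 *		... access si->swap_map, si->cluster_info, etc. ...
 *		put_swap_device(si);
 *	}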
 */
struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	struct swap_info_struct *si;
	unsigned long offset;

	if (!entry.val)
		goto out;
	si = swp_swap_info(entry);
	if (!si)
		goto bad_nofile;

	rcu_read_lock();
	if (!(si->flags & SWP_VALID))
		goto unlock_out;
	offset = swp_offset(entry);
	if (offset >= si->max)
		goto unlock_out;

	return si;
bad_nofile:
	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
out:
	return NULL;
unlock_out:
	rcu_read_unlock();
	return NULL;
}

static unsigned char __swap_entry_free(struct swap_info_struct *p,
				       swp_entry_t entry)
{
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);
	unsigned char usage;

	ci = lock_cluster_or_swap_info(p, offset);
	usage = __swap_entry_free_locked(p, offset, 1);
	unlock_cluster_or_swap_info(p, ci);
	if (!usage)
		free_swap_slot(entry);

	return usage;
}

static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
{
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);
	unsigned char count;

	ci = lock_cluster(p, offset);
	count = p->swap_map[offset];
	VM_BUG_ON(count != SWAP_HAS_CACHE);
	p->swap_map[offset] = 0;
	dec_cluster_info_page(p, p->cluster_info, offset);
	unlock_cluster(ci);

	mem_cgroup_uncharge_swap(entry, 1);
	swap_range_free(p, offset, 1);
}

/*
 * Caller has made sure that the swap device corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);
	if (p)
		__swap_entry_free(p, entry);
}

/*
 * Called after dropping swapcache to decrease refcnt to swap entries.
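 * For a THP whose cluster has dropped to cache-only references on
 * every entry, the whole cluster is freed in one go via
 * swap_free_cluster() below.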
 */
void put_swap_page(struct page *page, swp_entry_t entry)
{
	unsigned long offset = swp_offset(entry);
	unsigned long idx = offset / SWAPFILE_CLUSTER;
	struct swap_cluster_info *ci;
	struct swap_info_struct *si;
	unsigned char *map;
	unsigned int i, free_entries = 0;
	unsigned char val;
	int size = swap_entry_size(hpage_nr_pages(page));

	si = _swap_info_get(entry);
	if (!si)
		return;

	ci = lock_cluster_or_swap_info(si, offset);
	if (size == SWAPFILE_CLUSTER) {
		VM_BUG_ON(!cluster_is_huge(ci));
		map = si->swap_map + offset;
		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
			val = map[i];
			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
			if (val == SWAP_HAS_CACHE)
				free_entries++;
		}
		cluster_clear_huge(ci);
		if (free_entries == SWAPFILE_CLUSTER) {
			unlock_cluster_or_swap_info(si, ci);
			spin_lock(&si->lock);
			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
			swap_free_cluster(si, idx);
			spin_unlock(&si->lock);
			return;
		}
	}
	for (i = 0; i < size; i++, entry.val++) {
		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
			unlock_cluster_or_swap_info(si, ci);
			free_swap_slot(entry);
			if (i == size - 1)
				return;
			lock_cluster_or_swap_info(si, offset);
		}
	}
	unlock_cluster_or_swap_info(si, ci);
}

#ifdef CONFIG_THP_SWAP
int split_swap_cluster(swp_entry_t entry)
{
	struct swap_info_struct *si;
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);

	si = _swap_info_get(entry);
	if (!si)
		return -EBUSY;
	ci = lock_cluster(si, offset);
	cluster_clear_huge(ci);
	unlock_cluster(ci);
	return 0;
}
#endif

static int swp_entry_cmp(const void *ent1, const void *ent2)
{
	const swp_entry_t *e1 = ent1, *e2 = ent2;

	return (int)swp_type(*e1) - (int)swp_type(*e2);
}

void swapcache_free_entries(swp_entry_t *entries, int n)
{
	struct swap_info_struct *p, *prev;
	int i;

	if (n <= 0)
		return;

	prev = NULL;
	p = NULL;

	/*
	 * Sort swap entries by swap device, so each lock is only taken once.
	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
	 * so low that it isn't necessary to optimize further.
	 */
	if (nr_swapfiles > 1)
		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
	for (i = 0; i < n; ++i) {
		p = swap_info_get_cont(entries[i], prev);
		if (p)
			swap_entry_free(p, entries[i]);
		prev = p;
	}
	if (p)
		spin_unlock(&p->lock);
}

/*
 * How many references to page are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	struct swap_cluster_info *ci;
	swp_entry_t entry;
	unsigned long offset;

	entry.val = page_private(page);
	p = _swap_info_get(entry);
	if (p) {
		offset = swp_offset(entry);
		ci = lock_cluster_or_swap_info(p, offset);
		count = swap_count(p->swap_map[offset]);
		unlock_cluster_or_swap_info(p, ci);
	}
	return count;
}

int __swap_count(swp_entry_t entry)
{
	struct swap_info_struct *si;
	pgoff_t offset = swp_offset(entry);
	int count = 0;

	si = get_swap_device(entry);
	if (si) {
		count = swap_count(si->swap_map[offset]);
		put_swap_device(si);
	}
	return count;
}

static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
	int count = 0;
	pgoff_t offset = swp_offset(entry);
	struct swap_cluster_info *ci;

	ci = lock_cluster_or_swap_info(si, offset);
	count = swap_count(si->swap_map[offset]);
	unlock_cluster_or_swap_info(si, ci);
	return count;
}

/*
 * How many references to @entry are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
int __swp_swapcount(swp_entry_t entry)
{
	int count = 0;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (si) {
		count = swap_swapcount(si, entry);
		put_swap_device(si);
	}
	return count;
}

/*
 * How many references to @entry are currently swapped out?
 * This considers COUNT_CONTINUED so it returns an exact answer.
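 * The continuation pages are walked via the lru list linkage of the
 * pages backing the swap_map (see swap_count_continued()).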
 */
int swp_swapcount(swp_entry_t entry)
{
	int count, tmp_count, n;
	struct swap_info_struct *p;
	struct swap_cluster_info *ci;
	struct page *page;
	pgoff_t offset;
	unsigned char *map;

	p = _swap_info_get(entry);
	if (!p)
		return 0;

	offset = swp_offset(entry);

	ci = lock_cluster_or_swap_info(p, offset);

	count = swap_count(p->swap_map[offset]);
	if (!(count & COUNT_CONTINUED))
		goto out;

	count &= ~COUNT_CONTINUED;
	n = SWAP_MAP_MAX + 1;

	page = vmalloc_to_page(p->swap_map + offset);
	offset &= ~PAGE_MASK;
	VM_BUG_ON(page_private(page) != SWP_CONTINUED);

	do {
		page = list_next_entry(page, lru);
		map = kmap_atomic(page);
		tmp_count = map[offset];
		kunmap_atomic(map);

		count += (tmp_count & ~COUNT_CONTINUED) * n;
		n *= (SWAP_CONT_MAX + 1);
	} while (tmp_count & COUNT_CONTINUED);
out:
	unlock_cluster_or_swap_info(p, ci);
	return count;
}

static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
					 swp_entry_t entry)
{
	struct swap_cluster_info *ci;
	unsigned char *map = si->swap_map;
	unsigned long roffset = swp_offset(entry);
	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
	int i;
	bool ret = false;

	ci = lock_cluster_or_swap_info(si, offset);
	if (!ci || !cluster_is_huge(ci)) {
		if (swap_count(map[roffset]))
			ret = true;
		goto unlock_out;
	}
	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
		if (swap_count(map[offset + i])) {
			ret = true;
			break;
		}
	}
unlock_out:
	unlock_cluster_or_swap_info(si, ci);
	return ret;
}

static bool page_swapped(struct page *page)
{
	swp_entry_t entry;
	struct swap_info_struct *si;

	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
		return page_swapcount(page) != 0;

	page = compound_head(page);
	entry.val = page_private(page);
	si = _swap_info_get(entry);
	if (si)
		return swap_page_trans_huge_swapped(si, entry);
	return false;
}

static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
					 int *total_swapcount)
{
	int i, map_swapcount, _total_mapcount, _total_swapcount;
	unsigned long offset = 0;
	struct swap_info_struct *si;
	struct swap_cluster_info *ci = NULL;
	unsigned char *map = NULL;
	int mapcount, swapcount = 0;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
		mapcount = page_trans_huge_mapcount(page, total_mapcount);
		if (PageSwapCache(page))
			swapcount = page_swapcount(page);
		if (total_swapcount)
			*total_swapcount = swapcount;
		return mapcount + swapcount;
	}

	page = compound_head(page);

	_total_mapcount = _total_swapcount = map_swapcount = 0;
	if (PageSwapCache(page)) {
		swp_entry_t entry;

		entry.val = page_private(page);
		si = _swap_info_get(entry);
		if (si) {
			map = si->swap_map;
			offset = swp_offset(entry);
		}
	}
	if (map)
		ci = lock_cluster(si, offset);
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		_total_mapcount += mapcount;
		if (map) {
			swapcount = swap_count(map[offset + i]);
			_total_swapcount += swapcount;
		}
		map_swapcount = max(map_swapcount, mapcount + swapcount);
	}
	unlock_cluster(ci);
	if (PageDoubleMap(page)) {
		map_swapcount -= 1;
		_total_mapcount -= HPAGE_PMD_NR;
	}
	mapcount = compound_mapcount(page);
	map_swapcount += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	if (total_swapcount)
		*total_swapcount = _total_swapcount;

	return map_swapcount;
}

/*
 * We can write to an anon page without COW if there are no other references
 * to it.  And as a side-effect, free up its swap: because the old content
 * on disk will never be read, and seeking back there to write new content
 * later would only waste time away from clustering.
 *
 * NOTE: total_map_swapcount should not be relied upon by the caller if
 * reuse_swap_page() returns false, but it may always be overwritten
 * (see the other implementation for CONFIG_SWAP=n).
 */
bool reuse_swap_page(struct page *page, int *total_map_swapcount)
{
	int count, total_mapcount, total_swapcount;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (unlikely(PageKsm(page)))
		return false;
	count = page_trans_huge_map_swapcount(page, &total_mapcount,
					      &total_swapcount);
	if (total_map_swapcount)
		*total_map_swapcount = total_mapcount + total_swapcount;
	if (count == 1 && PageSwapCache(page) &&
	    (likely(!PageTransCompound(page)) ||
	     /* The remaining swap count will be freed soon */
	     total_swapcount == page_swapcount(page))) {
		if (!PageWriteback(page)) {
			page = compound_head(page);
			delete_from_swap_cache(page);
			SetPageDirty(page);
		} else {
			swp_entry_t entry;
			struct swap_info_struct *p;

			entry.val = page_private(page);
			p = swap_info_get(entry);
			if (p->flags & SWP_STABLE_WRITES) {
				spin_unlock(&p->lock);
				return false;
			}
			spin_unlock(&p->lock);
		}
	}

	return count <= 1;
}

/*
 * If swap is getting full, or if there are no more mappings of this page,
 * then try_to_free_swap is called to free its swap space.
 */
int try_to_free_swap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_swapped(page))
		return 0;

	/*
	 * Once hibernation has begun to create its image of memory,
	 * there's a danger that one of the calls to try_to_free_swap()
	 * - most probably a call from __try_to_reclaim_swap() while
	 * hibernation is allocating its own swap pages for the image,
	 * but conceivably even a call from memory reclaim - will free
	 * the swap from a page which has already been recorded in the
	 * image as a clean swapcache page, and then reuse its swap for
	 * another page of the image.  On waking from hibernation, the
	 * original page might be freed under memory pressure, then
	 * later read back in from swap, now with the wrong data.
	 *
	 * Hibernation suspends storage while it is writing the image
	 * to disk so check that here.
	 */
	if (pm_suspended_storage())
		return 0;

	page = compound_head(page);
	delete_from_swap_cache(page);
	SetPageDirty(page);
	return 1;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
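 * Returns 1 if the entry referred to a valid swap device (or was not
 * a swap entry at all), 0 if the swap device lookup failed.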
 */
int free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned char count;

	if (non_swap_entry(entry))
		return 1;

	p = _swap_info_get(entry);
	if (p) {
		count = __swap_entry_free(p, entry);
		if (count == SWAP_HAS_CACHE &&
		    !swap_page_trans_huge_swapped(p, entry))
			__try_to_reclaim_swap(p, swp_offset(entry),
					      TTRS_UNMAPPED | TTRS_FULL);
	}
	return p != NULL;
}

#ifdef CONFIG_HIBERNATION
/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
	struct block_device *bdev = NULL;
	int type;

	if (device)
		bdev = bdget(device);

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		struct swap_info_struct *sis = swap_info[type];

		if (!(sis->flags & SWP_WRITEOK))
			continue;

		if (!bdev) {
			if (bdev_p)
				*bdev_p = bdgrab(sis->bdev);

			spin_unlock(&swap_lock);
			return type;
		}
		if (bdev == sis->bdev) {
			struct swap_extent *se = first_se(sis);

			if (se->start_block == offset) {
				if (bdev_p)
					*bdev_p = bdgrab(sis->bdev);

				spin_unlock(&swap_lock);
				bdput(bdev);
				return type;
			}
		}
	}
	spin_unlock(&swap_lock);
	if (bdev)
		bdput(bdev);

	return -ENODEV;
}

/*
 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 * corresponding to given index in swap_info (swap type).
 */
sector_t swapdev_block(int type, pgoff_t offset)
{
	struct block_device *bdev;
	struct swap_info_struct *si = swap_type_to_swap_info(type);

	if (!si || !(si->flags & SWP_WRITEOK))
		return 0;
	return map_swap_entry(swp_entry(type, offset), &bdev);
}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
	unsigned int n = 0;

	spin_lock(&swap_lock);
	if ((unsigned int)type < nr_swapfiles) {
		struct swap_info_struct *sis = swap_info[type];

		spin_lock(&sis->lock);
		if (sis->flags & SWP_WRITEOK) {
			n = sis->pages;
			if (free)
				n -= sis->inuse_pages;
		}
		spin_unlock(&sis->lock);
	}
	spin_unlock(&swap_lock);
	return n;
}
#endif /* CONFIG_HIBERNATION */

static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
}

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	struct page *swapcache;
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	swapcache = page;
	page = ksm_might_need_to_copy(page, vma, addr);
	if (unlikely(!page))
		return -ENOMEM;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
		ret = 0;
		goto out;
	}

	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	if (page == swapcache) {
		page_add_anon_rmap(page, vma, addr, false);
	} else { /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, addr, false);
		lru_cache_add_active_or_unevictable(page, vma);
	}
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	if (page != swapcache) {
		unlock_page(page);
		put_page(page);
	}
	return ret;
}

static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned int type, bool frontswap,
			unsigned long *fs_pages_to_unuse)
{
	struct page *page;
	swp_entry_t entry;
	pte_t *pte;
	struct swap_info_struct *si;
	unsigned long offset;
	int ret = 0;
	volatile unsigned char *swap_map;

	si = swap_info[type];
	pte = pte_offset_map(pmd, addr);
	do {
		struct vm_fault vmf;

		if (!is_swap_pte(*pte))
			continue;

		entry = pte_to_swp_entry(*pte);
		if (swp_type(entry) != type)
			continue;

		offset = swp_offset(entry);
		if (frontswap && !frontswap_test(si, offset))
			continue;

		pte_unmap(pte);
		swap_map = &si->swap_map[offset];
		page = lookup_swap_cache(entry, vma, addr);
		if (!page) {
			vmf.vma = vma;
			vmf.address = addr;
			vmf.pmd = pmd;
			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
						&vmf);
		}
		if (!page) {
			if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
				goto try_next;
			return -ENOMEM;
		}

		lock_page(page);
		wait_on_page_writeback(page);
		ret = unuse_pte(vma, pmd, addr, entry, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto out;
		}

		try_to_free_swap(page);
		unlock_page(page);
		put_page(page);

		if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
			ret = FRONTSWAP_PAGES_UNUSED;
			goto out;
		}
try_next:
		pte = pte_offset_map(pmd, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);

	ret = 0;
out:
	return ret;
}

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned int type, bool frontswap,
				unsigned long *fs_pages_to_unuse)
{
	pmd_t *pmd;
	unsigned long next;
	int ret;

	pmd = pmd_offset(pud, addr);
	do {
		cond_resched();
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		ret = unuse_pte_range(vma, pmd, addr, next, type,
				      frontswap, fs_pages_to_unuse);
2025 if (ret) 2026 return ret; 2027 } while (pmd++, addr = next, addr != end); 2028 return 0; 2029 } 2030 2031 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d, 2032 unsigned long addr, unsigned long end, 2033 unsigned int type, bool frontswap, 2034 unsigned long *fs_pages_to_unuse) 2035 { 2036 pud_t *pud; 2037 unsigned long next; 2038 int ret; 2039 2040 pud = pud_offset(p4d, addr); 2041 do { 2042 next = pud_addr_end(addr, end); 2043 if (pud_none_or_clear_bad(pud)) 2044 continue; 2045 ret = unuse_pmd_range(vma, pud, addr, next, type, 2046 frontswap, fs_pages_to_unuse); 2047 if (ret) 2048 return ret; 2049 } while (pud++, addr = next, addr != end); 2050 return 0; 2051 } 2052 2053 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, 2054 unsigned long addr, unsigned long end, 2055 unsigned int type, bool frontswap, 2056 unsigned long *fs_pages_to_unuse) 2057 { 2058 p4d_t *p4d; 2059 unsigned long next; 2060 int ret; 2061 2062 p4d = p4d_offset(pgd, addr); 2063 do { 2064 next = p4d_addr_end(addr, end); 2065 if (p4d_none_or_clear_bad(p4d)) 2066 continue; 2067 ret = unuse_pud_range(vma, p4d, addr, next, type, 2068 frontswap, fs_pages_to_unuse); 2069 if (ret) 2070 return ret; 2071 } while (p4d++, addr = next, addr != end); 2072 return 0; 2073 } 2074 2075 static int unuse_vma(struct vm_area_struct *vma, unsigned int type, 2076 bool frontswap, unsigned long *fs_pages_to_unuse) 2077 { 2078 pgd_t *pgd; 2079 unsigned long addr, end, next; 2080 int ret; 2081 2082 addr = vma->vm_start; 2083 end = vma->vm_end; 2084 2085 pgd = pgd_offset(vma->vm_mm, addr); 2086 do { 2087 next = pgd_addr_end(addr, end); 2088 if (pgd_none_or_clear_bad(pgd)) 2089 continue; 2090 ret = unuse_p4d_range(vma, pgd, addr, next, type, 2091 frontswap, fs_pages_to_unuse); 2092 if (ret) 2093 return ret; 2094 } while (pgd++, addr = next, addr != end); 2095 return 0; 2096 } 2097 2098 static int unuse_mm(struct mm_struct *mm, unsigned int type, 2099 bool frontswap, unsigned long *fs_pages_to_unuse) 2100 { 2101 struct vm_area_struct *vma; 2102 int ret = 0; 2103 2104 down_read(&mm->mmap_sem); 2105 for (vma = mm->mmap; vma; vma = vma->vm_next) { 2106 if (vma->anon_vma) { 2107 ret = unuse_vma(vma, type, frontswap, 2108 fs_pages_to_unuse); 2109 if (ret) 2110 break; 2111 } 2112 cond_resched(); 2113 } 2114 up_read(&mm->mmap_sem); 2115 return ret; 2116 } 2117 2118 /* 2119 * Scan swap_map (or frontswap_map if frontswap parameter is true) 2120 * from current position to next entry still in use. Return 0 2121 * if there are no inuse entries after prev till end of the map. 2122 */ 2123 static unsigned int find_next_to_unuse(struct swap_info_struct *si, 2124 unsigned int prev, bool frontswap) 2125 { 2126 unsigned int i; 2127 unsigned char count; 2128 2129 /* 2130 * No need for swap_lock here: we're just looking 2131 * for whether an entry is in use, not modifying it; false 2132 * hits are okay, and sys_swapoff() has already prevented new 2133 * allocations from this area (while holding swap_lock). 
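 * * For example, with prev == 0 the scan starts at offset 1 (offset 0 * holds the header page and is marked SWAP_MAP_BAD) and returns the * first offset still in use; try_to_unuse() keeps feeding the returned * index back in until 0 comes back, meaning nothing past the last * returned index remains in use.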
2134 */ 2135 for (i = prev + 1; i < si->max; i++) { 2136 count = READ_ONCE(si->swap_map[i]); 2137 if (count && swap_count(count) != SWAP_MAP_BAD) 2138 if (!frontswap || frontswap_test(si, i)) 2139 break; 2140 if ((i % LATENCY_LIMIT) == 0) 2141 cond_resched(); 2142 } 2143 2144 if (i == si->max) 2145 i = 0; 2146 2147 return i; 2148 } 2149 2150 /* 2151 * If the boolean frontswap is true, only unuse pages_to_unuse pages; 2152 * pages_to_unuse==0 means all pages; ignored if frontswap is false 2153 */ 2154 int try_to_unuse(unsigned int type, bool frontswap, 2155 unsigned long pages_to_unuse) 2156 { 2157 struct mm_struct *prev_mm; 2158 struct mm_struct *mm; 2159 struct list_head *p; 2160 int retval = 0; 2161 struct swap_info_struct *si = swap_info[type]; 2162 struct page *page; 2163 swp_entry_t entry; 2164 unsigned int i; 2165 2166 if (!READ_ONCE(si->inuse_pages)) 2167 return 0; 2168 2169 if (!frontswap) 2170 pages_to_unuse = 0; 2171 2172 retry: 2173 retval = shmem_unuse(type, frontswap, &pages_to_unuse); 2174 if (retval) 2175 goto out; 2176 2177 prev_mm = &init_mm; 2178 mmget(prev_mm); 2179 2180 spin_lock(&mmlist_lock); 2181 p = &init_mm.mmlist; 2182 while (READ_ONCE(si->inuse_pages) && 2183 !signal_pending(current) && 2184 (p = p->next) != &init_mm.mmlist) { 2185 2186 mm = list_entry(p, struct mm_struct, mmlist); 2187 if (!mmget_not_zero(mm)) 2188 continue; 2189 spin_unlock(&mmlist_lock); 2190 mmput(prev_mm); 2191 prev_mm = mm; 2192 retval = unuse_mm(mm, type, frontswap, &pages_to_unuse); 2193 2194 if (retval) { 2195 mmput(prev_mm); 2196 goto out; 2197 } 2198 2199 /* 2200 * Make sure that we aren't completely killing 2201 * interactive performance. 2202 */ 2203 cond_resched(); 2204 spin_lock(&mmlist_lock); 2205 } 2206 spin_unlock(&mmlist_lock); 2207 2208 mmput(prev_mm); 2209 2210 i = 0; 2211 while (READ_ONCE(si->inuse_pages) && 2212 !signal_pending(current) && 2213 (i = find_next_to_unuse(si, i, frontswap)) != 0) { 2214 2215 entry = swp_entry(type, i); 2216 page = find_get_page(swap_address_space(entry), i); 2217 if (!page) 2218 continue; 2219 2220 /* 2221 * It is conceivable that a racing task removed this page from 2222 * swap cache just before we acquired the page lock. The page 2223 * might even be back in swap cache on another swap area. But 2224 * that is okay, try_to_free_swap() only removes stale pages. 2225 */ 2226 lock_page(page); 2227 wait_on_page_writeback(page); 2228 try_to_free_swap(page); 2229 unlock_page(page); 2230 put_page(page); 2231 2232 /* 2233 * For frontswap, we just need to unuse pages_to_unuse, if 2234 * it was specified. Need not check frontswap again here as 2235 * we already zeroed out pages_to_unuse if not frontswap. 2236 */ 2237 if (pages_to_unuse && --pages_to_unuse == 0) 2238 goto out; 2239 } 2240 2241 /* 2242 * Let's check again to see if there are still swap entries in the map. 2243 * If yes, we need to retry the unuse logic again. 2244 * Under global memory pressure, swap entries can be reinserted back 2245 * into process space after the mmlist loop above passes over them. 2246 * 2247 * Limit the number of retries? No: when mmget_not_zero() above fails, 2248 * that mm is likely to be freeing swap from exit_mmap(), which proceeds 2249 * at its own independent pace; and even shmem_writepage() could have 2250 * been preempted after get_swap_page(), temporarily hiding that swap. 2251 * It's easy and robust (though cpu-intensive) just to keep retrying.
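 * * Note that the retry path re-runs shmem_unuse() as well, since new * shmem swap entries may have appeared while the mmlist loop above * was running.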
2252 */ 2253 if (READ_ONCE(si->inuse_pages)) { 2254 if (!signal_pending(current)) 2255 goto retry; 2256 retval = -EINTR; 2257 } 2258 out: 2259 return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval; 2260 } 2261 2262 /* 2263 * After a successful try_to_unuse, if no swap is now in use, we know 2264 * we can empty the mmlist. swap_lock must be held on entry and exit. 2265 * Note that mmlist_lock nests inside swap_lock, and an mm must be 2266 * added to the mmlist just after swap_duplicate - before would be racy. 2267 */ 2268 static void drain_mmlist(void) 2269 { 2270 struct list_head *p, *next; 2271 unsigned int type; 2272 2273 for (type = 0; type < nr_swapfiles; type++) 2274 if (swap_info[type]->inuse_pages) 2275 return; 2276 spin_lock(&mmlist_lock); 2277 list_for_each_safe(p, next, &init_mm.mmlist) 2278 list_del_init(p); 2279 spin_unlock(&mmlist_lock); 2280 } 2281 2282 /* 2283 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which 2284 * corresponds to page offset for the specified swap entry. 2285 * Note that the type of this function is sector_t, but it returns page offset 2286 * into the bdev, not sector offset. 2287 */ 2288 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev) 2289 { 2290 struct swap_info_struct *sis; 2291 struct swap_extent *se; 2292 pgoff_t offset; 2293 2294 sis = swp_swap_info(entry); 2295 *bdev = sis->bdev; 2296 2297 offset = swp_offset(entry); 2298 se = offset_to_swap_extent(sis, offset); 2299 return se->start_block + (offset - se->start_page); 2300 } 2301 2302 /* 2303 * Returns the page offset into bdev for the specified page's swap entry. 2304 */ 2305 sector_t map_swap_page(struct page *page, struct block_device **bdev) 2306 { 2307 swp_entry_t entry; 2308 entry.val = page_private(page); 2309 return map_swap_entry(entry, bdev); 2310 } 2311 2312 /* 2313 * Free all of a swapdev's extent information 2314 */ 2315 static void destroy_swap_extents(struct swap_info_struct *sis) 2316 { 2317 while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) { 2318 struct rb_node *rb = sis->swap_extent_root.rb_node; 2319 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node); 2320 2321 rb_erase(rb, &sis->swap_extent_root); 2322 kfree(se); 2323 } 2324 2325 if (sis->flags & SWP_ACTIVATED) { 2326 struct file *swap_file = sis->swap_file; 2327 struct address_space *mapping = swap_file->f_mapping; 2328 2329 sis->flags &= ~SWP_ACTIVATED; 2330 if (mapping->a_ops->swap_deactivate) 2331 mapping->a_ops->swap_deactivate(swap_file); 2332 } 2333 } 2334 2335 /* 2336 * Add a block range (and the corresponding page range) into this swapdev's 2337 * extent tree. 2338 * 2339 * This function rather assumes that it is called in ascending page order. 2340 */ 2341 int 2342 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 2343 unsigned long nr_pages, sector_t start_block) 2344 { 2345 struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL; 2346 struct swap_extent *se; 2347 struct swap_extent *new_se; 2348 2349 /* 2350 * place the new node at the rightmost position, since this 2351 * function is called in ascending page order. 2352 */ 2353 while (*link) { 2354 parent = *link; 2355 link = &parent->rb_right; 2356 } 2357 2358 if (parent) { 2359 se = rb_entry(parent, struct swap_extent, rb_node); 2360 BUG_ON(se->start_page + se->nr_pages != start_page); 2361 if (se->start_block + se->nr_pages == start_block) { 2362 /* Merge it */ 2363 se->nr_pages += nr_pages; 2364 return 0; 2365 } 2366 } 2367 2368 /* No merge, insert a new extent. 
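The new node becomes the rightmost node in the tree, and rb_insert_color() rebalances the rbtree as needed. 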
*/ 2369 new_se = kmalloc(sizeof(*se), GFP_KERNEL); 2370 if (new_se == NULL) 2371 return -ENOMEM; 2372 new_se->start_page = start_page; 2373 new_se->nr_pages = nr_pages; 2374 new_se->start_block = start_block; 2375 2376 rb_link_node(&new_se->rb_node, parent, link); 2377 rb_insert_color(&new_se->rb_node, &sis->swap_extent_root); 2378 return 1; 2379 } 2380 EXPORT_SYMBOL_GPL(add_swap_extent); 2381 2382 /* 2383 * A `swap extent' is a simple thing which maps a contiguous range of pages 2384 * onto a contiguous range of disk blocks. An rbtree of ordered swap extents 2385 * is built at swapon time and is then used at swap_writepage/swap_readpage 2386 * time for locating where on disk a page belongs. 2387 * 2388 * If the swapfile is an S_ISBLK block device, a single extent is installed. 2389 * This is done so that the main operating code can treat S_ISBLK and S_ISREG 2390 * swap files identically. 2391 * 2392 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap 2393 * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK 2394 * swapfiles are handled *identically* after swapon time. 2395 * 2396 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks 2397 * and will parse them into an ordered extent tree, in PAGE_SIZE chunks. If 2398 * some stray blocks are found which do not fall within the PAGE_SIZE alignment 2399 * requirements, they are simply tossed out - we will never use those blocks 2400 * for swapping. 2401 * 2402 * For all swap devices we set S_SWAPFILE across the life of the swapon. This 2403 * prevents users from writing to the swap device, which will corrupt memory. 2404 * 2405 * The amount of disk space which a single swap extent represents varies. 2406 * Typically it is in the 1-4 megabyte range, so an area can have hundreds 2407 * of extents. To avoid much tree walking on every swap_writepage() and 2408 * swap_readpage(), the extents are kept in the rbtree above, where 2409 * offset_to_swap_extent() can find the extent covering a given page 2410 * offset in logarithmic time. 2411 */ 2412 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) 2413 { 2414 struct file *swap_file = sis->swap_file; 2415 struct address_space *mapping = swap_file->f_mapping; 2416 struct inode *inode = mapping->host; 2417 int ret; 2418 2419 if (S_ISBLK(inode->i_mode)) { 2420 ret = add_swap_extent(sis, 0, sis->max, 0); 2421 *span = sis->pages; 2422 return ret; 2423 } 2424 2425 if (mapping->a_ops->swap_activate) { 2426 ret = mapping->a_ops->swap_activate(sis, swap_file, span); 2427 if (ret >= 0) 2428 sis->flags |= SWP_ACTIVATED; 2429 if (!ret) { 2430 sis->flags |= SWP_FS; 2431 ret = add_swap_extent(sis, 0, sis->max, 0); 2432 *span = sis->pages; 2433 } 2434 return ret; 2435 } 2436 2437 return generic_swapfile_activate(sis, swap_file, span); 2438 } 2439 2440 static int swap_node(struct swap_info_struct *p) 2441 { 2442 struct block_device *bdev; 2443 2444 if (p->bdev) 2445 bdev = p->bdev; 2446 else 2447 bdev = p->swap_file->f_inode->i_sb->s_bdev; 2448 2449 return bdev ? 
bdev->bd_disk->node_id : NUMA_NO_NODE; 2450 } 2451 2452 static void setup_swap_info(struct swap_info_struct *p, int prio, 2453 unsigned char *swap_map, 2454 struct swap_cluster_info *cluster_info) 2455 { 2456 int i; 2457 2458 if (prio >= 0) 2459 p->prio = prio; 2460 else 2461 p->prio = --least_priority; 2462 /* 2463 * the plist prio is negated because plist ordering is 2464 * low-to-high, while swap ordering is high-to-low 2465 */ 2466 p->list.prio = -p->prio; 2467 for_each_node(i) { 2468 if (p->prio >= 0) 2469 p->avail_lists[i].prio = -p->prio; 2470 else { 2471 if (swap_node(p) == i) 2472 p->avail_lists[i].prio = 1; 2473 else 2474 p->avail_lists[i].prio = -p->prio; 2475 } 2476 } 2477 p->swap_map = swap_map; 2478 p->cluster_info = cluster_info; 2479 } 2480 2481 static void _enable_swap_info(struct swap_info_struct *p) 2482 { 2483 p->flags |= SWP_WRITEOK | SWP_VALID; 2484 atomic_long_add(p->pages, &nr_swap_pages); 2485 total_swap_pages += p->pages; 2486 2487 assert_spin_locked(&swap_lock); 2488 /* 2489 * both lists are plists, and thus priority ordered. 2490 * swap_active_head needs to be priority ordered for swapoff(), 2491 * which on removal of any swap_info_struct with an auto-assigned 2492 * (i.e. negative) priority increments the auto-assigned priority 2493 * of any lower-priority swap_info_structs. 2494 * swap_avail_head needs to be priority ordered for get_swap_page(), 2495 * which allocates swap pages from the highest available priority 2496 * swap_info_struct. 2497 */ 2498 plist_add(&p->list, &swap_active_head); 2499 add_to_avail_list(p); 2500 } 2501 2502 static void enable_swap_info(struct swap_info_struct *p, int prio, 2503 unsigned char *swap_map, 2504 struct swap_cluster_info *cluster_info, 2505 unsigned long *frontswap_map) 2506 { 2507 frontswap_init(p->type, frontswap_map); 2508 spin_lock(&swap_lock); 2509 spin_lock(&p->lock); 2510 setup_swap_info(p, prio, swap_map, cluster_info); 2511 spin_unlock(&p->lock); 2512 spin_unlock(&swap_lock); 2513 /* 2514 * Guarantee swap_map, cluster_info, etc. 
fields are valid 2515 * between get/put_swap_device() if SWP_VALID bit is set 2516 */ 2517 synchronize_rcu(); 2518 spin_lock(&swap_lock); 2519 spin_lock(&p->lock); 2520 _enable_swap_info(p); 2521 spin_unlock(&p->lock); 2522 spin_unlock(&swap_lock); 2523 } 2524 2525 static void reinsert_swap_info(struct swap_info_struct *p) 2526 { 2527 spin_lock(&swap_lock); 2528 spin_lock(&p->lock); 2529 setup_swap_info(p, p->prio, p->swap_map, p->cluster_info); 2530 _enable_swap_info(p); 2531 spin_unlock(&p->lock); 2532 spin_unlock(&swap_lock); 2533 } 2534 2535 bool has_usable_swap(void) 2536 { 2537 bool ret = true; 2538 2539 spin_lock(&swap_lock); 2540 if (plist_head_empty(&swap_active_head)) 2541 ret = false; 2542 spin_unlock(&swap_lock); 2543 return ret; 2544 } 2545 2546 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) 2547 { 2548 struct swap_info_struct *p = NULL; 2549 unsigned char *swap_map; 2550 struct swap_cluster_info *cluster_info; 2551 unsigned long *frontswap_map; 2552 struct file *swap_file, *victim; 2553 struct address_space *mapping; 2554 struct inode *inode; 2555 struct filename *pathname; 2556 int err, found = 0; 2557 unsigned int old_block_size; 2558 2559 if (!capable(CAP_SYS_ADMIN)) 2560 return -EPERM; 2561 2562 BUG_ON(!current->mm); 2563 2564 pathname = getname(specialfile); 2565 if (IS_ERR(pathname)) 2566 return PTR_ERR(pathname); 2567 2568 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); 2569 err = PTR_ERR(victim); 2570 if (IS_ERR(victim)) 2571 goto out; 2572 2573 mapping = victim->f_mapping; 2574 spin_lock(&swap_lock); 2575 plist_for_each_entry(p, &swap_active_head, list) { 2576 if (p->flags & SWP_WRITEOK) { 2577 if (p->swap_file->f_mapping == mapping) { 2578 found = 1; 2579 break; 2580 } 2581 } 2582 } 2583 if (!found) { 2584 err = -EINVAL; 2585 spin_unlock(&swap_lock); 2586 goto out_dput; 2587 } 2588 if (!security_vm_enough_memory_mm(current->mm, p->pages)) 2589 vm_unacct_memory(p->pages); 2590 else { 2591 err = -ENOMEM; 2592 spin_unlock(&swap_lock); 2593 goto out_dput; 2594 } 2595 del_from_avail_list(p); 2596 spin_lock(&p->lock); 2597 if (p->prio < 0) { 2598 struct swap_info_struct *si = p; 2599 int nid; 2600 2601 plist_for_each_entry_continue(si, &swap_active_head, list) { 2602 si->prio++; 2603 si->list.prio--; 2604 for_each_node(nid) { 2605 if (si->avail_lists[nid].prio != 1) 2606 si->avail_lists[nid].prio--; 2607 } 2608 } 2609 least_priority++; 2610 } 2611 plist_del(&p->list, &swap_active_head); 2612 atomic_long_sub(p->pages, &nr_swap_pages); 2613 total_swap_pages -= p->pages; 2614 p->flags &= ~SWP_WRITEOK; 2615 spin_unlock(&p->lock); 2616 spin_unlock(&swap_lock); 2617 2618 disable_swap_slots_cache_lock(); 2619 2620 set_current_oom_origin(); 2621 err = try_to_unuse(p->type, false, 0); /* force unuse all pages */ 2622 clear_current_oom_origin(); 2623 2624 if (err) { 2625 /* re-insert swap space back into swap_list */ 2626 reinsert_swap_info(p); 2627 reenable_swap_slots_cache_unlock(); 2628 goto out_dput; 2629 } 2630 2631 reenable_swap_slots_cache_unlock(); 2632 2633 spin_lock(&swap_lock); 2634 spin_lock(&p->lock); 2635 p->flags &= ~SWP_VALID; /* mark swap device as invalid */ 2636 spin_unlock(&p->lock); 2637 spin_unlock(&swap_lock); 2638 /* 2639 * wait for swap operations protected by get/put_swap_device() 2640 * to complete 2641 */ 2642 synchronize_rcu(); 2643 2644 flush_work(&p->discard_work); 2645 2646 destroy_swap_extents(p); 2647 if (p->flags & SWP_CONTINUED) 2648 free_swap_count_continuations(p); 2649 2650 if (!p->bdev || 
!blk_queue_nonrot(bdev_get_queue(p->bdev))) 2651 atomic_dec(&nr_rotate_swap); 2652 2653 mutex_lock(&swapon_mutex); 2654 spin_lock(&swap_lock); 2655 spin_lock(&p->lock); 2656 drain_mmlist(); 2657 2658 /* wait for anyone still in scan_swap_map */ 2659 p->highest_bit = 0; /* cuts scans short */ 2660 while (p->flags >= SWP_SCANNING) { 2661 spin_unlock(&p->lock); 2662 spin_unlock(&swap_lock); 2663 schedule_timeout_uninterruptible(1); 2664 spin_lock(&swap_lock); 2665 spin_lock(&p->lock); 2666 } 2667 2668 swap_file = p->swap_file; 2669 old_block_size = p->old_block_size; 2670 p->swap_file = NULL; 2671 p->max = 0; 2672 swap_map = p->swap_map; 2673 p->swap_map = NULL; 2674 cluster_info = p->cluster_info; 2675 p->cluster_info = NULL; 2676 frontswap_map = frontswap_map_get(p); 2677 spin_unlock(&p->lock); 2678 spin_unlock(&swap_lock); 2679 frontswap_invalidate_area(p->type); 2680 frontswap_map_set(p, NULL); 2681 mutex_unlock(&swapon_mutex); 2682 free_percpu(p->percpu_cluster); 2683 p->percpu_cluster = NULL; 2684 free_percpu(p->cluster_next_cpu); 2685 p->cluster_next_cpu = NULL; 2686 vfree(swap_map); 2687 kvfree(cluster_info); 2688 kvfree(frontswap_map); 2689 /* Destroy swap account information */ 2690 swap_cgroup_swapoff(p->type); 2691 exit_swap_address_space(p->type); 2692 2693 inode = mapping->host; 2694 if (S_ISBLK(inode->i_mode)) { 2695 struct block_device *bdev = I_BDEV(inode); 2696 2697 set_blocksize(bdev, old_block_size); 2698 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2699 } 2700 2701 inode_lock(inode); 2702 inode->i_flags &= ~S_SWAPFILE; 2703 inode_unlock(inode); 2704 filp_close(swap_file, NULL); 2705 2706 /* 2707 * Clear the SWP_USED flag after all resources are freed so that swapon 2708 * can reuse this swap_info in alloc_swap_info() safely. It is ok to 2709 * not hold p->lock after we cleared its SWP_WRITEOK. 
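 * (alloc_swap_info() scans swap_info[] under swap_lock for the first * entry whose SWP_USED bit is clear, so resetting p->flags below is * what makes this slot reusable by a later swapon.)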
2710 */ 2711 spin_lock(&swap_lock); 2712 p->flags = 0; 2713 spin_unlock(&swap_lock); 2714 2715 err = 0; 2716 atomic_inc(&proc_poll_event); 2717 wake_up_interruptible(&proc_poll_wait); 2718 2719 out_dput: 2720 filp_close(victim, NULL); 2721 out: 2722 putname(pathname); 2723 return err; 2724 } 2725 2726 #ifdef CONFIG_PROC_FS 2727 static __poll_t swaps_poll(struct file *file, poll_table *wait) 2728 { 2729 struct seq_file *seq = file->private_data; 2730 2731 poll_wait(file, &proc_poll_wait, wait); 2732 2733 if (seq->poll_event != atomic_read(&proc_poll_event)) { 2734 seq->poll_event = atomic_read(&proc_poll_event); 2735 return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; 2736 } 2737 2738 return EPOLLIN | EPOLLRDNORM; 2739 } 2740 2741 /* iterator */ 2742 static void *swap_start(struct seq_file *swap, loff_t *pos) 2743 { 2744 struct swap_info_struct *si; 2745 int type; 2746 loff_t l = *pos; 2747 2748 mutex_lock(&swapon_mutex); 2749 2750 if (!l) 2751 return SEQ_START_TOKEN; 2752 2753 for (type = 0; (si = swap_type_to_swap_info(type)); type++) { 2754 if (!(si->flags & SWP_USED) || !si->swap_map) 2755 continue; 2756 if (!--l) 2757 return si; 2758 } 2759 2760 return NULL; 2761 } 2762 2763 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) 2764 { 2765 struct swap_info_struct *si = v; 2766 int type; 2767 2768 if (v == SEQ_START_TOKEN) 2769 type = 0; 2770 else 2771 type = si->type + 1; 2772 2773 ++(*pos); 2774 for (; (si = swap_type_to_swap_info(type)); type++) { 2775 if (!(si->flags & SWP_USED) || !si->swap_map) 2776 continue; 2777 return si; 2778 } 2779 2780 return NULL; 2781 } 2782 2783 static void swap_stop(struct seq_file *swap, void *v) 2784 { 2785 mutex_unlock(&swapon_mutex); 2786 } 2787 2788 static int swap_show(struct seq_file *swap, void *v) 2789 { 2790 struct swap_info_struct *si = v; 2791 struct file *file; 2792 int len; 2793 unsigned int bytes, inuse; 2794 2795 if (si == SEQ_START_TOKEN) { 2796 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); 2797 return 0; 2798 } 2799 2800 bytes = si->pages << (PAGE_SHIFT - 10); 2801 inuse = si->inuse_pages << (PAGE_SHIFT - 10); 2802 2803 file = si->swap_file; 2804 len = seq_file_path(swap, file, " \t\n\\"); 2805 seq_printf(swap, "%*s%s\t%u\t%s%u\t%s%d\n", 2806 len < 40 ? 40 - len : 1, " ", 2807 S_ISBLK(file_inode(file)->i_mode) ? 2808 "partition" : "file\t", 2809 bytes, bytes < 10000000 ? "\t" : "", 2810 inuse, inuse < 10000000 ? 
"\t" : "", 2811 si->prio); 2812 return 0; 2813 } 2814 2815 static const struct seq_operations swaps_op = { 2816 .start = swap_start, 2817 .next = swap_next, 2818 .stop = swap_stop, 2819 .show = swap_show 2820 }; 2821 2822 static int swaps_open(struct inode *inode, struct file *file) 2823 { 2824 struct seq_file *seq; 2825 int ret; 2826 2827 ret = seq_open(file, &swaps_op); 2828 if (ret) 2829 return ret; 2830 2831 seq = file->private_data; 2832 seq->poll_event = atomic_read(&proc_poll_event); 2833 return 0; 2834 } 2835 2836 static const struct proc_ops swaps_proc_ops = { 2837 .proc_flags = PROC_ENTRY_PERMANENT, 2838 .proc_open = swaps_open, 2839 .proc_read = seq_read, 2840 .proc_lseek = seq_lseek, 2841 .proc_release = seq_release, 2842 .proc_poll = swaps_poll, 2843 }; 2844 2845 static int __init procswaps_init(void) 2846 { 2847 proc_create("swaps", 0, NULL, &swaps_proc_ops); 2848 return 0; 2849 } 2850 __initcall(procswaps_init); 2851 #endif /* CONFIG_PROC_FS */ 2852 2853 #ifdef MAX_SWAPFILES_CHECK 2854 static int __init max_swapfiles_check(void) 2855 { 2856 MAX_SWAPFILES_CHECK(); 2857 return 0; 2858 } 2859 late_initcall(max_swapfiles_check); 2860 #endif 2861 2862 static struct swap_info_struct *alloc_swap_info(void) 2863 { 2864 struct swap_info_struct *p; 2865 unsigned int type; 2866 int i; 2867 2868 p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL); 2869 if (!p) 2870 return ERR_PTR(-ENOMEM); 2871 2872 spin_lock(&swap_lock); 2873 for (type = 0; type < nr_swapfiles; type++) { 2874 if (!(swap_info[type]->flags & SWP_USED)) 2875 break; 2876 } 2877 if (type >= MAX_SWAPFILES) { 2878 spin_unlock(&swap_lock); 2879 kvfree(p); 2880 return ERR_PTR(-EPERM); 2881 } 2882 if (type >= nr_swapfiles) { 2883 p->type = type; 2884 WRITE_ONCE(swap_info[type], p); 2885 /* 2886 * Write swap_info[type] before nr_swapfiles, in case a 2887 * racing procfs swap_start() or swap_next() is reading them. 2888 * (We never shrink nr_swapfiles, we never free this entry.) 2889 */ 2890 smp_wmb(); 2891 WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1); 2892 } else { 2893 kvfree(p); 2894 p = swap_info[type]; 2895 /* 2896 * Do not memset this entry: a racing procfs swap_next() 2897 * would be relying on p->type to remain valid. 2898 */ 2899 } 2900 p->swap_extent_root = RB_ROOT; 2901 plist_node_init(&p->list, 0); 2902 for_each_node(i) 2903 plist_node_init(&p->avail_lists[i], 0); 2904 p->flags = SWP_USED; 2905 spin_unlock(&swap_lock); 2906 spin_lock_init(&p->lock); 2907 spin_lock_init(&p->cont_lock); 2908 2909 return p; 2910 } 2911 2912 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) 2913 { 2914 int error; 2915 2916 if (S_ISBLK(inode->i_mode)) { 2917 p->bdev = bdgrab(I_BDEV(inode)); 2918 error = blkdev_get(p->bdev, 2919 FMODE_READ | FMODE_WRITE | FMODE_EXCL, p); 2920 if (error < 0) { 2921 p->bdev = NULL; 2922 return error; 2923 } 2924 p->old_block_size = block_size(p->bdev); 2925 error = set_blocksize(p->bdev, PAGE_SIZE); 2926 if (error < 0) 2927 return error; 2928 /* 2929 * Zoned block devices contain zones that have a sequential 2930 * write only restriction. Hence zoned block devices are not 2931 * suitable for swapping. Disallow them here. 2932 */ 2933 if (blk_queue_is_zoned(p->bdev->bd_queue)) 2934 return -EINVAL; 2935 p->flags |= SWP_BLKDEV; 2936 } else if (S_ISREG(inode->i_mode)) { 2937 p->bdev = inode->i_sb->s_bdev; 2938 } 2939 2940 return 0; 2941 } 2942 2943 2944 /* 2945 * Find out how many pages are allowed for a single swap device. 
There 2946 * are two limiting factors: 2947 * 1) the number of bits for the swap offset in the swp_entry_t type, and 2948 * 2) the number of bits in the swap pte, as defined by the different 2949 * architectures. 2950 * 2951 * In order to find the largest possible bit mask, a swap entry with 2952 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, 2953 * decoded to a swp_entry_t again, and finally the swap offset is 2954 * extracted. 2955 * 2956 * This will mask all the bits from the initial ~0UL mask that can't 2957 * be encoded in either the swp_entry_t or the architecture definition 2958 * of a swap pte. 2959 */ 2960 unsigned long generic_max_swapfile_size(void) 2961 { 2962 return swp_offset(pte_to_swp_entry( 2963 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; 2964 } 2965 2966 /* Can be overridden by an architecture for additional checks. */ 2967 __weak unsigned long max_swapfile_size(void) 2968 { 2969 return generic_max_swapfile_size(); 2970 } 2971 2972 static unsigned long read_swap_header(struct swap_info_struct *p, 2973 union swap_header *swap_header, 2974 struct inode *inode) 2975 { 2976 int i; 2977 unsigned long maxpages; 2978 unsigned long swapfilepages; 2979 unsigned long last_page; 2980 2981 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { 2982 pr_err("Unable to find swap-space signature\n"); 2983 return 0; 2984 } 2985 2986 /* swap partition endianness hack... */ 2987 if (swab32(swap_header->info.version) == 1) { 2988 swab32s(&swap_header->info.version); 2989 swab32s(&swap_header->info.last_page); 2990 swab32s(&swap_header->info.nr_badpages); 2991 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 2992 return 0; 2993 for (i = 0; i < swap_header->info.nr_badpages; i++) 2994 swab32s(&swap_header->info.badpages[i]); 2995 } 2996 /* Check the swap header's sub-version */ 2997 if (swap_header->info.version != 1) { 2998 pr_warn("Unable to handle swap header version %d\n", 2999 swap_header->info.version); 3000 return 0; 3001 } 3002 3003 p->lowest_bit = 1; 3004 p->cluster_next = 1; 3005 p->cluster_nr = 0; 3006 3007 maxpages = max_swapfile_size(); 3008 last_page = swap_header->info.last_page; 3009 if (!last_page) { 3010 pr_warn("Empty swap-file\n"); 3011 return 0; 3012 } 3013 if (last_page > maxpages) { 3014 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", 3015 maxpages << (PAGE_SHIFT - 10), 3016 last_page << (PAGE_SHIFT - 10)); 3017 } 3018 if (maxpages > last_page) { 3019 maxpages = last_page + 1; 3020 /* p->max is an unsigned int: don't overflow it */ 3021 if ((unsigned int)maxpages == 0) 3022 maxpages = UINT_MAX; 3023 } 3024 p->highest_bit = maxpages - 1; 3025 3026 if (!maxpages) 3027 return 0; 3028 swapfilepages = i_size_read(inode) >> PAGE_SHIFT; 3029 if (swapfilepages && maxpages > swapfilepages) { 3030 pr_warn("Swap area shorter than signature indicates\n"); 3031 return 0; 3032 } 3033 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) 3034 return 0; 3035 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 3036 return 0; 3037 3038 return maxpages; 3039 } 3040 3041 #define SWAP_CLUSTER_INFO_COLS \ 3042 DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info)) 3043 #define SWAP_CLUSTER_SPACE_COLS \ 3044 DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER) 3045 #define SWAP_CLUSTER_COLS \ 3046 max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS) 3047 3048 static int setup_swap_map_and_extents(struct swap_info_struct *p, 3049 union swap_header *swap_header, 3050 unsigned char *swap_map, 3051 struct 
swap_cluster_info *cluster_info, 3052 unsigned long maxpages, 3053 sector_t *span) 3054 { 3055 unsigned int j, k; 3056 unsigned int nr_good_pages; 3057 int nr_extents; 3058 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); 3059 unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS; 3060 unsigned long i, idx; 3061 3062 nr_good_pages = maxpages - 1; /* omit header page */ 3063 3064 cluster_list_init(&p->free_clusters); 3065 cluster_list_init(&p->discard_clusters); 3066 3067 for (i = 0; i < swap_header->info.nr_badpages; i++) { 3068 unsigned int page_nr = swap_header->info.badpages[i]; 3069 if (page_nr == 0 || page_nr > swap_header->info.last_page) 3070 return -EINVAL; 3071 if (page_nr < maxpages) { 3072 swap_map[page_nr] = SWAP_MAP_BAD; 3073 nr_good_pages--; 3074 /* 3075 * Haven't marked the cluster free yet, no list 3076 * operation involved 3077 */ 3078 inc_cluster_info_page(p, cluster_info, page_nr); 3079 } 3080 } 3081 3082 /* Haven't marked the cluster free yet, no list operation involved */ 3083 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++) 3084 inc_cluster_info_page(p, cluster_info, i); 3085 3086 if (nr_good_pages) { 3087 swap_map[0] = SWAP_MAP_BAD; 3088 /* 3089 * Haven't marked the cluster free yet, no list 3090 * operation involved 3091 */ 3092 inc_cluster_info_page(p, cluster_info, 0); 3093 p->max = maxpages; 3094 p->pages = nr_good_pages; 3095 nr_extents = setup_swap_extents(p, span); 3096 if (nr_extents < 0) 3097 return nr_extents; 3098 nr_good_pages = p->pages; 3099 } 3100 if (!nr_good_pages) { 3101 pr_warn("Empty swap-file\n"); 3102 return -EINVAL; 3103 } 3104 3105 if (!cluster_info) 3106 return nr_extents; 3107 3108 3109 /* 3110 * Add free clusters to the free list in staggered order, to reduce false 3111 * cache line sharing of cluster_info and of the swap address spaces. 3112 */ 3113 for (k = 0; k < SWAP_CLUSTER_COLS; k++) { 3114 j = (k + col) % SWAP_CLUSTER_COLS; 3115 for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) { 3116 idx = i * SWAP_CLUSTER_COLS + j; 3117 if (idx >= nr_clusters) 3118 continue; 3119 if (cluster_count(&cluster_info[idx])) 3120 continue; 3121 cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE); 3122 cluster_list_add_tail(&p->free_clusters, cluster_info, 3123 idx); 3124 } 3125 } 3126 return nr_extents; 3127 } 3128 3129 /* 3130 * Helper for sys_swapon, determining if a given swap 3131 * backing device queue supports DISCARD operations. 
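 * Returns false when the device has no request queue or its queue does * not advertise discard support; the caller additionally checks p->bdev * and the SWAP_FLAG_DISCARD request before enabling any discards. 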
3132 */ 3133 static bool swap_discardable(struct swap_info_struct *si) 3134 { 3135 struct request_queue *q = bdev_get_queue(si->bdev); 3136 3137 if (!q || !blk_queue_discard(q)) 3138 return false; 3139 3140 return true; 3141 } 3142 3143 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) 3144 { 3145 struct swap_info_struct *p; 3146 struct filename *name; 3147 struct file *swap_file = NULL; 3148 struct address_space *mapping; 3149 int prio; 3150 int error; 3151 union swap_header *swap_header; 3152 int nr_extents; 3153 sector_t span; 3154 unsigned long maxpages; 3155 unsigned char *swap_map = NULL; 3156 struct swap_cluster_info *cluster_info = NULL; 3157 unsigned long *frontswap_map = NULL; 3158 struct page *page = NULL; 3159 struct inode *inode = NULL; 3160 bool inced_nr_rotate_swap = false; 3161 3162 if (swap_flags & ~SWAP_FLAGS_VALID) 3163 return -EINVAL; 3164 3165 if (!capable(CAP_SYS_ADMIN)) 3166 return -EPERM; 3167 3168 if (!swap_avail_heads) 3169 return -ENOMEM; 3170 3171 p = alloc_swap_info(); 3172 if (IS_ERR(p)) 3173 return PTR_ERR(p); 3174 3175 INIT_WORK(&p->discard_work, swap_discard_work); 3176 3177 name = getname(specialfile); 3178 if (IS_ERR(name)) { 3179 error = PTR_ERR(name); 3180 name = NULL; 3181 goto bad_swap; 3182 } 3183 swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0); 3184 if (IS_ERR(swap_file)) { 3185 error = PTR_ERR(swap_file); 3186 swap_file = NULL; 3187 goto bad_swap; 3188 } 3189 3190 p->swap_file = swap_file; 3191 mapping = swap_file->f_mapping; 3192 inode = mapping->host; 3193 3194 error = claim_swapfile(p, inode); 3195 if (unlikely(error)) 3196 goto bad_swap; 3197 3198 inode_lock(inode); 3199 if (IS_SWAPFILE(inode)) { 3200 error = -EBUSY; 3201 goto bad_swap_unlock_inode; 3202 } 3203 3204 /* 3205 * Read the swap header. 
3206 */ 3207 if (!mapping->a_ops->readpage) { 3208 error = -EINVAL; 3209 goto bad_swap_unlock_inode; 3210 } 3211 page = read_mapping_page(mapping, 0, swap_file); 3212 if (IS_ERR(page)) { 3213 error = PTR_ERR(page); 3214 goto bad_swap_unlock_inode; 3215 } 3216 swap_header = kmap(page); 3217 3218 maxpages = read_swap_header(p, swap_header, inode); 3219 if (unlikely(!maxpages)) { 3220 error = -EINVAL; 3221 goto bad_swap_unlock_inode; 3222 } 3223 3224 /* OK, set up the swap map and apply the bad block list */ 3225 swap_map = vzalloc(maxpages); 3226 if (!swap_map) { 3227 error = -ENOMEM; 3228 goto bad_swap_unlock_inode; 3229 } 3230 3231 if (bdi_cap_stable_pages_required(inode_to_bdi(inode))) 3232 p->flags |= SWP_STABLE_WRITES; 3233 3234 if (bdi_cap_synchronous_io(inode_to_bdi(inode))) 3235 p->flags |= SWP_SYNCHRONOUS_IO; 3236 3237 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) { 3238 int cpu; 3239 unsigned long ci, nr_cluster; 3240 3241 p->flags |= SWP_SOLIDSTATE; 3242 p->cluster_next_cpu = alloc_percpu(unsigned int); 3243 if (!p->cluster_next_cpu) { 3244 error = -ENOMEM; 3245 goto bad_swap_unlock_inode; 3246 } 3247 /* 3248 * select a random position to start with to help wear leveling 3249 * SSD 3250 */ 3251 for_each_possible_cpu(cpu) { 3252 per_cpu(*p->cluster_next_cpu, cpu) = 3253 1 + prandom_u32_max(p->highest_bit); 3254 } 3255 nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); 3256 3257 cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info), 3258 GFP_KERNEL); 3259 if (!cluster_info) { 3260 error = -ENOMEM; 3261 goto bad_swap_unlock_inode; 3262 } 3263 3264 for (ci = 0; ci < nr_cluster; ci++) 3265 spin_lock_init(&((cluster_info + ci)->lock)); 3266 3267 p->percpu_cluster = alloc_percpu(struct percpu_cluster); 3268 if (!p->percpu_cluster) { 3269 error = -ENOMEM; 3270 goto bad_swap_unlock_inode; 3271 } 3272 for_each_possible_cpu(cpu) { 3273 struct percpu_cluster *cluster; 3274 cluster = per_cpu_ptr(p->percpu_cluster, cpu); 3275 cluster_set_null(&cluster->index); 3276 } 3277 } else { 3278 atomic_inc(&nr_rotate_swap); 3279 inced_nr_rotate_swap = true; 3280 } 3281 3282 error = swap_cgroup_swapon(p->type, maxpages); 3283 if (error) 3284 goto bad_swap_unlock_inode; 3285 3286 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map, 3287 cluster_info, maxpages, &span); 3288 if (unlikely(nr_extents < 0)) { 3289 error = nr_extents; 3290 goto bad_swap_unlock_inode; 3291 } 3292 /* frontswap enabled? set up bit-per-page map for frontswap */ 3293 if (IS_ENABLED(CONFIG_FRONTSWAP)) 3294 frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages), 3295 sizeof(long), 3296 GFP_KERNEL); 3297 3298 if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { 3299 /* 3300 * When discard is enabled for swap with no particular 3301 * policy flagged, we set all swap discard flags here in 3302 * order to sustain backward compatibility with older 3303 * swapon(8) releases. 3304 */ 3305 p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | 3306 SWP_PAGE_DISCARD); 3307 3308 /* 3309 * By flagging sys_swapon, a sysadmin can tell us to 3310 * either do single-time area discards only, or to just 3311 * perform discards for released swap page-clusters. 3312 * Now it's time to adjust the p->flags accordingly. 
3313 */ 3314 if (swap_flags & SWAP_FLAG_DISCARD_ONCE) 3315 p->flags &= ~SWP_PAGE_DISCARD; 3316 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) 3317 p->flags &= ~SWP_AREA_DISCARD; 3318 3319 /* issue a swapon-time discard if it's still required */ 3320 if (p->flags & SWP_AREA_DISCARD) { 3321 int err = discard_swap(p); 3322 if (unlikely(err)) 3323 pr_err("swapon: discard_swap(%p): %d\n", 3324 p, err); 3325 } 3326 } 3327 3328 error = init_swap_address_space(p->type, maxpages); 3329 if (error) 3330 goto bad_swap_unlock_inode; 3331 3332 /* 3333 * Flush any pending IO and dirty mappings before we start using this 3334 * swap device. 3335 */ 3336 inode->i_flags |= S_SWAPFILE; 3337 error = inode_drain_writes(inode); 3338 if (error) { 3339 inode->i_flags &= ~S_SWAPFILE; 3340 goto bad_swap_unlock_inode; 3341 } 3342 3343 mutex_lock(&swapon_mutex); 3344 prio = -1; 3345 if (swap_flags & SWAP_FLAG_PREFER) 3346 prio = 3347 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; 3348 enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map); 3349 3350 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n", 3351 p->pages<<(PAGE_SHIFT-10), name->name, p->prio, 3352 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), 3353 (p->flags & SWP_SOLIDSTATE) ? "SS" : "", 3354 (p->flags & SWP_DISCARDABLE) ? "D" : "", 3355 (p->flags & SWP_AREA_DISCARD) ? "s" : "", 3356 (p->flags & SWP_PAGE_DISCARD) ? "c" : "", 3357 (frontswap_map) ? "FS" : ""); 3358 3359 mutex_unlock(&swapon_mutex); 3360 atomic_inc(&proc_poll_event); 3361 wake_up_interruptible(&proc_poll_wait); 3362 3363 error = 0; 3364 goto out; 3365 bad_swap_unlock_inode: 3366 inode_unlock(inode); 3367 bad_swap: 3368 free_percpu(p->percpu_cluster); 3369 p->percpu_cluster = NULL; 3370 free_percpu(p->cluster_next_cpu); 3371 p->cluster_next_cpu = NULL; 3372 if (inode && S_ISBLK(inode->i_mode) && p->bdev) { 3373 set_blocksize(p->bdev, p->old_block_size); 3374 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 3375 } 3376 inode = NULL; 3377 destroy_swap_extents(p); 3378 swap_cgroup_swapoff(p->type); 3379 spin_lock(&swap_lock); 3380 p->swap_file = NULL; 3381 p->flags = 0; 3382 spin_unlock(&swap_lock); 3383 vfree(swap_map); 3384 kvfree(cluster_info); 3385 kvfree(frontswap_map); 3386 if (inced_nr_rotate_swap) 3387 atomic_dec(&nr_rotate_swap); 3388 if (swap_file) 3389 filp_close(swap_file, NULL); 3390 out: 3391 if (page && !IS_ERR(page)) { 3392 kunmap(page); 3393 put_page(page); 3394 } 3395 if (name) 3396 putname(name); 3397 if (inode) 3398 inode_unlock(inode); 3399 if (!error) 3400 enable_swap_slots_cache(); 3401 return error; 3402 } 3403 3404 void si_swapinfo(struct sysinfo *val) 3405 { 3406 unsigned int type; 3407 unsigned long nr_to_be_unused = 0; 3408 3409 spin_lock(&swap_lock); 3410 for (type = 0; type < nr_swapfiles; type++) { 3411 struct swap_info_struct *si = swap_info[type]; 3412 3413 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) 3414 nr_to_be_unused += si->inuse_pages; 3415 } 3416 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; 3417 val->totalswap = total_swap_pages + nr_to_be_unused; 3418 spin_unlock(&swap_lock); 3419 } 3420 3421 /* 3422 * Verify that a swap entry is valid and increment its swap map count. 3423 * 3424 * Returns error code in following case. 3425 * - success -> 0 3426 * - swp_entry is invalid -> EINVAL 3427 * - swp_entry is migration entry -> EINVAL 3428 * - swap-cache reference is requested but there is already one. 
-> EEXIST 3429 * - swap-cache reference is requested but the entry is not used. -> ENOENT 3430 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM 3431 */ 3432 static int __swap_duplicate(swp_entry_t entry, unsigned char usage) 3433 { 3434 struct swap_info_struct *p; 3435 struct swap_cluster_info *ci; 3436 unsigned long offset; 3437 unsigned char count; 3438 unsigned char has_cache; 3439 int err = -EINVAL; 3440 3441 p = get_swap_device(entry); 3442 if (!p) 3443 goto out; 3444 3445 offset = swp_offset(entry); 3446 ci = lock_cluster_or_swap_info(p, offset); 3447 3448 count = p->swap_map[offset]; 3449 3450 /* 3451 * swapin_readahead() doesn't check if a swap entry is valid, so the 3452 * swap entry could be SWAP_MAP_BAD. Check here with lock held. 3453 */ 3454 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { 3455 err = -ENOENT; 3456 goto unlock_out; 3457 } 3458 3459 has_cache = count & SWAP_HAS_CACHE; 3460 count &= ~SWAP_HAS_CACHE; 3461 err = 0; 3462 3463 if (usage == SWAP_HAS_CACHE) { 3464 3465 /* set SWAP_HAS_CACHE if there is no cache and entry is used */ 3466 if (!has_cache && count) 3467 has_cache = SWAP_HAS_CACHE; 3468 else if (has_cache) /* someone else added cache */ 3469 err = -EEXIST; 3470 else /* no users remaining */ 3471 err = -ENOENT; 3472 3473 } else if (count || has_cache) { 3474 3475 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) 3476 count += usage; 3477 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) 3478 err = -EINVAL; 3479 else if (swap_count_continued(p, offset, count)) 3480 count = COUNT_CONTINUED; 3481 else 3482 err = -ENOMEM; 3483 } else 3484 err = -ENOENT; /* unused swap entry */ 3485 3486 p->swap_map[offset] = count | has_cache; 3487 3488 unlock_out: 3489 unlock_cluster_or_swap_info(p, ci); 3490 out: 3491 if (p) 3492 put_swap_device(p); 3493 return err; 3494 } 3495 3496 /* 3497 * Help swapoff by noting that swap entry belongs to shmem/tmpfs 3498 * (in which case its reference count is never incremented). 3499 */ 3500 void swap_shmem_alloc(swp_entry_t entry) 3501 { 3502 __swap_duplicate(entry, SWAP_MAP_SHMEM); 3503 } 3504 3505 /* 3506 * Increase reference count of swap entry by 1. 3507 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required 3508 * but could not be atomically allocated. Returns 0, just as if it succeeded, 3509 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which 3510 * might occur if a page table entry has been corrupted. 3511 */ 3512 int swap_duplicate(swp_entry_t entry) 3513 { 3514 int err = 0; 3515 3516 while (!err && __swap_duplicate(entry, 1) == -ENOMEM) 3517 err = add_swap_count_continuation(entry, GFP_ATOMIC); 3518 return err; 3519 } 3520 3521 /* 3522 * @entry: swap entry for which we allocate swap cache. 3523 * 3524 * Called when allocating swap cache for an existing swap entry; 3525 * this can return error codes. Returns 0 on success. 3526 * -EEXIST means there is a swap cache. 3527 * Note: return code is different from swap_duplicate(). 3528 */ 3529 int swapcache_prepare(swp_entry_t entry) 3530 { 3531 return __swap_duplicate(entry, SWAP_HAS_CACHE); 3532 } 3533 3534 struct swap_info_struct *swp_swap_info(swp_entry_t entry) 3535 { 3536 return swap_type_to_swap_info(swp_type(entry)); 3537 } 3538 3539 struct swap_info_struct *page_swap_info(struct page *page) 3540 { 3541 swp_entry_t entry = { .val = page_private(page) }; 3542 return swp_swap_info(entry); 3543 } 3544 3545 /* 3546 * out-of-line __page_file_ methods to avoid include hell. 
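 * They are only meaningful for swap-backed pages: __page_file_mapping() * resolves a page's swap entry to the owning swap file's address_space, * and __page_file_index() to its page offset within the swap area. 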
3547 */ 3548 struct address_space *__page_file_mapping(struct page *page) 3549 { 3550 return page_swap_info(page)->swap_file->f_mapping; 3551 } 3552 EXPORT_SYMBOL_GPL(__page_file_mapping); 3553 3554 pgoff_t __page_file_index(struct page *page) 3555 { 3556 swp_entry_t swap = { .val = page_private(page) }; 3557 return swp_offset(swap); 3558 } 3559 EXPORT_SYMBOL_GPL(__page_file_index); 3560 3561 /* 3562 * add_swap_count_continuation - called when a swap count is duplicated 3563 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's 3564 * page of the original vmalloc'ed swap_map, to hold the continuation count 3565 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called 3566 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. 3567 * 3568 * These continuation pages are seldom referenced: the common paths all work 3569 * on the original swap_map, only referring to a continuation page when the 3570 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. 3571 * 3572 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding 3573 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) 3574 * can be called after dropping locks. 3575 */ 3576 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) 3577 { 3578 struct swap_info_struct *si; 3579 struct swap_cluster_info *ci; 3580 struct page *head; 3581 struct page *page; 3582 struct page *list_page; 3583 pgoff_t offset; 3584 unsigned char count; 3585 int ret = 0; 3586 3587 /* 3588 * When debugging, it's easier to use __GFP_ZERO here; but it's better 3589 * for latency not to zero a page while GFP_ATOMIC and holding locks. 3590 */ 3591 page = alloc_page(gfp_mask | __GFP_HIGHMEM); 3592 3593 si = get_swap_device(entry); 3594 if (!si) { 3595 /* 3596 * An acceptable race has occurred since the failing 3597 * __swap_duplicate(): the swap device may have been swapped off 3598 */ 3599 goto outer; 3600 } 3601 spin_lock(&si->lock); 3602 3603 offset = swp_offset(entry); 3604 3605 ci = lock_cluster(si, offset); 3606 3607 count = si->swap_map[offset] & ~SWAP_HAS_CACHE; 3608 3609 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { 3610 /* 3611 * The higher the swap count, the more likely it is that tasks 3612 * will race to add swap count continuation: we need to avoid 3613 * over-provisioning. 3614 */ 3615 goto out; 3616 } 3617 3618 if (!page) { 3619 ret = -ENOMEM; 3620 goto out; 3621 } 3622 3623 /* 3624 * We are fortunate that although vmalloc_to_page uses pte_offset_map, 3625 * no architecture is using highmem pages for kernel page tables: so it 3626 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps. 3627 */ 3628 head = vmalloc_to_page(si->swap_map + offset); 3629 offset &= ~PAGE_MASK; 3630 3631 spin_lock(&si->cont_lock); 3632 /* 3633 * Page allocation does not initialize the page's lru field, 3634 * but it does always reset its private field. 3635 */ 3636 if (!page_private(head)) { 3637 BUG_ON(count & COUNT_CONTINUED); 3638 INIT_LIST_HEAD(&head->lru); 3639 set_page_private(head, SWP_CONTINUED); 3640 si->flags |= SWP_CONTINUED; 3641 } 3642 3643 list_for_each_entry(list_page, &head->lru, lru) { 3644 unsigned char *map; 3645 3646 /* 3647 * If the previous map said no continuation, but we've found 3648 * a continuation page, free our allocation and use this one. 
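 * (A continuation page holds the continuation counts for every entry * of this swap_map page, so a page allocated earlier for a * neighbouring entry can simply be reused for ours.) 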
3649 */ 3650 if (!(count & COUNT_CONTINUED)) 3651 goto out_unlock_cont; 3652 3653 map = kmap_atomic(list_page) + offset; 3654 count = *map; 3655 kunmap_atomic(map); 3656 3657 /* 3658 * If this continuation count now has some space in it, 3659 * free our allocation and use this one. 3660 */ 3661 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) 3662 goto out_unlock_cont; 3663 } 3664 3665 list_add_tail(&page->lru, &head->lru); 3666 page = NULL; /* now it's attached, don't free it */ 3667 out_unlock_cont: 3668 spin_unlock(&si->cont_lock); 3669 out: 3670 unlock_cluster(ci); 3671 spin_unlock(&si->lock); 3672 put_swap_device(si); 3673 outer: 3674 if (page) 3675 __free_page(page); 3676 return ret; 3677 } 3678 3679 /* 3680 * swap_count_continued - when the original swap_map count is incremented 3681 * from SWAP_MAP_MAX, check if there is already a continuation page to carry 3682 * into, carry if so, or else fail until a new continuation page is allocated; 3683 * when the original swap_map count is decremented from 0 with continuation, 3684 * borrow from the continuation and report whether it still holds more. 3685 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster 3686 * lock. 3687 */ 3688 static bool swap_count_continued(struct swap_info_struct *si, 3689 pgoff_t offset, unsigned char count) 3690 { 3691 struct page *head; 3692 struct page *page; 3693 unsigned char *map; 3694 bool ret; 3695 3696 head = vmalloc_to_page(si->swap_map + offset); 3697 if (page_private(head) != SWP_CONTINUED) { 3698 BUG_ON(count & COUNT_CONTINUED); 3699 return false; /* need to add count continuation */ 3700 } 3701 3702 spin_lock(&si->cont_lock); 3703 offset &= ~PAGE_MASK; 3704 page = list_next_entry(head, lru); 3705 map = kmap_atomic(page) + offset; 3706 3707 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ 3708 goto init_map; /* jump over SWAP_CONT_MAX checks */ 3709 3710 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ 3711 /* 3712 * Think of how you add 1 to 999 3713 */ 3714 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { 3715 kunmap_atomic(map); 3716 page = list_next_entry(page, lru); 3717 BUG_ON(page == head); 3718 map = kmap_atomic(page) + offset; 3719 } 3720 if (*map == SWAP_CONT_MAX) { 3721 kunmap_atomic(map); 3722 page = list_next_entry(page, lru); 3723 if (page == head) { 3724 ret = false; /* add count continuation */ 3725 goto out; 3726 } 3727 map = kmap_atomic(page) + offset; 3728 init_map: *map = 0; /* we didn't zero the page */ 3729 } 3730 *map += 1; 3731 kunmap_atomic(map); 3732 while ((page = list_prev_entry(page, lru)) != head) { 3733 map = kmap_atomic(page) + offset; 3734 *map = COUNT_CONTINUED; 3735 kunmap_atomic(map); 3736 } 3737 ret = true; /* incremented */ 3738 3739 } else { /* decrementing */ 3740 /* 3741 * Think of how you subtract 1 from 1000 3742 */ 3743 BUG_ON(count != COUNT_CONTINUED); 3744 while (*map == COUNT_CONTINUED) { 3745 kunmap_atomic(map); 3746 page = list_next_entry(page, lru); 3747 BUG_ON(page == head); 3748 map = kmap_atomic(page) + offset; 3749 } 3750 BUG_ON(*map == 0); 3751 *map -= 1; 3752 if (*map == 0) 3753 count = 0; 3754 kunmap_atomic(map); 3755 while ((page = list_prev_entry(page, lru)) != head) { 3756 map = kmap_atomic(page) + offset; 3757 *map = SWAP_CONT_MAX | count; 3758 count = COUNT_CONTINUED; 3759 kunmap_atomic(map); 3760 } 3761 ret = count == COUNT_CONTINUED; 3762 } 3763 out: 3764 spin_unlock(&si->cont_lock); 3765 return ret; 3766 } 3767 3768 /* 3769 * free_swap_count_continuations - swapoff free all 
the continuation pages 3770 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. 3771 */ 3772 static void free_swap_count_continuations(struct swap_info_struct *si) 3773 { 3774 pgoff_t offset; 3775 3776 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { 3777 struct page *head; 3778 head = vmalloc_to_page(si->swap_map + offset); 3779 if (page_private(head)) { 3780 struct page *page, *next; 3781 3782 list_for_each_entry_safe(page, next, &head->lru, lru) { 3783 list_del(&page->lru); 3784 __free_page(page); 3785 } 3786 } 3787 } 3788 } 3789 3790 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) 3791 void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) 3792 { 3793 struct swap_info_struct *si, *next; 3794 int nid = page_to_nid(page); 3795 3796 if (!(gfp_mask & __GFP_IO)) 3797 return; 3798 3799 if (!blk_cgroup_congested()) 3800 return; 3801 3802 /* 3803 * We've already scheduled a throttle, avoid taking the global swap 3804 * lock. 3805 */ 3806 if (current->throttle_queue) 3807 return; 3808 3809 spin_lock(&swap_avail_lock); 3810 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], 3811 avail_lists[nid]) { 3812 if (si->bdev) { 3813 blkcg_schedule_throttle(bdev_get_queue(si->bdev), true); 3814 break; 3815 } 3816 } 3817 spin_unlock(&swap_avail_lock); 3818 } 3819 #endif 3820 3821 static int __init swapfile_init(void) 3822 { 3823 int nid; 3824 3825 swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head), 3826 GFP_KERNEL); 3827 if (!swap_avail_heads) { 3828 pr_emerg("Not enough memory for swap heads, swap is disabled\n"); 3829 return -ENOMEM; 3830 } 3831 3832 for_each_node(nid) 3833 plist_head_init(&swap_avail_heads[nid]); 3834 3835 return 0; 3836 } 3837 subsys_initcall(swapfile_init); 3838