/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C)  1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device**);

DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */
PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by get_swap_page() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but get_swap_page() doesn't need to look at full ones.
 * This uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
 */
static PLIST_HEAD(swap_avail_head);
static DEFINE_SPINLOCK(swap_avail_lock);

struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);

static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
}

/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct page *page;
	int ret = 0;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	if (!page)
		return 0;
	/*
	 * This function is called from scan_swap_map() and it's called
	 * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
	 * We have to use trylock for avoiding deadlock. This is a special
	 * case and you should use try_to_free_swap() with explicit lock_page()
	 * in usual operations.
	 */
	if (trylock_page(page)) {
		ret = try_to_free_swap(page);
		unlock_page(page);
	}
	put_page(page);
	return ret;
}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
	struct swap_extent *se;
	sector_t start_block;
	sector_t nr_blocks;
	int err = 0;

	/* Do not discard the swap header page! */
	se = &si->first_swap_extent;
	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
	if (nr_blocks) {
		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			return err;
		cond_resched();
	}

	list_for_each_entry(se, &si->first_swap_extent.list, list) {
		start_block = se->start_block << (PAGE_SHIFT - 9);
		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			break;

		cond_resched();
	}
	return err;		/* That will often be -EOPNOTSUPP */
}

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
				 pgoff_t start_page, pgoff_t nr_pages)
{
	struct swap_extent *se = si->curr_swap_extent;
	int found_extent = 0;

	while (nr_pages) {
		if (se->start_page <= start_page &&
		    start_page < se->start_page + se->nr_pages) {
			pgoff_t offset = start_page - se->start_page;
			sector_t start_block = se->start_block + offset;
			sector_t nr_blocks = se->nr_pages - offset;

			if (nr_blocks > nr_pages)
				nr_blocks = nr_pages;
			start_page += nr_blocks;
			nr_pages -= nr_blocks;

			if (!found_extent++)
				si->curr_swap_extent = se;

			start_block <<= PAGE_SHIFT - 9;
			nr_blocks <<= PAGE_SHIFT - 9;
			if (blkdev_issue_discard(si->bdev, start_block,
				    nr_blocks, GFP_NOIO, 0))
				break;
		}

		se = list_next_entry(se, list);
	}
}

#define SWAPFILE_CLUSTER	256
#define LATENCY_LIMIT		256

static inline void cluster_set_flag(struct swap_cluster_info *info,
	unsigned int flag)
{
	info->flags = flag;
}

static inline unsigned int cluster_count(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_count(struct swap_cluster_info *info,
				     unsigned int c)
{
	info->data = c;
}

static inline void cluster_set_count_flag(struct swap_cluster_info *info,
					  unsigned int c, unsigned int f)
{
	info->flags = f;
	info->data = c;
}

static inline unsigned int cluster_next(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_next(struct swap_cluster_info *info,
				    unsigned int n)
{
	info->data = n;
}

static inline void cluster_set_next_flag(struct swap_cluster_info *info,
					 unsigned int n, unsigned int f)
{
	info->flags = f;
	info->data = n;
}

static inline bool cluster_is_free(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_FREE;
}

static inline bool cluster_is_null(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_NEXT_NULL;
}

static inline void cluster_set_null(struct swap_cluster_info *info)
{
	info->flags = CLUSTER_FLAG_NEXT_NULL;
	info->data = 0;
}

static inline bool cluster_list_empty(struct swap_cluster_list *list)
{
	return cluster_is_null(&list->head);
}

static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
{
	return cluster_next(&list->head);
}

static void cluster_list_init(struct swap_cluster_list *list)
{
	cluster_set_null(&list->head);
	cluster_set_null(&list->tail);
}

static void cluster_list_add_tail(struct swap_cluster_list *list,
				  struct swap_cluster_info *ci,
				  unsigned int idx)
{
	if (cluster_list_empty(list)) {
		cluster_set_next_flag(&list->head, idx, 0);
		cluster_set_next_flag(&list->tail, idx, 0);
	} else {
		unsigned int tail = cluster_next(&list->tail);

		cluster_set_next(&ci[tail], idx);
		cluster_set_next_flag(&list->tail, idx, 0);
	}
}

static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
					   struct swap_cluster_info *ci)
{
	unsigned int idx;

	idx = cluster_next(&list->head);
	if (cluster_next(&list->tail) == idx) {
		cluster_set_null(&list->head);
		cluster_set_null(&list->tail);
	} else
		cluster_set_next_flag(&list->head,
				      cluster_next(&ci[idx]), 0);

	return idx;
}
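/*
 * Note: the free and discard cluster lists above are threaded through the
 * cluster_info array itself.  Each swap_cluster_info on a list stores the
 * index of the next cluster in its data field, so no separate list nodes
 * need to be allocated.
 */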
/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
		unsigned int idx)
{
	/*
	 * If scan_swap_map() can't find a free cluster, it will check
	 * si->swap_map directly. To make sure the discarding cluster isn't
	 * taken by scan_swap_map(), mark the swap entries bad (occupied).
	 * They will be cleared after the discard.
	 */
	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
			SWAP_MAP_BAD, SWAPFILE_CLUSTER);

	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);

	schedule_work(&si->discard_work);
}

/*
 * Actually do the discards. After a cluster discard is finished, the cluster
 * will be added to the free cluster list. Caller should hold si->lock.
 */
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
	struct swap_cluster_info *info;
	unsigned int idx;

	info = si->cluster_info;

	while (!cluster_list_empty(&si->discard_clusters)) {
		idx = cluster_list_del_first(&si->discard_clusters, info);
		spin_unlock(&si->lock);

		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
				SWAPFILE_CLUSTER);

		spin_lock(&si->lock);
		cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE);
		cluster_list_add_tail(&si->free_clusters, info, idx);
		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
				0, SWAPFILE_CLUSTER);
	}
}

static void swap_discard_work(struct work_struct *work)
{
	struct swap_info_struct *si;

	si = container_of(work, struct swap_info_struct, discard_work);

	spin_lock(&si->lock);
	swap_do_scheduled_discard(si);
	spin_unlock(&si->lock);
}

/*
 * The cluster corresponding to page_nr will be used. The cluster will be
 * removed from free cluster list and its usage counter will be increased.
 */
static void inc_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;
	if (cluster_is_free(&cluster_info[idx])) {
		VM_BUG_ON(cluster_list_first(&p->free_clusters) != idx);
		cluster_list_del_first(&p->free_clusters, cluster_info);
		cluster_set_count_flag(&cluster_info[idx], 0, 0);
	}

	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) + 1);
}

/*
 * The cluster corresponding to page_nr decreases one usage. If the usage
 * counter becomes 0, which means no page in the cluster is in use, we can
 * optionally discard the cluster and add it to the free cluster list.
 */
static void dec_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;

	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) - 1);

	if (cluster_count(&cluster_info[idx]) == 0) {
		/*
		 * If the swap is discardable, prepare to discard the cluster
		 * instead of freeing it immediately. The cluster will be
		 * freed after the discard.
		 */
		if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
				(SWP_WRITEOK | SWP_PAGE_DISCARD)) {
			swap_cluster_schedule_discard(p, idx);
			return;
		}

		cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
		cluster_list_add_tail(&p->free_clusters, cluster_info, idx);
	}
}

/*
 * It's possible scan_swap_map() uses a free cluster in the middle of the
 * free cluster list. Avoid such abuse to prevent list corruption.
 */
static bool
scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
	unsigned long offset)
{
	struct percpu_cluster *percpu_cluster;
	bool conflict;

	offset /= SWAPFILE_CLUSTER;
	conflict = !cluster_list_empty(&si->free_clusters) &&
		offset != cluster_list_first(&si->free_clusters) &&
		cluster_is_free(&si->cluster_info[offset]);

	if (!conflict)
		return false;

	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
	cluster_set_null(&percpu_cluster->index);
	return true;
}

/*
 * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
 * might involve allocating a new cluster for current CPU too.
 */
static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
	unsigned long *offset, unsigned long *scan_base)
{
	struct percpu_cluster *cluster;
	bool found_free;
	unsigned long tmp;

new_cluster:
	cluster = this_cpu_ptr(si->percpu_cluster);
	if (cluster_is_null(&cluster->index)) {
		if (!cluster_list_empty(&si->free_clusters)) {
			cluster->index = si->free_clusters.head;
			cluster->next = cluster_next(&cluster->index) *
					SWAPFILE_CLUSTER;
		} else if (!cluster_list_empty(&si->discard_clusters)) {
			/*
			 * we don't have a free cluster but have some clusters
			 * being discarded; do the discard now and reclaim them
			 */
			swap_do_scheduled_discard(si);
			*scan_base = *offset = si->cluster_next;
			goto new_cluster;
		} else
			return;
	}

	found_free = false;

	/*
	 * Other CPUs can use our cluster if they can't find a free cluster,
	 * check if there is still a free entry in the cluster
	 */
	tmp = cluster->next;
	while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *
	       SWAPFILE_CLUSTER) {
		if (!si->swap_map[tmp]) {
			found_free = true;
			break;
		}
		tmp++;
	}
	if (!found_free) {
		cluster_set_null(&cluster->index);
		goto new_cluster;
	}
	cluster->next = tmp + 1;
	*offset = tmp;
	*scan_base = tmp;
}

static unsigned long scan_swap_map(struct swap_info_struct *si,
				   unsigned char usage)
{
	unsigned long offset;
	unsigned long scan_base;
	unsigned long last_in_cluster = 0;
	int latency_ration = LATENCY_LIMIT;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 * And we let swap pages go all over an SSD partition.  Hugh
	 */

	si->flags += SWP_SCANNING;
	scan_base = offset = si->cluster_next;

	/* SSD algorithm */
	if (si->cluster_info) {
		scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
		goto checks;
	}

	if (unlikely(!si->cluster_nr--)) {
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;
			goto checks;
		}

		spin_unlock(&si->lock);

		/*
		 * If seek is expensive, start searching for new cluster from
		 * start of partition, to minimize the span of allocated swap.
		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
		 */
		scan_base = offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= si->highest_bit; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&si->lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = scan_base;
		spin_lock(&si->lock);
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
	}

checks:
	if (si->cluster_info) {
		while (scan_swap_map_ssd_cluster_conflict(si, offset))
			scan_swap_map_try_ssd_cluster(si, &offset, &scan_base);
	}
	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (offset > si->highest_bit)
		scan_base = offset = si->lowest_bit;

	/* reuse swap entry of cache-only swap if not busy. */
	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
		int swap_was_freed;
		spin_unlock(&si->lock);
		swap_was_freed = __try_to_reclaim_swap(si, offset);
		spin_lock(&si->lock);
		/* entry was freed successfully, try to use this again */
		if (swap_was_freed)
			goto checks;
		goto scan; /* check next one */
	}

	if (si->swap_map[offset])
		goto scan;

	if (offset == si->lowest_bit)
		si->lowest_bit++;
	if (offset == si->highest_bit)
		si->highest_bit--;
	si->inuse_pages++;
	if (si->inuse_pages == si->pages) {
		si->lowest_bit = si->max;
		si->highest_bit = 0;
		spin_lock(&swap_avail_lock);
		plist_del(&si->avail_list, &swap_avail_head);
		spin_unlock(&swap_avail_lock);
	}
	si->swap_map[offset] = usage;
	inc_cluster_info_page(si, si->cluster_info, offset);
	si->cluster_next = offset + 1;
	si->flags -= SWP_SCANNING;

	return offset;

scan:
	spin_unlock(&si->lock);
	while (++offset <= si->highest_bit) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
	}
	offset = si->lowest_bit;
	while (offset < scan_base) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
		offset++;
	}
	spin_lock(&si->lock);

no_page:
	si->flags -= SWP_SCANNING;
	return 0;
}

swp_entry_t get_swap_page(void)
{
	struct swap_info_struct *si, *next;
	pgoff_t offset;

	if (atomic_long_read(&nr_swap_pages) <= 0)
		goto noswap;
	atomic_long_dec(&nr_swap_pages);

	spin_lock(&swap_avail_lock);

start_over:
	plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
		/* requeue si to after same-priority siblings */
		plist_requeue(&si->avail_list, &swap_avail_head);
		spin_unlock(&swap_avail_lock);
		spin_lock(&si->lock);
		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
			spin_lock(&swap_avail_lock);
			if (plist_node_empty(&si->avail_list)) {
				spin_unlock(&si->lock);
				goto nextsi;
			}
			WARN(!si->highest_bit,
			     "swap_info %d in list but !highest_bit\n",
			     si->type);
			WARN(!(si->flags & SWP_WRITEOK),
			     "swap_info %d in list but !SWP_WRITEOK\n",
			     si->type);
			plist_del(&si->avail_list, &swap_avail_head);
			spin_unlock(&si->lock);
			goto nextsi;
		}

		/* This is called for allocating swap entry for cache */
		offset = scan_swap_map(si, SWAP_HAS_CACHE);
		spin_unlock(&si->lock);
		if (offset)
			return swp_entry(si->type, offset);
		pr_debug("scan_swap_map of si %d failed to find offset\n",
		       si->type);
		spin_lock(&swap_avail_lock);
nextsi:
		/*
		 * if we got here, it's likely that si was almost full before,
		 * and since scan_swap_map() can drop the si->lock, multiple
		 * callers probably all tried to get a page from the same si
		 * and it filled up before we could get one; or, the si filled
		 * up between us dropping swap_avail_lock and taking si->lock.
		 * Since we dropped the swap_avail_lock, the swap_avail_head
		 * list may have been modified; so if next is still in the
		 * swap_avail_head list then try it, otherwise start over.
		 */
		if (plist_node_empty(&next->avail_list))
			goto start_over;
	}

	spin_unlock(&swap_avail_lock);

	atomic_long_inc(&nr_swap_pages);
noswap:
	return (swp_entry_t) {0};
}

/* The only caller of this function is now the suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si;
	pgoff_t offset;

	si = swap_info[type];
	spin_lock(&si->lock);
	if (si && (si->flags & SWP_WRITEOK)) {
		atomic_long_dec(&nr_swap_pages);
		/* This is called for allocating swap entry, not cache */
		offset = scan_swap_map(si, 1);
		if (offset) {
			spin_unlock(&si->lock);
			return swp_entry(type, offset);
		}
		atomic_long_inc(&nr_swap_pages);
	}
	spin_unlock(&si->lock);
	return (swp_entry_t) {0};
}

static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned long offset, type;

	if (!entry.val)
		goto out;
	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_nofile;
	p = swap_info[type];
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_free;
	spin_lock(&p->lock);
	return p;

bad_free:
	pr_err("swap_free: %s%08lx\n", Unused_offset, entry.val);
	goto out;
bad_offset:
	pr_err("swap_free: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	pr_err("swap_free: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	pr_err("swap_free: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}

static unsigned char swap_entry_free(struct swap_info_struct *p,
				     swp_entry_t entry, unsigned char usage)
{
	unsigned long offset = swp_offset(entry);
	unsigned char count;
	unsigned char has_cache;

	count = p->swap_map[offset];
	has_cache = count & SWAP_HAS_CACHE;
	count &= ~SWAP_HAS_CACHE;

	if (usage == SWAP_HAS_CACHE) {
		VM_BUG_ON(!has_cache);
		has_cache = 0;
	} else if (count == SWAP_MAP_SHMEM) {
		/*
		 * Or we could insist on shmem.c using a special
		 * swap_shmem_free() and free_shmem_swap_and_cache()...
		 */
		count = 0;
	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
		if (count == COUNT_CONTINUED) {
			if (swap_count_continued(p, offset, count))
				count = SWAP_MAP_MAX | COUNT_CONTINUED;
			else
				count = SWAP_MAP_MAX;
		} else
			count--;
	}

	usage = count | has_cache;
	p->swap_map[offset] = usage;

	/* free if no reference */
	if (!usage) {
		mem_cgroup_uncharge_swap(entry);
		dec_cluster_info_page(p, p->cluster_info, offset);
		if (offset < p->lowest_bit)
			p->lowest_bit = offset;
		if (offset > p->highest_bit) {
			bool was_full = !p->highest_bit;
			p->highest_bit = offset;
			if (was_full && (p->flags & SWP_WRITEOK)) {
				spin_lock(&swap_avail_lock);
				WARN_ON(!plist_node_empty(&p->avail_list));
				if (plist_node_empty(&p->avail_list))
					plist_add(&p->avail_list,
						  &swap_avail_head);
				spin_unlock(&swap_avail_lock);
			}
		}
		atomic_long_inc(&nr_swap_pages);
		p->inuse_pages--;
		frontswap_invalidate_page(p->type, offset);
		if (p->flags & SWP_BLKDEV) {
			struct gendisk *disk = p->bdev->bd_disk;
			if (disk->fops->swap_slot_free_notify)
				disk->fops->swap_slot_free_notify(p->bdev,
								  offset);
		}
	}

	return usage;
}

/*
 * Caller has made sure that the swap device corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = swap_info_get(entry);
	if (p) {
		swap_entry_free(p, entry, 1);
		spin_unlock(&p->lock);
	}
}

/*
 * Called after dropping swapcache to decrease refcnt to swap entries.
 */
void swapcache_free(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = swap_info_get(entry);
	if (p) {
		swap_entry_free(p, entry, SWAP_HAS_CACHE);
		spin_unlock(&p->lock);
	}
}

/*
 * How many references to page are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	swp_entry_t entry;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (p) {
		count = swap_count(p->swap_map[swp_offset(entry)]);
		spin_unlock(&p->lock);
	}
	return count;
}

/*
 * How many references to @entry are currently swapped out?
 * This considers COUNT_CONTINUED so it returns the exact answer.
 */
int swp_swapcount(swp_entry_t entry)
{
	int count, tmp_count, n;
	struct swap_info_struct *p;
	struct page *page;
	pgoff_t offset;
	unsigned char *map;

	p = swap_info_get(entry);
	if (!p)
		return 0;

	count = swap_count(p->swap_map[swp_offset(entry)]);
	if (!(count & COUNT_CONTINUED))
		goto out;

	count &= ~COUNT_CONTINUED;
	n = SWAP_MAP_MAX + 1;

	offset = swp_offset(entry);
	page = vmalloc_to_page(p->swap_map + offset);
	offset &= ~PAGE_MASK;
	VM_BUG_ON(page_private(page) != SWP_CONTINUED);

	do {
		page = list_next_entry(page, lru);
		map = kmap_atomic(page);
		tmp_count = map[offset];
		kunmap_atomic(map);

		count += (tmp_count & ~COUNT_CONTINUED) * n;
		n *= (SWAP_CONT_MAX + 1);
	} while (tmp_count & COUNT_CONTINUED);
out:
	spin_unlock(&p->lock);
	return count;
}

/*
 * We can write to an anon page without COW if there are no other references
 * to it.  And as a side-effect, free up its swap: because the old content
 * on disk will never be read, and seeking back there to write new content
 * later would only waste time away from clustering.
 *
 * NOTE: total_mapcount should not be relied upon by the caller if
 * reuse_swap_page() returns false, but it may be always overwritten
 * (see the other implementation for CONFIG_SWAP=n).
 */
bool reuse_swap_page(struct page *page, int *total_mapcount)
{
	int count;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (unlikely(PageKsm(page)))
		return false;
	count = page_trans_huge_mapcount(page, total_mapcount);
	if (count <= 1 && PageSwapCache(page)) {
		count += page_swapcount(page);
		if (count == 1 && !PageWriteback(page)) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
	}
	return count <= 1;
}

/*
 * If swap is getting full, or if there are no more mappings of this page,
 * then try_to_free_swap is called to free its swap space.
 */
int try_to_free_swap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_swapcount(page))
		return 0;

	/*
	 * Once hibernation has begun to create its image of memory,
	 * there's a danger that one of the calls to try_to_free_swap()
	 * - most probably a call from __try_to_reclaim_swap() while
	 * hibernation is allocating its own swap pages for the image,
	 * but conceivably even a call from memory reclaim - will free
	 * the swap from a page which has already been recorded in the
	 * image as a clean swapcache page, and then reuse its swap for
	 * another page of the image.  On waking from hibernation, the
	 * original page might be freed under memory pressure, then
	 * later read back in from swap, now with the wrong data.
	 *
	 * Hibernation suspends storage while it is writing the image
	 * to disk so check that here.
	 */
	if (pm_suspended_storage())
		return 0;

	delete_from_swap_cache(page);
	SetPageDirty(page);
	return 1;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
int free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct *p;
	struct page *page = NULL;

	if (non_swap_entry(entry))
		return 1;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
			page = find_get_page(swap_address_space(entry),
					     swp_offset(entry));
			if (page && !trylock_page(page)) {
				put_page(page);
				page = NULL;
			}
		}
		spin_unlock(&p->lock);
	}
	if (page) {
		/*
		 * Not mapped elsewhere, or swap space full? Free it!
		 * Also recheck PageSwapCache now page is locked (above).
		 */
		if (PageSwapCache(page) && !PageWriteback(page) &&
		    (!page_mapped(page) || mem_cgroup_swap_full(page))) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		put_page(page);
	}
	return p != NULL;
}

#ifdef CONFIG_HIBERNATION
/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
	struct block_device *bdev = NULL;
	int type;

	if (device)
		bdev = bdget(device);

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		struct swap_info_struct *sis = swap_info[type];

		if (!(sis->flags & SWP_WRITEOK))
			continue;

		if (!bdev) {
			if (bdev_p)
				*bdev_p = bdgrab(sis->bdev);

			spin_unlock(&swap_lock);
			return type;
		}
		if (bdev == sis->bdev) {
			struct swap_extent *se = &sis->first_swap_extent;

			if (se->start_block == offset) {
				if (bdev_p)
					*bdev_p = bdgrab(sis->bdev);

				spin_unlock(&swap_lock);
				bdput(bdev);
				return type;
			}
		}
	}
	spin_unlock(&swap_lock);
	if (bdev)
		bdput(bdev);

	return -ENODEV;
}

/*
 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 * corresponding to given index in swap_info (swap type).
 */
sector_t swapdev_block(int type, pgoff_t offset)
{
	struct block_device *bdev;

	if ((unsigned int)type >= nr_swapfiles)
		return 0;
	if (!(swap_info[type]->flags & SWP_WRITEOK))
		return 0;
	return map_swap_entry(swp_entry(type, offset), &bdev);
}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
	unsigned int n = 0;

	spin_lock(&swap_lock);
	if ((unsigned int)type < nr_swapfiles) {
		struct swap_info_struct *sis = swap_info[type];

		spin_lock(&sis->lock);
		if (sis->flags & SWP_WRITEOK) {
			n = sis->pages;
			if (free)
				n -= sis->inuse_pages;
		}
		spin_unlock(&sis->lock);
	}
	spin_unlock(&swap_lock);
	return n;
}
#endif /* CONFIG_HIBERNATION */

static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
}

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	struct page *swapcache;
	struct mem_cgroup *memcg;
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	swapcache = page;
	page = ksm_might_need_to_copy(page, vma, addr);
	if (unlikely(!page))
		return -ENOMEM;

	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
				&memcg, false)) {
		ret = -ENOMEM;
		goto out_nolock;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
		mem_cgroup_cancel_charge(page, memcg, false);
		ret = 0;
		goto out;
	}

	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	if (page == swapcache) {
		page_add_anon_rmap(page, vma, addr, false);
		mem_cgroup_commit_charge(page, memcg, true, false);
	} else { /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, addr, false);
		mem_cgroup_commit_charge(page, memcg, false, false);
		lru_cache_add_active_or_unevictable(page, vma);
	}
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
out:
	pte_unmap_unlock(pte, ptl);
out_nolock:
	if (page != swapcache) {
		unlock_page(page);
		put_page(page);
	}
	return ret;
}
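/*
 * The unuse_pte_range/pmd/pud/vma/mm helpers below walk one mm's page
 * tables top-down, looking for ptes that reference the swap entry being
 * removed, and hand each match to unuse_pte() above.
 */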
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pte_t swp_pte = swp_entry_to_pte(entry);
	pte_t *pte;
	int ret = 0;

	/*
	 * We don't actually need pte lock while scanning for swp_pte: since
	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
	 * page table while we're scanning; though it could get zapped, and on
	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
	 * of unmatched parts which look like swp_pte, so unuse_pte must
	 * recheck under pte lock.  Scanning without pte lock lets it be
	 * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
	 */
	pte = pte_offset_map(pmd, addr);
	do {
		/*
		 * swapoff spends a _lot_ of time in this loop!
		 * Test inline before going to call unuse_pte.
		 */
		if (unlikely(pte_same_as_swp(*pte, swp_pte))) {
			pte_unmap(pte);
			ret = unuse_pte(vma, pmd, addr, entry, page);
			if (ret)
				goto out;
			pte = pte_offset_map(pmd, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
out:
	return ret;
}

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pmd_t *pmd;
	unsigned long next;
	int ret;

	pmd = pmd_offset(pud, addr);
	do {
		cond_resched();
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pud_t *pud;
	unsigned long next;
	int ret;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int unuse_vma(struct vm_area_struct *vma,
				swp_entry_t entry, struct page *page)
{
	pgd_t *pgd;
	unsigned long addr, end, next;
	int ret;

	if (page_anon_vma(page)) {
		addr = page_address_in_vma(page, vma);
		if (addr == -EFAULT)
			return 0;
		else
			end = addr + PAGE_SIZE;
	} else {
		addr = vma->vm_start;
		end = vma->vm_end;
	}

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;
	int ret = 0;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_inactive_list is unlikely to unmap
		 * its ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
			break;
		cond_resched();
	}
	up_read(&mm->mmap_sem);
	return (ret < 0)? ret: 0;
}
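/*
 * The helpers above clear references to a single swap entry from one mm.
 * try_to_unuse() below drives them during swapoff: for every entry still
 * in use it reads the page back in and removes the entry from each mm.
 */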
/*
 * Scan swap_map (or frontswap_map if frontswap parameter is true)
 * from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
					unsigned int prev, bool frontswap)
{
	unsigned int max = si->max;
	unsigned int i = prev;
	unsigned char count;

	/*
	 * No need for swap_lock here: we're just looking
	 * for whether an entry is in use, not modifying it; false
	 * hits are okay, and sys_swapoff() has already prevented new
	 * allocations from this area (while holding swap_lock).
	 */
	for (;;) {
		if (++i >= max) {
			if (!prev) {
				i = 0;
				break;
			}
			/*
			 * No entries in use at top of swap_map,
			 * loop back to start and recheck there.
			 */
			max = prev + 1;
			prev = 0;
			i = 1;
		}
		count = READ_ONCE(si->swap_map[i]);
		if (count && swap_count(count) != SWAP_MAP_BAD)
			if (!frontswap || frontswap_test(si, i))
				break;
		if ((i % LATENCY_LIMIT) == 0)
			cond_resched();
	}
	return i;
}

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 *
 * if the boolean frontswap is true, only unuse pages_to_unuse pages;
 * pages_to_unuse==0 means all pages; ignored if frontswap is false
 */
int try_to_unuse(unsigned int type, bool frontswap,
		 unsigned long pages_to_unuse)
{
	struct swap_info_struct *si = swap_info[type];
	struct mm_struct *start_mm;
	volatile unsigned char *swap_map; /* swap_map is accessed without
					   * locking. Mark it as volatile
					   * to prevent compiler doing
					   * something odd.
					   */
	unsigned char swcount;
	struct page *page;
	swp_entry_t entry;
	unsigned int i = 0;
	int retval = 0;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry).  Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering, which clusters forked mms
	 * together, child after parent.  If we race with dup_mmap(), we
	 * prefer to resolve parent before child, lest we miss entries
	 * duplicated after we scanned child: using last mm would invert
	 * that.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone.  Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * there are races when an instance of an entry might be missed.
	 */
	while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
		if (signal_pending(current)) {
			retval = -EINTR;
			break;
		}

		/*
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one.  Otherwise, get a clean
		 * page and read the swap into it.
		 */
		swap_map = &si->swap_map[i];
		entry = swp_entry(type, i);
		page = read_swap_cache_async(entry,
					GFP_HIGHUSER_MOVABLE, NULL, 0);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			swcount = *swap_map;
			/*
			 * We don't hold lock here, so the swap entry could be
			 * SWAP_MAP_BAD (when the cluster is discarding).
			 * Instead of failing out, we can just skip the swap
			 * entry because swapoff will wait for the discard to
			 * finish anyway.
			 */
			if (!swcount || swcount == SWAP_MAP_BAD)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page.  When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry.  This
		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page_locked(page);
		wait_on_page_writeback(page);
		lock_page(page);
		wait_on_page_writeback(page);

		/*
		 * Remove all references to entry.
		 */
		swcount = *swap_map;
		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
			retval = shmem_unuse(entry, page);
			/* page has already been unlocked and released */
			if (retval < 0)
				break;
			continue;
		}
		if (swap_count(swcount) && start_mm != &init_mm)
			retval = unuse_mm(start_mm, entry, page);

		if (swap_count(*swap_map)) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *prev_mm = start_mm;
			struct mm_struct *mm;

			atomic_inc(&new_start_mm->mm_users);
			atomic_inc(&prev_mm->mm_users);
			spin_lock(&mmlist_lock);
			while (swap_count(*swap_map) && !retval &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				if (!atomic_inc_not_zero(&mm->mm_users))
					continue;
				spin_unlock(&mmlist_lock);
				mmput(prev_mm);
				prev_mm = mm;

				cond_resched();

				swcount = *swap_map;
				if (!swap_count(swcount)) /* any usage ? */
					;
				else if (mm == &init_mm)
					set_start_mm = 1;
				else
					retval = unuse_mm(mm, entry, page);

				if (set_start_mm && *swap_map < swcount) {
					mmput(new_start_mm);
					atomic_inc(&mm->mm_users);
					new_start_mm = mm;
					set_start_mm = 0;
				}
				spin_lock(&mmlist_lock);
			}
			spin_unlock(&mmlist_lock);
			mmput(prev_mm);
			mmput(start_mm);
			start_mm = new_start_mm;
		}
		if (retval) {
			unlock_page(page);
			put_page(page);
			break;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_unmap could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile.  So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page.  Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 *
		 * Given how unuse_vma() targets one particular offset
		 * in an anon_vma, once the anon_vma has been determined,
		 * this splitting happens to be just what is needed to
		 * handle where KSM pages have been swapped out: re-reading
		 * is unnecessarily slow, but we can fix that later on.
		 */
		if (swap_count(*swap_map) &&
		     PageDirty(page) && PageSwapCache(page)) {
			struct writeback_control wbc = {
				.sync_mode = WB_SYNC_NONE,
			};

			swap_writepage(page, &wbc);
			lock_page(page);
			wait_on_page_writeback(page);
		}

		/*
		 * It is conceivable that a racing task removed this page from
		 * swap cache just before we acquired the page lock at the top,
		 * or while we dropped it in unuse_mm().  The page might even
		 * be back in swap cache on another swap area: that we must not
		 * delete, since it may not have been written out to swap yet.
		 */
		if (PageSwapCache(page) &&
		    likely(page_private(page) == entry.val))
			delete_from_swap_cache(page);

		/*
		 * So we could skip searching mms once swap count went
		 * to 1, we did not mark any present ptes as dirty: must
		 * mark page dirty so shrink_page_list will preserve it.
		 */
		SetPageDirty(page);
		unlock_page(page);
		put_page(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.
		 */
		cond_resched();
		if (frontswap && pages_to_unuse > 0) {
			if (!--pages_to_unuse)
				break;
		}
	}

	mmput(start_mm);
	return retval;
}

/*
 * After a successful try_to_unuse, if no swap is now in use, we know
 * we can empty the mmlist.  swap_lock must be held on entry and exit.
 * Note that mmlist_lock nests inside swap_lock, and an mm must be
 * added to the mmlist just after page_duplicate - before would be racy.
 */
static void drain_mmlist(void)
{
	struct list_head *p, *next;
	unsigned int type;

	for (type = 0; type < nr_swapfiles; type++)
		if (swap_info[type]->inuse_pages)
			return;
	spin_lock(&mmlist_lock);
	list_for_each_safe(p, next, &init_mm.mmlist)
		list_del_init(p);
	spin_unlock(&mmlist_lock);
}

/*
 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
 * corresponds to page offset for the specified swap entry.
 * Note that the type of this function is sector_t, but it returns page offset
 * into the bdev, not sector offset.
 */
static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
	struct swap_info_struct *sis;
	struct swap_extent *start_se;
	struct swap_extent *se;
	pgoff_t offset;

	sis = swap_info[swp_type(entry)];
	*bdev = sis->bdev;

	offset = swp_offset(entry);
	start_se = sis->curr_swap_extent;
	se = start_se;

	for ( ; ; ) {
		if (se->start_page <= offset &&
				offset < (se->start_page + se->nr_pages)) {
			return se->start_block + (offset - se->start_page);
		}
		se = list_next_entry(se, list);
		sis->curr_swap_extent = se;
		BUG_ON(se == start_se);		/* It *must* be present */
	}
}

/*
 * Returns the page offset into bdev for the specified page's swap entry.
 */
sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
	swp_entry_t entry;
	entry.val = page_private(page);
	return map_swap_entry(entry, bdev);
}

/*
 * Free all of a swapdev's extent information
 */
static void destroy_swap_extents(struct swap_info_struct *sis)
{
	while (!list_empty(&sis->first_swap_extent.list)) {
		struct swap_extent *se;

		se = list_first_entry(&sis->first_swap_extent.list,
				struct swap_extent, list);
		list_del(&se->list);
		kfree(se);
	}

	if (sis->flags & SWP_FILE) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		sis->flags &= ~SWP_FILE;
		mapping->a_ops->swap_deactivate(swap_file);
	}
}

/*
 * Add a block range (and the corresponding page range) into this swapdev's
 * extent list.  The extent list is kept sorted in page order.
 *
 * This function rather assumes that it is called in ascending page order.
 */
int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block)
{
	struct swap_extent *se;
	struct swap_extent *new_se;
	struct list_head *lh;

	if (start_page == 0) {
		se = &sis->first_swap_extent;
		sis->curr_swap_extent = se;
		se->start_page = 0;
		se->nr_pages = nr_pages;
		se->start_block = start_block;
		return 1;
	} else {
		lh = sis->first_swap_extent.list.prev;	/* Highest extent */
		se = list_entry(lh, struct swap_extent, list);
		BUG_ON(se->start_page + se->nr_pages != start_page);
		if (se->start_block + se->nr_pages == start_block) {
			/* Merge it */
			se->nr_pages += nr_pages;
			return 0;
		}
	}

	/*
	 * No merge.  Insert a new extent, preserving ordering.
	 */
	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
	if (new_se == NULL)
		return -ENOMEM;
	new_se->start_page = start_page;
	new_se->nr_pages = nr_pages;
	new_se->start_block = start_block;

	list_add_tail(&new_se->list, &sis->first_swap_extent.list);
	return 1;
}

/*
 * A `swap extent' is a simple thing which maps a contiguous range of pages
 * onto a contiguous range of disk blocks.  An ordered list of swap extents
 * is built at swapon time and is then used at swap_writepage/swap_readpage
 * time for locating where on disk a page belongs.
 *
 * If the swapfile is an S_ISBLK block device, a single extent is installed.
 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
 * swap files identically.
 *
 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
 * swapfiles are handled *identically* after swapon time.
 *
 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
 * requirements, they are simply tossed out - we will never use those blocks
 * for swapping.
 *
 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
 * which will scribble on the fs.
 *
 * The amount of disk space which a single swap extent represents varies.
 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
 * extents in the list.  To avoid much list walking, we cache the previous
 * search location in `curr_swap_extent', and start new searches from there.
 * This is extremely effective.  The average number of iterations in
 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
 */
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
	struct file *swap_file = sis->swap_file;
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	int ret;

	if (S_ISBLK(inode->i_mode)) {
		ret = add_swap_extent(sis, 0, sis->max, 0);
		*span = sis->pages;
		return ret;
	}

	if (mapping->a_ops->swap_activate) {
		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
		if (!ret) {
			sis->flags |= SWP_FILE;
			ret = add_swap_extent(sis, 0, sis->max, 0);
			*span = sis->pages;
		}
		return ret;
	}

	return generic_swapfile_activate(sis, swap_file, span);
}

static void _enable_swap_info(struct swap_info_struct *p, int prio,
				unsigned char *swap_map,
				struct swap_cluster_info *cluster_info)
{
	if (prio >= 0)
		p->prio = prio;
	else
		p->prio = --least_priority;
	/*
	 * the plist prio is negated because plist ordering is
	 * low-to-high, while swap ordering is high-to-low
	 */
	p->list.prio = -p->prio;
	p->avail_list.prio = -p->prio;
	p->swap_map = swap_map;
	p->cluster_info = cluster_info;
	p->flags |= SWP_WRITEOK;
	atomic_long_add(p->pages, &nr_swap_pages);
	total_swap_pages += p->pages;

	assert_spin_locked(&swap_lock);
	/*
	 * both lists are plists, and thus priority ordered.
	 * swap_active_head needs to be priority ordered for swapoff(),
	 * which on removal of any swap_info_struct with an auto-assigned
	 * (i.e. negative) priority increments the auto-assigned priority
	 * of any lower-priority swap_info_structs.
	 * swap_avail_head needs to be priority ordered for get_swap_page(),
	 * which allocates swap pages from the highest available priority
	 * swap_info_struct.
	 */
	plist_add(&p->list, &swap_active_head);
	spin_lock(&swap_avail_lock);
	plist_add(&p->avail_list, &swap_avail_head);
	spin_unlock(&swap_avail_lock);
}

static void enable_swap_info(struct swap_info_struct *p, int prio,
				unsigned char *swap_map,
				struct swap_cluster_info *cluster_info,
				unsigned long *frontswap_map)
{
	frontswap_init(p->type, frontswap_map);
	spin_lock(&swap_lock);
	spin_lock(&p->lock);
	_enable_swap_info(p, prio, swap_map, cluster_info);
	spin_unlock(&p->lock);
	spin_unlock(&swap_lock);
}

static void reinsert_swap_info(struct swap_info_struct *p)
{
	spin_lock(&swap_lock);
	spin_lock(&p->lock);
	_enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
	spin_unlock(&p->lock);
	spin_unlock(&swap_lock);
}

SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
	struct swap_info_struct *p = NULL;
	unsigned char *swap_map;
	struct swap_cluster_info *cluster_info;
	unsigned long *frontswap_map;
	struct file *swap_file, *victim;
	struct address_space *mapping;
	struct inode *inode;
	struct filename *pathname;
	int err, found = 0;
	unsigned int old_block_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(!current->mm);

	pathname = getname(specialfile);
	if (IS_ERR(pathname))
		return PTR_ERR(pathname);

	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
	err = PTR_ERR(victim);
	if (IS_ERR(victim))
		goto out;

	mapping = victim->f_mapping;
	spin_lock(&swap_lock);
	plist_for_each_entry(p, &swap_active_head, list) {
		if (p->flags & SWP_WRITEOK) {
			if (p->swap_file->f_mapping == mapping) {
				found = 1;
				break;
			}
		}
	}
	if (!found) {
		err = -EINVAL;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (!security_vm_enough_memory_mm(current->mm, p->pages))
		vm_unacct_memory(p->pages);
	else {
		err = -ENOMEM;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	spin_lock(&swap_avail_lock);
	plist_del(&p->avail_list, &swap_avail_head);
	spin_unlock(&swap_avail_lock);
	spin_lock(&p->lock);
	if (p->prio < 0) {
		struct swap_info_struct *si = p;

		plist_for_each_entry_continue(si, &swap_active_head, list) {
			si->prio++;
			si->list.prio--;
			si->avail_list.prio--;
		}
		least_priority++;
	}
	plist_del(&p->list, &swap_active_head);
	atomic_long_sub(p->pages, &nr_swap_pages);
	total_swap_pages -= p->pages;
	p->flags &= ~SWP_WRITEOK;
	spin_unlock(&p->lock);
	spin_unlock(&swap_lock);

	set_current_oom_origin();
	err = try_to_unuse(p->type, false, 0); /* force unuse all pages */
	clear_current_oom_origin();

	if (err) {
		/* re-insert swap space back into swap_list */
		reinsert_swap_info(p);
		goto out_dput;
	}

	flush_work(&p->discard_work);

	destroy_swap_extents(p);
	if (p->flags & SWP_CONTINUED)
		free_swap_count_continuations(p);

	mutex_lock(&swapon_mutex);
	spin_lock(&swap_lock);
	spin_lock(&p->lock);
	drain_mmlist();

	/* wait for anyone still in scan_swap_map */
	p->highest_bit = 0;		/* cuts scans short */
	while (p->flags >= SWP_SCANNING) {
		spin_unlock(&p->lock);
		spin_unlock(&swap_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&swap_lock);
spin_lock(&p->lock); 1941 } 1942 1943 swap_file = p->swap_file; 1944 old_block_size = p->old_block_size; 1945 p->swap_file = NULL; 1946 p->max = 0; 1947 swap_map = p->swap_map; 1948 p->swap_map = NULL; 1949 cluster_info = p->cluster_info; 1950 p->cluster_info = NULL; 1951 frontswap_map = frontswap_map_get(p); 1952 spin_unlock(&p->lock); 1953 spin_unlock(&swap_lock); 1954 frontswap_invalidate_area(p->type); 1955 frontswap_map_set(p, NULL); 1956 mutex_unlock(&swapon_mutex); 1957 free_percpu(p->percpu_cluster); 1958 p->percpu_cluster = NULL; 1959 vfree(swap_map); 1960 vfree(cluster_info); 1961 vfree(frontswap_map); 1962 /* Destroy swap account information */ 1963 swap_cgroup_swapoff(p->type); 1964 1965 inode = mapping->host; 1966 if (S_ISBLK(inode->i_mode)) { 1967 struct block_device *bdev = I_BDEV(inode); 1968 set_blocksize(bdev, old_block_size); 1969 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 1970 } else { 1971 inode_lock(inode); 1972 inode->i_flags &= ~S_SWAPFILE; 1973 inode_unlock(inode); 1974 } 1975 filp_close(swap_file, NULL); 1976 1977 /* 1978 * Clear the SWP_USED flag after all resources are freed so that swapon 1979 * can reuse this swap_info in alloc_swap_info() safely. It is ok to 1980 * not hold p->lock after we cleared its SWP_WRITEOK. 1981 */ 1982 spin_lock(&swap_lock); 1983 p->flags = 0; 1984 spin_unlock(&swap_lock); 1985 1986 err = 0; 1987 atomic_inc(&proc_poll_event); 1988 wake_up_interruptible(&proc_poll_wait); 1989 1990 out_dput: 1991 filp_close(victim, NULL); 1992 out: 1993 putname(pathname); 1994 return err; 1995 } 1996 1997 #ifdef CONFIG_PROC_FS 1998 static unsigned swaps_poll(struct file *file, poll_table *wait) 1999 { 2000 struct seq_file *seq = file->private_data; 2001 2002 poll_wait(file, &proc_poll_wait, wait); 2003 2004 if (seq->poll_event != atomic_read(&proc_poll_event)) { 2005 seq->poll_event = atomic_read(&proc_poll_event); 2006 return POLLIN | POLLRDNORM | POLLERR | POLLPRI; 2007 } 2008 2009 return POLLIN | POLLRDNORM; 2010 } 2011 2012 /* iterator */ 2013 static void *swap_start(struct seq_file *swap, loff_t *pos) 2014 { 2015 struct swap_info_struct *si; 2016 int type; 2017 loff_t l = *pos; 2018 2019 mutex_lock(&swapon_mutex); 2020 2021 if (!l) 2022 return SEQ_START_TOKEN; 2023 2024 for (type = 0; type < nr_swapfiles; type++) { 2025 smp_rmb(); /* read nr_swapfiles before swap_info[type] */ 2026 si = swap_info[type]; 2027 if (!(si->flags & SWP_USED) || !si->swap_map) 2028 continue; 2029 if (!--l) 2030 return si; 2031 } 2032 2033 return NULL; 2034 } 2035 2036 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) 2037 { 2038 struct swap_info_struct *si = v; 2039 int type; 2040 2041 if (v == SEQ_START_TOKEN) 2042 type = 0; 2043 else 2044 type = si->type + 1; 2045 2046 for (; type < nr_swapfiles; type++) { 2047 smp_rmb(); /* read nr_swapfiles before swap_info[type] */ 2048 si = swap_info[type]; 2049 if (!(si->flags & SWP_USED) || !si->swap_map) 2050 continue; 2051 ++*pos; 2052 return si; 2053 } 2054 2055 return NULL; 2056 } 2057 2058 static void swap_stop(struct seq_file *swap, void *v) 2059 { 2060 mutex_unlock(&swapon_mutex); 2061 } 2062 2063 static int swap_show(struct seq_file *swap, void *v) 2064 { 2065 struct swap_info_struct *si = v; 2066 struct file *file; 2067 int len; 2068 2069 if (si == SEQ_START_TOKEN) { 2070 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); 2071 return 0; 2072 } 2073 2074 file = si->swap_file; 2075 len = seq_file_path(swap, file, " \t\n\\"); 2076 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", 2077 len 
< 40 ? 40 - len : 1, " ", 2078 S_ISBLK(file_inode(file)->i_mode) ? 2079 "partition" : "file\t", 2080 si->pages << (PAGE_SHIFT - 10), 2081 si->inuse_pages << (PAGE_SHIFT - 10), 2082 si->prio); 2083 return 0; 2084 } 2085 2086 static const struct seq_operations swaps_op = { 2087 .start = swap_start, 2088 .next = swap_next, 2089 .stop = swap_stop, 2090 .show = swap_show 2091 }; 2092 2093 static int swaps_open(struct inode *inode, struct file *file) 2094 { 2095 struct seq_file *seq; 2096 int ret; 2097 2098 ret = seq_open(file, &swaps_op); 2099 if (ret) 2100 return ret; 2101 2102 seq = file->private_data; 2103 seq->poll_event = atomic_read(&proc_poll_event); 2104 return 0; 2105 } 2106 2107 static const struct file_operations proc_swaps_operations = { 2108 .open = swaps_open, 2109 .read = seq_read, 2110 .llseek = seq_lseek, 2111 .release = seq_release, 2112 .poll = swaps_poll, 2113 }; 2114 2115 static int __init procswaps_init(void) 2116 { 2117 proc_create("swaps", 0, NULL, &proc_swaps_operations); 2118 return 0; 2119 } 2120 __initcall(procswaps_init); 2121 #endif /* CONFIG_PROC_FS */ 2122 2123 #ifdef MAX_SWAPFILES_CHECK 2124 static int __init max_swapfiles_check(void) 2125 { 2126 MAX_SWAPFILES_CHECK(); 2127 return 0; 2128 } 2129 late_initcall(max_swapfiles_check); 2130 #endif 2131 2132 static struct swap_info_struct *alloc_swap_info(void) 2133 { 2134 struct swap_info_struct *p; 2135 unsigned int type; 2136 2137 p = kzalloc(sizeof(*p), GFP_KERNEL); 2138 if (!p) 2139 return ERR_PTR(-ENOMEM); 2140 2141 spin_lock(&swap_lock); 2142 for (type = 0; type < nr_swapfiles; type++) { 2143 if (!(swap_info[type]->flags & SWP_USED)) 2144 break; 2145 } 2146 if (type >= MAX_SWAPFILES) { 2147 spin_unlock(&swap_lock); 2148 kfree(p); 2149 return ERR_PTR(-EPERM); 2150 } 2151 if (type >= nr_swapfiles) { 2152 p->type = type; 2153 swap_info[type] = p; 2154 /* 2155 * Write swap_info[type] before nr_swapfiles, in case a 2156 * racing procfs swap_start() or swap_next() is reading them. 2157 * (We never shrink nr_swapfiles, we never free this entry.) 2158 */ 2159 smp_wmb(); 2160 nr_swapfiles++; 2161 } else { 2162 kfree(p); 2163 p = swap_info[type]; 2164 /* 2165 * Do not memset this entry: a racing procfs swap_next() 2166 * would be relying on p->type to remain valid. 
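 * (swap_start() and swap_next() read nr_swapfiles and swap_info[] under
 * swapon_mutex but without swap_lock, relying only on the smp_wmb() above
 * pairing with their smp_rmb().)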
2167 */ 2168 } 2169 INIT_LIST_HEAD(&p->first_swap_extent.list); 2170 plist_node_init(&p->list, 0); 2171 plist_node_init(&p->avail_list, 0); 2172 p->flags = SWP_USED; 2173 spin_unlock(&swap_lock); 2174 spin_lock_init(&p->lock); 2175 2176 return p; 2177 } 2178 2179 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) 2180 { 2181 int error; 2182 2183 if (S_ISBLK(inode->i_mode)) { 2184 p->bdev = bdgrab(I_BDEV(inode)); 2185 error = blkdev_get(p->bdev, 2186 FMODE_READ | FMODE_WRITE | FMODE_EXCL, p); 2187 if (error < 0) { 2188 p->bdev = NULL; 2189 return error; 2190 } 2191 p->old_block_size = block_size(p->bdev); 2192 error = set_blocksize(p->bdev, PAGE_SIZE); 2193 if (error < 0) 2194 return error; 2195 p->flags |= SWP_BLKDEV; 2196 } else if (S_ISREG(inode->i_mode)) { 2197 p->bdev = inode->i_sb->s_bdev; 2198 inode_lock(inode); 2199 if (IS_SWAPFILE(inode)) 2200 return -EBUSY; 2201 } else 2202 return -EINVAL; 2203 2204 return 0; 2205 } 2206 2207 static unsigned long read_swap_header(struct swap_info_struct *p, 2208 union swap_header *swap_header, 2209 struct inode *inode) 2210 { 2211 int i; 2212 unsigned long maxpages; 2213 unsigned long swapfilepages; 2214 unsigned long last_page; 2215 2216 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { 2217 pr_err("Unable to find swap-space signature\n"); 2218 return 0; 2219 } 2220 2221 /* swap partition endianess hack... */ 2222 if (swab32(swap_header->info.version) == 1) { 2223 swab32s(&swap_header->info.version); 2224 swab32s(&swap_header->info.last_page); 2225 swab32s(&swap_header->info.nr_badpages); 2226 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 2227 return 0; 2228 for (i = 0; i < swap_header->info.nr_badpages; i++) 2229 swab32s(&swap_header->info.badpages[i]); 2230 } 2231 /* Check the swap header's sub-version */ 2232 if (swap_header->info.version != 1) { 2233 pr_warn("Unable to handle swap header version %d\n", 2234 swap_header->info.version); 2235 return 0; 2236 } 2237 2238 p->lowest_bit = 1; 2239 p->cluster_next = 1; 2240 p->cluster_nr = 0; 2241 2242 /* 2243 * Find out how many pages are allowed for a single swap 2244 * device. There are two limiting factors: 1) the number 2245 * of bits for the swap offset in the swp_entry_t type, and 2246 * 2) the number of bits in the swap pte as defined by the 2247 * different architectures. In order to find the 2248 * largest possible bit mask, a swap entry with swap type 0 2249 * and swap offset ~0UL is created, encoded to a swap pte, 2250 * decoded to a swp_entry_t again, and finally the swap 2251 * offset is extracted. This will mask all the bits from 2252 * the initial ~0UL mask that can't be encoded in either 2253 * the swp_entry_t or the architecture definition of a 2254 * swap pte. 
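 * For example, on an architecture whose swap pte leaves room for only a
 * 24-bit offset, the round-trip turns ~0UL into 0xffffff, so maxpages
 * becomes 1 << 24 (with 4K pages, a 64GB ceiling per swap area).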
2255 */ 2256 maxpages = swp_offset(pte_to_swp_entry( 2257 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; 2258 last_page = swap_header->info.last_page; 2259 if (last_page > maxpages) { 2260 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", 2261 maxpages << (PAGE_SHIFT - 10), 2262 last_page << (PAGE_SHIFT - 10)); 2263 } 2264 if (maxpages > last_page) { 2265 maxpages = last_page + 1; 2266 /* p->max is an unsigned int: don't overflow it */ 2267 if ((unsigned int)maxpages == 0) 2268 maxpages = UINT_MAX; 2269 } 2270 p->highest_bit = maxpages - 1; 2271 2272 if (!maxpages) 2273 return 0; 2274 swapfilepages = i_size_read(inode) >> PAGE_SHIFT; 2275 if (swapfilepages && maxpages > swapfilepages) { 2276 pr_warn("Swap area shorter than signature indicates\n"); 2277 return 0; 2278 } 2279 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) 2280 return 0; 2281 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 2282 return 0; 2283 2284 return maxpages; 2285 } 2286 2287 static int setup_swap_map_and_extents(struct swap_info_struct *p, 2288 union swap_header *swap_header, 2289 unsigned char *swap_map, 2290 struct swap_cluster_info *cluster_info, 2291 unsigned long maxpages, 2292 sector_t *span) 2293 { 2294 int i; 2295 unsigned int nr_good_pages; 2296 int nr_extents; 2297 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); 2298 unsigned long idx = p->cluster_next / SWAPFILE_CLUSTER; 2299 2300 nr_good_pages = maxpages - 1; /* omit header page */ 2301 2302 cluster_list_init(&p->free_clusters); 2303 cluster_list_init(&p->discard_clusters); 2304 2305 for (i = 0; i < swap_header->info.nr_badpages; i++) { 2306 unsigned int page_nr = swap_header->info.badpages[i]; 2307 if (page_nr == 0 || page_nr > swap_header->info.last_page) 2308 return -EINVAL; 2309 if (page_nr < maxpages) { 2310 swap_map[page_nr] = SWAP_MAP_BAD; 2311 nr_good_pages--; 2312 /* 2313 * Haven't marked the cluster free yet, no list 2314 * operation involved 2315 */ 2316 inc_cluster_info_page(p, cluster_info, page_nr); 2317 } 2318 } 2319 2320 /* Haven't marked the cluster free yet, no list operation involved */ 2321 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++) 2322 inc_cluster_info_page(p, cluster_info, i); 2323 2324 if (nr_good_pages) { 2325 swap_map[0] = SWAP_MAP_BAD; 2326 /* 2327 * Not mark the cluster free yet, no list 2328 * operation involved 2329 */ 2330 inc_cluster_info_page(p, cluster_info, 0); 2331 p->max = maxpages; 2332 p->pages = nr_good_pages; 2333 nr_extents = setup_swap_extents(p, span); 2334 if (nr_extents < 0) 2335 return nr_extents; 2336 nr_good_pages = p->pages; 2337 } 2338 if (!nr_good_pages) { 2339 pr_warn("Empty swap-file\n"); 2340 return -EINVAL; 2341 } 2342 2343 if (!cluster_info) 2344 return nr_extents; 2345 2346 for (i = 0; i < nr_clusters; i++) { 2347 if (!cluster_count(&cluster_info[idx])) { 2348 cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE); 2349 cluster_list_add_tail(&p->free_clusters, cluster_info, 2350 idx); 2351 } 2352 idx++; 2353 if (idx == nr_clusters) 2354 idx = 0; 2355 } 2356 return nr_extents; 2357 } 2358 2359 /* 2360 * Helper to sys_swapon determining if a given swap 2361 * backing device queue supports DISCARD operations. 
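 * Without discard support on the queue, swapon leaves SWP_DISCARDABLE,
 * SWP_AREA_DISCARD and SWP_PAGE_DISCARD clear, so no discard is ever
 * issued for the area.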
2362 */ 2363 static bool swap_discardable(struct swap_info_struct *si) 2364 { 2365 struct request_queue *q = bdev_get_queue(si->bdev); 2366 2367 if (!q || !blk_queue_discard(q)) 2368 return false; 2369 2370 return true; 2371 } 2372 2373 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) 2374 { 2375 struct swap_info_struct *p; 2376 struct filename *name; 2377 struct file *swap_file = NULL; 2378 struct address_space *mapping; 2379 int prio; 2380 int error; 2381 union swap_header *swap_header; 2382 int nr_extents; 2383 sector_t span; 2384 unsigned long maxpages; 2385 unsigned char *swap_map = NULL; 2386 struct swap_cluster_info *cluster_info = NULL; 2387 unsigned long *frontswap_map = NULL; 2388 struct page *page = NULL; 2389 struct inode *inode = NULL; 2390 2391 if (swap_flags & ~SWAP_FLAGS_VALID) 2392 return -EINVAL; 2393 2394 if (!capable(CAP_SYS_ADMIN)) 2395 return -EPERM; 2396 2397 p = alloc_swap_info(); 2398 if (IS_ERR(p)) 2399 return PTR_ERR(p); 2400 2401 INIT_WORK(&p->discard_work, swap_discard_work); 2402 2403 name = getname(specialfile); 2404 if (IS_ERR(name)) { 2405 error = PTR_ERR(name); 2406 name = NULL; 2407 goto bad_swap; 2408 } 2409 swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0); 2410 if (IS_ERR(swap_file)) { 2411 error = PTR_ERR(swap_file); 2412 swap_file = NULL; 2413 goto bad_swap; 2414 } 2415 2416 p->swap_file = swap_file; 2417 mapping = swap_file->f_mapping; 2418 inode = mapping->host; 2419 2420 /* If S_ISREG(inode->i_mode) will do inode_lock(inode); */ 2421 error = claim_swapfile(p, inode); 2422 if (unlikely(error)) 2423 goto bad_swap; 2424 2425 /* 2426 * Read the swap header. 2427 */ 2428 if (!mapping->a_ops->readpage) { 2429 error = -EINVAL; 2430 goto bad_swap; 2431 } 2432 page = read_mapping_page(mapping, 0, swap_file); 2433 if (IS_ERR(page)) { 2434 error = PTR_ERR(page); 2435 goto bad_swap; 2436 } 2437 swap_header = kmap(page); 2438 2439 maxpages = read_swap_header(p, swap_header, inode); 2440 if (unlikely(!maxpages)) { 2441 error = -EINVAL; 2442 goto bad_swap; 2443 } 2444 2445 /* OK, set up the swap map and apply the bad block list */ 2446 swap_map = vzalloc(maxpages); 2447 if (!swap_map) { 2448 error = -ENOMEM; 2449 goto bad_swap; 2450 } 2451 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) { 2452 int cpu; 2453 2454 p->flags |= SWP_SOLIDSTATE; 2455 /* 2456 * select a random position to start with to help wear leveling 2457 * SSD 2458 */ 2459 p->cluster_next = 1 + (prandom_u32() % p->highest_bit); 2460 2461 cluster_info = vzalloc(DIV_ROUND_UP(maxpages, 2462 SWAPFILE_CLUSTER) * sizeof(*cluster_info)); 2463 if (!cluster_info) { 2464 error = -ENOMEM; 2465 goto bad_swap; 2466 } 2467 p->percpu_cluster = alloc_percpu(struct percpu_cluster); 2468 if (!p->percpu_cluster) { 2469 error = -ENOMEM; 2470 goto bad_swap; 2471 } 2472 for_each_possible_cpu(cpu) { 2473 struct percpu_cluster *cluster; 2474 cluster = per_cpu_ptr(p->percpu_cluster, cpu); 2475 cluster_set_null(&cluster->index); 2476 } 2477 } 2478 2479 error = swap_cgroup_swapon(p->type, maxpages); 2480 if (error) 2481 goto bad_swap; 2482 2483 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map, 2484 cluster_info, maxpages, &span); 2485 if (unlikely(nr_extents < 0)) { 2486 error = nr_extents; 2487 goto bad_swap; 2488 } 2489 /* frontswap enabled? 
set up bit-per-page map for frontswap */ 2490 if (IS_ENABLED(CONFIG_FRONTSWAP)) 2491 frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long)); 2492 2493 if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { 2494 /* 2495 * When discard is enabled for swap with no particular 2496 * policy flagged, we set all swap discard flags here in 2497 * order to sustain backward compatibility with older 2498 * swapon(8) releases. 2499 */ 2500 p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | 2501 SWP_PAGE_DISCARD); 2502 2503 /* 2504 * By flagging sys_swapon, a sysadmin can tell us to 2505 * either do single-time area discards only, or to just 2506 * perform discards for released swap page-clusters. 2507 * Now it's time to adjust the p->flags accordingly. 2508 */ 2509 if (swap_flags & SWAP_FLAG_DISCARD_ONCE) 2510 p->flags &= ~SWP_PAGE_DISCARD; 2511 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) 2512 p->flags &= ~SWP_AREA_DISCARD; 2513 2514 /* issue a swapon-time discard if it's still required */ 2515 if (p->flags & SWP_AREA_DISCARD) { 2516 int err = discard_swap(p); 2517 if (unlikely(err)) 2518 pr_err("swapon: discard_swap(%p): %d\n", 2519 p, err); 2520 } 2521 } 2522 2523 mutex_lock(&swapon_mutex); 2524 prio = -1; 2525 if (swap_flags & SWAP_FLAG_PREFER) 2526 prio = 2527 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; 2528 enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map); 2529 2530 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n", 2531 p->pages<<(PAGE_SHIFT-10), name->name, p->prio, 2532 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), 2533 (p->flags & SWP_SOLIDSTATE) ? "SS" : "", 2534 (p->flags & SWP_DISCARDABLE) ? "D" : "", 2535 (p->flags & SWP_AREA_DISCARD) ? "s" : "", 2536 (p->flags & SWP_PAGE_DISCARD) ? "c" : "", 2537 (frontswap_map) ? 
"FS" : ""); 2538 2539 mutex_unlock(&swapon_mutex); 2540 atomic_inc(&proc_poll_event); 2541 wake_up_interruptible(&proc_poll_wait); 2542 2543 if (S_ISREG(inode->i_mode)) 2544 inode->i_flags |= S_SWAPFILE; 2545 error = 0; 2546 goto out; 2547 bad_swap: 2548 free_percpu(p->percpu_cluster); 2549 p->percpu_cluster = NULL; 2550 if (inode && S_ISBLK(inode->i_mode) && p->bdev) { 2551 set_blocksize(p->bdev, p->old_block_size); 2552 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2553 } 2554 destroy_swap_extents(p); 2555 swap_cgroup_swapoff(p->type); 2556 spin_lock(&swap_lock); 2557 p->swap_file = NULL; 2558 p->flags = 0; 2559 spin_unlock(&swap_lock); 2560 vfree(swap_map); 2561 vfree(cluster_info); 2562 if (swap_file) { 2563 if (inode && S_ISREG(inode->i_mode)) { 2564 inode_unlock(inode); 2565 inode = NULL; 2566 } 2567 filp_close(swap_file, NULL); 2568 } 2569 out: 2570 if (page && !IS_ERR(page)) { 2571 kunmap(page); 2572 put_page(page); 2573 } 2574 if (name) 2575 putname(name); 2576 if (inode && S_ISREG(inode->i_mode)) 2577 inode_unlock(inode); 2578 return error; 2579 } 2580 2581 void si_swapinfo(struct sysinfo *val) 2582 { 2583 unsigned int type; 2584 unsigned long nr_to_be_unused = 0; 2585 2586 spin_lock(&swap_lock); 2587 for (type = 0; type < nr_swapfiles; type++) { 2588 struct swap_info_struct *si = swap_info[type]; 2589 2590 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) 2591 nr_to_be_unused += si->inuse_pages; 2592 } 2593 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; 2594 val->totalswap = total_swap_pages + nr_to_be_unused; 2595 spin_unlock(&swap_lock); 2596 } 2597 2598 /* 2599 * Verify that a swap entry is valid and increment its swap map count. 2600 * 2601 * Returns error code in following case. 2602 * - success -> 0 2603 * - swp_entry is invalid -> EINVAL 2604 * - swp_entry is migration entry -> EINVAL 2605 * - swap-cache reference is requested but there is already one. -> EEXIST 2606 * - swap-cache reference is requested but the entry is not used. -> ENOENT 2607 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM 2608 */ 2609 static int __swap_duplicate(swp_entry_t entry, unsigned char usage) 2610 { 2611 struct swap_info_struct *p; 2612 unsigned long offset, type; 2613 unsigned char count; 2614 unsigned char has_cache; 2615 int err = -EINVAL; 2616 2617 if (non_swap_entry(entry)) 2618 goto out; 2619 2620 type = swp_type(entry); 2621 if (type >= nr_swapfiles) 2622 goto bad_file; 2623 p = swap_info[type]; 2624 offset = swp_offset(entry); 2625 2626 spin_lock(&p->lock); 2627 if (unlikely(offset >= p->max)) 2628 goto unlock_out; 2629 2630 count = p->swap_map[offset]; 2631 2632 /* 2633 * swapin_readahead() doesn't check if a swap entry is valid, so the 2634 * swap entry could be SWAP_MAP_BAD. Check here with lock held. 
2635 */ 2636 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { 2637 err = -ENOENT; 2638 goto unlock_out; 2639 } 2640 2641 has_cache = count & SWAP_HAS_CACHE; 2642 count &= ~SWAP_HAS_CACHE; 2643 err = 0; 2644 2645 if (usage == SWAP_HAS_CACHE) { 2646 2647 /* set SWAP_HAS_CACHE if there is no cache and entry is used */ 2648 if (!has_cache && count) 2649 has_cache = SWAP_HAS_CACHE; 2650 else if (has_cache) /* someone else added cache */ 2651 err = -EEXIST; 2652 else /* no users remaining */ 2653 err = -ENOENT; 2654 2655 } else if (count || has_cache) { 2656 2657 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) 2658 count += usage; 2659 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) 2660 err = -EINVAL; 2661 else if (swap_count_continued(p, offset, count)) 2662 count = COUNT_CONTINUED; 2663 else 2664 err = -ENOMEM; 2665 } else 2666 err = -ENOENT; /* unused swap entry */ 2667 2668 p->swap_map[offset] = count | has_cache; 2669 2670 unlock_out: 2671 spin_unlock(&p->lock); 2672 out: 2673 return err; 2674 2675 bad_file: 2676 pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val); 2677 goto out; 2678 } 2679 2680 /* 2681 * Help swapoff by noting that swap entry belongs to shmem/tmpfs 2682 * (in which case its reference count is never incremented). 2683 */ 2684 void swap_shmem_alloc(swp_entry_t entry) 2685 { 2686 __swap_duplicate(entry, SWAP_MAP_SHMEM); 2687 } 2688 2689 /* 2690 * Increase reference count of swap entry by 1. 2691 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required 2692 * but could not be atomically allocated. Returns 0, just as if it succeeded, 2693 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which 2694 * might occur if a page table entry has got corrupted. 2695 */ 2696 int swap_duplicate(swp_entry_t entry) 2697 { 2698 int err = 0; 2699 2700 while (!err && __swap_duplicate(entry, 1) == -ENOMEM) 2701 err = add_swap_count_continuation(entry, GFP_ATOMIC); 2702 return err; 2703 } 2704 2705 /* 2706 * @entry: swap entry for which we allocate swap cache. 2707 * 2708 * Called when allocating swap cache for existing swap entry, 2709 * This can return error codes. Returns 0 at success. 2710 * -EBUSY means there is a swap cache. 2711 * Note: return code is different from swap_duplicate(). 2712 */ 2713 int swapcache_prepare(swp_entry_t entry) 2714 { 2715 return __swap_duplicate(entry, SWAP_HAS_CACHE); 2716 } 2717 2718 struct swap_info_struct *page_swap_info(struct page *page) 2719 { 2720 swp_entry_t swap = { .val = page_private(page) }; 2721 return swap_info[swp_type(swap)]; 2722 } 2723 2724 /* 2725 * out-of-line __page_file_ methods to avoid include hell. 2726 */ 2727 struct address_space *__page_file_mapping(struct page *page) 2728 { 2729 VM_BUG_ON_PAGE(!PageSwapCache(page), page); 2730 return page_swap_info(page)->swap_file->f_mapping; 2731 } 2732 EXPORT_SYMBOL_GPL(__page_file_mapping); 2733 2734 pgoff_t __page_file_index(struct page *page) 2735 { 2736 swp_entry_t swap = { .val = page_private(page) }; 2737 VM_BUG_ON_PAGE(!PageSwapCache(page), page); 2738 return swp_offset(swap); 2739 } 2740 EXPORT_SYMBOL_GPL(__page_file_index); 2741 2742 /* 2743 * add_swap_count_continuation - called when a swap count is duplicated 2744 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's 2745 * page of the original vmalloc'ed swap_map, to hold the continuation count 2746 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called 2747 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. 
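 * Continuation pages are chained on the lru list of the vmalloc'ed swap_map
 * page they extend; page_private() of that head page is set to SWP_CONTINUED,
 * and si->flags gets SWP_CONTINUED, so swapoff knows to free the chain.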
2748 * 2749 * These continuation pages are seldom referenced: the common paths all work 2750 * on the original swap_map, only referring to a continuation page when the 2751 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. 2752 * 2753 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding 2754 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) 2755 * can be called after dropping locks. 2756 */ 2757 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) 2758 { 2759 struct swap_info_struct *si; 2760 struct page *head; 2761 struct page *page; 2762 struct page *list_page; 2763 pgoff_t offset; 2764 unsigned char count; 2765 2766 /* 2767 * When debugging, it's easier to use __GFP_ZERO here; but it's better 2768 * for latency not to zero a page while GFP_ATOMIC and holding locks. 2769 */ 2770 page = alloc_page(gfp_mask | __GFP_HIGHMEM); 2771 2772 si = swap_info_get(entry); 2773 if (!si) { 2774 /* 2775 * An acceptable race has occurred since the failing 2776 * __swap_duplicate(): the swap entry has been freed, 2777 * perhaps even the whole swap_map cleared for swapoff. 2778 */ 2779 goto outer; 2780 } 2781 2782 offset = swp_offset(entry); 2783 count = si->swap_map[offset] & ~SWAP_HAS_CACHE; 2784 2785 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { 2786 /* 2787 * The higher the swap count, the more likely it is that tasks 2788 * will race to add swap count continuation: we need to avoid 2789 * over-provisioning. 2790 */ 2791 goto out; 2792 } 2793 2794 if (!page) { 2795 spin_unlock(&si->lock); 2796 return -ENOMEM; 2797 } 2798 2799 /* 2800 * We are fortunate that although vmalloc_to_page uses pte_offset_map, 2801 * no architecture is using highmem pages for kernel page tables: so it 2802 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps. 2803 */ 2804 head = vmalloc_to_page(si->swap_map + offset); 2805 offset &= ~PAGE_MASK; 2806 2807 /* 2808 * Page allocation does not initialize the page's lru field, 2809 * but it does always reset its private field. 2810 */ 2811 if (!page_private(head)) { 2812 BUG_ON(count & COUNT_CONTINUED); 2813 INIT_LIST_HEAD(&head->lru); 2814 set_page_private(head, SWP_CONTINUED); 2815 si->flags |= SWP_CONTINUED; 2816 } 2817 2818 list_for_each_entry(list_page, &head->lru, lru) { 2819 unsigned char *map; 2820 2821 /* 2822 * If the previous map said no continuation, but we've found 2823 * a continuation page, free our allocation and use this one. 2824 */ 2825 if (!(count & COUNT_CONTINUED)) 2826 goto out; 2827 2828 map = kmap_atomic(list_page) + offset; 2829 count = *map; 2830 kunmap_atomic(map); 2831 2832 /* 2833 * If this continuation count now has some space in it, 2834 * free our allocation and use this one. 2835 */ 2836 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) 2837 goto out; 2838 } 2839 2840 list_add_tail(&page->lru, &head->lru); 2841 page = NULL; /* now it's attached, don't free it */ 2842 out: 2843 spin_unlock(&si->lock); 2844 outer: 2845 if (page) 2846 __free_page(page); 2847 return 0; 2848 } 2849 2850 /* 2851 * swap_count_continued - when the original swap_map count is incremented 2852 * from SWAP_MAP_MAX, check if there is already a continuation page to carry 2853 * into, carry if so, or else fail until a new continuation page is allocated; 2854 * when the original swap_map count is decremented from 0 with continuation, 2855 * borrow from the continuation and report whether it still holds more. 
2856 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock. 2857 */ 2858 static bool swap_count_continued(struct swap_info_struct *si, 2859 pgoff_t offset, unsigned char count) 2860 { 2861 struct page *head; 2862 struct page *page; 2863 unsigned char *map; 2864 2865 head = vmalloc_to_page(si->swap_map + offset); 2866 if (page_private(head) != SWP_CONTINUED) { 2867 BUG_ON(count & COUNT_CONTINUED); 2868 return false; /* need to add count continuation */ 2869 } 2870 2871 offset &= ~PAGE_MASK; 2872 page = list_entry(head->lru.next, struct page, lru); 2873 map = kmap_atomic(page) + offset; 2874 2875 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ 2876 goto init_map; /* jump over SWAP_CONT_MAX checks */ 2877 2878 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ 2879 /* 2880 * Think of how you add 1 to 999 2881 */ 2882 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { 2883 kunmap_atomic(map); 2884 page = list_entry(page->lru.next, struct page, lru); 2885 BUG_ON(page == head); 2886 map = kmap_atomic(page) + offset; 2887 } 2888 if (*map == SWAP_CONT_MAX) { 2889 kunmap_atomic(map); 2890 page = list_entry(page->lru.next, struct page, lru); 2891 if (page == head) 2892 return false; /* add count continuation */ 2893 map = kmap_atomic(page) + offset; 2894 init_map: *map = 0; /* we didn't zero the page */ 2895 } 2896 *map += 1; 2897 kunmap_atomic(map); 2898 page = list_entry(page->lru.prev, struct page, lru); 2899 while (page != head) { 2900 map = kmap_atomic(page) + offset; 2901 *map = COUNT_CONTINUED; 2902 kunmap_atomic(map); 2903 page = list_entry(page->lru.prev, struct page, lru); 2904 } 2905 return true; /* incremented */ 2906 2907 } else { /* decrementing */ 2908 /* 2909 * Think of how you subtract 1 from 1000 2910 */ 2911 BUG_ON(count != COUNT_CONTINUED); 2912 while (*map == COUNT_CONTINUED) { 2913 kunmap_atomic(map); 2914 page = list_entry(page->lru.next, struct page, lru); 2915 BUG_ON(page == head); 2916 map = kmap_atomic(page) + offset; 2917 } 2918 BUG_ON(*map == 0); 2919 *map -= 1; 2920 if (*map == 0) 2921 count = 0; 2922 kunmap_atomic(map); 2923 page = list_entry(page->lru.prev, struct page, lru); 2924 while (page != head) { 2925 map = kmap_atomic(page) + offset; 2926 *map = SWAP_CONT_MAX | count; 2927 count = COUNT_CONTINUED; 2928 kunmap_atomic(map); 2929 page = list_entry(page->lru.prev, struct page, lru); 2930 } 2931 return count == COUNT_CONTINUED; 2932 } 2933 } 2934 2935 /* 2936 * free_swap_count_continuations - swapoff free all the continuation pages 2937 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. 2938 */ 2939 static void free_swap_count_continuations(struct swap_info_struct *si) 2940 { 2941 pgoff_t offset; 2942 2943 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { 2944 struct page *head; 2945 head = vmalloc_to_page(si->swap_map + offset); 2946 if (page_private(head)) { 2947 struct page *page, *next; 2948 2949 list_for_each_entry_safe(page, next, &head->lru, lru) { 2950 list_del(&page->lru); 2951 __free_page(page); 2952 } 2953 } 2954 } 2955 } 2956