/*
 * linux/mm/swapfile.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shm.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>

static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
long nr_swap_pages;
long total_swap_pages;
static int swap_overflow;
static int least_priority;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

static struct swap_list_t swap_list = {-1, -1};

static struct swap_info_struct swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

/*
 * We need this because the bdev->unplug_fn can sleep and we cannot
 * hold swap_lock while calling the unplug_fn. And swap_lock
 * cannot be turned into a mutex.
 */
static DECLARE_RWSEM(swap_unplug_sem);

void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
{
	swp_entry_t entry;

	down_read(&swap_unplug_sem);
	entry.val = page_private(page);
	if (PageSwapCache(page)) {
		struct block_device *bdev = swap_info[swp_type(entry)].bdev;
		struct backing_dev_info *bdi;

		/*
		 * If the page is removed from swapcache from under us (with a
		 * racy try_to_unuse/swapoff) we need an additional reference
		 * count to avoid reading garbage from page_private(page) above.
		 * If the WARN_ON triggers during a swapoff it may be the race
		 * condition and it's harmless. However if it triggers without
		 * swapoff it signals a problem.
		 */
		WARN_ON(page_count(page) <= 1);

		bdi = bdev->bd_inode->i_mapping->backing_dev_info;
		blk_run_backing_dev(bdi, page);
	}
	up_read(&swap_unplug_sem);
}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
	struct swap_extent *se;
	int err = 0;

	list_for_each_entry(se, &si->extent_list, list) {
		sector_t start_block = se->start_block << (PAGE_SHIFT - 9);
		pgoff_t nr_blocks = se->nr_pages << (PAGE_SHIFT - 9);

		if (se->start_page == 0) {
			/* Do not discard the swap header page! */
			start_block += 1 << (PAGE_SHIFT - 9);
			nr_blocks -= 1 << (PAGE_SHIFT - 9);
			if (!nr_blocks)
				continue;
		}

		err = blkdev_issue_discard(si->bdev, start_block,
						nr_blocks, GFP_KERNEL);
		if (err)
			break;

		cond_resched();
	}
	return err;		/* That will often be -EOPNOTSUPP */
}

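/*
 * Note: blkdev_issue_discard() works in 512-byte sector units, hence the
 * "<< (PAGE_SHIFT - 9)" conversions above and below.  Assuming 4K pages
 * (PAGE_SHIFT == 12), each page spans 8 sectors, so e.g. a 256-page
 * cluster is passed down as 256 << 3 == 2048 sectors.
 */
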
/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
				 pgoff_t start_page, pgoff_t nr_pages)
{
	struct swap_extent *se = si->curr_swap_extent;
	int found_extent = 0;

	while (nr_pages) {
		struct list_head *lh;

		if (se->start_page <= start_page &&
		    start_page < se->start_page + se->nr_pages) {
			pgoff_t offset = start_page - se->start_page;
			sector_t start_block = se->start_block + offset;
			pgoff_t nr_blocks = se->nr_pages - offset;

			if (nr_blocks > nr_pages)
				nr_blocks = nr_pages;
			start_page += nr_blocks;
			nr_pages -= nr_blocks;

			if (!found_extent++)
				si->curr_swap_extent = se;

			start_block <<= PAGE_SHIFT - 9;
			nr_blocks <<= PAGE_SHIFT - 9;
			if (blkdev_issue_discard(si->bdev, start_block,
							nr_blocks, GFP_NOIO))
				break;
		}

		lh = se->list.next;
		if (lh == &si->extent_list)
			lh = lh->next;
		se = list_entry(lh, struct swap_extent, list);
	}
}

static int wait_for_discard(void *word)
{
	schedule();
	return 0;
}

#define SWAPFILE_CLUSTER	256
#define LATENCY_LIMIT		256

static inline unsigned long scan_swap_map(struct swap_info_struct *si)
{
	unsigned long offset;
	unsigned long scan_base;
	unsigned long last_in_cluster = 0;
	int latency_ration = LATENCY_LIMIT;
	int found_free_cluster = 0;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster. This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages. -- sct
	 * But we do now try to find an empty cluster. -Andrea
	 * And we let swap pages go all over an SSD partition. Hugh
	 */

	si->flags += SWP_SCANNING;
	scan_base = offset = si->cluster_next;

	if (unlikely(!si->cluster_nr--)) {
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;
			goto checks;
		}
		if (si->flags & SWP_DISCARDABLE) {
			/*
			 * Start range check on racing allocations, in case
			 * they overlap the cluster we eventually decide on
			 * (we scan without swap_lock to allow preemption).
			 * It's hardly conceivable that cluster_nr could be
			 * wrapped during our scan, but don't depend on it.
			 */
			if (si->lowest_alloc)
				goto checks;
			si->lowest_alloc = si->max;
			si->highest_alloc = 0;
		}
		spin_unlock(&swap_lock);

		/*
		 * If seek is expensive, start searching for new cluster from
		 * start of partition, to minimize the span of allocated swap.
		 * But if seek is cheap, search from our current position, so
		 * that swap is allocated from all over the partition: if the
		 * Flash Translation Layer only remaps within limited zones,
		 * we don't want to wear out the first zone too quickly.
		 */
		if (!(si->flags & SWP_SOLIDSTATE))
			scan_base = offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= si->highest_bit; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&swap_lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				found_free_cluster = 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster < scan_base; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&swap_lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				found_free_cluster = 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = scan_base;
		spin_lock(&swap_lock);
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
		si->lowest_alloc = 0;
	}

checks:
	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (offset > si->highest_bit)
		scan_base = offset = si->lowest_bit;
	if (si->swap_map[offset])
		goto scan;

	if (offset == si->lowest_bit)
		si->lowest_bit++;
	if (offset == si->highest_bit)
		si->highest_bit--;
	si->inuse_pages++;
	if (si->inuse_pages == si->pages) {
		si->lowest_bit = si->max;
		si->highest_bit = 0;
	}
	si->swap_map[offset] = 1;
	si->cluster_next = offset + 1;
	si->flags -= SWP_SCANNING;

	if (si->lowest_alloc) {
		/*
		 * Only set when SWP_DISCARDABLE, and there's a scan
		 * for a free cluster in progress or just completed.
		 */
		if (found_free_cluster) {
			/*
			 * To optimize wear-levelling, discard the
			 * old data of the cluster, taking care not to
			 * discard any of its pages that have already
			 * been allocated by racing tasks (offset has
			 * already stepped over any at the beginning).
			 */
			if (offset < si->highest_alloc &&
			    si->lowest_alloc <= last_in_cluster)
				last_in_cluster = si->lowest_alloc - 1;
			si->flags |= SWP_DISCARDING;
			spin_unlock(&swap_lock);

			if (offset < last_in_cluster)
				discard_swap_cluster(si, offset,
					last_in_cluster - offset + 1);

			spin_lock(&swap_lock);
			si->lowest_alloc = 0;
			si->flags &= ~SWP_DISCARDING;

			smp_mb();	/* wake_up_bit advises this */
			wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));

		} else if (si->flags & SWP_DISCARDING) {
			/*
			 * Delay using pages allocated by racing tasks
			 * until the whole discard has been issued. We
			 * could defer that delay until swap_writepage,
			 * but it's easier to keep this self-contained.
			 */
			spin_unlock(&swap_lock);
			wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
				wait_for_discard, TASK_UNINTERRUPTIBLE);
			spin_lock(&swap_lock);
		} else {
			/*
			 * Note pages allocated by racing tasks while
			 * scan for a free cluster is in progress, so
			 * that its final discard can exclude them.
			 */
			if (offset < si->lowest_alloc)
				si->lowest_alloc = offset;
			if (offset > si->highest_alloc)
				si->highest_alloc = offset;
		}
	}
	return offset;

scan:
	spin_unlock(&swap_lock);
	while (++offset <= si->highest_bit) {
		if (!si->swap_map[offset]) {
			spin_lock(&swap_lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
	}
	offset = si->lowest_bit;
	while (++offset < scan_base) {
		if (!si->swap_map[offset]) {
			spin_lock(&swap_lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
	}
	spin_lock(&swap_lock);

no_page:
	si->flags -= SWP_SCANNING;
	return 0;
}

swp_entry_t get_swap_page(void)
{
	struct swap_info_struct *si;
	pgoff_t offset;
	int type, next;
	int wrapped = 0;

	spin_lock(&swap_lock);
	if (nr_swap_pages <= 0)
		goto noswap;
	nr_swap_pages--;

	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
		si = swap_info + type;
		next = si->next;
		if (next < 0 ||
		    (!wrapped && si->prio != swap_info[next].prio)) {
			next = swap_list.head;
			wrapped++;
		}

		if (!si->highest_bit)
			continue;
		if (!(si->flags & SWP_WRITEOK))
			continue;

		swap_list.next = next;
		offset = scan_swap_map(si);
		if (offset) {
			spin_unlock(&swap_lock);
			return swp_entry(type, offset);
		}
		next = swap_list.next;
	}

	nr_swap_pages++;
noswap:
	spin_unlock(&swap_lock);
	return (swp_entry_t) {0};
}

swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si;
	pgoff_t offset;

	spin_lock(&swap_lock);
	si = swap_info + type;
	if (si->flags & SWP_WRITEOK) {
		nr_swap_pages--;
		offset = scan_swap_map(si);
		if (offset) {
			spin_unlock(&swap_lock);
			return swp_entry(type, offset);
		}
		nr_swap_pages++;
	}
	spin_unlock(&swap_lock);
	return (swp_entry_t) {0};
}

static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct * p;
	unsigned long offset, type;

	if (!entry.val)
		goto out;
	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_nofile;
	p = & swap_info[type];
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_free;
	spin_lock(&swap_lock);
	return p;

bad_free:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
	goto out;
bad_offset:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}

static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
	int count = p->swap_map[offset];

	if (count < SWAP_MAP_MAX) {
		count--;
		p->swap_map[offset] = count;
		if (!count) {
			if (offset < p->lowest_bit)
				p->lowest_bit = offset;
			if (offset > p->highest_bit)
				p->highest_bit = offset;
			if (p->prio > swap_info[swap_list.next].prio)
				swap_list.next = p - swap_info;
			nr_swap_pages++;
			p->inuse_pages--;
		}
	}
	return count;
}

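/*
 * A brief note on swap_map counting, as used above and below: each
 * swap_map[offset] holds the number of references to that swap slot
 * (one per pte pointing at it, plus one if the page is in swap cache).
 * SWAP_MAP_MAX pins a slot whose count would otherwise overflow, and
 * SWAP_MAP_BAD marks slots that must never be allocated.
 */
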
/*
 * Caller has made sure that the swapdevice corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct * p;

	p = swap_info_get(entry);
	if (p) {
		swap_entry_free(p, swp_offset(entry));
		spin_unlock(&swap_lock);
	}
}

/*
 * How many references to page are currently swapped out?
 */
static inline int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	swp_entry_t entry;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (p) {
		/* Subtract the 1 for the swap cache itself */
		count = p->swap_map[swp_offset(entry)] - 1;
		spin_unlock(&swap_lock);
	}
	return count;
}

/*
 * We can write to an anon page without COW if there are no other references
 * to it. And as a side-effect, free up its swap: because the old content
 * on disk will never be read, and seeking back there to write new content
 * later would only waste time away from clustering.
 */
int reuse_swap_page(struct page *page)
{
	int count;

	VM_BUG_ON(!PageLocked(page));
	count = page_mapcount(page);
	if (count <= 1 && PageSwapCache(page)) {
		count += page_swapcount(page);
		if (count == 1 && !PageWriteback(page)) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
	}
	return count == 1;
}

/*
 * If swap is getting full, or if there are no more mappings of this page,
 * then try_to_free_swap is called to free its swap space.
 */
int try_to_free_swap(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_swapcount(page))
		return 0;

	delete_from_swap_cache(page);
	SetPageDirty(page);
	return 1;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	if (is_migration_entry(entry))
		return;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, swp_offset(entry)) == 1) {
			page = find_get_page(&swapper_space, entry.val);
			if (page && !trylock_page(page)) {
				page_cache_release(page);
				page = NULL;
			}
		}
		spin_unlock(&swap_lock);
	}
	if (page) {
		/*
		 * Not mapped elsewhere, or swap space full? Free it!
		 * Also recheck PageSwapCache now page is locked (above).
		 */
		if (PageSwapCache(page) && !PageWriteback(page) &&
				(!page_mapped(page) || vm_swap_full())) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_HIBERNATION
/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
	struct block_device *bdev = NULL;
	int i;

	if (device)
		bdev = bdget(device);

	spin_lock(&swap_lock);
	for (i = 0; i < nr_swapfiles; i++) {
		struct swap_info_struct *sis = swap_info + i;

		if (!(sis->flags & SWP_WRITEOK))
			continue;

		if (!bdev) {
			if (bdev_p)
				*bdev_p = sis->bdev;

			spin_unlock(&swap_lock);
			return i;
		}
		if (bdev == sis->bdev) {
			struct swap_extent *se;

			se = list_entry(sis->extent_list.next,
					struct swap_extent, list);
			if (se->start_block == offset) {
				if (bdev_p)
					*bdev_p = sis->bdev;

				spin_unlock(&swap_lock);
				bdput(bdev);
				return i;
			}
		}
	}
	spin_unlock(&swap_lock);
	if (bdev)
		bdput(bdev);

	return -ENODEV;
}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
	unsigned int n = 0;

	if (type < nr_swapfiles) {
		spin_lock(&swap_lock);
		if (swap_info[type].flags & SWP_WRITEOK) {
			n = swap_info[type].pages;
			if (free)
				n -= swap_info[type].inuse_pages;
		}
		spin_unlock(&swap_lock);
	}
	return n;
}
#endif

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
		ret = -ENOMEM;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
		if (ret > 0)
			mem_cgroup_uncharge_page(page);
		ret = 0;
		goto out;
	}

	inc_mm_counter(vma->vm_mm, anon_rss);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	page_add_anon_rmap(page, vma, addr);
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return ret;
}

static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pte_t swp_pte = swp_entry_to_pte(entry);
	pte_t *pte;
	int ret = 0;

	/*
	 * We don't actually need pte lock while scanning for swp_pte: since
	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
	 * page table while we're scanning; though it could get zapped, and on
	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
	 * of unmatched parts which look like swp_pte, so unuse_pte must
	 * recheck under pte lock. Scanning without pte lock lets it be
	 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
	 */
	pte = pte_offset_map(pmd, addr);
	do {
		/*
		 * swapoff spends a _lot_ of time in this loop!
		 * Test inline before going to call unuse_pte.
		 */
		if (unlikely(pte_same(*pte, swp_pte))) {
			pte_unmap(pte);
			ret = unuse_pte(vma, pmd, addr, entry, page);
			if (ret)
				goto out;
			pte = pte_offset_map(pmd, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
out:
	return ret;
}

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pmd_t *pmd;
	unsigned long next;
	int ret;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pud_t *pud;
	unsigned long next;
	int ret;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int unuse_vma(struct vm_area_struct *vma,
				swp_entry_t entry, struct page *page)
{
	pgd_t *pgd;
	unsigned long addr, end, next;
	int ret;

	if (page->mapping) {
		addr = page_address_in_vma(page, vma);
		if (addr == -EFAULT)
			return 0;
		else
			end = addr + PAGE_SIZE;
	} else {
		addr = vma->vm_start;
		end = vma->vm_end;
	}

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;
	int ret = 0;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_inactive_list is unlikely to unmap
		 * its ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
			break;
	}
	up_read(&mm->mmap_sem);
	return (ret < 0)? ret: 0;
}

/*
 * Scan swap_map from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
					unsigned int prev)
{
	unsigned int max = si->max;
	unsigned int i = prev;
	int count;

	/*
	 * No need for swap_lock here: we're just looking
	 * for whether an entry is in use, not modifying it; false
	 * hits are okay, and sys_swapoff() has already prevented new
	 * allocations from this area (while holding swap_lock).
	 */
	for (;;) {
		if (++i >= max) {
			if (!prev) {
				i = 0;
				break;
			}
			/*
			 * No entries in use at top of swap_map,
			 * loop back to start and recheck there.
			 */
			max = prev + 1;
			prev = 0;
			i = 1;
		}
		count = si->swap_map[i];
		if (count && count != SWAP_MAP_BAD)
			break;
	}
	return i;
}

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it. All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
	struct swap_info_struct * si = &swap_info[type];
	struct mm_struct *start_mm;
	unsigned short *swap_map;
	unsigned short swcount;
	struct page *page;
	swp_entry_t entry;
	unsigned int i = 0;
	int retval = 0;
	int reset_overflow = 0;
	int shmem;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry). Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering, which clusters forked mms
	 * together, child after parent. If we race with dup_mmap(), we
	 * prefer to resolve parent before child, lest we miss entries
	 * duplicated after we scanned child: using last mm would invert
	 * that. Though it's only a serious concern when an overflowed
	 * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone. Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * there are races when an instance of an entry might be missed.
	 */
	while ((i = find_next_to_unuse(si, i)) != 0) {
		if (signal_pending(current)) {
			retval = -EINTR;
			break;
		}

		/*
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one. Otherwise, get a clean
		 * page and read the swap into it.
		 */
		swap_map = &si->swap_map[i];
		entry = swp_entry(type, i);
		page = read_swap_cache_async(entry,
					GFP_HIGHUSER_MOVABLE, NULL, 0);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			if (!*swap_map)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page. When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry. This
		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page_locked(page);
		wait_on_page_writeback(page);
		lock_page(page);
		wait_on_page_writeback(page);

		/*
		 * Remove all references to entry.
		 * Whenever we reach init_mm, there's no address space
		 * to search, but use it as a reminder to search shmem.
		 */
		shmem = 0;
		swcount = *swap_map;
		if (swcount > 1) {
			if (start_mm == &init_mm)
				shmem = shmem_unuse(entry, page);
			else
				retval = unuse_mm(start_mm, entry, page);
		}
		if (*swap_map > 1) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *prev_mm = start_mm;
			struct mm_struct *mm;

			atomic_inc(&new_start_mm->mm_users);
			atomic_inc(&prev_mm->mm_users);
			spin_lock(&mmlist_lock);
			while (*swap_map > 1 && !retval && !shmem &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				if (!atomic_inc_not_zero(&mm->mm_users))
					continue;
				spin_unlock(&mmlist_lock);
				mmput(prev_mm);
				prev_mm = mm;

				cond_resched();

				swcount = *swap_map;
				if (swcount <= 1)
					;
				else if (mm == &init_mm) {
					set_start_mm = 1;
					shmem = shmem_unuse(entry, page);
				} else
					retval = unuse_mm(mm, entry, page);
				if (set_start_mm && *swap_map < swcount) {
					mmput(new_start_mm);
					atomic_inc(&mm->mm_users);
					new_start_mm = mm;
					set_start_mm = 0;
				}
				spin_lock(&mmlist_lock);
			}
			spin_unlock(&mmlist_lock);
			mmput(prev_mm);
			mmput(start_mm);
			start_mm = new_start_mm;
		}
		if (shmem) {
			/* page has already been unlocked and released */
			if (shmem > 0)
				continue;
			retval = shmem;
			break;
		}
		if (retval) {
			unlock_page(page);
			page_cache_release(page);
			break;
		}

		/*
		 * How could swap count reach 0x7fff when the maximum
		 * pid is 0x7fff, and there's no way to repeat a swap
		 * page within an mm (except in shmem, where it's the
		 * shared object which takes the reference count)?
		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
		 *
		 * If that's wrong, then we should worry more about
		 * exit_mmap() and do_munmap() cases described above:
		 * we might be resetting SWAP_MAP_MAX too early here.
		 * We know "Undead"s can happen, they're okay, so don't
		 * report them; but do report if we reset SWAP_MAP_MAX.
		 */
		if (*swap_map == SWAP_MAP_MAX) {
			spin_lock(&swap_lock);
			*swap_map = 1;
			spin_unlock(&swap_lock);
			reset_overflow = 1;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_unmap could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile. So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page. Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 */
		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
			struct writeback_control wbc = {
				.sync_mode = WB_SYNC_NONE,
			};

			swap_writepage(page, &wbc);
			lock_page(page);
			wait_on_page_writeback(page);
		}

		/*
		 * It is conceivable that a racing task removed this page from
		 * swap cache just before we acquired the page lock at the top,
		 * or while we dropped it in unuse_mm(). The page might even
		 * be back in swap cache on another swap area: that we must not
		 * delete, since it may not have been written out to swap yet.
		 */
		if (PageSwapCache(page) &&
		    likely(page_private(page) == entry.val))
			delete_from_swap_cache(page);

		/*
		 * So that we could skip searching mms once the swap count went
		 * to 1, we did not mark any present ptes as dirty: we must
		 * mark the page dirty so shrink_page_list will preserve it.
		 */
		SetPageDirty(page);
		unlock_page(page);
		page_cache_release(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.
		 */
		cond_resched();
	}

	mmput(start_mm);
	if (reset_overflow) {
		printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
		swap_overflow = 0;
	}
	return retval;
}

/*
 * After a successful try_to_unuse, if no swap is now in use, we know
 * we can empty the mmlist. swap_lock must be held on entry and exit.
 * Note that mmlist_lock nests inside swap_lock, and an mm must be
 * added to the mmlist just after page_duplicate - before would be racy.
 */
static void drain_mmlist(void)
{
	struct list_head *p, *next;
	unsigned int i;

	for (i = 0; i < nr_swapfiles; i++)
		if (swap_info[i].inuse_pages)
			return;
	spin_lock(&mmlist_lock);
	list_for_each_safe(p, next, &init_mm.mmlist)
		list_del_init(p);
	spin_unlock(&mmlist_lock);
}

/*
 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
 * corresponds to page offset `offset'.
 */
sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
{
	struct swap_extent *se = sis->curr_swap_extent;
	struct swap_extent *start_se = se;

	for ( ; ; ) {
		struct list_head *lh;

		if (se->start_page <= offset &&
				offset < (se->start_page + se->nr_pages)) {
			return se->start_block + (offset - se->start_page);
		}
		lh = se->list.next;
		if (lh == &sis->extent_list)
			lh = lh->next;
		se = list_entry(lh, struct swap_extent, list);
		sis->curr_swap_extent = se;
		BUG_ON(se == start_se);		/* It *must* be present */
	}
}

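/*
 * Worked example (illustrative): given an extent with start_page == 1,
 * nr_pages == 100 and start_block == 5000, page offset 37 falls inside it,
 * and map_swap_page() returns 5000 + (37 - 1) == 5036.  An offset outside
 * every extent would trip the BUG_ON above.
 */
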
#ifdef CONFIG_HIBERNATION
/*
 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 * corresponding to given index in swap_info (swap type).
 */
sector_t swapdev_block(int swap_type, pgoff_t offset)
{
	struct swap_info_struct *sis;

	if (swap_type >= nr_swapfiles)
		return 0;

	sis = swap_info + swap_type;
	return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free all of a swapdev's extent information
 */
static void destroy_swap_extents(struct swap_info_struct *sis)
{
	while (!list_empty(&sis->extent_list)) {
		struct swap_extent *se;

		se = list_entry(sis->extent_list.next,
				struct swap_extent, list);
		list_del(&se->list);
		kfree(se);
	}
}

/*
 * Add a block range (and the corresponding page range) into this swapdev's
 * extent list. The extent list is kept sorted in page order.
 *
 * This function rather assumes that it is called in ascending page order.
 */
static int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block)
{
	struct swap_extent *se;
	struct swap_extent *new_se;
	struct list_head *lh;

	lh = sis->extent_list.prev;	/* The highest page extent */
	if (lh != &sis->extent_list) {
		se = list_entry(lh, struct swap_extent, list);
		BUG_ON(se->start_page + se->nr_pages != start_page);
		if (se->start_block + se->nr_pages == start_block) {
			/* Merge it */
			se->nr_pages += nr_pages;
			return 0;
		}
	}

	/*
	 * No merge. Insert a new extent, preserving ordering.
	 */
	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
	if (new_se == NULL)
		return -ENOMEM;
	new_se->start_page = start_page;
	new_se->nr_pages = nr_pages;
	new_se->start_block = start_block;

	list_add_tail(&new_se->list, &sis->extent_list);
	return 1;
}

/*
 * A `swap extent' is a simple thing which maps a contiguous range of pages
 * onto a contiguous range of disk blocks. An ordered list of swap extents
 * is built at swapon time and is then used at swap_writepage/swap_readpage
 * time for locating where on disk a page belongs.
 *
 * If the swapfile is an S_ISBLK block device, a single extent is installed.
 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
 * swap files identically.
 *
 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
 * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
 * swapfiles are handled *identically* after swapon time.
 *
 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
 * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If
 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
 * requirements, they are simply tossed out - we will never use those blocks
 * for swapping.
 *
 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
 * which will scribble on the fs.
 *
 * The amount of disk space which a single swap extent represents varies.
 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
 * extents in the list. To avoid much list walking, we cache the previous
 * search location in `curr_swap_extent', and start new searches from there.
 * This is extremely effective. The average number of iterations in
 * map_swap_page() has been measured at about 0.3 per page. - akpm.
 */
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
	struct inode *inode;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	inode = sis->swap_file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		ret = add_swap_extent(sis, 0, sis->max, 0);
		*span = sis->pages;
		goto done;
	}

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
done:
	sis->curr_swap_extent = list_entry(sis->extent_list.prev,
					struct swap_extent, list);
	goto out;
bad_bmap:
	printk(KERN_ERR "swapon: swapfile has holes\n");
	ret = -EINVAL;
out:
	return ret;
}

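/*
 * Illustrative note: because setup_swap_extents() adds one page at a time
 * and add_swap_extent() merges a run whose start_block continues the
 * previous extent, a swapfile laid out contiguously on disk still collapses
 * into a single extent, however many pages it covers; only an actual
 * discontiguity in the block mapping starts a new extent.
 */
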
#if 0	/* We don't need this yet */
#include <linux/backing-dev.h>
int page_queue_congested(struct page *page)
{
	struct backing_dev_info *bdi;

	VM_BUG_ON(!PageLocked(page));	/* It pins the swap_info_struct */

	if (PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		struct swap_info_struct *sis;

		sis = get_swap_info_struct(swp_type(entry));
		bdi = sis->bdev->bd_inode->i_mapping->backing_dev_info;
	} else
		bdi = page->mapping->backing_dev_info;
	return bdi_write_congested(bdi);
}
#endif

asmlinkage long sys_swapoff(const char __user * specialfile)
{
	struct swap_info_struct * p = NULL;
	unsigned short *swap_map;
	struct file *swap_file, *victim;
	struct address_space *mapping;
	struct inode *inode;
	char * pathname;
	int i, type, prev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	pathname = getname(specialfile);
	err = PTR_ERR(pathname);
	if (IS_ERR(pathname))
		goto out;

	victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0);
	putname(pathname);
	err = PTR_ERR(victim);
	if (IS_ERR(victim))
		goto out;

	mapping = victim->f_mapping;
	prev = -1;
	spin_lock(&swap_lock);
	for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
		p = swap_info + type;
		if (p->flags & SWP_WRITEOK) {
			if (p->swap_file->f_mapping == mapping)
				break;
		}
		prev = type;
	}
	if (type < 0) {
		err = -EINVAL;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (!security_vm_enough_memory(p->pages))
		vm_unacct_memory(p->pages);
	else {
		err = -ENOMEM;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (prev < 0) {
		swap_list.head = p->next;
	} else {
		swap_info[prev].next = p->next;
	}
	if (type == swap_list.next) {
		/* just pick something that's safe... */
		swap_list.next = swap_list.head;
	}
	if (p->prio < 0) {
		for (i = p->next; i >= 0; i = swap_info[i].next)
			swap_info[i].prio = p->prio--;
		least_priority++;
	}
	nr_swap_pages -= p->pages;
	total_swap_pages -= p->pages;
	p->flags &= ~SWP_WRITEOK;
	spin_unlock(&swap_lock);

	current->flags |= PF_SWAPOFF;
	err = try_to_unuse(type);
	current->flags &= ~PF_SWAPOFF;

	if (err) {
		/* re-insert swap space back into swap_list */
		spin_lock(&swap_lock);
		if (p->prio < 0)
			p->prio = --least_priority;
		prev = -1;
		for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
			if (p->prio >= swap_info[i].prio)
				break;
			prev = i;
		}
		p->next = i;
		if (prev < 0)
			swap_list.head = swap_list.next = p - swap_info;
		else
			swap_info[prev].next = p - swap_info;
		nr_swap_pages += p->pages;
		total_swap_pages += p->pages;
		p->flags |= SWP_WRITEOK;
		spin_unlock(&swap_lock);
		goto out_dput;
	}

	/* wait for any unplug function to finish */
	down_write(&swap_unplug_sem);
	up_write(&swap_unplug_sem);

	destroy_swap_extents(p);
	mutex_lock(&swapon_mutex);
	spin_lock(&swap_lock);
	drain_mmlist();

	/* wait for anyone still in scan_swap_map */
	p->highest_bit = 0;		/* cuts scans short */
	while (p->flags >= SWP_SCANNING) {
		spin_unlock(&swap_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&swap_lock);
	}

	swap_file = p->swap_file;
	p->swap_file = NULL;
	p->max = 0;
	swap_map = p->swap_map;
	p->swap_map = NULL;
	p->flags = 0;
	spin_unlock(&swap_lock);
	mutex_unlock(&swapon_mutex);
	vfree(swap_map);
	inode = mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct block_device *bdev = I_BDEV(inode);
		set_blocksize(bdev, p->old_block_size);
		bd_release(bdev);
	} else {
		mutex_lock(&inode->i_mutex);
		inode->i_flags &= ~S_SWAPFILE;
		mutex_unlock(&inode->i_mutex);
	}
	filp_close(swap_file, NULL);
	err = 0;

out_dput:
	filp_close(victim, NULL);
out:
	return err;
}

#ifdef CONFIG_PROC_FS
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
	struct swap_info_struct *ptr = swap_info;
	int i;
	loff_t l = *pos;

	mutex_lock(&swapon_mutex);

	if (!l)
		return SEQ_START_TOKEN;

	for (i = 0; i < nr_swapfiles; i++, ptr++) {
		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
			continue;
		if (!--l)
			return ptr;
	}

	return NULL;
}

static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
	struct swap_info_struct *ptr;
	struct swap_info_struct *endptr = swap_info + nr_swapfiles;

	if (v == SEQ_START_TOKEN)
		ptr = swap_info;
	else {
		ptr = v;
		ptr++;
	}

	for (; ptr < endptr; ptr++) {
		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
			continue;
		++*pos;
		return ptr;
	}

	return NULL;
}

static void swap_stop(struct seq_file *swap, void *v)
{
	mutex_unlock(&swapon_mutex);
}

static int swap_show(struct seq_file *swap, void *v)
{
	struct swap_info_struct *ptr = v;
	struct file *file;
	int len;

	if (ptr == SEQ_START_TOKEN) {
		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
		return 0;
	}

	file = ptr->swap_file;
	len = seq_path(swap, &file->f_path, " \t\n\\");
	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
			len < 40 ? 40 - len : 1, " ",
			S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
				"partition" : "file\t",
			ptr->pages << (PAGE_SHIFT - 10),
			ptr->inuse_pages << (PAGE_SHIFT - 10),
			ptr->prio);
	return 0;
}

static const struct seq_operations swaps_op = {
	.start =	swap_start,
	.next =		swap_next,
	.stop =		swap_stop,
	.show =		swap_show
};

static int swaps_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &swaps_op);
}

static const struct file_operations proc_swaps_operations = {
	.open		= swaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init procswaps_init(void)
{
	proc_create("swaps", 0, NULL, &proc_swaps_operations);
	return 0;
}
__initcall(procswaps_init);
#endif /* CONFIG_PROC_FS */

#ifdef MAX_SWAPFILES_CHECK
static int __init max_swapfiles_check(void)
{
	MAX_SWAPFILES_CHECK();
	return 0;
}
late_initcall(max_swapfiles_check);
#endif

/*
 * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
 *
 * The swapon system call
 */
asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
{
	struct swap_info_struct * p;
	char *name = NULL;
	struct block_device *bdev = NULL;
	struct file *swap_file = NULL;
	struct address_space *mapping;
	unsigned int type;
	int i, prev;
	int error;
	union swap_header *swap_header = NULL;
	unsigned int nr_good_pages = 0;
	int nr_extents = 0;
	sector_t span;
	unsigned long maxpages = 1;
	unsigned long swapfilepages;
	unsigned short *swap_map = NULL;
	struct page *page = NULL;
	struct inode *inode = NULL;
	int did_down = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	spin_lock(&swap_lock);
	p = swap_info;
	for (type = 0 ; type < nr_swapfiles ; type++,p++)
		if (!(p->flags & SWP_USED))
			break;
	error = -EPERM;
	if (type >= MAX_SWAPFILES) {
		spin_unlock(&swap_lock);
		goto out;
	}
	if (type >= nr_swapfiles)
		nr_swapfiles = type+1;
	memset(p, 0, sizeof(*p));
	INIT_LIST_HEAD(&p->extent_list);
	p->flags = SWP_USED;
	p->next = -1;
	spin_unlock(&swap_lock);
	name = getname(specialfile);
	error = PTR_ERR(name);
	if (IS_ERR(name)) {
		name = NULL;
		goto bad_swap_2;
	}
	swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
	error = PTR_ERR(swap_file);
	if (IS_ERR(swap_file)) {
		swap_file = NULL;
		goto bad_swap_2;
	}

	p->swap_file = swap_file;
	mapping = swap_file->f_mapping;
	inode = mapping->host;

	error = -EBUSY;
	for (i = 0; i < nr_swapfiles; i++) {
		struct swap_info_struct *q = &swap_info[i];

		if (i == type || !q->swap_file)
			continue;
		if (mapping == q->swap_file->f_mapping)
			goto bad_swap;
	}

	error = -EINVAL;
	if (S_ISBLK(inode->i_mode)) {
		bdev = I_BDEV(inode);
		error = bd_claim(bdev, sys_swapon);
		if (error < 0) {
			bdev = NULL;
			error = -EINVAL;
			goto bad_swap;
		}
		p->old_block_size = block_size(bdev);
		error = set_blocksize(bdev, PAGE_SIZE);
		if (error < 0)
			goto bad_swap;
		p->bdev = bdev;
	} else if (S_ISREG(inode->i_mode)) {
		p->bdev = inode->i_sb->s_bdev;
		mutex_lock(&inode->i_mutex);
		did_down = 1;
		if (IS_SWAPFILE(inode)) {
			error = -EBUSY;
			goto bad_swap;
		}
	} else {
		goto bad_swap;
	}

	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;

	/*
	 * Read the swap header.
	 */
	if (!mapping->a_ops->readpage) {
		error = -EINVAL;
		goto bad_swap;
	}
	page = read_mapping_page(mapping, 0, swap_file);
	if (IS_ERR(page)) {
		error = PTR_ERR(page);
		goto bad_swap;
	}
	swap_header = kmap(page);

	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
		printk(KERN_ERR "Unable to find swap-space signature\n");
		error = -EINVAL;
		goto bad_swap;
	}

	/* swap partition endianness hack... */
	if (swab32(swap_header->info.version) == 1) {
		swab32s(&swap_header->info.version);
		swab32s(&swap_header->info.last_page);
		swab32s(&swap_header->info.nr_badpages);
		for (i = 0; i < swap_header->info.nr_badpages; i++)
			swab32s(&swap_header->info.badpages[i]);
	}
	/* Check the swap header's sub-version */
	if (swap_header->info.version != 1) {
		printk(KERN_WARNING
		       "Unable to handle swap header version %d\n",
		       swap_header->info.version);
		error = -EINVAL;
		goto bad_swap;
	}

	p->lowest_bit = 1;
	p->cluster_next = 1;

	/*
	 * Find out how many pages are allowed for a single swap
	 * device. There are two limiting factors: 1) the number of
	 * bits for the swap offset in the swp_entry_t type and
	 * 2) the number of bits in a swap pte as defined by
	 * the different architectures. In order to find the
	 * largest possible bit mask a swap entry with swap type 0
	 * and swap offset ~0UL is created, encoded to a swap pte,
	 * decoded to a swp_entry_t again and finally the swap
	 * offset is extracted. This will mask all the bits from
	 * the initial ~0UL mask that can't be encoded in either
	 * the swp_entry_t or the architecture definition of a
	 * swap pte.
	 */
	maxpages = swp_offset(pte_to_swp_entry(
			swp_entry_to_pte(swp_entry(0, ~0UL)))) - 1;
	if (maxpages > swap_header->info.last_page)
		maxpages = swap_header->info.last_page;
	p->highest_bit = maxpages - 1;

	error = -EINVAL;
	if (!maxpages)
		goto bad_swap;
	if (swapfilepages && maxpages > swapfilepages) {
		printk(KERN_WARNING
		       "Swap area shorter than signature indicates\n");
		goto bad_swap;
	}
	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
		goto bad_swap;
	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
		goto bad_swap;

	/* OK, set up the swap map and apply the bad block list */
	swap_map = vmalloc(maxpages * sizeof(short));
	if (!swap_map) {
		error = -ENOMEM;
		goto bad_swap;
	}

	memset(swap_map, 0, maxpages * sizeof(short));
	for (i = 0; i < swap_header->info.nr_badpages; i++) {
		int page_nr = swap_header->info.badpages[i];
		if (page_nr <= 0 || page_nr >= swap_header->info.last_page) {
			error = -EINVAL;
			goto bad_swap;
		}
		swap_map[page_nr] = SWAP_MAP_BAD;
	}
	nr_good_pages = swap_header->info.last_page -
			swap_header->info.nr_badpages -
			1 /* header page */;

	if (nr_good_pages) {
		swap_map[0] = SWAP_MAP_BAD;
		p->max = maxpages;
		p->pages = nr_good_pages;
		nr_extents = setup_swap_extents(p, &span);
		if (nr_extents < 0) {
			error = nr_extents;
			goto bad_swap;
		}
		nr_good_pages = p->pages;
	}
	if (!nr_good_pages) {
		printk(KERN_WARNING "Empty swap-file\n");
		error = -EINVAL;
		goto bad_swap;
	}

	if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
		p->flags |= SWP_SOLIDSTATE;
		srandom32((u32)get_seconds());
		p->cluster_next = 1 + (random32() % p->highest_bit);
	}
	if (discard_swap(p) == 0)
		p->flags |= SWP_DISCARDABLE;

	mutex_lock(&swapon_mutex);
	spin_lock(&swap_lock);
	if (swap_flags & SWAP_FLAG_PREFER)
		p->prio =
		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
	else
		p->prio = --least_priority;
	p->swap_map = swap_map;
	p->flags |= SWP_WRITEOK;
	nr_swap_pages += nr_good_pages;
	total_swap_pages += nr_good_pages;

	printk(KERN_INFO "Adding %uk swap on %s. "
			"Priority:%d extents:%d across:%lluk %s%s\n",
		nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
		(p->flags & SWP_DISCARDABLE) ? "D" : "");

	/* insert swap space into swap_list: */
	prev = -1;
	for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
		if (p->prio >= swap_info[i].prio) {
			break;
		}
		prev = i;
	}
	p->next = i;
	if (prev < 0) {
		swap_list.head = swap_list.next = p - swap_info;
	} else {
		swap_info[prev].next = p - swap_info;
	}
	spin_unlock(&swap_lock);
	mutex_unlock(&swapon_mutex);
	error = 0;
	goto out;
bad_swap:
	if (bdev) {
		set_blocksize(bdev, p->old_block_size);
		bd_release(bdev);
	}
	destroy_swap_extents(p);
bad_swap_2:
	spin_lock(&swap_lock);
	p->swap_file = NULL;
	p->flags = 0;
	spin_unlock(&swap_lock);
	vfree(swap_map);
	if (swap_file)
		filp_close(swap_file, NULL);
out:
	if (page && !IS_ERR(page)) {
		kunmap(page);
		page_cache_release(page);
	}
	if (name)
		putname(name);
	if (did_down) {
		if (!error)
			inode->i_flags |= S_SWAPFILE;
		mutex_unlock(&inode->i_mutex);
	}
	return error;
}

void si_swapinfo(struct sysinfo *val)
{
	unsigned int i;
	unsigned long nr_to_be_unused = 0;

	spin_lock(&swap_lock);
	for (i = 0; i < nr_swapfiles; i++) {
		if (!(swap_info[i].flags & SWP_USED) ||
		     (swap_info[i].flags & SWP_WRITEOK))
			continue;
		nr_to_be_unused += swap_info[i].inuse_pages;
	}
	val->freeswap = nr_swap_pages + nr_to_be_unused;
	val->totalswap = total_swap_pages + nr_to_be_unused;
	spin_unlock(&swap_lock);
}

/*
 * Verify that a swap entry is valid and increment its swap map count.
 *
 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
 * "permanent", but will be reclaimed by the next swapoff.
 */
int swap_duplicate(swp_entry_t entry)
{
	struct swap_info_struct * p;
	unsigned long offset, type;
	int result = 0;

	if (is_migration_entry(entry))
		return 1;

	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_file;
	p = type + swap_info;
	offset = swp_offset(entry);

	spin_lock(&swap_lock);
	if (offset < p->max && p->swap_map[offset]) {
		if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
			p->swap_map[offset]++;
			result = 1;
		} else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
			if (swap_overflow++ < 5)
				printk(KERN_WARNING "swap_dup: swap entry overflow\n");
			p->swap_map[offset] = SWAP_MAP_MAX;
			result = 1;
		}
	}
	spin_unlock(&swap_lock);
out:
	return result;

bad_file:
	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
	goto out;
}

struct swap_info_struct *
get_swap_info_struct(unsigned type)
{
	return &swap_info[type];
}

/*
 * swap_lock prevents swap_map being freed. Don't grab an extra
 * reference on the swaphandle, it doesn't matter if it becomes unused.
 */
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
	struct swap_info_struct *si;
	int our_page_cluster = page_cluster;
	pgoff_t target, toff;
	pgoff_t base, end;
	int nr_pages = 0;

	if (!our_page_cluster)	/* no readahead */
		return 0;

	si = &swap_info[swp_type(entry)];
	target = swp_offset(entry);
	base = (target >> our_page_cluster) << our_page_cluster;
	end = base + (1 << our_page_cluster);
	if (!base)		/* first page is swap header */
		base++;

	spin_lock(&swap_lock);
	if (end > si->max)	/* don't go beyond end of map */
		end = si->max;

	/* Count contiguous allocated slots above our target */
	for (toff = target; ++toff < end; nr_pages++) {
		/* Don't read in free or bad pages */
		if (!si->swap_map[toff])
			break;
		if (si->swap_map[toff] == SWAP_MAP_BAD)
			break;
	}
	/* Count contiguous allocated slots below our target */
	for (toff = target; --toff >= base; nr_pages++) {
		/* Don't read in free or bad pages */
		if (!si->swap_map[toff])
			break;
		if (si->swap_map[toff] == SWAP_MAP_BAD)
			break;
	}
	spin_unlock(&swap_lock);

	/*
	 * Indicate starting offset, and return number of pages to get:
	 * if only 1, say 0, since there's then no readahead to be done.
	 */
	*offset = ++toff;
	return nr_pages? ++nr_pages: 0;
}
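
/*
 * Worked example for valid_swaphandles() (illustrative, assuming
 * page_cluster == 3): for a target offset of 37, base = (37 >> 3) << 3 == 32
 * and end == 40, so at most the 8-slot window 32..39 around the target is
 * considered, trimmed further by any free or SWAP_MAP_BAD slot encountered.
 */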