11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/mm/swapfile.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 51da177e4SLinus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie 61da177e4SLinus Torvalds */ 71da177e4SLinus Torvalds 81da177e4SLinus Torvalds #include <linux/mm.h> 91da177e4SLinus Torvalds #include <linux/hugetlb.h> 101da177e4SLinus Torvalds #include <linux/mman.h> 111da177e4SLinus Torvalds #include <linux/slab.h> 121da177e4SLinus Torvalds #include <linux/kernel_stat.h> 131da177e4SLinus Torvalds #include <linux/swap.h> 141da177e4SLinus Torvalds #include <linux/vmalloc.h> 151da177e4SLinus Torvalds #include <linux/pagemap.h> 161da177e4SLinus Torvalds #include <linux/namei.h> 17072441e2SHugh Dickins #include <linux/shmem_fs.h> 181da177e4SLinus Torvalds #include <linux/blkdev.h> 1920137a49SHugh Dickins #include <linux/random.h> 201da177e4SLinus Torvalds #include <linux/writeback.h> 211da177e4SLinus Torvalds #include <linux/proc_fs.h> 221da177e4SLinus Torvalds #include <linux/seq_file.h> 231da177e4SLinus Torvalds #include <linux/init.h> 245ad64688SHugh Dickins #include <linux/ksm.h> 251da177e4SLinus Torvalds #include <linux/rmap.h> 261da177e4SLinus Torvalds #include <linux/security.h> 271da177e4SLinus Torvalds #include <linux/backing-dev.h> 28fc0abb14SIngo Molnar #include <linux/mutex.h> 29c59ede7bSRandy.Dunlap #include <linux/capability.h> 301da177e4SLinus Torvalds #include <linux/syscalls.h> 318a9f3ccdSBalbir Singh #include <linux/memcontrol.h> 3266d7dd51SKay Sievers #include <linux/poll.h> 3372788c38SDavid Rientjes #include <linux/oom.h> 3438b5faf4SDan Magenheimer #include <linux/frontswap.h> 3538b5faf4SDan Magenheimer #include <linux/swapfile.h> 36f981c595SMel Gorman #include <linux/export.h> 371da177e4SLinus Torvalds 381da177e4SLinus Torvalds #include <asm/pgtable.h> 391da177e4SLinus Torvalds #include <asm/tlbflush.h> 401da177e4SLinus Torvalds #include <linux/swapops.h> 4127a7faa0SKAMEZAWA Hiroyuki #include <linux/page_cgroup.h> 421da177e4SLinus Torvalds 43570a335bSHugh Dickins static bool swap_count_continued(struct swap_info_struct *, pgoff_t, 44570a335bSHugh Dickins unsigned char); 45570a335bSHugh Dickins static void free_swap_count_continuations(struct swap_info_struct *); 46d4906e1aSLee Schermerhorn static sector_t map_swap_entry(swp_entry_t, struct block_device**); 47570a335bSHugh Dickins 4838b5faf4SDan Magenheimer DEFINE_SPINLOCK(swap_lock); 497c363b8cSAdrian Bunk static unsigned int nr_swapfiles; 50ec8acf20SShaohua Li atomic_long_t nr_swap_pages; 51ec8acf20SShaohua Li /* protected with swap_lock. 
reading in vm_swap_full() doesn't need lock */ 521da177e4SLinus Torvalds long total_swap_pages; 5378ecba08SHugh Dickins static int least_priority; 54ec8acf20SShaohua Li static atomic_t highest_priority_index = ATOMIC_INIT(-1); 551da177e4SLinus Torvalds 561da177e4SLinus Torvalds static const char Bad_file[] = "Bad swap file entry "; 571da177e4SLinus Torvalds static const char Unused_file[] = "Unused swap file entry "; 581da177e4SLinus Torvalds static const char Bad_offset[] = "Bad swap offset entry "; 591da177e4SLinus Torvalds static const char Unused_offset[] = "Unused swap offset entry "; 601da177e4SLinus Torvalds 6138b5faf4SDan Magenheimer struct swap_list_t swap_list = {-1, -1}; 621da177e4SLinus Torvalds 6338b5faf4SDan Magenheimer struct swap_info_struct *swap_info[MAX_SWAPFILES]; 641da177e4SLinus Torvalds 65fc0abb14SIngo Molnar static DEFINE_MUTEX(swapon_mutex); 661da177e4SLinus Torvalds 6766d7dd51SKay Sievers static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait); 6866d7dd51SKay Sievers /* Activity counter to indicate that a swapon or swapoff has occurred */ 6966d7dd51SKay Sievers static atomic_t proc_poll_event = ATOMIC_INIT(0); 7066d7dd51SKay Sievers 718d69aaeeSHugh Dickins static inline unsigned char swap_count(unsigned char ent) 72355cfa73SKAMEZAWA Hiroyuki { 73570a335bSHugh Dickins return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */ 74355cfa73SKAMEZAWA Hiroyuki } 75355cfa73SKAMEZAWA Hiroyuki 76efa90a98SHugh Dickins /* returns 1 if swap entry is freed */ 77c9e44410SKAMEZAWA Hiroyuki static int 78c9e44410SKAMEZAWA Hiroyuki __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset) 79c9e44410SKAMEZAWA Hiroyuki { 80efa90a98SHugh Dickins swp_entry_t entry = swp_entry(si->type, offset); 81c9e44410SKAMEZAWA Hiroyuki struct page *page; 82c9e44410SKAMEZAWA Hiroyuki int ret = 0; 83c9e44410SKAMEZAWA Hiroyuki 8433806f06SShaohua Li page = find_get_page(swap_address_space(entry), entry.val); 85c9e44410SKAMEZAWA Hiroyuki if (!page) 86c9e44410SKAMEZAWA Hiroyuki return 0; 87c9e44410SKAMEZAWA Hiroyuki /* 88c9e44410SKAMEZAWA Hiroyuki * This function is called from scan_swap_map() and it's called 89c9e44410SKAMEZAWA Hiroyuki * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here. 90c9e44410SKAMEZAWA Hiroyuki * We have to use trylock for avoiding deadlock. This is a special 91c9e44410SKAMEZAWA Hiroyuki * case and you should use try_to_free_swap() with explicit lock_page() 92c9e44410SKAMEZAWA Hiroyuki * in usual operations. 93c9e44410SKAMEZAWA Hiroyuki */ 94c9e44410SKAMEZAWA Hiroyuki if (trylock_page(page)) { 95c9e44410SKAMEZAWA Hiroyuki ret = try_to_free_swap(page); 96c9e44410SKAMEZAWA Hiroyuki unlock_page(page); 97c9e44410SKAMEZAWA Hiroyuki } 98c9e44410SKAMEZAWA Hiroyuki page_cache_release(page); 99c9e44410SKAMEZAWA Hiroyuki return ret; 100c9e44410SKAMEZAWA Hiroyuki } 101355cfa73SKAMEZAWA Hiroyuki 1021da177e4SLinus Torvalds /* 1036a6ba831SHugh Dickins * swapon tell device that all the old swap contents can be discarded, 1046a6ba831SHugh Dickins * to allow the swap device to optimize its wear-levelling. 1056a6ba831SHugh Dickins */ 1066a6ba831SHugh Dickins static int discard_swap(struct swap_info_struct *si) 1076a6ba831SHugh Dickins { 1086a6ba831SHugh Dickins struct swap_extent *se; 1099625a5f2SHugh Dickins sector_t start_block; 1109625a5f2SHugh Dickins sector_t nr_blocks; 1116a6ba831SHugh Dickins int err = 0; 1126a6ba831SHugh Dickins 1136a6ba831SHugh Dickins /* Do not discard the swap header page! 
*/ 1149625a5f2SHugh Dickins se = &si->first_swap_extent; 1159625a5f2SHugh Dickins start_block = (se->start_block + 1) << (PAGE_SHIFT - 9); 1169625a5f2SHugh Dickins nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); 1179625a5f2SHugh Dickins if (nr_blocks) { 1189625a5f2SHugh Dickins err = blkdev_issue_discard(si->bdev, start_block, 119dd3932edSChristoph Hellwig nr_blocks, GFP_KERNEL, 0); 1209625a5f2SHugh Dickins if (err) 1219625a5f2SHugh Dickins return err; 1229625a5f2SHugh Dickins cond_resched(); 1236a6ba831SHugh Dickins } 1246a6ba831SHugh Dickins 1259625a5f2SHugh Dickins list_for_each_entry(se, &si->first_swap_extent.list, list) { 1269625a5f2SHugh Dickins start_block = se->start_block << (PAGE_SHIFT - 9); 1279625a5f2SHugh Dickins nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); 1289625a5f2SHugh Dickins 1296a6ba831SHugh Dickins err = blkdev_issue_discard(si->bdev, start_block, 130dd3932edSChristoph Hellwig nr_blocks, GFP_KERNEL, 0); 1316a6ba831SHugh Dickins if (err) 1326a6ba831SHugh Dickins break; 1336a6ba831SHugh Dickins 1346a6ba831SHugh Dickins cond_resched(); 1356a6ba831SHugh Dickins } 1366a6ba831SHugh Dickins return err; /* That will often be -EOPNOTSUPP */ 1376a6ba831SHugh Dickins } 1386a6ba831SHugh Dickins 1397992fde7SHugh Dickins /* 1407992fde7SHugh Dickins * swap allocation tell device that a cluster of swap can now be discarded, 1417992fde7SHugh Dickins * to allow the swap device to optimize its wear-levelling. 1427992fde7SHugh Dickins */ 1437992fde7SHugh Dickins static void discard_swap_cluster(struct swap_info_struct *si, 1447992fde7SHugh Dickins pgoff_t start_page, pgoff_t nr_pages) 1457992fde7SHugh Dickins { 1467992fde7SHugh Dickins struct swap_extent *se = si->curr_swap_extent; 1477992fde7SHugh Dickins int found_extent = 0; 1487992fde7SHugh Dickins 1497992fde7SHugh Dickins while (nr_pages) { 1507992fde7SHugh Dickins struct list_head *lh; 1517992fde7SHugh Dickins 1527992fde7SHugh Dickins if (se->start_page <= start_page && 1537992fde7SHugh Dickins start_page < se->start_page + se->nr_pages) { 1547992fde7SHugh Dickins pgoff_t offset = start_page - se->start_page; 1557992fde7SHugh Dickins sector_t start_block = se->start_block + offset; 156858a2990SHugh Dickins sector_t nr_blocks = se->nr_pages - offset; 1577992fde7SHugh Dickins 1587992fde7SHugh Dickins if (nr_blocks > nr_pages) 1597992fde7SHugh Dickins nr_blocks = nr_pages; 1607992fde7SHugh Dickins start_page += nr_blocks; 1617992fde7SHugh Dickins nr_pages -= nr_blocks; 1627992fde7SHugh Dickins 1637992fde7SHugh Dickins if (!found_extent++) 1647992fde7SHugh Dickins si->curr_swap_extent = se; 1657992fde7SHugh Dickins 1667992fde7SHugh Dickins start_block <<= PAGE_SHIFT - 9; 1677992fde7SHugh Dickins nr_blocks <<= PAGE_SHIFT - 9; 1687992fde7SHugh Dickins if (blkdev_issue_discard(si->bdev, start_block, 169dd3932edSChristoph Hellwig nr_blocks, GFP_NOIO, 0)) 1707992fde7SHugh Dickins break; 1717992fde7SHugh Dickins } 1727992fde7SHugh Dickins 1737992fde7SHugh Dickins lh = se->list.next; 1747992fde7SHugh Dickins se = list_entry(lh, struct swap_extent, list); 1757992fde7SHugh Dickins } 1767992fde7SHugh Dickins } 1777992fde7SHugh Dickins 1787992fde7SHugh Dickins static int wait_for_discard(void *word) 1797992fde7SHugh Dickins { 1807992fde7SHugh Dickins schedule(); 1817992fde7SHugh Dickins return 0; 1827992fde7SHugh Dickins } 1837992fde7SHugh Dickins 184048c27fdSHugh Dickins #define SWAPFILE_CLUSTER 256 185048c27fdSHugh Dickins #define LATENCY_LIMIT 256 186048c27fdSHugh Dickins 18724b8ff7cSCesar Eduardo Barros static 
unsigned long scan_swap_map(struct swap_info_struct *si, 1888d69aaeeSHugh Dickins unsigned char usage) 1891da177e4SLinus Torvalds { 190ebebbbe9SHugh Dickins unsigned long offset; 191c60aa176SHugh Dickins unsigned long scan_base; 1927992fde7SHugh Dickins unsigned long last_in_cluster = 0; 193048c27fdSHugh Dickins int latency_ration = LATENCY_LIMIT; 1947992fde7SHugh Dickins int found_free_cluster = 0; 1951da177e4SLinus Torvalds 1967dfad418SHugh Dickins /* 1977dfad418SHugh Dickins * We try to cluster swap pages by allocating them sequentially 1987dfad418SHugh Dickins * in swap. Once we've allocated SWAPFILE_CLUSTER pages this 1997dfad418SHugh Dickins * way, however, we resort to first-free allocation, starting 2007dfad418SHugh Dickins * a new cluster. This prevents us from scattering swap pages 2017dfad418SHugh Dickins * all over the entire swap partition, so that we reduce 2027dfad418SHugh Dickins * overall disk seek times between swap pages. -- sct 2037dfad418SHugh Dickins * But we do now try to find an empty cluster. -Andrea 204c60aa176SHugh Dickins * And we let swap pages go all over an SSD partition. Hugh 2051da177e4SLinus Torvalds */ 2067dfad418SHugh Dickins 20752b7efdbSHugh Dickins si->flags += SWP_SCANNING; 208c60aa176SHugh Dickins scan_base = offset = si->cluster_next; 209ebebbbe9SHugh Dickins 210ebebbbe9SHugh Dickins if (unlikely(!si->cluster_nr--)) { 211ebebbbe9SHugh Dickins if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { 2127dfad418SHugh Dickins si->cluster_nr = SWAPFILE_CLUSTER - 1; 213ebebbbe9SHugh Dickins goto checks; 214ebebbbe9SHugh Dickins } 215dcf6b7ddSRafael Aquini if (si->flags & SWP_PAGE_DISCARD) { 2167992fde7SHugh Dickins /* 2177992fde7SHugh Dickins * Start range check on racing allocations, in case 2187992fde7SHugh Dickins * they overlap the cluster we eventually decide on 2197992fde7SHugh Dickins * (we scan without swap_lock to allow preemption). 2207992fde7SHugh Dickins * It's hardly conceivable that cluster_nr could be 2217992fde7SHugh Dickins * wrapped during our scan, but don't depend on it. 2227992fde7SHugh Dickins */ 2237992fde7SHugh Dickins if (si->lowest_alloc) 2247992fde7SHugh Dickins goto checks; 2257992fde7SHugh Dickins si->lowest_alloc = si->max; 2267992fde7SHugh Dickins si->highest_alloc = 0; 2277992fde7SHugh Dickins } 228ec8acf20SShaohua Li spin_unlock(&si->lock); 2297dfad418SHugh Dickins 230c60aa176SHugh Dickins /* 231c60aa176SHugh Dickins * If seek is expensive, start searching for new cluster from 232c60aa176SHugh Dickins * start of partition, to minimize the span of allocated swap. 233c60aa176SHugh Dickins * But if seek is cheap, search from our current position, so 234c60aa176SHugh Dickins * that swap is allocated from all over the partition: if the 235c60aa176SHugh Dickins * Flash Translation Layer only remaps within limited zones, 236c60aa176SHugh Dickins * we don't want to wear out the first zone too quickly. 
237c60aa176SHugh Dickins */ 238c60aa176SHugh Dickins if (!(si->flags & SWP_SOLIDSTATE)) 239c60aa176SHugh Dickins scan_base = offset = si->lowest_bit; 2407dfad418SHugh Dickins last_in_cluster = offset + SWAPFILE_CLUSTER - 1; 2417dfad418SHugh Dickins 2427dfad418SHugh Dickins /* Locate the first empty (unaligned) cluster */ 2437dfad418SHugh Dickins for (; last_in_cluster <= si->highest_bit; offset++) { 2441da177e4SLinus Torvalds if (si->swap_map[offset]) 2457dfad418SHugh Dickins last_in_cluster = offset + SWAPFILE_CLUSTER; 2467dfad418SHugh Dickins else if (offset == last_in_cluster) { 247ec8acf20SShaohua Li spin_lock(&si->lock); 248ebebbbe9SHugh Dickins offset -= SWAPFILE_CLUSTER - 1; 249ebebbbe9SHugh Dickins si->cluster_next = offset; 250ebebbbe9SHugh Dickins si->cluster_nr = SWAPFILE_CLUSTER - 1; 2517992fde7SHugh Dickins found_free_cluster = 1; 252ebebbbe9SHugh Dickins goto checks; 2537dfad418SHugh Dickins } 254048c27fdSHugh Dickins if (unlikely(--latency_ration < 0)) { 255048c27fdSHugh Dickins cond_resched(); 256048c27fdSHugh Dickins latency_ration = LATENCY_LIMIT; 257048c27fdSHugh Dickins } 2587dfad418SHugh Dickins } 259ebebbbe9SHugh Dickins 260ebebbbe9SHugh Dickins offset = si->lowest_bit; 261c60aa176SHugh Dickins last_in_cluster = offset + SWAPFILE_CLUSTER - 1; 262c60aa176SHugh Dickins 263c60aa176SHugh Dickins /* Locate the first empty (unaligned) cluster */ 264c60aa176SHugh Dickins for (; last_in_cluster < scan_base; offset++) { 265c60aa176SHugh Dickins if (si->swap_map[offset]) 266c60aa176SHugh Dickins last_in_cluster = offset + SWAPFILE_CLUSTER; 267c60aa176SHugh Dickins else if (offset == last_in_cluster) { 268ec8acf20SShaohua Li spin_lock(&si->lock); 269c60aa176SHugh Dickins offset -= SWAPFILE_CLUSTER - 1; 270c60aa176SHugh Dickins si->cluster_next = offset; 271c60aa176SHugh Dickins si->cluster_nr = SWAPFILE_CLUSTER - 1; 272c60aa176SHugh Dickins found_free_cluster = 1; 273c60aa176SHugh Dickins goto checks; 274c60aa176SHugh Dickins } 275c60aa176SHugh Dickins if (unlikely(--latency_ration < 0)) { 276c60aa176SHugh Dickins cond_resched(); 277c60aa176SHugh Dickins latency_ration = LATENCY_LIMIT; 278c60aa176SHugh Dickins } 279c60aa176SHugh Dickins } 280c60aa176SHugh Dickins 281c60aa176SHugh Dickins offset = scan_base; 282ec8acf20SShaohua Li spin_lock(&si->lock); 283ebebbbe9SHugh Dickins si->cluster_nr = SWAPFILE_CLUSTER - 1; 2847992fde7SHugh Dickins si->lowest_alloc = 0; 2857dfad418SHugh Dickins } 2867dfad418SHugh Dickins 287ebebbbe9SHugh Dickins checks: 288ebebbbe9SHugh Dickins if (!(si->flags & SWP_WRITEOK)) 28952b7efdbSHugh Dickins goto no_page; 2907dfad418SHugh Dickins if (!si->highest_bit) 2917dfad418SHugh Dickins goto no_page; 292ebebbbe9SHugh Dickins if (offset > si->highest_bit) 293c60aa176SHugh Dickins scan_base = offset = si->lowest_bit; 294c9e44410SKAMEZAWA Hiroyuki 295b73d7fceSHugh Dickins /* reuse swap entry of cache-only swap if not busy. 
*/ 296b73d7fceSHugh Dickins if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { 297c9e44410SKAMEZAWA Hiroyuki int swap_was_freed; 298ec8acf20SShaohua Li spin_unlock(&si->lock); 299c9e44410SKAMEZAWA Hiroyuki swap_was_freed = __try_to_reclaim_swap(si, offset); 300ec8acf20SShaohua Li spin_lock(&si->lock); 301c9e44410SKAMEZAWA Hiroyuki /* entry was freed successfully, try to use this again */ 302c9e44410SKAMEZAWA Hiroyuki if (swap_was_freed) 303c9e44410SKAMEZAWA Hiroyuki goto checks; 304c9e44410SKAMEZAWA Hiroyuki goto scan; /* check next one */ 305c9e44410SKAMEZAWA Hiroyuki } 306c9e44410SKAMEZAWA Hiroyuki 307ebebbbe9SHugh Dickins if (si->swap_map[offset]) 308ebebbbe9SHugh Dickins goto scan; 309ebebbbe9SHugh Dickins 31052b7efdbSHugh Dickins if (offset == si->lowest_bit) 3111da177e4SLinus Torvalds si->lowest_bit++; 3121da177e4SLinus Torvalds if (offset == si->highest_bit) 3131da177e4SLinus Torvalds si->highest_bit--; 3147dfad418SHugh Dickins si->inuse_pages++; 3157dfad418SHugh Dickins if (si->inuse_pages == si->pages) { 3161da177e4SLinus Torvalds si->lowest_bit = si->max; 3171da177e4SLinus Torvalds si->highest_bit = 0; 3181da177e4SLinus Torvalds } 319253d553bSHugh Dickins si->swap_map[offset] = usage; 3201da177e4SLinus Torvalds si->cluster_next = offset + 1; 32152b7efdbSHugh Dickins si->flags -= SWP_SCANNING; 3227992fde7SHugh Dickins 3237992fde7SHugh Dickins if (si->lowest_alloc) { 3247992fde7SHugh Dickins /* 325dcf6b7ddSRafael Aquini * Only set when SWP_PAGE_DISCARD, and there's a scan 3267992fde7SHugh Dickins * for a free cluster in progress or just completed. 3277992fde7SHugh Dickins */ 3287992fde7SHugh Dickins if (found_free_cluster) { 3297992fde7SHugh Dickins /* 3307992fde7SHugh Dickins * To optimize wear-levelling, discard the 3317992fde7SHugh Dickins * old data of the cluster, taking care not to 3327992fde7SHugh Dickins * discard any of its pages that have already 3337992fde7SHugh Dickins * been allocated by racing tasks (offset has 3347992fde7SHugh Dickins * already stepped over any at the beginning). 3357992fde7SHugh Dickins */ 3367992fde7SHugh Dickins if (offset < si->highest_alloc && 3377992fde7SHugh Dickins si->lowest_alloc <= last_in_cluster) 3387992fde7SHugh Dickins last_in_cluster = si->lowest_alloc - 1; 3397992fde7SHugh Dickins si->flags |= SWP_DISCARDING; 340ec8acf20SShaohua Li spin_unlock(&si->lock); 3417992fde7SHugh Dickins 3427992fde7SHugh Dickins if (offset < last_in_cluster) 3437992fde7SHugh Dickins discard_swap_cluster(si, offset, 3447992fde7SHugh Dickins last_in_cluster - offset + 1); 3457992fde7SHugh Dickins 346ec8acf20SShaohua Li spin_lock(&si->lock); 3477992fde7SHugh Dickins si->lowest_alloc = 0; 3487992fde7SHugh Dickins si->flags &= ~SWP_DISCARDING; 3497992fde7SHugh Dickins 3507992fde7SHugh Dickins smp_mb(); /* wake_up_bit advises this */ 3517992fde7SHugh Dickins wake_up_bit(&si->flags, ilog2(SWP_DISCARDING)); 3527992fde7SHugh Dickins 3537992fde7SHugh Dickins } else if (si->flags & SWP_DISCARDING) { 3547992fde7SHugh Dickins /* 3557992fde7SHugh Dickins * Delay using pages allocated by racing tasks 3567992fde7SHugh Dickins * until the whole discard has been issued. We 3577992fde7SHugh Dickins * could defer that delay until swap_writepage, 3587992fde7SHugh Dickins * but it's easier to keep this self-contained. 
3597992fde7SHugh Dickins */ 360ec8acf20SShaohua Li spin_unlock(&si->lock); 3617992fde7SHugh Dickins wait_on_bit(&si->flags, ilog2(SWP_DISCARDING), 3627992fde7SHugh Dickins wait_for_discard, TASK_UNINTERRUPTIBLE); 363ec8acf20SShaohua Li spin_lock(&si->lock); 3647992fde7SHugh Dickins } else { 3657992fde7SHugh Dickins /* 3667992fde7SHugh Dickins * Note pages allocated by racing tasks while 3677992fde7SHugh Dickins * scan for a free cluster is in progress, so 3687992fde7SHugh Dickins * that its final discard can exclude them. 3697992fde7SHugh Dickins */ 3707992fde7SHugh Dickins if (offset < si->lowest_alloc) 3717992fde7SHugh Dickins si->lowest_alloc = offset; 3727992fde7SHugh Dickins if (offset > si->highest_alloc) 3737992fde7SHugh Dickins si->highest_alloc = offset; 3747992fde7SHugh Dickins } 3757992fde7SHugh Dickins } 3761da177e4SLinus Torvalds return offset; 3777dfad418SHugh Dickins 378ebebbbe9SHugh Dickins scan: 379ec8acf20SShaohua Li spin_unlock(&si->lock); 3807dfad418SHugh Dickins while (++offset <= si->highest_bit) { 38152b7efdbSHugh Dickins if (!si->swap_map[offset]) { 382ec8acf20SShaohua Li spin_lock(&si->lock); 38352b7efdbSHugh Dickins goto checks; 3847dfad418SHugh Dickins } 385c9e44410SKAMEZAWA Hiroyuki if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { 386ec8acf20SShaohua Li spin_lock(&si->lock); 387c9e44410SKAMEZAWA Hiroyuki goto checks; 388c9e44410SKAMEZAWA Hiroyuki } 389048c27fdSHugh Dickins if (unlikely(--latency_ration < 0)) { 390048c27fdSHugh Dickins cond_resched(); 391048c27fdSHugh Dickins latency_ration = LATENCY_LIMIT; 392048c27fdSHugh Dickins } 39352b7efdbSHugh Dickins } 394c60aa176SHugh Dickins offset = si->lowest_bit; 395c60aa176SHugh Dickins while (++offset < scan_base) { 396c60aa176SHugh Dickins if (!si->swap_map[offset]) { 397ec8acf20SShaohua Li spin_lock(&si->lock); 398ebebbbe9SHugh Dickins goto checks; 399c60aa176SHugh Dickins } 400c9e44410SKAMEZAWA Hiroyuki if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { 401ec8acf20SShaohua Li spin_lock(&si->lock); 402c9e44410SKAMEZAWA Hiroyuki goto checks; 403c9e44410SKAMEZAWA Hiroyuki } 404c60aa176SHugh Dickins if (unlikely(--latency_ration < 0)) { 405c60aa176SHugh Dickins cond_resched(); 406c60aa176SHugh Dickins latency_ration = LATENCY_LIMIT; 407c60aa176SHugh Dickins } 408c60aa176SHugh Dickins } 409ec8acf20SShaohua Li spin_lock(&si->lock); 4107dfad418SHugh Dickins 4117dfad418SHugh Dickins no_page: 41252b7efdbSHugh Dickins si->flags -= SWP_SCANNING; 4131da177e4SLinus Torvalds return 0; 4141da177e4SLinus Torvalds } 4151da177e4SLinus Torvalds 4161da177e4SLinus Torvalds swp_entry_t get_swap_page(void) 4171da177e4SLinus Torvalds { 418fb4f88dcSHugh Dickins struct swap_info_struct *si; 419fb4f88dcSHugh Dickins pgoff_t offset; 420fb4f88dcSHugh Dickins int type, next; 421fb4f88dcSHugh Dickins int wrapped = 0; 422ec8acf20SShaohua Li int hp_index; 4231da177e4SLinus Torvalds 4245d337b91SHugh Dickins spin_lock(&swap_lock); 425ec8acf20SShaohua Li if (atomic_long_read(&nr_swap_pages) <= 0) 426fb4f88dcSHugh Dickins goto noswap; 427ec8acf20SShaohua Li atomic_long_dec(&nr_swap_pages); 4281da177e4SLinus Torvalds 429fb4f88dcSHugh Dickins for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { 430ec8acf20SShaohua Li hp_index = atomic_xchg(&highest_priority_index, -1); 431ec8acf20SShaohua Li /* 432ec8acf20SShaohua Li * highest_priority_index records current highest priority swap 433ec8acf20SShaohua Li * type which just frees swap entries. 
		 * If its priority is higher than that of swap_list.next swap
		 * type, we use it. It isn't protected by swap_lock, so it can
		 * be an invalid value if the corresponding swap type has been
		 * swapped off. We double check the flags here. It's even
		 * possible the swap type is swapped off and on again with a
		 * changed priority. In such a rare case a low priority swap
		 * type might be used, but eventually the high priority swap
		 * type will be used after several rounds of swap.
		 */
		if (hp_index != -1 && hp_index != type &&
		    swap_info[type]->prio < swap_info[hp_index]->prio &&
		    (swap_info[hp_index]->flags & SWP_WRITEOK)) {
			type = hp_index;
			swap_list.next = type;
		}

		si = swap_info[type];
		next = si->next;
		if (next < 0 ||
		    (!wrapped && si->prio != swap_info[next]->prio)) {
			next = swap_list.head;
			wrapped++;
		}

		spin_lock(&si->lock);
		if (!si->highest_bit) {
			spin_unlock(&si->lock);
			continue;
		}
		if (!(si->flags & SWP_WRITEOK)) {
			spin_unlock(&si->lock);
			continue;
		}

		swap_list.next = next;

		spin_unlock(&swap_lock);
		/* This is called for allocating swap entry for cache */
		offset = scan_swap_map(si, SWAP_HAS_CACHE);
		spin_unlock(&si->lock);
		if (offset)
			return swp_entry(type, offset);
		spin_lock(&swap_lock);
		next = swap_list.next;
	}

	atomic_long_inc(&nr_swap_pages);
noswap:
	spin_unlock(&swap_lock);
	return (swp_entry_t) {0};
}

/* The only caller of this function is now the suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si;
	pgoff_t offset;

	si = swap_info[type];
	spin_lock(&si->lock);
	if (si && (si->flags & SWP_WRITEOK)) {
		atomic_long_dec(&nr_swap_pages);
		/* This is called for allocating swap entry, not cache */
		offset = scan_swap_map(si, 1);
		if (offset) {
			spin_unlock(&si->lock);
			return swp_entry(type, offset);
		}
		atomic_long_inc(&nr_swap_pages);
	}
	spin_unlock(&si->lock);
	return (swp_entry_t) {0};
506910321eaSHugh Dickins } 507910321eaSHugh Dickins 5081da177e4SLinus Torvalds static struct swap_info_struct *swap_info_get(swp_entry_t entry) 5091da177e4SLinus Torvalds { 5101da177e4SLinus Torvalds struct swap_info_struct *p; 5111da177e4SLinus Torvalds unsigned long offset, type; 5121da177e4SLinus Torvalds 5131da177e4SLinus Torvalds if (!entry.val) 5141da177e4SLinus Torvalds goto out; 5151da177e4SLinus Torvalds type = swp_type(entry); 5161da177e4SLinus Torvalds if (type >= nr_swapfiles) 5171da177e4SLinus Torvalds goto bad_nofile; 518efa90a98SHugh Dickins p = swap_info[type]; 5191da177e4SLinus Torvalds if (!(p->flags & SWP_USED)) 5201da177e4SLinus Torvalds goto bad_device; 5211da177e4SLinus Torvalds offset = swp_offset(entry); 5221da177e4SLinus Torvalds if (offset >= p->max) 5231da177e4SLinus Torvalds goto bad_offset; 5241da177e4SLinus Torvalds if (!p->swap_map[offset]) 5251da177e4SLinus Torvalds goto bad_free; 526ec8acf20SShaohua Li spin_lock(&p->lock); 5271da177e4SLinus Torvalds return p; 5281da177e4SLinus Torvalds 5291da177e4SLinus Torvalds bad_free: 5301da177e4SLinus Torvalds printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val); 5311da177e4SLinus Torvalds goto out; 5321da177e4SLinus Torvalds bad_offset: 5331da177e4SLinus Torvalds printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val); 5341da177e4SLinus Torvalds goto out; 5351da177e4SLinus Torvalds bad_device: 5361da177e4SLinus Torvalds printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val); 5371da177e4SLinus Torvalds goto out; 5381da177e4SLinus Torvalds bad_nofile: 5391da177e4SLinus Torvalds printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val); 5401da177e4SLinus Torvalds out: 5411da177e4SLinus Torvalds return NULL; 5421da177e4SLinus Torvalds } 5431da177e4SLinus Torvalds 544ec8acf20SShaohua Li /* 545ec8acf20SShaohua Li * This swap type frees swap entry, check if it is the highest priority swap 546ec8acf20SShaohua Li * type which just frees swap entry. get_swap_page() uses 547ec8acf20SShaohua Li * highest_priority_index to search highest priority swap type. The 548ec8acf20SShaohua Li * swap_info_struct.lock can't protect us if there are multiple swap types 549ec8acf20SShaohua Li * active, so we use atomic_cmpxchg. 
550ec8acf20SShaohua Li */ 551ec8acf20SShaohua Li static void set_highest_priority_index(int type) 552ec8acf20SShaohua Li { 553ec8acf20SShaohua Li int old_hp_index, new_hp_index; 554ec8acf20SShaohua Li 555ec8acf20SShaohua Li do { 556ec8acf20SShaohua Li old_hp_index = atomic_read(&highest_priority_index); 557ec8acf20SShaohua Li if (old_hp_index != -1 && 558ec8acf20SShaohua Li swap_info[old_hp_index]->prio >= swap_info[type]->prio) 559ec8acf20SShaohua Li break; 560ec8acf20SShaohua Li new_hp_index = type; 561ec8acf20SShaohua Li } while (atomic_cmpxchg(&highest_priority_index, 562ec8acf20SShaohua Li old_hp_index, new_hp_index) != old_hp_index); 563ec8acf20SShaohua Li } 564ec8acf20SShaohua Li 5658d69aaeeSHugh Dickins static unsigned char swap_entry_free(struct swap_info_struct *p, 5668d69aaeeSHugh Dickins swp_entry_t entry, unsigned char usage) 5671da177e4SLinus Torvalds { 568253d553bSHugh Dickins unsigned long offset = swp_offset(entry); 5698d69aaeeSHugh Dickins unsigned char count; 5708d69aaeeSHugh Dickins unsigned char has_cache; 5711da177e4SLinus Torvalds 572355cfa73SKAMEZAWA Hiroyuki count = p->swap_map[offset]; 573253d553bSHugh Dickins has_cache = count & SWAP_HAS_CACHE; 574253d553bSHugh Dickins count &= ~SWAP_HAS_CACHE; 575253d553bSHugh Dickins 576253d553bSHugh Dickins if (usage == SWAP_HAS_CACHE) { 577253d553bSHugh Dickins VM_BUG_ON(!has_cache); 578253d553bSHugh Dickins has_cache = 0; 579aaa46865SHugh Dickins } else if (count == SWAP_MAP_SHMEM) { 580aaa46865SHugh Dickins /* 581aaa46865SHugh Dickins * Or we could insist on shmem.c using a special 582aaa46865SHugh Dickins * swap_shmem_free() and free_shmem_swap_and_cache()... 583aaa46865SHugh Dickins */ 584aaa46865SHugh Dickins count = 0; 585570a335bSHugh Dickins } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { 586570a335bSHugh Dickins if (count == COUNT_CONTINUED) { 587570a335bSHugh Dickins if (swap_count_continued(p, offset, count)) 588570a335bSHugh Dickins count = SWAP_MAP_MAX | COUNT_CONTINUED; 589570a335bSHugh Dickins else 590570a335bSHugh Dickins count = SWAP_MAP_MAX; 591570a335bSHugh Dickins } else 592253d553bSHugh Dickins count--; 593570a335bSHugh Dickins } 594253d553bSHugh Dickins 595253d553bSHugh Dickins if (!count) 596253d553bSHugh Dickins mem_cgroup_uncharge_swap(entry); 597253d553bSHugh Dickins 598253d553bSHugh Dickins usage = count | has_cache; 599253d553bSHugh Dickins p->swap_map[offset] = usage; 600253d553bSHugh Dickins 601355cfa73SKAMEZAWA Hiroyuki /* free if no reference */ 602253d553bSHugh Dickins if (!usage) { 6031da177e4SLinus Torvalds if (offset < p->lowest_bit) 6041da177e4SLinus Torvalds p->lowest_bit = offset; 6051da177e4SLinus Torvalds if (offset > p->highest_bit) 6061da177e4SLinus Torvalds p->highest_bit = offset; 607ec8acf20SShaohua Li set_highest_priority_index(p->type); 608ec8acf20SShaohua Li atomic_long_inc(&nr_swap_pages); 6091da177e4SLinus Torvalds p->inuse_pages--; 61038b5faf4SDan Magenheimer frontswap_invalidate_page(p->type, offset); 61173744923SMel Gorman if (p->flags & SWP_BLKDEV) { 61273744923SMel Gorman struct gendisk *disk = p->bdev->bd_disk; 61373744923SMel Gorman if (disk->fops->swap_slot_free_notify) 61473744923SMel Gorman disk->fops->swap_slot_free_notify(p->bdev, 61573744923SMel Gorman offset); 61673744923SMel Gorman } 6171da177e4SLinus Torvalds } 618253d553bSHugh Dickins 619253d553bSHugh Dickins return usage; 6201da177e4SLinus Torvalds } 6211da177e4SLinus Torvalds 6221da177e4SLinus Torvalds /* 6231da177e4SLinus Torvalds * Caller has made sure that the swapdevice corresponding to entry 
6241da177e4SLinus Torvalds * is still around or has not been recycled. 6251da177e4SLinus Torvalds */ 6261da177e4SLinus Torvalds void swap_free(swp_entry_t entry) 6271da177e4SLinus Torvalds { 6281da177e4SLinus Torvalds struct swap_info_struct *p; 6291da177e4SLinus Torvalds 6301da177e4SLinus Torvalds p = swap_info_get(entry); 6311da177e4SLinus Torvalds if (p) { 632253d553bSHugh Dickins swap_entry_free(p, entry, 1); 633ec8acf20SShaohua Li spin_unlock(&p->lock); 6341da177e4SLinus Torvalds } 6351da177e4SLinus Torvalds } 6361da177e4SLinus Torvalds 6371da177e4SLinus Torvalds /* 638cb4b86baSKAMEZAWA Hiroyuki * Called after dropping swapcache to decrease refcnt to swap entries. 639cb4b86baSKAMEZAWA Hiroyuki */ 640cb4b86baSKAMEZAWA Hiroyuki void swapcache_free(swp_entry_t entry, struct page *page) 641cb4b86baSKAMEZAWA Hiroyuki { 642355cfa73SKAMEZAWA Hiroyuki struct swap_info_struct *p; 6438d69aaeeSHugh Dickins unsigned char count; 644355cfa73SKAMEZAWA Hiroyuki 645355cfa73SKAMEZAWA Hiroyuki p = swap_info_get(entry); 646355cfa73SKAMEZAWA Hiroyuki if (p) { 647253d553bSHugh Dickins count = swap_entry_free(p, entry, SWAP_HAS_CACHE); 648253d553bSHugh Dickins if (page) 649253d553bSHugh Dickins mem_cgroup_uncharge_swapcache(page, entry, count != 0); 650ec8acf20SShaohua Li spin_unlock(&p->lock); 651355cfa73SKAMEZAWA Hiroyuki } 652cb4b86baSKAMEZAWA Hiroyuki } 653cb4b86baSKAMEZAWA Hiroyuki 654cb4b86baSKAMEZAWA Hiroyuki /* 655c475a8abSHugh Dickins * How many references to page are currently swapped out? 656570a335bSHugh Dickins * This does not give an exact answer when swap count is continued, 657570a335bSHugh Dickins * but does include the high COUNT_CONTINUED flag to allow for that. 6581da177e4SLinus Torvalds */ 659bde05d1cSHugh Dickins int page_swapcount(struct page *page) 6601da177e4SLinus Torvalds { 661c475a8abSHugh Dickins int count = 0; 6621da177e4SLinus Torvalds struct swap_info_struct *p; 6631da177e4SLinus Torvalds swp_entry_t entry; 6641da177e4SLinus Torvalds 6654c21e2f2SHugh Dickins entry.val = page_private(page); 6661da177e4SLinus Torvalds p = swap_info_get(entry); 6671da177e4SLinus Torvalds if (p) { 668355cfa73SKAMEZAWA Hiroyuki count = swap_count(p->swap_map[swp_offset(entry)]); 669ec8acf20SShaohua Li spin_unlock(&p->lock); 6701da177e4SLinus Torvalds } 671c475a8abSHugh Dickins return count; 6721da177e4SLinus Torvalds } 6731da177e4SLinus Torvalds 6741da177e4SLinus Torvalds /* 6757b1fe597SHugh Dickins * We can write to an anon page without COW if there are no other references 6767b1fe597SHugh Dickins * to it. And as a side-effect, free up its swap: because the old content 6777b1fe597SHugh Dickins * on disk will never be read, and seeking back there to write new content 6787b1fe597SHugh Dickins * later would only waste time away from clustering. 
 */
int reuse_swap_page(struct page *page)
{
	int count;

	VM_BUG_ON(!PageLocked(page));
	if (unlikely(PageKsm(page)))
		return 0;
	count = page_mapcount(page);
	if (count <= 1 && PageSwapCache(page)) {
		count += page_swapcount(page);
		if (count == 1 && !PageWriteback(page)) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
	}
	return count <= 1;
}

/*
 * If swap is getting full, or if there are no more mappings of this page,
 * then try_to_free_swap is called to free its swap space.
 */
int try_to_free_swap(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_swapcount(page))
		return 0;

	/*
	 * Once hibernation has begun to create its image of memory,
	 * there's a danger that one of the calls to try_to_free_swap()
	 * - most probably a call from __try_to_reclaim_swap() while
	 * hibernation is allocating its own swap pages for the image,
	 * but conceivably even a call from memory reclaim - will free
	 * the swap from a page which has already been recorded in the
	 * image as a clean swapcache page, and then reuse its swap for
	 * another page of the image. On waking from hibernation, the
	 * original page might be freed under memory pressure, then
	 * later read back in from swap, now with the wrong data.
	 *
	 * Hibernation suspends storage while it is writing the image
	 * to disk, so check that here.
	 */
	if (pm_suspended_storage())
		return 0;

	delete_from_swap_cache(page);
	SetPageDirty(page);
	return 1;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
7391da177e4SLinus Torvalds */ 7402509ef26SHugh Dickins int free_swap_and_cache(swp_entry_t entry) 7411da177e4SLinus Torvalds { 7421da177e4SLinus Torvalds struct swap_info_struct *p; 7431da177e4SLinus Torvalds struct page *page = NULL; 7441da177e4SLinus Torvalds 745a7420aa5SAndi Kleen if (non_swap_entry(entry)) 7462509ef26SHugh Dickins return 1; 7470697212aSChristoph Lameter 7481da177e4SLinus Torvalds p = swap_info_get(entry); 7491da177e4SLinus Torvalds if (p) { 750253d553bSHugh Dickins if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) { 75133806f06SShaohua Li page = find_get_page(swap_address_space(entry), 75233806f06SShaohua Li entry.val); 7538413ac9dSNick Piggin if (page && !trylock_page(page)) { 75493fac704SNick Piggin page_cache_release(page); 75593fac704SNick Piggin page = NULL; 75693fac704SNick Piggin } 75793fac704SNick Piggin } 758ec8acf20SShaohua Li spin_unlock(&p->lock); 7591da177e4SLinus Torvalds } 7601da177e4SLinus Torvalds if (page) { 761a2c43eedSHugh Dickins /* 762a2c43eedSHugh Dickins * Not mapped elsewhere, or swap space full? Free it! 763a2c43eedSHugh Dickins * Also recheck PageSwapCache now page is locked (above). 764a2c43eedSHugh Dickins */ 76593fac704SNick Piggin if (PageSwapCache(page) && !PageWriteback(page) && 766a2c43eedSHugh Dickins (!page_mapped(page) || vm_swap_full())) { 7671da177e4SLinus Torvalds delete_from_swap_cache(page); 7681da177e4SLinus Torvalds SetPageDirty(page); 7691da177e4SLinus Torvalds } 7701da177e4SLinus Torvalds unlock_page(page); 7711da177e4SLinus Torvalds page_cache_release(page); 7721da177e4SLinus Torvalds } 7732509ef26SHugh Dickins return p != NULL; 7741da177e4SLinus Torvalds } 7751da177e4SLinus Torvalds 776b0cb1a19SRafael J. Wysocki #ifdef CONFIG_HIBERNATION 777f577eb30SRafael J. Wysocki /* 778915bae9eSRafael J. Wysocki * Find the swap type that corresponds to given device (if any). 779f577eb30SRafael J. Wysocki * 780915bae9eSRafael J. Wysocki * @offset - number of the PAGE_SIZE-sized block of the device, starting 781915bae9eSRafael J. Wysocki * from 0, in which the swap header is expected to be located. 782915bae9eSRafael J. Wysocki * 783915bae9eSRafael J. Wysocki * This is needed for the suspend to disk (aka swsusp). 784f577eb30SRafael J. Wysocki */ 7857bf23687SRafael J. Wysocki int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) 786f577eb30SRafael J. Wysocki { 787915bae9eSRafael J. Wysocki struct block_device *bdev = NULL; 788efa90a98SHugh Dickins int type; 789f577eb30SRafael J. Wysocki 790915bae9eSRafael J. Wysocki if (device) 791915bae9eSRafael J. Wysocki bdev = bdget(device); 792915bae9eSRafael J. Wysocki 793f577eb30SRafael J. Wysocki spin_lock(&swap_lock); 794efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) { 795efa90a98SHugh Dickins struct swap_info_struct *sis = swap_info[type]; 796f577eb30SRafael J. Wysocki 797915bae9eSRafael J. Wysocki if (!(sis->flags & SWP_WRITEOK)) 798f577eb30SRafael J. Wysocki continue; 799b6b5bce3SRafael J. Wysocki 800915bae9eSRafael J. Wysocki if (!bdev) { 8017bf23687SRafael J. Wysocki if (bdev_p) 802dddac6a7SAlan Jenkins *bdev_p = bdgrab(sis->bdev); 8037bf23687SRafael J. Wysocki 8046e1819d6SRafael J. Wysocki spin_unlock(&swap_lock); 805efa90a98SHugh Dickins return type; 8066e1819d6SRafael J. Wysocki } 807915bae9eSRafael J. Wysocki if (bdev == sis->bdev) { 8089625a5f2SHugh Dickins struct swap_extent *se = &sis->first_swap_extent; 809915bae9eSRafael J. Wysocki 810915bae9eSRafael J. Wysocki if (se->start_block == offset) { 8117bf23687SRafael J. 
				if (bdev_p)
					*bdev_p = bdgrab(sis->bdev);

				spin_unlock(&swap_lock);
				bdput(bdev);
				return type;
			}
		}
	}
	spin_unlock(&swap_lock);
	if (bdev)
		bdput(bdev);

	return -ENODEV;
}

/*
 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 * corresponding to given index in swap_info (swap type).
 */
sector_t swapdev_block(int type, pgoff_t offset)
{
	struct block_device *bdev;

	if ((unsigned int)type >= nr_swapfiles)
		return 0;
	if (!(swap_info[type]->flags & SWP_WRITEOK))
		return 0;
	return map_swap_entry(swp_entry(type, offset), &bdev);
}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
	unsigned int n = 0;

	spin_lock(&swap_lock);
	if ((unsigned int)type < nr_swapfiles) {
		struct swap_info_struct *sis = swap_info[type];

		spin_lock(&sis->lock);
		if (sis->flags & SWP_WRITEOK) {
			n = sis->pages;
			if (free)
				n -= sis->inuse_pages;
		}
		spin_unlock(&sis->lock);
	}
	spin_unlock(&swap_lock);
	return n;
}
#endif /* CONFIG_HIBERNATION */

static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	/*
	 * When a pte carries the soft dirty bit, the pte generated from the
	 * swap entry does not have it, but it is still the same pte from a
	 * logical point of view.
876*179ef71cSCyrill Gorcunov */ 877*179ef71cSCyrill Gorcunov pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte); 878*179ef71cSCyrill Gorcunov return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty); 879*179ef71cSCyrill Gorcunov #else 880*179ef71cSCyrill Gorcunov return pte_same(pte, swp_pte); 881*179ef71cSCyrill Gorcunov #endif 882*179ef71cSCyrill Gorcunov } 883*179ef71cSCyrill Gorcunov 8841da177e4SLinus Torvalds /* 88572866f6fSHugh Dickins * No need to decide whether this PTE shares the swap entry with others, 88672866f6fSHugh Dickins * just let do_wp_page work it out if a write is requested later - to 88772866f6fSHugh Dickins * force COW, vm_page_prot omits write permission from any private vma. 8881da177e4SLinus Torvalds */ 889044d66c1SHugh Dickins static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, 8901da177e4SLinus Torvalds unsigned long addr, swp_entry_t entry, struct page *page) 8911da177e4SLinus Torvalds { 8929e16b7fbSHugh Dickins struct page *swapcache; 89372835c86SJohannes Weiner struct mem_cgroup *memcg; 894044d66c1SHugh Dickins spinlock_t *ptl; 895044d66c1SHugh Dickins pte_t *pte; 896044d66c1SHugh Dickins int ret = 1; 897044d66c1SHugh Dickins 8989e16b7fbSHugh Dickins swapcache = page; 8999e16b7fbSHugh Dickins page = ksm_might_need_to_copy(page, vma, addr); 9009e16b7fbSHugh Dickins if (unlikely(!page)) 9019e16b7fbSHugh Dickins return -ENOMEM; 9029e16b7fbSHugh Dickins 90372835c86SJohannes Weiner if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, 90472835c86SJohannes Weiner GFP_KERNEL, &memcg)) { 905044d66c1SHugh Dickins ret = -ENOMEM; 90685d9fc89SKAMEZAWA Hiroyuki goto out_nolock; 90785d9fc89SKAMEZAWA Hiroyuki } 908044d66c1SHugh Dickins 909044d66c1SHugh Dickins pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 910*179ef71cSCyrill Gorcunov if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) { 91172835c86SJohannes Weiner mem_cgroup_cancel_charge_swapin(memcg); 912044d66c1SHugh Dickins ret = 0; 913044d66c1SHugh Dickins goto out; 914044d66c1SHugh Dickins } 9158a9f3ccdSBalbir Singh 916b084d435SKAMEZAWA Hiroyuki dec_mm_counter(vma->vm_mm, MM_SWAPENTS); 917d559db08SKAMEZAWA Hiroyuki inc_mm_counter(vma->vm_mm, MM_ANONPAGES); 9181da177e4SLinus Torvalds get_page(page); 9191da177e4SLinus Torvalds set_pte_at(vma->vm_mm, addr, pte, 9201da177e4SLinus Torvalds pte_mkold(mk_pte(page, vma->vm_page_prot))); 9219e16b7fbSHugh Dickins if (page == swapcache) 9221da177e4SLinus Torvalds page_add_anon_rmap(page, vma, addr); 9239e16b7fbSHugh Dickins else /* ksm created a completely new copy */ 9249e16b7fbSHugh Dickins page_add_new_anon_rmap(page, vma, addr); 92572835c86SJohannes Weiner mem_cgroup_commit_charge_swapin(page, memcg); 9261da177e4SLinus Torvalds swap_free(entry); 9271da177e4SLinus Torvalds /* 9281da177e4SLinus Torvalds * Move the page to the active list so it is not 9291da177e4SLinus Torvalds * immediately swapped out again after swapon. 
9301da177e4SLinus Torvalds */ 9311da177e4SLinus Torvalds activate_page(page); 932044d66c1SHugh Dickins out: 933044d66c1SHugh Dickins pte_unmap_unlock(pte, ptl); 93485d9fc89SKAMEZAWA Hiroyuki out_nolock: 9359e16b7fbSHugh Dickins if (page != swapcache) { 9369e16b7fbSHugh Dickins unlock_page(page); 9379e16b7fbSHugh Dickins put_page(page); 9389e16b7fbSHugh Dickins } 939044d66c1SHugh Dickins return ret; 9401da177e4SLinus Torvalds } 9411da177e4SLinus Torvalds 9421da177e4SLinus Torvalds static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 9431da177e4SLinus Torvalds unsigned long addr, unsigned long end, 9441da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 9451da177e4SLinus Torvalds { 9461da177e4SLinus Torvalds pte_t swp_pte = swp_entry_to_pte(entry); 947705e87c0SHugh Dickins pte_t *pte; 9488a9f3ccdSBalbir Singh int ret = 0; 9491da177e4SLinus Torvalds 950044d66c1SHugh Dickins /* 951044d66c1SHugh Dickins * We don't actually need pte lock while scanning for swp_pte: since 952044d66c1SHugh Dickins * we hold page lock and mmap_sem, swp_pte cannot be inserted into the 953044d66c1SHugh Dickins * page table while we're scanning; though it could get zapped, and on 954044d66c1SHugh Dickins * some architectures (e.g. x86_32 with PAE) we might catch a glimpse 955044d66c1SHugh Dickins * of unmatched parts which look like swp_pte, so unuse_pte must 956044d66c1SHugh Dickins * recheck under pte lock. Scanning without pte lock lets it be 957044d66c1SHugh Dickins * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE. 958044d66c1SHugh Dickins */ 959044d66c1SHugh Dickins pte = pte_offset_map(pmd, addr); 9601da177e4SLinus Torvalds do { 9611da177e4SLinus Torvalds /* 9621da177e4SLinus Torvalds * swapoff spends a _lot_ of time in this loop! 9631da177e4SLinus Torvalds * Test inline before going to call unuse_pte. 
9641da177e4SLinus Torvalds */ 965*179ef71cSCyrill Gorcunov if (unlikely(maybe_same_pte(*pte, swp_pte))) { 966044d66c1SHugh Dickins pte_unmap(pte); 967044d66c1SHugh Dickins ret = unuse_pte(vma, pmd, addr, entry, page); 968044d66c1SHugh Dickins if (ret) 969044d66c1SHugh Dickins goto out; 970044d66c1SHugh Dickins pte = pte_offset_map(pmd, addr); 9711da177e4SLinus Torvalds } 9721da177e4SLinus Torvalds } while (pte++, addr += PAGE_SIZE, addr != end); 973044d66c1SHugh Dickins pte_unmap(pte - 1); 974044d66c1SHugh Dickins out: 9758a9f3ccdSBalbir Singh return ret; 9761da177e4SLinus Torvalds } 9771da177e4SLinus Torvalds 9781da177e4SLinus Torvalds static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, 9791da177e4SLinus Torvalds unsigned long addr, unsigned long end, 9801da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 9811da177e4SLinus Torvalds { 9821da177e4SLinus Torvalds pmd_t *pmd; 9831da177e4SLinus Torvalds unsigned long next; 9848a9f3ccdSBalbir Singh int ret; 9851da177e4SLinus Torvalds 9861da177e4SLinus Torvalds pmd = pmd_offset(pud, addr); 9871da177e4SLinus Torvalds do { 9881da177e4SLinus Torvalds next = pmd_addr_end(addr, end); 9891a5a9906SAndrea Arcangeli if (pmd_none_or_trans_huge_or_clear_bad(pmd)) 9901da177e4SLinus Torvalds continue; 9918a9f3ccdSBalbir Singh ret = unuse_pte_range(vma, pmd, addr, next, entry, page); 9928a9f3ccdSBalbir Singh if (ret) 9938a9f3ccdSBalbir Singh return ret; 9941da177e4SLinus Torvalds } while (pmd++, addr = next, addr != end); 9951da177e4SLinus Torvalds return 0; 9961da177e4SLinus Torvalds } 9971da177e4SLinus Torvalds 9981da177e4SLinus Torvalds static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, 9991da177e4SLinus Torvalds unsigned long addr, unsigned long end, 10001da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 10011da177e4SLinus Torvalds { 10021da177e4SLinus Torvalds pud_t *pud; 10031da177e4SLinus Torvalds unsigned long next; 10048a9f3ccdSBalbir Singh int ret; 10051da177e4SLinus Torvalds 10061da177e4SLinus Torvalds pud = pud_offset(pgd, addr); 10071da177e4SLinus Torvalds do { 10081da177e4SLinus Torvalds next = pud_addr_end(addr, end); 10091da177e4SLinus Torvalds if (pud_none_or_clear_bad(pud)) 10101da177e4SLinus Torvalds continue; 10118a9f3ccdSBalbir Singh ret = unuse_pmd_range(vma, pud, addr, next, entry, page); 10128a9f3ccdSBalbir Singh if (ret) 10138a9f3ccdSBalbir Singh return ret; 10141da177e4SLinus Torvalds } while (pud++, addr = next, addr != end); 10151da177e4SLinus Torvalds return 0; 10161da177e4SLinus Torvalds } 10171da177e4SLinus Torvalds 10181da177e4SLinus Torvalds static int unuse_vma(struct vm_area_struct *vma, 10191da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 10201da177e4SLinus Torvalds { 10211da177e4SLinus Torvalds pgd_t *pgd; 10221da177e4SLinus Torvalds unsigned long addr, end, next; 10238a9f3ccdSBalbir Singh int ret; 10241da177e4SLinus Torvalds 10253ca7b3c5SHugh Dickins if (page_anon_vma(page)) { 10261da177e4SLinus Torvalds addr = page_address_in_vma(page, vma); 10271da177e4SLinus Torvalds if (addr == -EFAULT) 10281da177e4SLinus Torvalds return 0; 10291da177e4SLinus Torvalds else 10301da177e4SLinus Torvalds end = addr + PAGE_SIZE; 10311da177e4SLinus Torvalds } else { 10321da177e4SLinus Torvalds addr = vma->vm_start; 10331da177e4SLinus Torvalds end = vma->vm_end; 10341da177e4SLinus Torvalds } 10351da177e4SLinus Torvalds 10361da177e4SLinus Torvalds pgd = pgd_offset(vma->vm_mm, addr); 10371da177e4SLinus Torvalds do { 10381da177e4SLinus Torvalds next = 
pgd_addr_end(addr, end); 10391da177e4SLinus Torvalds if (pgd_none_or_clear_bad(pgd)) 10401da177e4SLinus Torvalds continue; 10418a9f3ccdSBalbir Singh ret = unuse_pud_range(vma, pgd, addr, next, entry, page); 10428a9f3ccdSBalbir Singh if (ret) 10438a9f3ccdSBalbir Singh return ret; 10441da177e4SLinus Torvalds } while (pgd++, addr = next, addr != end); 10451da177e4SLinus Torvalds return 0; 10461da177e4SLinus Torvalds } 10471da177e4SLinus Torvalds 10481da177e4SLinus Torvalds static int unuse_mm(struct mm_struct *mm, 10491da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 10501da177e4SLinus Torvalds { 10511da177e4SLinus Torvalds struct vm_area_struct *vma; 10528a9f3ccdSBalbir Singh int ret = 0; 10531da177e4SLinus Torvalds 10541da177e4SLinus Torvalds if (!down_read_trylock(&mm->mmap_sem)) { 10551da177e4SLinus Torvalds /* 10567d03431cSFernando Luis Vazquez Cao * Activate page so shrink_inactive_list is unlikely to unmap 10577d03431cSFernando Luis Vazquez Cao * its ptes while lock is dropped, so swapoff can make progress. 10581da177e4SLinus Torvalds */ 1059c475a8abSHugh Dickins activate_page(page); 10601da177e4SLinus Torvalds unlock_page(page); 10611da177e4SLinus Torvalds down_read(&mm->mmap_sem); 10621da177e4SLinus Torvalds lock_page(page); 10631da177e4SLinus Torvalds } 10641da177e4SLinus Torvalds for (vma = mm->mmap; vma; vma = vma->vm_next) { 10658a9f3ccdSBalbir Singh if (vma->anon_vma && (ret = unuse_vma(vma, entry, page))) 10661da177e4SLinus Torvalds break; 10671da177e4SLinus Torvalds } 10681da177e4SLinus Torvalds up_read(&mm->mmap_sem); 10698a9f3ccdSBalbir Singh return (ret < 0)? ret: 0; 10701da177e4SLinus Torvalds } 10711da177e4SLinus Torvalds 10721da177e4SLinus Torvalds /* 107338b5faf4SDan Magenheimer * Scan swap_map (or frontswap_map if frontswap parameter is true) 107438b5faf4SDan Magenheimer * from current position to next entry still in use. 10751da177e4SLinus Torvalds * Recycle to start on reaching the end, returning 0 when empty. 10761da177e4SLinus Torvalds */ 10776eb396dcSHugh Dickins static unsigned int find_next_to_unuse(struct swap_info_struct *si, 107838b5faf4SDan Magenheimer unsigned int prev, bool frontswap) 10791da177e4SLinus Torvalds { 10806eb396dcSHugh Dickins unsigned int max = si->max; 10816eb396dcSHugh Dickins unsigned int i = prev; 10828d69aaeeSHugh Dickins unsigned char count; 10831da177e4SLinus Torvalds 10841da177e4SLinus Torvalds /* 10855d337b91SHugh Dickins * No need for swap_lock here: we're just looking 10861da177e4SLinus Torvalds * for whether an entry is in use, not modifying it; false 10871da177e4SLinus Torvalds * hits are okay, and sys_swapoff() has already prevented new 10885d337b91SHugh Dickins * allocations from this area (while holding swap_lock). 10891da177e4SLinus Torvalds */ 10901da177e4SLinus Torvalds for (;;) { 10911da177e4SLinus Torvalds if (++i >= max) { 10921da177e4SLinus Torvalds if (!prev) { 10931da177e4SLinus Torvalds i = 0; 10941da177e4SLinus Torvalds break; 10951da177e4SLinus Torvalds } 10961da177e4SLinus Torvalds /* 10971da177e4SLinus Torvalds * No entries in use at top of swap_map, 10981da177e4SLinus Torvalds * loop back to start and recheck there. 
10991da177e4SLinus Torvalds */ 11001da177e4SLinus Torvalds max = prev + 1; 11011da177e4SLinus Torvalds prev = 0; 11021da177e4SLinus Torvalds i = 1; 11031da177e4SLinus Torvalds } 110438b5faf4SDan Magenheimer if (frontswap) { 110538b5faf4SDan Magenheimer if (frontswap_test(si, i)) 110638b5faf4SDan Magenheimer break; 110738b5faf4SDan Magenheimer else 110838b5faf4SDan Magenheimer continue; 110938b5faf4SDan Magenheimer } 11101da177e4SLinus Torvalds count = si->swap_map[i]; 1111355cfa73SKAMEZAWA Hiroyuki if (count && swap_count(count) != SWAP_MAP_BAD) 11121da177e4SLinus Torvalds break; 11131da177e4SLinus Torvalds } 11141da177e4SLinus Torvalds return i; 11151da177e4SLinus Torvalds } 11161da177e4SLinus Torvalds 11171da177e4SLinus Torvalds /* 11181da177e4SLinus Torvalds * We completely avoid races by reading each swap page in advance, 11191da177e4SLinus Torvalds * and then search for the process using it. All the necessary 11201da177e4SLinus Torvalds * page table adjustments can then be made atomically. 112138b5faf4SDan Magenheimer * 112238b5faf4SDan Magenheimer * if the boolean frontswap is true, only unuse pages_to_unuse pages; 112338b5faf4SDan Magenheimer * pages_to_unuse==0 means all pages; ignored if frontswap is false 11241da177e4SLinus Torvalds */ 112538b5faf4SDan Magenheimer int try_to_unuse(unsigned int type, bool frontswap, 112638b5faf4SDan Magenheimer unsigned long pages_to_unuse) 11271da177e4SLinus Torvalds { 1128efa90a98SHugh Dickins struct swap_info_struct *si = swap_info[type]; 11291da177e4SLinus Torvalds struct mm_struct *start_mm; 11308d69aaeeSHugh Dickins unsigned char *swap_map; 11318d69aaeeSHugh Dickins unsigned char swcount; 11321da177e4SLinus Torvalds struct page *page; 11331da177e4SLinus Torvalds swp_entry_t entry; 11346eb396dcSHugh Dickins unsigned int i = 0; 11351da177e4SLinus Torvalds int retval = 0; 11361da177e4SLinus Torvalds 11371da177e4SLinus Torvalds /* 11381da177e4SLinus Torvalds * When searching mms for an entry, a good strategy is to 11391da177e4SLinus Torvalds * start at the first mm we freed the previous entry from 11401da177e4SLinus Torvalds * (though actually we don't notice whether we or coincidence 11411da177e4SLinus Torvalds * freed the entry). Initialize this start_mm with a hold. 11421da177e4SLinus Torvalds * 11431da177e4SLinus Torvalds * A simpler strategy would be to start at the last mm we 11441da177e4SLinus Torvalds * freed the previous entry from; but that would take less 11451da177e4SLinus Torvalds * advantage of mmlist ordering, which clusters forked mms 11461da177e4SLinus Torvalds * together, child after parent. If we race with dup_mmap(), we 11471da177e4SLinus Torvalds * prefer to resolve parent before child, lest we miss entries 11481da177e4SLinus Torvalds * duplicated after we scanned child: using last mm would invert 1149570a335bSHugh Dickins * that. 11501da177e4SLinus Torvalds */ 11511da177e4SLinus Torvalds start_mm = &init_mm; 11521da177e4SLinus Torvalds atomic_inc(&init_mm.mm_users); 11531da177e4SLinus Torvalds 11541da177e4SLinus Torvalds /* 11551da177e4SLinus Torvalds * Keep on scanning until all entries have gone. Usually, 11561da177e4SLinus Torvalds * one pass through swap_map is enough, but not necessarily: 11571da177e4SLinus Torvalds * there are races when an instance of an entry might be missed. 
11581da177e4SLinus Torvalds */ 115938b5faf4SDan Magenheimer while ((i = find_next_to_unuse(si, i, frontswap)) != 0) { 11601da177e4SLinus Torvalds if (signal_pending(current)) { 11611da177e4SLinus Torvalds retval = -EINTR; 11621da177e4SLinus Torvalds break; 11631da177e4SLinus Torvalds } 11641da177e4SLinus Torvalds 11651da177e4SLinus Torvalds /* 11661da177e4SLinus Torvalds * Get a page for the entry, using the existing swap 11671da177e4SLinus Torvalds * cache page if there is one. Otherwise, get a clean 11681da177e4SLinus Torvalds * page and read the swap into it. 11691da177e4SLinus Torvalds */ 11701da177e4SLinus Torvalds swap_map = &si->swap_map[i]; 11711da177e4SLinus Torvalds entry = swp_entry(type, i); 117202098feaSHugh Dickins page = read_swap_cache_async(entry, 117302098feaSHugh Dickins GFP_HIGHUSER_MOVABLE, NULL, 0); 11741da177e4SLinus Torvalds if (!page) { 11751da177e4SLinus Torvalds /* 11761da177e4SLinus Torvalds * Either swap_duplicate() failed because entry 11771da177e4SLinus Torvalds * has been freed independently, and will not be 11781da177e4SLinus Torvalds * reused since sys_swapoff() already disabled 11791da177e4SLinus Torvalds * allocation from here, or alloc_page() failed. 11801da177e4SLinus Torvalds */ 11811da177e4SLinus Torvalds if (!*swap_map) 11821da177e4SLinus Torvalds continue; 11831da177e4SLinus Torvalds retval = -ENOMEM; 11841da177e4SLinus Torvalds break; 11851da177e4SLinus Torvalds } 11861da177e4SLinus Torvalds 11871da177e4SLinus Torvalds /* 11881da177e4SLinus Torvalds * Don't hold on to start_mm if it looks like exiting. 11891da177e4SLinus Torvalds */ 11901da177e4SLinus Torvalds if (atomic_read(&start_mm->mm_users) == 1) { 11911da177e4SLinus Torvalds mmput(start_mm); 11921da177e4SLinus Torvalds start_mm = &init_mm; 11931da177e4SLinus Torvalds atomic_inc(&init_mm.mm_users); 11941da177e4SLinus Torvalds } 11951da177e4SLinus Torvalds 11961da177e4SLinus Torvalds /* 11971da177e4SLinus Torvalds * Wait for and lock page. When do_swap_page races with 11981da177e4SLinus Torvalds * try_to_unuse, do_swap_page can handle the fault much 11991da177e4SLinus Torvalds * faster than try_to_unuse can locate the entry. This 12001da177e4SLinus Torvalds * apparently redundant "wait_on_page_locked" lets try_to_unuse 12011da177e4SLinus Torvalds * defer to do_swap_page in such a case - in some tests, 12021da177e4SLinus Torvalds * do_swap_page and try_to_unuse repeatedly compete. 12031da177e4SLinus Torvalds */ 12041da177e4SLinus Torvalds wait_on_page_locked(page); 12051da177e4SLinus Torvalds wait_on_page_writeback(page); 12061da177e4SLinus Torvalds lock_page(page); 12071da177e4SLinus Torvalds wait_on_page_writeback(page); 12081da177e4SLinus Torvalds 12091da177e4SLinus Torvalds /* 12101da177e4SLinus Torvalds * Remove all references to entry. 
12111da177e4SLinus Torvalds */ 12121da177e4SLinus Torvalds swcount = *swap_map; 1213aaa46865SHugh Dickins if (swap_count(swcount) == SWAP_MAP_SHMEM) { 1214aaa46865SHugh Dickins retval = shmem_unuse(entry, page); 1215aaa46865SHugh Dickins /* page has already been unlocked and released */ 1216aaa46865SHugh Dickins if (retval < 0) 1217aaa46865SHugh Dickins break; 1218aaa46865SHugh Dickins continue; 12191da177e4SLinus Torvalds } 1220aaa46865SHugh Dickins if (swap_count(swcount) && start_mm != &init_mm) 1221aaa46865SHugh Dickins retval = unuse_mm(start_mm, entry, page); 1222aaa46865SHugh Dickins 1223355cfa73SKAMEZAWA Hiroyuki if (swap_count(*swap_map)) { 12241da177e4SLinus Torvalds int set_start_mm = (*swap_map >= swcount); 12251da177e4SLinus Torvalds struct list_head *p = &start_mm->mmlist; 12261da177e4SLinus Torvalds struct mm_struct *new_start_mm = start_mm; 12271da177e4SLinus Torvalds struct mm_struct *prev_mm = start_mm; 12281da177e4SLinus Torvalds struct mm_struct *mm; 12291da177e4SLinus Torvalds 12301da177e4SLinus Torvalds atomic_inc(&new_start_mm->mm_users); 12311da177e4SLinus Torvalds atomic_inc(&prev_mm->mm_users); 12321da177e4SLinus Torvalds spin_lock(&mmlist_lock); 1233aaa46865SHugh Dickins while (swap_count(*swap_map) && !retval && 12341da177e4SLinus Torvalds (p = p->next) != &start_mm->mmlist) { 12351da177e4SLinus Torvalds mm = list_entry(p, struct mm_struct, mmlist); 123670af7c5cSHugh Dickins if (!atomic_inc_not_zero(&mm->mm_users)) 12371da177e4SLinus Torvalds continue; 12381da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 12391da177e4SLinus Torvalds mmput(prev_mm); 12401da177e4SLinus Torvalds prev_mm = mm; 12411da177e4SLinus Torvalds 12421da177e4SLinus Torvalds cond_resched(); 12431da177e4SLinus Torvalds 12441da177e4SLinus Torvalds swcount = *swap_map; 1245355cfa73SKAMEZAWA Hiroyuki if (!swap_count(swcount)) /* any usage ? */ 12461da177e4SLinus Torvalds ; 1247aaa46865SHugh Dickins else if (mm == &init_mm) 12481da177e4SLinus Torvalds set_start_mm = 1; 1249aaa46865SHugh Dickins else 12501da177e4SLinus Torvalds retval = unuse_mm(mm, entry, page); 1251355cfa73SKAMEZAWA Hiroyuki 125232c5fc10SBo Liu if (set_start_mm && *swap_map < swcount) { 12531da177e4SLinus Torvalds mmput(new_start_mm); 12541da177e4SLinus Torvalds atomic_inc(&mm->mm_users); 12551da177e4SLinus Torvalds new_start_mm = mm; 12561da177e4SLinus Torvalds set_start_mm = 0; 12571da177e4SLinus Torvalds } 12581da177e4SLinus Torvalds spin_lock(&mmlist_lock); 12591da177e4SLinus Torvalds } 12601da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 12611da177e4SLinus Torvalds mmput(prev_mm); 12621da177e4SLinus Torvalds mmput(start_mm); 12631da177e4SLinus Torvalds start_mm = new_start_mm; 12641da177e4SLinus Torvalds } 12651da177e4SLinus Torvalds if (retval) { 12661da177e4SLinus Torvalds unlock_page(page); 12671da177e4SLinus Torvalds page_cache_release(page); 12681da177e4SLinus Torvalds break; 12691da177e4SLinus Torvalds } 12701da177e4SLinus Torvalds 12711da177e4SLinus Torvalds /* 12721da177e4SLinus Torvalds * If a reference remains (rare), we would like to leave 12731da177e4SLinus Torvalds * the page in the swap cache; but try_to_unmap could 12741da177e4SLinus Torvalds * then re-duplicate the entry once we drop page lock, 12751da177e4SLinus Torvalds * so we might loop indefinitely; also, that page could 12761da177e4SLinus Torvalds * not be swapped out to other storage meanwhile. 
So: 12771da177e4SLinus Torvalds * delete from cache even if there's another reference, 12781da177e4SLinus Torvalds * after ensuring that the data has been saved to disk - 12791da177e4SLinus Torvalds * since if the reference remains (rarer), it will be 12801da177e4SLinus Torvalds * read from disk into another page. Splitting into two 12811da177e4SLinus Torvalds * pages would be incorrect if swap supported "shared 12821da177e4SLinus Torvalds * private" pages, but they are handled by tmpfs files. 12835ad64688SHugh Dickins * 12845ad64688SHugh Dickins * Given how unuse_vma() targets one particular offset 12855ad64688SHugh Dickins * in an anon_vma, once the anon_vma has been determined, 12865ad64688SHugh Dickins * this splitting happens to be just what is needed to 12875ad64688SHugh Dickins * handle where KSM pages have been swapped out: re-reading 12885ad64688SHugh Dickins * is unnecessarily slow, but we can fix that later on. 12891da177e4SLinus Torvalds */ 1290355cfa73SKAMEZAWA Hiroyuki if (swap_count(*swap_map) && 1291355cfa73SKAMEZAWA Hiroyuki PageDirty(page) && PageSwapCache(page)) { 12921da177e4SLinus Torvalds struct writeback_control wbc = { 12931da177e4SLinus Torvalds .sync_mode = WB_SYNC_NONE, 12941da177e4SLinus Torvalds }; 12951da177e4SLinus Torvalds 12961da177e4SLinus Torvalds swap_writepage(page, &wbc); 12971da177e4SLinus Torvalds lock_page(page); 12981da177e4SLinus Torvalds wait_on_page_writeback(page); 12991da177e4SLinus Torvalds } 130068bdc8d6SHugh Dickins 130168bdc8d6SHugh Dickins /* 130268bdc8d6SHugh Dickins * It is conceivable that a racing task removed this page from 130368bdc8d6SHugh Dickins * swap cache just before we acquired the page lock at the top, 130468bdc8d6SHugh Dickins * or while we dropped it in unuse_mm(). The page might even 130568bdc8d6SHugh Dickins * be back in swap cache on another swap area: that we must not 130668bdc8d6SHugh Dickins * delete, since it may not have been written out to swap yet. 130768bdc8d6SHugh Dickins */ 130868bdc8d6SHugh Dickins if (PageSwapCache(page) && 130968bdc8d6SHugh Dickins likely(page_private(page) == entry.val)) 13101da177e4SLinus Torvalds delete_from_swap_cache(page); 13111da177e4SLinus Torvalds 13121da177e4SLinus Torvalds /* 13131da177e4SLinus Torvalds * So we could skip searching mms once swap count went 13141da177e4SLinus Torvalds * to 1, we did not mark any present ptes as dirty: must 13152706a1b8SAnderson Briglia * mark page dirty so shrink_page_list will preserve it. 13161da177e4SLinus Torvalds */ 13171da177e4SLinus Torvalds SetPageDirty(page); 13181da177e4SLinus Torvalds unlock_page(page); 13191da177e4SLinus Torvalds page_cache_release(page); 13201da177e4SLinus Torvalds 13211da177e4SLinus Torvalds /* 13221da177e4SLinus Torvalds * Make sure that we aren't completely killing 13231da177e4SLinus Torvalds * interactive performance. 13241da177e4SLinus Torvalds */ 13251da177e4SLinus Torvalds cond_resched(); 132638b5faf4SDan Magenheimer if (frontswap && pages_to_unuse > 0) { 132738b5faf4SDan Magenheimer if (!--pages_to_unuse) 132838b5faf4SDan Magenheimer break; 132938b5faf4SDan Magenheimer } 13301da177e4SLinus Torvalds } 13311da177e4SLinus Torvalds 13321da177e4SLinus Torvalds mmput(start_mm); 13331da177e4SLinus Torvalds return retval; 13341da177e4SLinus Torvalds } 13351da177e4SLinus Torvalds 13361da177e4SLinus Torvalds /* 13375d337b91SHugh Dickins * After a successful try_to_unuse, if no swap is now in use, we know 13385d337b91SHugh Dickins * we can empty the mmlist. swap_lock must be held on entry and exit. 
13395d337b91SHugh Dickins * Note that mmlist_lock nests inside swap_lock, and an mm must be 13401da177e4SLinus Torvalds * added to the mmlist just after page_duplicate - before would be racy. 13411da177e4SLinus Torvalds */ 13421da177e4SLinus Torvalds static void drain_mmlist(void) 13431da177e4SLinus Torvalds { 13441da177e4SLinus Torvalds struct list_head *p, *next; 1345efa90a98SHugh Dickins unsigned int type; 13461da177e4SLinus Torvalds 1347efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) 1348efa90a98SHugh Dickins if (swap_info[type]->inuse_pages) 13491da177e4SLinus Torvalds return; 13501da177e4SLinus Torvalds spin_lock(&mmlist_lock); 13511da177e4SLinus Torvalds list_for_each_safe(p, next, &init_mm.mmlist) 13521da177e4SLinus Torvalds list_del_init(p); 13531da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 13541da177e4SLinus Torvalds } 13551da177e4SLinus Torvalds 13561da177e4SLinus Torvalds /* 13571da177e4SLinus Torvalds * Use this swapdev's extent info to locate the (PAGE_SIZE) block which 1358d4906e1aSLee Schermerhorn * corresponds to page offset for the specified swap entry. 1359d4906e1aSLee Schermerhorn * Note that the type of this function is sector_t, but it returns page offset 1360d4906e1aSLee Schermerhorn * into the bdev, not sector offset. 13611da177e4SLinus Torvalds */ 1362d4906e1aSLee Schermerhorn static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev) 13631da177e4SLinus Torvalds { 1364f29ad6a9SHugh Dickins struct swap_info_struct *sis; 1365f29ad6a9SHugh Dickins struct swap_extent *start_se; 1366f29ad6a9SHugh Dickins struct swap_extent *se; 1367f29ad6a9SHugh Dickins pgoff_t offset; 1368f29ad6a9SHugh Dickins 1369efa90a98SHugh Dickins sis = swap_info[swp_type(entry)]; 1370f29ad6a9SHugh Dickins *bdev = sis->bdev; 1371f29ad6a9SHugh Dickins 1372f29ad6a9SHugh Dickins offset = swp_offset(entry); 1373f29ad6a9SHugh Dickins start_se = sis->curr_swap_extent; 1374f29ad6a9SHugh Dickins se = start_se; 13751da177e4SLinus Torvalds 13761da177e4SLinus Torvalds for ( ; ; ) { 13771da177e4SLinus Torvalds struct list_head *lh; 13781da177e4SLinus Torvalds 13791da177e4SLinus Torvalds if (se->start_page <= offset && 13801da177e4SLinus Torvalds offset < (se->start_page + se->nr_pages)) { 13811da177e4SLinus Torvalds return se->start_block + (offset - se->start_page); 13821da177e4SLinus Torvalds } 138311d31886SHugh Dickins lh = se->list.next; 13841da177e4SLinus Torvalds se = list_entry(lh, struct swap_extent, list); 13851da177e4SLinus Torvalds sis->curr_swap_extent = se; 13861da177e4SLinus Torvalds BUG_ON(se == start_se); /* It *must* be present */ 13871da177e4SLinus Torvalds } 13881da177e4SLinus Torvalds } 13891da177e4SLinus Torvalds 13901da177e4SLinus Torvalds /* 1391d4906e1aSLee Schermerhorn * Returns the page offset into bdev for the specified page's swap entry. 
1392d4906e1aSLee Schermerhorn */ 1393d4906e1aSLee Schermerhorn sector_t map_swap_page(struct page *page, struct block_device **bdev) 1394d4906e1aSLee Schermerhorn { 1395d4906e1aSLee Schermerhorn swp_entry_t entry; 1396d4906e1aSLee Schermerhorn entry.val = page_private(page); 1397d4906e1aSLee Schermerhorn return map_swap_entry(entry, bdev); 1398d4906e1aSLee Schermerhorn } 1399d4906e1aSLee Schermerhorn 1400d4906e1aSLee Schermerhorn /* 14011da177e4SLinus Torvalds * Free all of a swapdev's extent information 14021da177e4SLinus Torvalds */ 14031da177e4SLinus Torvalds static void destroy_swap_extents(struct swap_info_struct *sis) 14041da177e4SLinus Torvalds { 14059625a5f2SHugh Dickins while (!list_empty(&sis->first_swap_extent.list)) { 14061da177e4SLinus Torvalds struct swap_extent *se; 14071da177e4SLinus Torvalds 14089625a5f2SHugh Dickins se = list_entry(sis->first_swap_extent.list.next, 14091da177e4SLinus Torvalds struct swap_extent, list); 14101da177e4SLinus Torvalds list_del(&se->list); 14111da177e4SLinus Torvalds kfree(se); 14121da177e4SLinus Torvalds } 141362c230bcSMel Gorman 141462c230bcSMel Gorman if (sis->flags & SWP_FILE) { 141562c230bcSMel Gorman struct file *swap_file = sis->swap_file; 141662c230bcSMel Gorman struct address_space *mapping = swap_file->f_mapping; 141762c230bcSMel Gorman 141862c230bcSMel Gorman sis->flags &= ~SWP_FILE; 141962c230bcSMel Gorman mapping->a_ops->swap_deactivate(swap_file); 142062c230bcSMel Gorman } 14211da177e4SLinus Torvalds } 14221da177e4SLinus Torvalds 14231da177e4SLinus Torvalds /* 14241da177e4SLinus Torvalds * Add a block range (and the corresponding page range) into this swapdev's 142511d31886SHugh Dickins * extent list. The extent list is kept sorted in page order. 14261da177e4SLinus Torvalds * 142711d31886SHugh Dickins * This function rather assumes that it is called in ascending page order. 14281da177e4SLinus Torvalds */ 1429a509bc1aSMel Gorman int 14301da177e4SLinus Torvalds add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 14311da177e4SLinus Torvalds unsigned long nr_pages, sector_t start_block) 14321da177e4SLinus Torvalds { 14331da177e4SLinus Torvalds struct swap_extent *se; 14341da177e4SLinus Torvalds struct swap_extent *new_se; 14351da177e4SLinus Torvalds struct list_head *lh; 14361da177e4SLinus Torvalds 14379625a5f2SHugh Dickins if (start_page == 0) { 14389625a5f2SHugh Dickins se = &sis->first_swap_extent; 14399625a5f2SHugh Dickins sis->curr_swap_extent = se; 14409625a5f2SHugh Dickins se->start_page = 0; 14419625a5f2SHugh Dickins se->nr_pages = nr_pages; 14429625a5f2SHugh Dickins se->start_block = start_block; 14439625a5f2SHugh Dickins return 1; 14449625a5f2SHugh Dickins } else { 14459625a5f2SHugh Dickins lh = sis->first_swap_extent.list.prev; /* Highest extent */ 14461da177e4SLinus Torvalds se = list_entry(lh, struct swap_extent, list); 144711d31886SHugh Dickins BUG_ON(se->start_page + se->nr_pages != start_page); 144811d31886SHugh Dickins if (se->start_block + se->nr_pages == start_block) { 14491da177e4SLinus Torvalds /* Merge it */ 14501da177e4SLinus Torvalds se->nr_pages += nr_pages; 14511da177e4SLinus Torvalds return 0; 14521da177e4SLinus Torvalds } 14531da177e4SLinus Torvalds } 14541da177e4SLinus Torvalds 14551da177e4SLinus Torvalds /* 14561da177e4SLinus Torvalds * No merge. Insert a new extent, preserving ordering. 
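 * (For instance, a call of (256, 64, 1256) against a last extent of
 * {start_page 0, nr_pages 256, start_block 1000} merges above into a
 * single 320-page extent; we only reach this point when the new block
 * run does not continue the previous extent's.)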
14571da177e4SLinus Torvalds */ 14581da177e4SLinus Torvalds new_se = kmalloc(sizeof(*se), GFP_KERNEL); 14591da177e4SLinus Torvalds if (new_se == NULL) 14601da177e4SLinus Torvalds return -ENOMEM; 14611da177e4SLinus Torvalds new_se->start_page = start_page; 14621da177e4SLinus Torvalds new_se->nr_pages = nr_pages; 14631da177e4SLinus Torvalds new_se->start_block = start_block; 14641da177e4SLinus Torvalds 14659625a5f2SHugh Dickins list_add_tail(&new_se->list, &sis->first_swap_extent.list); 146653092a74SHugh Dickins return 1; 14671da177e4SLinus Torvalds } 14681da177e4SLinus Torvalds 14691da177e4SLinus Torvalds /* 14701da177e4SLinus Torvalds * A `swap extent' is a simple thing which maps a contiguous range of pages 14711da177e4SLinus Torvalds * onto a contiguous range of disk blocks. An ordered list of swap extents 14721da177e4SLinus Torvalds * is built at swapon time and is then used at swap_writepage/swap_readpage 14731da177e4SLinus Torvalds * time for locating where on disk a page belongs. 14741da177e4SLinus Torvalds * 14751da177e4SLinus Torvalds * If the swapfile is an S_ISBLK block device, a single extent is installed. 14761da177e4SLinus Torvalds * This is done so that the main operating code can treat S_ISBLK and S_ISREG 14771da177e4SLinus Torvalds * swap files identically. 14781da177e4SLinus Torvalds * 14791da177e4SLinus Torvalds * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap 14801da177e4SLinus Torvalds * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK 14811da177e4SLinus Torvalds * swapfiles are handled *identically* after swapon time. 14821da177e4SLinus Torvalds * 14831da177e4SLinus Torvalds * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks 14841da177e4SLinus Torvalds * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If 14851da177e4SLinus Torvalds * some stray blocks are found which do not fall within the PAGE_SIZE alignment 14861da177e4SLinus Torvalds * requirements, they are simply tossed out - we will never use those blocks 14871da177e4SLinus Torvalds * for swapping. 14881da177e4SLinus Torvalds * 1489b0d9bcd4SHugh Dickins * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This 14901da177e4SLinus Torvalds * prevents root from shooting her foot off by ftruncating an in-use swapfile, 14911da177e4SLinus Torvalds * which will scribble on the fs. 14921da177e4SLinus Torvalds * 14931da177e4SLinus Torvalds * The amount of disk space which a single swap extent represents varies. 14941da177e4SLinus Torvalds * Typically it is in the 1-4 megabyte range. So we can have hundreds of 14951da177e4SLinus Torvalds * extents in the list. To avoid much list walking, we cache the previous 14961da177e4SLinus Torvalds * search location in `curr_swap_extent', and start new searches from there. 14971da177e4SLinus Torvalds * This is extremely effective. The average number of iterations in 14981da177e4SLinus Torvalds * map_swap_page() has been measured at about 0.3 per page. - akpm. 
14991da177e4SLinus Torvalds */ 150053092a74SHugh Dickins static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) 15011da177e4SLinus Torvalds { 150262c230bcSMel Gorman struct file *swap_file = sis->swap_file; 150362c230bcSMel Gorman struct address_space *mapping = swap_file->f_mapping; 150462c230bcSMel Gorman struct inode *inode = mapping->host; 15051da177e4SLinus Torvalds int ret; 15061da177e4SLinus Torvalds 15071da177e4SLinus Torvalds if (S_ISBLK(inode->i_mode)) { 15081da177e4SLinus Torvalds ret = add_swap_extent(sis, 0, sis->max, 0); 150953092a74SHugh Dickins *span = sis->pages; 1510a509bc1aSMel Gorman return ret; 15111da177e4SLinus Torvalds } 15121da177e4SLinus Torvalds 151362c230bcSMel Gorman if (mapping->a_ops->swap_activate) { 1514a509bc1aSMel Gorman ret = mapping->a_ops->swap_activate(sis, swap_file, span); 151562c230bcSMel Gorman if (!ret) { 151662c230bcSMel Gorman sis->flags |= SWP_FILE; 151762c230bcSMel Gorman ret = add_swap_extent(sis, 0, sis->max, 0); 151862c230bcSMel Gorman *span = sis->pages; 151962c230bcSMel Gorman } 15209625a5f2SHugh Dickins return ret; 1521a509bc1aSMel Gorman } 1522a509bc1aSMel Gorman 1523a509bc1aSMel Gorman return generic_swapfile_activate(sis, swap_file, span); 15241da177e4SLinus Torvalds } 15251da177e4SLinus Torvalds 1526cf0cac0aSCesar Eduardo Barros static void _enable_swap_info(struct swap_info_struct *p, int prio, 15274f89849dSMinchan Kim unsigned char *swap_map) 152840531542SCesar Eduardo Barros { 152940531542SCesar Eduardo Barros int i, prev; 153040531542SCesar Eduardo Barros 153140531542SCesar Eduardo Barros if (prio >= 0) 153240531542SCesar Eduardo Barros p->prio = prio; 153340531542SCesar Eduardo Barros else 153440531542SCesar Eduardo Barros p->prio = --least_priority; 153540531542SCesar Eduardo Barros p->swap_map = swap_map; 153640531542SCesar Eduardo Barros p->flags |= SWP_WRITEOK; 1537ec8acf20SShaohua Li atomic_long_add(p->pages, &nr_swap_pages); 153840531542SCesar Eduardo Barros total_swap_pages += p->pages; 153940531542SCesar Eduardo Barros 154040531542SCesar Eduardo Barros /* insert swap space into swap_list: */ 154140531542SCesar Eduardo Barros prev = -1; 154240531542SCesar Eduardo Barros for (i = swap_list.head; i >= 0; i = swap_info[i]->next) { 154340531542SCesar Eduardo Barros if (p->prio >= swap_info[i]->prio) 154440531542SCesar Eduardo Barros break; 154540531542SCesar Eduardo Barros prev = i; 154640531542SCesar Eduardo Barros } 154740531542SCesar Eduardo Barros p->next = i; 154840531542SCesar Eduardo Barros if (prev < 0) 154940531542SCesar Eduardo Barros swap_list.head = swap_list.next = p->type; 155040531542SCesar Eduardo Barros else 155140531542SCesar Eduardo Barros swap_info[prev]->next = p->type; 1552cf0cac0aSCesar Eduardo Barros } 1553cf0cac0aSCesar Eduardo Barros 1554cf0cac0aSCesar Eduardo Barros static void enable_swap_info(struct swap_info_struct *p, int prio, 1555cf0cac0aSCesar Eduardo Barros unsigned char *swap_map, 1556cf0cac0aSCesar Eduardo Barros unsigned long *frontswap_map) 1557cf0cac0aSCesar Eduardo Barros { 15584f89849dSMinchan Kim frontswap_init(p->type, frontswap_map); 1559cf0cac0aSCesar Eduardo Barros spin_lock(&swap_lock); 1560ec8acf20SShaohua Li spin_lock(&p->lock); 15614f89849dSMinchan Kim _enable_swap_info(p, prio, swap_map); 1562ec8acf20SShaohua Li spin_unlock(&p->lock); 1563cf0cac0aSCesar Eduardo Barros spin_unlock(&swap_lock); 1564cf0cac0aSCesar Eduardo Barros } 1565cf0cac0aSCesar Eduardo Barros 1566cf0cac0aSCesar Eduardo Barros static void reinsert_swap_info(struct swap_info_struct *p) 
1567cf0cac0aSCesar Eduardo Barros { 1568cf0cac0aSCesar Eduardo Barros spin_lock(&swap_lock); 1569ec8acf20SShaohua Li spin_lock(&p->lock); 15704f89849dSMinchan Kim _enable_swap_info(p, p->prio, p->swap_map); 1571ec8acf20SShaohua Li spin_unlock(&p->lock); 157240531542SCesar Eduardo Barros spin_unlock(&swap_lock); 157340531542SCesar Eduardo Barros } 157440531542SCesar Eduardo Barros 1575c4ea37c2SHeiko Carstens SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) 15761da177e4SLinus Torvalds { 15771da177e4SLinus Torvalds struct swap_info_struct *p = NULL; 15788d69aaeeSHugh Dickins unsigned char *swap_map; 15794f89849dSMinchan Kim unsigned long *frontswap_map; 15801da177e4SLinus Torvalds struct file *swap_file, *victim; 15811da177e4SLinus Torvalds struct address_space *mapping; 15821da177e4SLinus Torvalds struct inode *inode; 158391a27b2aSJeff Layton struct filename *pathname; 15841da177e4SLinus Torvalds int i, type, prev; 15851da177e4SLinus Torvalds int err; 15861da177e4SLinus Torvalds 15871da177e4SLinus Torvalds if (!capable(CAP_SYS_ADMIN)) 15881da177e4SLinus Torvalds return -EPERM; 15891da177e4SLinus Torvalds 1590191c5424SAl Viro BUG_ON(!current->mm); 1591191c5424SAl Viro 15921da177e4SLinus Torvalds pathname = getname(specialfile); 15931da177e4SLinus Torvalds if (IS_ERR(pathname)) 1594f58b59c1SXiaotian Feng return PTR_ERR(pathname); 15951da177e4SLinus Torvalds 1596669abf4eSJeff Layton victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); 15971da177e4SLinus Torvalds err = PTR_ERR(victim); 15981da177e4SLinus Torvalds if (IS_ERR(victim)) 15991da177e4SLinus Torvalds goto out; 16001da177e4SLinus Torvalds 16011da177e4SLinus Torvalds mapping = victim->f_mapping; 16021da177e4SLinus Torvalds prev = -1; 16035d337b91SHugh Dickins spin_lock(&swap_lock); 1604efa90a98SHugh Dickins for (type = swap_list.head; type >= 0; type = swap_info[type]->next) { 1605efa90a98SHugh Dickins p = swap_info[type]; 160622c6f8fdSHugh Dickins if (p->flags & SWP_WRITEOK) { 16071da177e4SLinus Torvalds if (p->swap_file->f_mapping == mapping) 16081da177e4SLinus Torvalds break; 16091da177e4SLinus Torvalds } 16101da177e4SLinus Torvalds prev = type; 16111da177e4SLinus Torvalds } 16121da177e4SLinus Torvalds if (type < 0) { 16131da177e4SLinus Torvalds err = -EINVAL; 16145d337b91SHugh Dickins spin_unlock(&swap_lock); 16151da177e4SLinus Torvalds goto out_dput; 16161da177e4SLinus Torvalds } 1617191c5424SAl Viro if (!security_vm_enough_memory_mm(current->mm, p->pages)) 16181da177e4SLinus Torvalds vm_unacct_memory(p->pages); 16191da177e4SLinus Torvalds else { 16201da177e4SLinus Torvalds err = -ENOMEM; 16215d337b91SHugh Dickins spin_unlock(&swap_lock); 16221da177e4SLinus Torvalds goto out_dput; 16231da177e4SLinus Torvalds } 1624efa90a98SHugh Dickins if (prev < 0) 16251da177e4SLinus Torvalds swap_list.head = p->next; 1626efa90a98SHugh Dickins else 1627efa90a98SHugh Dickins swap_info[prev]->next = p->next; 16281da177e4SLinus Torvalds if (type == swap_list.next) { 16291da177e4SLinus Torvalds /* just pick something that's safe... 
*/ 16301da177e4SLinus Torvalds swap_list.next = swap_list.head; 16311da177e4SLinus Torvalds } 1632ec8acf20SShaohua Li spin_lock(&p->lock); 163378ecba08SHugh Dickins if (p->prio < 0) { 1634efa90a98SHugh Dickins for (i = p->next; i >= 0; i = swap_info[i]->next) 1635efa90a98SHugh Dickins swap_info[i]->prio = p->prio--; 163678ecba08SHugh Dickins least_priority++; 163778ecba08SHugh Dickins } 1638ec8acf20SShaohua Li atomic_long_sub(p->pages, &nr_swap_pages); 16391da177e4SLinus Torvalds total_swap_pages -= p->pages; 16401da177e4SLinus Torvalds p->flags &= ~SWP_WRITEOK; 1641ec8acf20SShaohua Li spin_unlock(&p->lock); 16425d337b91SHugh Dickins spin_unlock(&swap_lock); 1643fb4f88dcSHugh Dickins 1644e1e12d2fSDavid Rientjes set_current_oom_origin(); 164538b5faf4SDan Magenheimer err = try_to_unuse(type, false, 0); /* force all pages to be unused */ 1646e1e12d2fSDavid Rientjes clear_current_oom_origin(); 16471da177e4SLinus Torvalds 16481da177e4SLinus Torvalds if (err) { 16491da177e4SLinus Torvalds /* re-insert swap space back into swap_list */ 1650cf0cac0aSCesar Eduardo Barros reinsert_swap_info(p); 16511da177e4SLinus Torvalds goto out_dput; 16521da177e4SLinus Torvalds } 165352b7efdbSHugh Dickins 16544cd3bb10SHugh Dickins destroy_swap_extents(p); 1655570a335bSHugh Dickins if (p->flags & SWP_CONTINUED) 1656570a335bSHugh Dickins free_swap_count_continuations(p); 1657570a335bSHugh Dickins 1658fc0abb14SIngo Molnar mutex_lock(&swapon_mutex); 16595d337b91SHugh Dickins spin_lock(&swap_lock); 1660ec8acf20SShaohua Li spin_lock(&p->lock); 16611da177e4SLinus Torvalds drain_mmlist(); 16625d337b91SHugh Dickins 16635d337b91SHugh Dickins /* wait for anyone still in scan_swap_map */ 16645d337b91SHugh Dickins p->highest_bit = 0; /* cuts scans short */ 16655d337b91SHugh Dickins while (p->flags >= SWP_SCANNING) { 1666ec8acf20SShaohua Li spin_unlock(&p->lock); 16675d337b91SHugh Dickins spin_unlock(&swap_lock); 166813e4b57fSNishanth Aravamudan schedule_timeout_uninterruptible(1); 16695d337b91SHugh Dickins spin_lock(&swap_lock); 1670ec8acf20SShaohua Li spin_lock(&p->lock); 16715d337b91SHugh Dickins } 16725d337b91SHugh Dickins 16731da177e4SLinus Torvalds swap_file = p->swap_file; 16741da177e4SLinus Torvalds p->swap_file = NULL; 16751da177e4SLinus Torvalds p->max = 0; 16761da177e4SLinus Torvalds swap_map = p->swap_map; 16771da177e4SLinus Torvalds p->swap_map = NULL; 16781da177e4SLinus Torvalds p->flags = 0; 16794f89849dSMinchan Kim frontswap_map = frontswap_map_get(p); 16804f89849dSMinchan Kim frontswap_map_set(p, NULL); 1681ec8acf20SShaohua Li spin_unlock(&p->lock); 16825d337b91SHugh Dickins spin_unlock(&swap_lock); 16834f89849dSMinchan Kim frontswap_invalidate_area(type); 1684fc0abb14SIngo Molnar mutex_unlock(&swapon_mutex); 16851da177e4SLinus Torvalds vfree(swap_map); 16864f89849dSMinchan Kim vfree(frontswap_map); 168727a7faa0SKAMEZAWA Hiroyuki /* Destroy swap account informatin */ 168827a7faa0SKAMEZAWA Hiroyuki swap_cgroup_swapoff(type); 168927a7faa0SKAMEZAWA Hiroyuki 16901da177e4SLinus Torvalds inode = mapping->host; 16911da177e4SLinus Torvalds if (S_ISBLK(inode->i_mode)) { 16921da177e4SLinus Torvalds struct block_device *bdev = I_BDEV(inode); 16931da177e4SLinus Torvalds set_blocksize(bdev, p->old_block_size); 1694e525fd89STejun Heo blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 16951da177e4SLinus Torvalds } else { 16961b1dcc1bSJes Sorensen mutex_lock(&inode->i_mutex); 16971da177e4SLinus Torvalds inode->i_flags &= ~S_SWAPFILE; 16981b1dcc1bSJes Sorensen mutex_unlock(&inode->i_mutex); 16991da177e4SLinus Torvalds } 
17001da177e4SLinus Torvalds filp_close(swap_file, NULL); 17011da177e4SLinus Torvalds err = 0; 170266d7dd51SKay Sievers atomic_inc(&proc_poll_event); 170366d7dd51SKay Sievers wake_up_interruptible(&proc_poll_wait); 17041da177e4SLinus Torvalds 17051da177e4SLinus Torvalds out_dput: 17061da177e4SLinus Torvalds filp_close(victim, NULL); 17071da177e4SLinus Torvalds out: 1708f58b59c1SXiaotian Feng putname(pathname); 17091da177e4SLinus Torvalds return err; 17101da177e4SLinus Torvalds } 17111da177e4SLinus Torvalds 17121da177e4SLinus Torvalds #ifdef CONFIG_PROC_FS 171366d7dd51SKay Sievers static unsigned swaps_poll(struct file *file, poll_table *wait) 171466d7dd51SKay Sievers { 1715f1514638SKay Sievers struct seq_file *seq = file->private_data; 171666d7dd51SKay Sievers 171766d7dd51SKay Sievers poll_wait(file, &proc_poll_wait, wait); 171866d7dd51SKay Sievers 1719f1514638SKay Sievers if (seq->poll_event != atomic_read(&proc_poll_event)) { 1720f1514638SKay Sievers seq->poll_event = atomic_read(&proc_poll_event); 172166d7dd51SKay Sievers return POLLIN | POLLRDNORM | POLLERR | POLLPRI; 172266d7dd51SKay Sievers } 172366d7dd51SKay Sievers 172466d7dd51SKay Sievers return POLLIN | POLLRDNORM; 172566d7dd51SKay Sievers } 172666d7dd51SKay Sievers 17271da177e4SLinus Torvalds /* iterator */ 17281da177e4SLinus Torvalds static void *swap_start(struct seq_file *swap, loff_t *pos) 17291da177e4SLinus Torvalds { 1730efa90a98SHugh Dickins struct swap_info_struct *si; 1731efa90a98SHugh Dickins int type; 17321da177e4SLinus Torvalds loff_t l = *pos; 17331da177e4SLinus Torvalds 1734fc0abb14SIngo Molnar mutex_lock(&swapon_mutex); 17351da177e4SLinus Torvalds 1736881e4aabSSuleiman Souhlal if (!l) 1737881e4aabSSuleiman Souhlal return SEQ_START_TOKEN; 1738881e4aabSSuleiman Souhlal 1739efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) { 1740efa90a98SHugh Dickins smp_rmb(); /* read nr_swapfiles before swap_info[type] */ 1741efa90a98SHugh Dickins si = swap_info[type]; 1742efa90a98SHugh Dickins if (!(si->flags & SWP_USED) || !si->swap_map) 17431da177e4SLinus Torvalds continue; 1744881e4aabSSuleiman Souhlal if (!--l) 1745efa90a98SHugh Dickins return si; 17461da177e4SLinus Torvalds } 17471da177e4SLinus Torvalds 17481da177e4SLinus Torvalds return NULL; 17491da177e4SLinus Torvalds } 17501da177e4SLinus Torvalds 17511da177e4SLinus Torvalds static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) 17521da177e4SLinus Torvalds { 1753efa90a98SHugh Dickins struct swap_info_struct *si = v; 1754efa90a98SHugh Dickins int type; 17551da177e4SLinus Torvalds 1756881e4aabSSuleiman Souhlal if (v == SEQ_START_TOKEN) 1757efa90a98SHugh Dickins type = 0; 1758efa90a98SHugh Dickins else 1759efa90a98SHugh Dickins type = si->type + 1; 1760881e4aabSSuleiman Souhlal 1761efa90a98SHugh Dickins for (; type < nr_swapfiles; type++) { 1762efa90a98SHugh Dickins smp_rmb(); /* read nr_swapfiles before swap_info[type] */ 1763efa90a98SHugh Dickins si = swap_info[type]; 1764efa90a98SHugh Dickins if (!(si->flags & SWP_USED) || !si->swap_map) 17651da177e4SLinus Torvalds continue; 17661da177e4SLinus Torvalds ++*pos; 1767efa90a98SHugh Dickins return si; 17681da177e4SLinus Torvalds } 17691da177e4SLinus Torvalds 17701da177e4SLinus Torvalds return NULL; 17711da177e4SLinus Torvalds } 17721da177e4SLinus Torvalds 17731da177e4SLinus Torvalds static void swap_stop(struct seq_file *swap, void *v) 17741da177e4SLinus Torvalds { 1775fc0abb14SIngo Molnar mutex_unlock(&swapon_mutex); 17761da177e4SLinus Torvalds } 17771da177e4SLinus Torvalds 17781da177e4SLinus 
Torvalds static int swap_show(struct seq_file *swap, void *v) 17791da177e4SLinus Torvalds { 1780efa90a98SHugh Dickins struct swap_info_struct *si = v; 17811da177e4SLinus Torvalds struct file *file; 17821da177e4SLinus Torvalds int len; 17831da177e4SLinus Torvalds 1784efa90a98SHugh Dickins if (si == SEQ_START_TOKEN) { 17851da177e4SLinus Torvalds seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); 1786881e4aabSSuleiman Souhlal return 0; 1787881e4aabSSuleiman Souhlal } 17881da177e4SLinus Torvalds 1789efa90a98SHugh Dickins file = si->swap_file; 1790c32c2f63SJan Blunck len = seq_path(swap, &file->f_path, " \t\n\\"); 17916eb396dcSHugh Dickins seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", 17921da177e4SLinus Torvalds len < 40 ? 40 - len : 1, " ", 1793496ad9aaSAl Viro S_ISBLK(file_inode(file)->i_mode) ? 17941da177e4SLinus Torvalds "partition" : "file\t", 1795efa90a98SHugh Dickins si->pages << (PAGE_SHIFT - 10), 1796efa90a98SHugh Dickins si->inuse_pages << (PAGE_SHIFT - 10), 1797efa90a98SHugh Dickins si->prio); 17981da177e4SLinus Torvalds return 0; 17991da177e4SLinus Torvalds } 18001da177e4SLinus Torvalds 180115ad7cdcSHelge Deller static const struct seq_operations swaps_op = { 18021da177e4SLinus Torvalds .start = swap_start, 18031da177e4SLinus Torvalds .next = swap_next, 18041da177e4SLinus Torvalds .stop = swap_stop, 18051da177e4SLinus Torvalds .show = swap_show 18061da177e4SLinus Torvalds }; 18071da177e4SLinus Torvalds 18081da177e4SLinus Torvalds static int swaps_open(struct inode *inode, struct file *file) 18091da177e4SLinus Torvalds { 1810f1514638SKay Sievers struct seq_file *seq; 181166d7dd51SKay Sievers int ret; 181266d7dd51SKay Sievers 181366d7dd51SKay Sievers ret = seq_open(file, &swaps_op); 1814f1514638SKay Sievers if (ret) 181566d7dd51SKay Sievers return ret; 181666d7dd51SKay Sievers 1817f1514638SKay Sievers seq = file->private_data; 1818f1514638SKay Sievers seq->poll_event = atomic_read(&proc_poll_event); 1819f1514638SKay Sievers return 0; 18201da177e4SLinus Torvalds } 18211da177e4SLinus Torvalds 182215ad7cdcSHelge Deller static const struct file_operations proc_swaps_operations = { 18231da177e4SLinus Torvalds .open = swaps_open, 18241da177e4SLinus Torvalds .read = seq_read, 18251da177e4SLinus Torvalds .llseek = seq_lseek, 18261da177e4SLinus Torvalds .release = seq_release, 182766d7dd51SKay Sievers .poll = swaps_poll, 18281da177e4SLinus Torvalds }; 18291da177e4SLinus Torvalds 18301da177e4SLinus Torvalds static int __init procswaps_init(void) 18311da177e4SLinus Torvalds { 18323d71f86fSDenis V. 
Lunev proc_create("swaps", 0, NULL, &proc_swaps_operations); 18331da177e4SLinus Torvalds return 0; 18341da177e4SLinus Torvalds } 18351da177e4SLinus Torvalds __initcall(procswaps_init); 18361da177e4SLinus Torvalds #endif /* CONFIG_PROC_FS */ 18371da177e4SLinus Torvalds 18381796316aSJan Beulich #ifdef MAX_SWAPFILES_CHECK 18391796316aSJan Beulich static int __init max_swapfiles_check(void) 18401796316aSJan Beulich { 18411796316aSJan Beulich MAX_SWAPFILES_CHECK(); 18421796316aSJan Beulich return 0; 18431796316aSJan Beulich } 18441796316aSJan Beulich late_initcall(max_swapfiles_check); 18451796316aSJan Beulich #endif 18461796316aSJan Beulich 184753cbb243SCesar Eduardo Barros static struct swap_info_struct *alloc_swap_info(void) 18481da177e4SLinus Torvalds { 18491da177e4SLinus Torvalds struct swap_info_struct *p; 18501da177e4SLinus Torvalds unsigned int type; 1851efa90a98SHugh Dickins 1852efa90a98SHugh Dickins p = kzalloc(sizeof(*p), GFP_KERNEL); 1853efa90a98SHugh Dickins if (!p) 185453cbb243SCesar Eduardo Barros return ERR_PTR(-ENOMEM); 1855efa90a98SHugh Dickins 18565d337b91SHugh Dickins spin_lock(&swap_lock); 1857efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) { 1858efa90a98SHugh Dickins if (!(swap_info[type]->flags & SWP_USED)) 18591da177e4SLinus Torvalds break; 1860efa90a98SHugh Dickins } 18610697212aSChristoph Lameter if (type >= MAX_SWAPFILES) { 18625d337b91SHugh Dickins spin_unlock(&swap_lock); 1863efa90a98SHugh Dickins kfree(p); 1864730c0581SCesar Eduardo Barros return ERR_PTR(-EPERM); 18651da177e4SLinus Torvalds } 1866efa90a98SHugh Dickins if (type >= nr_swapfiles) { 1867efa90a98SHugh Dickins p->type = type; 1868efa90a98SHugh Dickins swap_info[type] = p; 1869efa90a98SHugh Dickins /* 1870efa90a98SHugh Dickins * Write swap_info[type] before nr_swapfiles, in case a 1871efa90a98SHugh Dickins * racing procfs swap_start() or swap_next() is reading them. 1872efa90a98SHugh Dickins * (We never shrink nr_swapfiles, we never free this entry.) 1873efa90a98SHugh Dickins */ 1874efa90a98SHugh Dickins smp_wmb(); 1875efa90a98SHugh Dickins nr_swapfiles++; 1876efa90a98SHugh Dickins } else { 1877efa90a98SHugh Dickins kfree(p); 1878efa90a98SHugh Dickins p = swap_info[type]; 1879efa90a98SHugh Dickins /* 1880efa90a98SHugh Dickins * Do not memset this entry: a racing procfs swap_next() 1881efa90a98SHugh Dickins * would be relying on p->type to remain valid. 
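 * Slots are recycled rather than freed: swapoff leaves the entry in
 * place with p->flags cleared, and the SWP_USED scan above picks such
 * a slot up again on a later swapon, so nr_swapfiles never shrinks.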
1882efa90a98SHugh Dickins */ 1883efa90a98SHugh Dickins } 18849625a5f2SHugh Dickins INIT_LIST_HEAD(&p->first_swap_extent.list); 18851da177e4SLinus Torvalds p->flags = SWP_USED; 18861da177e4SLinus Torvalds p->next = -1; 18875d337b91SHugh Dickins spin_unlock(&swap_lock); 1888ec8acf20SShaohua Li spin_lock_init(&p->lock); 1889efa90a98SHugh Dickins 189053cbb243SCesar Eduardo Barros return p; 189153cbb243SCesar Eduardo Barros } 189253cbb243SCesar Eduardo Barros 18934d0e1e10SCesar Eduardo Barros static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) 18944d0e1e10SCesar Eduardo Barros { 18954d0e1e10SCesar Eduardo Barros int error; 18964d0e1e10SCesar Eduardo Barros 18974d0e1e10SCesar Eduardo Barros if (S_ISBLK(inode->i_mode)) { 18984d0e1e10SCesar Eduardo Barros p->bdev = bdgrab(I_BDEV(inode)); 18994d0e1e10SCesar Eduardo Barros error = blkdev_get(p->bdev, 19004d0e1e10SCesar Eduardo Barros FMODE_READ | FMODE_WRITE | FMODE_EXCL, 19014d0e1e10SCesar Eduardo Barros sys_swapon); 19024d0e1e10SCesar Eduardo Barros if (error < 0) { 19034d0e1e10SCesar Eduardo Barros p->bdev = NULL; 190487ade72aSCesar Eduardo Barros return -EINVAL; 19054d0e1e10SCesar Eduardo Barros } 19064d0e1e10SCesar Eduardo Barros p->old_block_size = block_size(p->bdev); 19074d0e1e10SCesar Eduardo Barros error = set_blocksize(p->bdev, PAGE_SIZE); 19084d0e1e10SCesar Eduardo Barros if (error < 0) 190987ade72aSCesar Eduardo Barros return error; 19104d0e1e10SCesar Eduardo Barros p->flags |= SWP_BLKDEV; 19114d0e1e10SCesar Eduardo Barros } else if (S_ISREG(inode->i_mode)) { 19124d0e1e10SCesar Eduardo Barros p->bdev = inode->i_sb->s_bdev; 19134d0e1e10SCesar Eduardo Barros mutex_lock(&inode->i_mutex); 191487ade72aSCesar Eduardo Barros if (IS_SWAPFILE(inode)) 191587ade72aSCesar Eduardo Barros return -EBUSY; 191687ade72aSCesar Eduardo Barros } else 191787ade72aSCesar Eduardo Barros return -EINVAL; 19184d0e1e10SCesar Eduardo Barros 19194d0e1e10SCesar Eduardo Barros return 0; 19204d0e1e10SCesar Eduardo Barros } 19214d0e1e10SCesar Eduardo Barros 1922ca8bd38bSCesar Eduardo Barros static unsigned long read_swap_header(struct swap_info_struct *p, 1923ca8bd38bSCesar Eduardo Barros union swap_header *swap_header, 1924ca8bd38bSCesar Eduardo Barros struct inode *inode) 1925ca8bd38bSCesar Eduardo Barros { 1926ca8bd38bSCesar Eduardo Barros int i; 1927ca8bd38bSCesar Eduardo Barros unsigned long maxpages; 1928ca8bd38bSCesar Eduardo Barros unsigned long swapfilepages; 1929ca8bd38bSCesar Eduardo Barros 1930ca8bd38bSCesar Eduardo Barros if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { 1931ca8bd38bSCesar Eduardo Barros printk(KERN_ERR "Unable to find swap-space signature\n"); 193238719025SCesar Eduardo Barros return 0; 1933ca8bd38bSCesar Eduardo Barros } 1934ca8bd38bSCesar Eduardo Barros 1935ca8bd38bSCesar Eduardo Barros /* swap partition endianess hack... 
*/ 1936ca8bd38bSCesar Eduardo Barros if (swab32(swap_header->info.version) == 1) { 1937ca8bd38bSCesar Eduardo Barros swab32s(&swap_header->info.version); 1938ca8bd38bSCesar Eduardo Barros swab32s(&swap_header->info.last_page); 1939ca8bd38bSCesar Eduardo Barros swab32s(&swap_header->info.nr_badpages); 1940ca8bd38bSCesar Eduardo Barros for (i = 0; i < swap_header->info.nr_badpages; i++) 1941ca8bd38bSCesar Eduardo Barros swab32s(&swap_header->info.badpages[i]); 1942ca8bd38bSCesar Eduardo Barros } 1943ca8bd38bSCesar Eduardo Barros /* Check the swap header's sub-version */ 1944ca8bd38bSCesar Eduardo Barros if (swap_header->info.version != 1) { 1945ca8bd38bSCesar Eduardo Barros printk(KERN_WARNING 1946ca8bd38bSCesar Eduardo Barros "Unable to handle swap header version %d\n", 1947ca8bd38bSCesar Eduardo Barros swap_header->info.version); 194838719025SCesar Eduardo Barros return 0; 1949ca8bd38bSCesar Eduardo Barros } 1950ca8bd38bSCesar Eduardo Barros 1951ca8bd38bSCesar Eduardo Barros p->lowest_bit = 1; 1952ca8bd38bSCesar Eduardo Barros p->cluster_next = 1; 1953ca8bd38bSCesar Eduardo Barros p->cluster_nr = 0; 1954ca8bd38bSCesar Eduardo Barros 1955ca8bd38bSCesar Eduardo Barros /* 1956ca8bd38bSCesar Eduardo Barros * Find out how many pages are allowed for a single swap 19579b15b817SHugh Dickins * device. There are two limiting factors: 1) the number 1958a2c16d6cSHugh Dickins * of bits for the swap offset in the swp_entry_t type, and 1959a2c16d6cSHugh Dickins * 2) the number of bits in the swap pte as defined by the 19609b15b817SHugh Dickins * different architectures. In order to find the 1961a2c16d6cSHugh Dickins * largest possible bit mask, a swap entry with swap type 0 1962ca8bd38bSCesar Eduardo Barros * and swap offset ~0UL is created, encoded to a swap pte, 1963a2c16d6cSHugh Dickins * decoded to a swp_entry_t again, and finally the swap 1964ca8bd38bSCesar Eduardo Barros * offset is extracted. This will mask all the bits from 1965ca8bd38bSCesar Eduardo Barros * the initial ~0UL mask that can't be encoded in either 1966ca8bd38bSCesar Eduardo Barros * the swp_entry_t or the architecture definition of a 19679b15b817SHugh Dickins * swap pte. 
1968ca8bd38bSCesar Eduardo Barros */ 1969ca8bd38bSCesar Eduardo Barros maxpages = swp_offset(pte_to_swp_entry( 19709b15b817SHugh Dickins swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; 1971ca8bd38bSCesar Eduardo Barros if (maxpages > swap_header->info.last_page) { 1972ca8bd38bSCesar Eduardo Barros maxpages = swap_header->info.last_page + 1; 1973ca8bd38bSCesar Eduardo Barros /* p->max is an unsigned int: don't overflow it */ 1974ca8bd38bSCesar Eduardo Barros if ((unsigned int)maxpages == 0) 1975ca8bd38bSCesar Eduardo Barros maxpages = UINT_MAX; 1976ca8bd38bSCesar Eduardo Barros } 1977ca8bd38bSCesar Eduardo Barros p->highest_bit = maxpages - 1; 1978ca8bd38bSCesar Eduardo Barros 1979ca8bd38bSCesar Eduardo Barros if (!maxpages) 198038719025SCesar Eduardo Barros return 0; 1981ca8bd38bSCesar Eduardo Barros swapfilepages = i_size_read(inode) >> PAGE_SHIFT; 1982ca8bd38bSCesar Eduardo Barros if (swapfilepages && maxpages > swapfilepages) { 1983ca8bd38bSCesar Eduardo Barros printk(KERN_WARNING 1984ca8bd38bSCesar Eduardo Barros "Swap area shorter than signature indicates\n"); 198538719025SCesar Eduardo Barros return 0; 1986ca8bd38bSCesar Eduardo Barros } 1987ca8bd38bSCesar Eduardo Barros if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) 198838719025SCesar Eduardo Barros return 0; 1989ca8bd38bSCesar Eduardo Barros if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 199038719025SCesar Eduardo Barros return 0; 1991ca8bd38bSCesar Eduardo Barros 1992ca8bd38bSCesar Eduardo Barros return maxpages; 1993ca8bd38bSCesar Eduardo Barros } 1994ca8bd38bSCesar Eduardo Barros 1995915d4d7bSCesar Eduardo Barros static int setup_swap_map_and_extents(struct swap_info_struct *p, 1996915d4d7bSCesar Eduardo Barros union swap_header *swap_header, 1997915d4d7bSCesar Eduardo Barros unsigned char *swap_map, 1998915d4d7bSCesar Eduardo Barros unsigned long maxpages, 1999915d4d7bSCesar Eduardo Barros sector_t *span) 2000915d4d7bSCesar Eduardo Barros { 2001915d4d7bSCesar Eduardo Barros int i; 2002915d4d7bSCesar Eduardo Barros unsigned int nr_good_pages; 2003915d4d7bSCesar Eduardo Barros int nr_extents; 2004915d4d7bSCesar Eduardo Barros 2005915d4d7bSCesar Eduardo Barros nr_good_pages = maxpages - 1; /* omit header page */ 2006915d4d7bSCesar Eduardo Barros 2007915d4d7bSCesar Eduardo Barros for (i = 0; i < swap_header->info.nr_badpages; i++) { 2008915d4d7bSCesar Eduardo Barros unsigned int page_nr = swap_header->info.badpages[i]; 2009bdb8e3f6SCesar Eduardo Barros if (page_nr == 0 || page_nr > swap_header->info.last_page) 2010bdb8e3f6SCesar Eduardo Barros return -EINVAL; 2011915d4d7bSCesar Eduardo Barros if (page_nr < maxpages) { 2012915d4d7bSCesar Eduardo Barros swap_map[page_nr] = SWAP_MAP_BAD; 2013915d4d7bSCesar Eduardo Barros nr_good_pages--; 2014915d4d7bSCesar Eduardo Barros } 2015915d4d7bSCesar Eduardo Barros } 2016915d4d7bSCesar Eduardo Barros 2017915d4d7bSCesar Eduardo Barros if (nr_good_pages) { 2018915d4d7bSCesar Eduardo Barros swap_map[0] = SWAP_MAP_BAD; 2019915d4d7bSCesar Eduardo Barros p->max = maxpages; 2020915d4d7bSCesar Eduardo Barros p->pages = nr_good_pages; 2021915d4d7bSCesar Eduardo Barros nr_extents = setup_swap_extents(p, span); 2022bdb8e3f6SCesar Eduardo Barros if (nr_extents < 0) 2023bdb8e3f6SCesar Eduardo Barros return nr_extents; 2024915d4d7bSCesar Eduardo Barros nr_good_pages = p->pages; 2025915d4d7bSCesar Eduardo Barros } 2026915d4d7bSCesar Eduardo Barros if (!nr_good_pages) { 2027915d4d7bSCesar Eduardo Barros printk(KERN_WARNING "Empty swap-file\n"); 2028bdb8e3f6SCesar Eduardo Barros return 
-EINVAL; 2029915d4d7bSCesar Eduardo Barros } 2030915d4d7bSCesar Eduardo Barros 2031915d4d7bSCesar Eduardo Barros return nr_extents; 2032915d4d7bSCesar Eduardo Barros } 2033915d4d7bSCesar Eduardo Barros 2034dcf6b7ddSRafael Aquini /* 2035dcf6b7ddSRafael Aquini * Helper to sys_swapon determining if a given swap 2036dcf6b7ddSRafael Aquini * backing device queue supports DISCARD operations. 2037dcf6b7ddSRafael Aquini */ 2038dcf6b7ddSRafael Aquini static bool swap_discardable(struct swap_info_struct *si) 2039dcf6b7ddSRafael Aquini { 2040dcf6b7ddSRafael Aquini struct request_queue *q = bdev_get_queue(si->bdev); 2041dcf6b7ddSRafael Aquini 2042dcf6b7ddSRafael Aquini if (!q || !blk_queue_discard(q)) 2043dcf6b7ddSRafael Aquini return false; 2044dcf6b7ddSRafael Aquini 2045dcf6b7ddSRafael Aquini return true; 2046dcf6b7ddSRafael Aquini } 2047dcf6b7ddSRafael Aquini 204853cbb243SCesar Eduardo Barros SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) 204953cbb243SCesar Eduardo Barros { 205053cbb243SCesar Eduardo Barros struct swap_info_struct *p; 205191a27b2aSJeff Layton struct filename *name; 205253cbb243SCesar Eduardo Barros struct file *swap_file = NULL; 205353cbb243SCesar Eduardo Barros struct address_space *mapping; 205440531542SCesar Eduardo Barros int i; 205540531542SCesar Eduardo Barros int prio; 205653cbb243SCesar Eduardo Barros int error; 205753cbb243SCesar Eduardo Barros union swap_header *swap_header; 2058915d4d7bSCesar Eduardo Barros int nr_extents; 205953cbb243SCesar Eduardo Barros sector_t span; 206053cbb243SCesar Eduardo Barros unsigned long maxpages; 206153cbb243SCesar Eduardo Barros unsigned char *swap_map = NULL; 206238b5faf4SDan Magenheimer unsigned long *frontswap_map = NULL; 206353cbb243SCesar Eduardo Barros struct page *page = NULL; 206453cbb243SCesar Eduardo Barros struct inode *inode = NULL; 206553cbb243SCesar Eduardo Barros 2066d15cab97SHugh Dickins if (swap_flags & ~SWAP_FLAGS_VALID) 2067d15cab97SHugh Dickins return -EINVAL; 2068d15cab97SHugh Dickins 206953cbb243SCesar Eduardo Barros if (!capable(CAP_SYS_ADMIN)) 207053cbb243SCesar Eduardo Barros return -EPERM; 207153cbb243SCesar Eduardo Barros 207253cbb243SCesar Eduardo Barros p = alloc_swap_info(); 20732542e513SCesar Eduardo Barros if (IS_ERR(p)) 20742542e513SCesar Eduardo Barros return PTR_ERR(p); 207553cbb243SCesar Eduardo Barros 20761da177e4SLinus Torvalds name = getname(specialfile); 20771da177e4SLinus Torvalds if (IS_ERR(name)) { 20787de7fb6bSCesar Eduardo Barros error = PTR_ERR(name); 20791da177e4SLinus Torvalds name = NULL; 2080bd69010bSCesar Eduardo Barros goto bad_swap; 20811da177e4SLinus Torvalds } 2082669abf4eSJeff Layton swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0); 20831da177e4SLinus Torvalds if (IS_ERR(swap_file)) { 20847de7fb6bSCesar Eduardo Barros error = PTR_ERR(swap_file); 20851da177e4SLinus Torvalds swap_file = NULL; 2086bd69010bSCesar Eduardo Barros goto bad_swap; 20871da177e4SLinus Torvalds } 20881da177e4SLinus Torvalds 20891da177e4SLinus Torvalds p->swap_file = swap_file; 20901da177e4SLinus Torvalds mapping = swap_file->f_mapping; 20911da177e4SLinus Torvalds 20921da177e4SLinus Torvalds for (i = 0; i < nr_swapfiles; i++) { 2093efa90a98SHugh Dickins struct swap_info_struct *q = swap_info[i]; 20941da177e4SLinus Torvalds 2095e8e6c2ecSCesar Eduardo Barros if (q == p || !q->swap_file) 20961da177e4SLinus Torvalds continue; 20977de7fb6bSCesar Eduardo Barros if (mapping == q->swap_file->f_mapping) { 20987de7fb6bSCesar Eduardo Barros error = -EBUSY; 20991da177e4SLinus Torvalds goto 
bad_swap; 21001da177e4SLinus Torvalds } 21017de7fb6bSCesar Eduardo Barros } 21021da177e4SLinus Torvalds 21032130781eSCesar Eduardo Barros inode = mapping->host; 21042130781eSCesar Eduardo Barros /* If S_ISREG(inode->i_mode) will do mutex_lock(&inode->i_mutex); */ 21054d0e1e10SCesar Eduardo Barros error = claim_swapfile(p, inode); 21064d0e1e10SCesar Eduardo Barros if (unlikely(error)) 21071da177e4SLinus Torvalds goto bad_swap; 21081da177e4SLinus Torvalds 21091da177e4SLinus Torvalds /* 21101da177e4SLinus Torvalds * Read the swap header. 21111da177e4SLinus Torvalds */ 21121da177e4SLinus Torvalds if (!mapping->a_ops->readpage) { 21131da177e4SLinus Torvalds error = -EINVAL; 21141da177e4SLinus Torvalds goto bad_swap; 21151da177e4SLinus Torvalds } 2116090d2b18SPekka Enberg page = read_mapping_page(mapping, 0, swap_file); 21171da177e4SLinus Torvalds if (IS_ERR(page)) { 21181da177e4SLinus Torvalds error = PTR_ERR(page); 21191da177e4SLinus Torvalds goto bad_swap; 21201da177e4SLinus Torvalds } 212181e33971SHugh Dickins swap_header = kmap(page); 21221da177e4SLinus Torvalds 2123ca8bd38bSCesar Eduardo Barros maxpages = read_swap_header(p, swap_header, inode); 2124ca8bd38bSCesar Eduardo Barros if (unlikely(!maxpages)) { 21251da177e4SLinus Torvalds error = -EINVAL; 21261da177e4SLinus Torvalds goto bad_swap; 21271da177e4SLinus Torvalds } 21281da177e4SLinus Torvalds 21291da177e4SLinus Torvalds /* OK, set up the swap map and apply the bad block list */ 2130803d0c83SCesar Eduardo Barros swap_map = vzalloc(maxpages); 213178ecba08SHugh Dickins if (!swap_map) { 21321da177e4SLinus Torvalds error = -ENOMEM; 21331da177e4SLinus Torvalds goto bad_swap; 21341da177e4SLinus Torvalds } 21351da177e4SLinus Torvalds 21361421ef3cSCesar Eduardo Barros error = swap_cgroup_swapon(p->type, maxpages); 21371421ef3cSCesar Eduardo Barros if (error) 21381421ef3cSCesar Eduardo Barros goto bad_swap; 21391421ef3cSCesar Eduardo Barros 2140915d4d7bSCesar Eduardo Barros nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map, 2141915d4d7bSCesar Eduardo Barros maxpages, &span); 2142915d4d7bSCesar Eduardo Barros if (unlikely(nr_extents < 0)) { 214353092a74SHugh Dickins error = nr_extents; 2144e2244ec2SHugh Dickins goto bad_swap; 214553092a74SHugh Dickins } 214638b5faf4SDan Magenheimer /* frontswap enabled? set up bit-per-page map for frontswap */ 214738b5faf4SDan Magenheimer if (frontswap_enabled) 21487b57976dSAkinobu Mita frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long)); 21491da177e4SLinus Torvalds 21503bd0f0c7SSuresh Jayaraman if (p->bdev) { 215120137a49SHugh Dickins if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { 215220137a49SHugh Dickins p->flags |= SWP_SOLIDSTATE; 2153d3d30417SAkinobu Mita p->cluster_next = 1 + (prandom_u32() % p->highest_bit); 215420137a49SHugh Dickins } 2155dcf6b7ddSRafael Aquini 2156dcf6b7ddSRafael Aquini if ((swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { 2157dcf6b7ddSRafael Aquini /* 2158dcf6b7ddSRafael Aquini * When discard is enabled for swap with no particular 2159dcf6b7ddSRafael Aquini * policy flagged, we set all swap discard flags here in 2160dcf6b7ddSRafael Aquini * order to sustain backward compatibility with older 2161dcf6b7ddSRafael Aquini * swapon(8) releases. 
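 * Net effect of the adjustment below: SWAP_FLAG_DISCARD alone keeps
 * both discard modes, adding SWAP_FLAG_DISCARD_ONCE drops the
 * page-cluster (SWP_PAGE_DISCARD) mode, and SWAP_FLAG_DISCARD_PAGES
 * drops the swapon-time area (SWP_AREA_DISCARD) mode.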
2162dcf6b7ddSRafael Aquini */ 2163dcf6b7ddSRafael Aquini p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | 2164dcf6b7ddSRafael Aquini SWP_PAGE_DISCARD); 2165dcf6b7ddSRafael Aquini 2166dcf6b7ddSRafael Aquini /* 2167dcf6b7ddSRafael Aquini * By flagging sys_swapon, a sysadmin can tell us to 2168dcf6b7ddSRafael Aquini * either do single-time area discards only, or to just 2169dcf6b7ddSRafael Aquini * perform discards for released swap page-clusters. 2170dcf6b7ddSRafael Aquini * Now it's time to adjust the p->flags accordingly. 2171dcf6b7ddSRafael Aquini */ 2172dcf6b7ddSRafael Aquini if (swap_flags & SWAP_FLAG_DISCARD_ONCE) 2173dcf6b7ddSRafael Aquini p->flags &= ~SWP_PAGE_DISCARD; 2174dcf6b7ddSRafael Aquini else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) 2175dcf6b7ddSRafael Aquini p->flags &= ~SWP_AREA_DISCARD; 2176dcf6b7ddSRafael Aquini 2177dcf6b7ddSRafael Aquini /* issue a swapon-time discard if it's still required */ 2178dcf6b7ddSRafael Aquini if (p->flags & SWP_AREA_DISCARD) { 2179dcf6b7ddSRafael Aquini int err = discard_swap(p); 2180dcf6b7ddSRafael Aquini if (unlikely(err)) 2181dcf6b7ddSRafael Aquini printk(KERN_ERR 2182dcf6b7ddSRafael Aquini "swapon: discard_swap(%p): %d\n", 2183dcf6b7ddSRafael Aquini p, err); 2184dcf6b7ddSRafael Aquini } 2185dcf6b7ddSRafael Aquini } 21863bd0f0c7SSuresh Jayaraman } 21876a6ba831SHugh Dickins 2188fc0abb14SIngo Molnar mutex_lock(&swapon_mutex); 218940531542SCesar Eduardo Barros prio = -1; 219078ecba08SHugh Dickins if (swap_flags & SWAP_FLAG_PREFER) 219140531542SCesar Eduardo Barros prio = 219278ecba08SHugh Dickins (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; 219338b5faf4SDan Magenheimer enable_swap_info(p, prio, swap_map, frontswap_map); 2194c69dbfb8SCesar Eduardo Barros 2195c69dbfb8SCesar Eduardo Barros printk(KERN_INFO "Adding %uk swap on %s. " 2196dcf6b7ddSRafael Aquini "Priority:%d extents:%d across:%lluk %s%s%s%s%s\n", 219791a27b2aSJeff Layton p->pages<<(PAGE_SHIFT-10), name->name, p->prio, 2198c69dbfb8SCesar Eduardo Barros nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), 2199c69dbfb8SCesar Eduardo Barros (p->flags & SWP_SOLIDSTATE) ? "SS" : "", 220038b5faf4SDan Magenheimer (p->flags & SWP_DISCARDABLE) ? "D" : "", 2201dcf6b7ddSRafael Aquini (p->flags & SWP_AREA_DISCARD) ? "s" : "", 2202dcf6b7ddSRafael Aquini (p->flags & SWP_PAGE_DISCARD) ? "c" : "", 220338b5faf4SDan Magenheimer (frontswap_map) ? 
"FS" : ""); 2204c69dbfb8SCesar Eduardo Barros 2205fc0abb14SIngo Molnar mutex_unlock(&swapon_mutex); 220666d7dd51SKay Sievers atomic_inc(&proc_poll_event); 220766d7dd51SKay Sievers wake_up_interruptible(&proc_poll_wait); 220866d7dd51SKay Sievers 22099b01c350SCesar Eduardo Barros if (S_ISREG(inode->i_mode)) 22109b01c350SCesar Eduardo Barros inode->i_flags |= S_SWAPFILE; 22111da177e4SLinus Torvalds error = 0; 22121da177e4SLinus Torvalds goto out; 22131da177e4SLinus Torvalds bad_swap: 2214bd69010bSCesar Eduardo Barros if (inode && S_ISBLK(inode->i_mode) && p->bdev) { 2215f2090d2dSCesar Eduardo Barros set_blocksize(p->bdev, p->old_block_size); 2216f2090d2dSCesar Eduardo Barros blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 22171da177e4SLinus Torvalds } 22184cd3bb10SHugh Dickins destroy_swap_extents(p); 2219e8e6c2ecSCesar Eduardo Barros swap_cgroup_swapoff(p->type); 22205d337b91SHugh Dickins spin_lock(&swap_lock); 22211da177e4SLinus Torvalds p->swap_file = NULL; 22221da177e4SLinus Torvalds p->flags = 0; 22235d337b91SHugh Dickins spin_unlock(&swap_lock); 22241da177e4SLinus Torvalds vfree(swap_map); 222552c50567SMel Gorman if (swap_file) { 22262130781eSCesar Eduardo Barros if (inode && S_ISREG(inode->i_mode)) { 222752c50567SMel Gorman mutex_unlock(&inode->i_mutex); 22282130781eSCesar Eduardo Barros inode = NULL; 22292130781eSCesar Eduardo Barros } 22301da177e4SLinus Torvalds filp_close(swap_file, NULL); 223152c50567SMel Gorman } 22321da177e4SLinus Torvalds out: 22331da177e4SLinus Torvalds if (page && !IS_ERR(page)) { 22341da177e4SLinus Torvalds kunmap(page); 22351da177e4SLinus Torvalds page_cache_release(page); 22361da177e4SLinus Torvalds } 22371da177e4SLinus Torvalds if (name) 22381da177e4SLinus Torvalds putname(name); 22399b01c350SCesar Eduardo Barros if (inode && S_ISREG(inode->i_mode)) 22401b1dcc1bSJes Sorensen mutex_unlock(&inode->i_mutex); 22411da177e4SLinus Torvalds return error; 22421da177e4SLinus Torvalds } 22431da177e4SLinus Torvalds 22441da177e4SLinus Torvalds void si_swapinfo(struct sysinfo *val) 22451da177e4SLinus Torvalds { 2246efa90a98SHugh Dickins unsigned int type; 22471da177e4SLinus Torvalds unsigned long nr_to_be_unused = 0; 22481da177e4SLinus Torvalds 22495d337b91SHugh Dickins spin_lock(&swap_lock); 2250efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) { 2251efa90a98SHugh Dickins struct swap_info_struct *si = swap_info[type]; 2252efa90a98SHugh Dickins 2253efa90a98SHugh Dickins if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) 2254efa90a98SHugh Dickins nr_to_be_unused += si->inuse_pages; 22551da177e4SLinus Torvalds } 2256ec8acf20SShaohua Li val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; 22571da177e4SLinus Torvalds val->totalswap = total_swap_pages + nr_to_be_unused; 22585d337b91SHugh Dickins spin_unlock(&swap_lock); 22591da177e4SLinus Torvalds } 22601da177e4SLinus Torvalds 22611da177e4SLinus Torvalds /* 22621da177e4SLinus Torvalds * Verify that a swap entry is valid and increment its swap map count. 22631da177e4SLinus Torvalds * 2264355cfa73SKAMEZAWA Hiroyuki * Returns error code in following case. 2265355cfa73SKAMEZAWA Hiroyuki * - success -> 0 2266355cfa73SKAMEZAWA Hiroyuki * - swp_entry is invalid -> EINVAL 2267355cfa73SKAMEZAWA Hiroyuki * - swp_entry is migration entry -> EINVAL 2268355cfa73SKAMEZAWA Hiroyuki * - swap-cache reference is requested but there is already one. -> EEXIST 2269355cfa73SKAMEZAWA Hiroyuki * - swap-cache reference is requested but the entry is not used. 
-> ENOENT 2270570a335bSHugh Dickins * - swap-mapped reference requested but needs continued swap count. -> ENOMEM 22711da177e4SLinus Torvalds */ 22728d69aaeeSHugh Dickins static int __swap_duplicate(swp_entry_t entry, unsigned char usage) 22731da177e4SLinus Torvalds { 22741da177e4SLinus Torvalds struct swap_info_struct *p; 22751da177e4SLinus Torvalds unsigned long offset, type; 22768d69aaeeSHugh Dickins unsigned char count; 22778d69aaeeSHugh Dickins unsigned char has_cache; 2278253d553bSHugh Dickins int err = -EINVAL; 22791da177e4SLinus Torvalds 2280a7420aa5SAndi Kleen if (non_swap_entry(entry)) 2281253d553bSHugh Dickins goto out; 22820697212aSChristoph Lameter 22831da177e4SLinus Torvalds type = swp_type(entry); 22841da177e4SLinus Torvalds if (type >= nr_swapfiles) 22851da177e4SLinus Torvalds goto bad_file; 2286efa90a98SHugh Dickins p = swap_info[type]; 22871da177e4SLinus Torvalds offset = swp_offset(entry); 22881da177e4SLinus Torvalds 2289ec8acf20SShaohua Li spin_lock(&p->lock); 2290355cfa73SKAMEZAWA Hiroyuki if (unlikely(offset >= p->max)) 2291355cfa73SKAMEZAWA Hiroyuki goto unlock_out; 2292355cfa73SKAMEZAWA Hiroyuki 2293253d553bSHugh Dickins count = p->swap_map[offset]; 2294253d553bSHugh Dickins has_cache = count & SWAP_HAS_CACHE; 2295253d553bSHugh Dickins count &= ~SWAP_HAS_CACHE; 2296253d553bSHugh Dickins err = 0; 2297355cfa73SKAMEZAWA Hiroyuki 2298253d553bSHugh Dickins if (usage == SWAP_HAS_CACHE) { 2299355cfa73SKAMEZAWA Hiroyuki 2300355cfa73SKAMEZAWA Hiroyuki /* set SWAP_HAS_CACHE if there is no cache and entry is used */ 2301253d553bSHugh Dickins if (!has_cache && count) 2302253d553bSHugh Dickins has_cache = SWAP_HAS_CACHE; 2303253d553bSHugh Dickins else if (has_cache) /* someone else added cache */ 2304253d553bSHugh Dickins err = -EEXIST; 2305253d553bSHugh Dickins else /* no users remaining */ 2306253d553bSHugh Dickins err = -ENOENT; 2307355cfa73SKAMEZAWA Hiroyuki 2308355cfa73SKAMEZAWA Hiroyuki } else if (count || has_cache) { 2309253d553bSHugh Dickins 2310570a335bSHugh Dickins if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) 2311570a335bSHugh Dickins count += usage; 2312570a335bSHugh Dickins else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) 2313253d553bSHugh Dickins err = -EINVAL; 2314570a335bSHugh Dickins else if (swap_count_continued(p, offset, count)) 2315570a335bSHugh Dickins count = COUNT_CONTINUED; 2316570a335bSHugh Dickins else 2317570a335bSHugh Dickins err = -ENOMEM; 2318253d553bSHugh Dickins } else 2319253d553bSHugh Dickins err = -ENOENT; /* unused swap entry */ 2320253d553bSHugh Dickins 2321253d553bSHugh Dickins p->swap_map[offset] = count | has_cache; 2322253d553bSHugh Dickins 2323355cfa73SKAMEZAWA Hiroyuki unlock_out: 2324ec8acf20SShaohua Li spin_unlock(&p->lock); 23251da177e4SLinus Torvalds out: 2326253d553bSHugh Dickins return err; 23271da177e4SLinus Torvalds 23281da177e4SLinus Torvalds bad_file: 23291da177e4SLinus Torvalds printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); 23301da177e4SLinus Torvalds goto out; 23311da177e4SLinus Torvalds } 2332253d553bSHugh Dickins 2333355cfa73SKAMEZAWA Hiroyuki /* 2334aaa46865SHugh Dickins * Help swapoff by noting that swap entry belongs to shmem/tmpfs 2335aaa46865SHugh Dickins * (in which case its reference count is never incremented). 
2336aaa46865SHugh Dickins */ 2337aaa46865SHugh Dickins void swap_shmem_alloc(swp_entry_t entry) 2338aaa46865SHugh Dickins { 2339aaa46865SHugh Dickins __swap_duplicate(entry, SWAP_MAP_SHMEM); 2340aaa46865SHugh Dickins } 2341aaa46865SHugh Dickins 2342aaa46865SHugh Dickins /* 234308259d58SHugh Dickins * Increase reference count of swap entry by 1. 234408259d58SHugh Dickins * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required 234508259d58SHugh Dickins * but could not be atomically allocated. Returns 0, just as if it succeeded, 234608259d58SHugh Dickins * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which 234708259d58SHugh Dickins * might occur if a page table entry has got corrupted. 2348355cfa73SKAMEZAWA Hiroyuki */ 2349570a335bSHugh Dickins int swap_duplicate(swp_entry_t entry) 2350355cfa73SKAMEZAWA Hiroyuki { 2351570a335bSHugh Dickins int err = 0; 2352570a335bSHugh Dickins 2353570a335bSHugh Dickins while (!err && __swap_duplicate(entry, 1) == -ENOMEM) 2354570a335bSHugh Dickins err = add_swap_count_continuation(entry, GFP_ATOMIC); 2355570a335bSHugh Dickins return err; 2356355cfa73SKAMEZAWA Hiroyuki } 23571da177e4SLinus Torvalds 2358cb4b86baSKAMEZAWA Hiroyuki /* 2359355cfa73SKAMEZAWA Hiroyuki * @entry: swap entry for which we allocate swap cache. 2360355cfa73SKAMEZAWA Hiroyuki * 236173c34b6aSHugh Dickins * Called when allocating swap cache for an existing swap entry. 2362355cfa73SKAMEZAWA Hiroyuki * This can return error codes; it returns 0 on success. 2363355cfa73SKAMEZAWA Hiroyuki * -EEXIST means there is already a swap cache. 2364355cfa73SKAMEZAWA Hiroyuki * Note: return code is different from swap_duplicate(). 2365cb4b86baSKAMEZAWA Hiroyuki */ 2366cb4b86baSKAMEZAWA Hiroyuki int swapcache_prepare(swp_entry_t entry) 2367cb4b86baSKAMEZAWA Hiroyuki { 2368253d553bSHugh Dickins return __swap_duplicate(entry, SWAP_HAS_CACHE); 2369cb4b86baSKAMEZAWA Hiroyuki } 2370cb4b86baSKAMEZAWA Hiroyuki 2371f981c595SMel Gorman struct swap_info_struct *page_swap_info(struct page *page) 2372f981c595SMel Gorman { 2373f981c595SMel Gorman swp_entry_t swap = { .val = page_private(page) }; 2374f981c595SMel Gorman BUG_ON(!PageSwapCache(page)); 2375f981c595SMel Gorman return swap_info[swp_type(swap)]; 2376f981c595SMel Gorman } 2377f981c595SMel Gorman 2378f981c595SMel Gorman /* 2379f981c595SMel Gorman * out-of-line __page_file_ methods to avoid include hell. 
2380f981c595SMel Gorman */ 2381f981c595SMel Gorman struct address_space *__page_file_mapping(struct page *page) 2382f981c595SMel Gorman { 2383f981c595SMel Gorman VM_BUG_ON(!PageSwapCache(page)); 2384f981c595SMel Gorman return page_swap_info(page)->swap_file->f_mapping; 2385f981c595SMel Gorman } 2386f981c595SMel Gorman EXPORT_SYMBOL_GPL(__page_file_mapping); 2387f981c595SMel Gorman 2388f981c595SMel Gorman pgoff_t __page_file_index(struct page *page) 2389f981c595SMel Gorman { 2390f981c595SMel Gorman swp_entry_t swap = { .val = page_private(page) }; 2391f981c595SMel Gorman VM_BUG_ON(!PageSwapCache(page)); 2392f981c595SMel Gorman return swp_offset(swap); 2393f981c595SMel Gorman } 2394f981c595SMel Gorman EXPORT_SYMBOL_GPL(__page_file_index); 2395f981c595SMel Gorman 23961da177e4SLinus Torvalds /* 2397570a335bSHugh Dickins * add_swap_count_continuation - called when a swap count is duplicated 2398570a335bSHugh Dickins * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's 2399570a335bSHugh Dickins * page of the original vmalloc'ed swap_map, to hold the continuation count 2400570a335bSHugh Dickins * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called 2401570a335bSHugh Dickins * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. 2402570a335bSHugh Dickins * 2403570a335bSHugh Dickins * These continuation pages are seldom referenced: the common paths all work 2404570a335bSHugh Dickins * on the original swap_map, only referring to a continuation page when the 2405570a335bSHugh Dickins * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. 2406570a335bSHugh Dickins * 2407570a335bSHugh Dickins * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding 2408570a335bSHugh Dickins * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) 2409570a335bSHugh Dickins * can be called after dropping locks. 2410570a335bSHugh Dickins */ 2411570a335bSHugh Dickins int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) 2412570a335bSHugh Dickins { 2413570a335bSHugh Dickins struct swap_info_struct *si; 2414570a335bSHugh Dickins struct page *head; 2415570a335bSHugh Dickins struct page *page; 2416570a335bSHugh Dickins struct page *list_page; 2417570a335bSHugh Dickins pgoff_t offset; 2418570a335bSHugh Dickins unsigned char count; 2419570a335bSHugh Dickins 2420570a335bSHugh Dickins /* 2421570a335bSHugh Dickins * When debugging, it's easier to use __GFP_ZERO here; but it's better 2422570a335bSHugh Dickins * for latency not to zero a page while GFP_ATOMIC and holding locks. 2423570a335bSHugh Dickins */ 2424570a335bSHugh Dickins page = alloc_page(gfp_mask | __GFP_HIGHMEM); 2425570a335bSHugh Dickins 2426570a335bSHugh Dickins si = swap_info_get(entry); 2427570a335bSHugh Dickins if (!si) { 2428570a335bSHugh Dickins /* 2429570a335bSHugh Dickins * An acceptable race has occurred since the failing 2430570a335bSHugh Dickins * __swap_duplicate(): the swap entry has been freed, 2431570a335bSHugh Dickins * perhaps even the whole swap_map cleared for swapoff. 
2432570a335bSHugh Dickins */ 2433570a335bSHugh Dickins goto outer; 2434570a335bSHugh Dickins } 2435570a335bSHugh Dickins 2436570a335bSHugh Dickins offset = swp_offset(entry); 2437570a335bSHugh Dickins count = si->swap_map[offset] & ~SWAP_HAS_CACHE; 2438570a335bSHugh Dickins 2439570a335bSHugh Dickins if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { 2440570a335bSHugh Dickins /* 2441570a335bSHugh Dickins * The higher the swap count, the more likely it is that tasks 2442570a335bSHugh Dickins * will race to add swap count continuation: we need to avoid 2443570a335bSHugh Dickins * over-provisioning. 2444570a335bSHugh Dickins */ 2445570a335bSHugh Dickins goto out; 2446570a335bSHugh Dickins } 2447570a335bSHugh Dickins 2448570a335bSHugh Dickins if (!page) { 2449ec8acf20SShaohua Li spin_unlock(&si->lock); 2450570a335bSHugh Dickins return -ENOMEM; 2451570a335bSHugh Dickins } 2452570a335bSHugh Dickins 2453570a335bSHugh Dickins /* 2454570a335bSHugh Dickins * We are fortunate that although vmalloc_to_page uses pte_offset_map, 2455570a335bSHugh Dickins * no architecture is using highmem pages for kernel pagetables: so it 2456570a335bSHugh Dickins * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps. 2457570a335bSHugh Dickins */ 2458570a335bSHugh Dickins head = vmalloc_to_page(si->swap_map + offset); 2459570a335bSHugh Dickins offset &= ~PAGE_MASK; 2460570a335bSHugh Dickins 2461570a335bSHugh Dickins /* 2462570a335bSHugh Dickins * Page allocation does not initialize the page's lru field, 2463570a335bSHugh Dickins * but it does always reset its private field. 2464570a335bSHugh Dickins */ 2465570a335bSHugh Dickins if (!page_private(head)) { 2466570a335bSHugh Dickins BUG_ON(count & COUNT_CONTINUED); 2467570a335bSHugh Dickins INIT_LIST_HEAD(&head->lru); 2468570a335bSHugh Dickins set_page_private(head, SWP_CONTINUED); 2469570a335bSHugh Dickins si->flags |= SWP_CONTINUED; 2470570a335bSHugh Dickins } 2471570a335bSHugh Dickins 2472570a335bSHugh Dickins list_for_each_entry(list_page, &head->lru, lru) { 2473570a335bSHugh Dickins unsigned char *map; 2474570a335bSHugh Dickins 2475570a335bSHugh Dickins /* 2476570a335bSHugh Dickins * If the previous map said no continuation, but we've found 2477570a335bSHugh Dickins * a continuation page, free our allocation and use this one. 2478570a335bSHugh Dickins */ 2479570a335bSHugh Dickins if (!(count & COUNT_CONTINUED)) 2480570a335bSHugh Dickins goto out; 2481570a335bSHugh Dickins 24829b04c5feSCong Wang map = kmap_atomic(list_page) + offset; 2483570a335bSHugh Dickins count = *map; 24849b04c5feSCong Wang kunmap_atomic(map); 2485570a335bSHugh Dickins 2486570a335bSHugh Dickins /* 2487570a335bSHugh Dickins * If this continuation count now has some space in it, 2488570a335bSHugh Dickins * free our allocation and use this one. 
2489570a335bSHugh Dickins */ 2490570a335bSHugh Dickins if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) 2491570a335bSHugh Dickins goto out; 2492570a335bSHugh Dickins } 2493570a335bSHugh Dickins 2494570a335bSHugh Dickins list_add_tail(&page->lru, &head->lru); 2495570a335bSHugh Dickins page = NULL; /* now it's attached, don't free it */ 2496570a335bSHugh Dickins out: 2497ec8acf20SShaohua Li spin_unlock(&si->lock); 2498570a335bSHugh Dickins outer: 2499570a335bSHugh Dickins if (page) 2500570a335bSHugh Dickins __free_page(page); 2501570a335bSHugh Dickins return 0; 2502570a335bSHugh Dickins } 2503570a335bSHugh Dickins 2504570a335bSHugh Dickins /* 2505570a335bSHugh Dickins * swap_count_continued - when the original swap_map count is incremented 2506570a335bSHugh Dickins * from SWAP_MAP_MAX, check if there is already a continuation page to carry 2507570a335bSHugh Dickins * into, carry if so, or else fail until a new continuation page is allocated; 2508570a335bSHugh Dickins * when the original swap_map count is decremented from 0 with continuation, 2509570a335bSHugh Dickins * borrow from the continuation and report whether it still holds more. 2510570a335bSHugh Dickins * Called while __swap_duplicate() or swap_entry_free() holds swap_lock. 2511570a335bSHugh Dickins */ 2512570a335bSHugh Dickins static bool swap_count_continued(struct swap_info_struct *si, 2513570a335bSHugh Dickins pgoff_t offset, unsigned char count) 2514570a335bSHugh Dickins { 2515570a335bSHugh Dickins struct page *head; 2516570a335bSHugh Dickins struct page *page; 2517570a335bSHugh Dickins unsigned char *map; 2518570a335bSHugh Dickins 2519570a335bSHugh Dickins head = vmalloc_to_page(si->swap_map + offset); 2520570a335bSHugh Dickins if (page_private(head) != SWP_CONTINUED) { 2521570a335bSHugh Dickins BUG_ON(count & COUNT_CONTINUED); 2522570a335bSHugh Dickins return false; /* need to add count continuation */ 2523570a335bSHugh Dickins } 2524570a335bSHugh Dickins 2525570a335bSHugh Dickins offset &= ~PAGE_MASK; 2526570a335bSHugh Dickins page = list_entry(head->lru.next, struct page, lru); 25279b04c5feSCong Wang map = kmap_atomic(page) + offset; 2528570a335bSHugh Dickins 2529570a335bSHugh Dickins if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ 2530570a335bSHugh Dickins goto init_map; /* jump over SWAP_CONT_MAX checks */ 2531570a335bSHugh Dickins 2532570a335bSHugh Dickins if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ 2533570a335bSHugh Dickins /* 2534570a335bSHugh Dickins * Think of how you add 1 to 999 2535570a335bSHugh Dickins */ 2536570a335bSHugh Dickins while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { 25379b04c5feSCong Wang kunmap_atomic(map); 2538570a335bSHugh Dickins page = list_entry(page->lru.next, struct page, lru); 2539570a335bSHugh Dickins BUG_ON(page == head); 25409b04c5feSCong Wang map = kmap_atomic(page) + offset; 2541570a335bSHugh Dickins } 2542570a335bSHugh Dickins if (*map == SWAP_CONT_MAX) { 25439b04c5feSCong Wang kunmap_atomic(map); 2544570a335bSHugh Dickins page = list_entry(page->lru.next, struct page, lru); 2545570a335bSHugh Dickins if (page == head) 2546570a335bSHugh Dickins return false; /* add count continuation */ 25479b04c5feSCong Wang map = kmap_atomic(page) + offset; 2548570a335bSHugh Dickins init_map: *map = 0; /* we didn't zero the page */ 2549570a335bSHugh Dickins } 2550570a335bSHugh Dickins *map += 1; 25519b04c5feSCong Wang kunmap_atomic(map); 2552570a335bSHugh Dickins page = list_entry(page->lru.prev, struct page, lru); 2553570a335bSHugh Dickins while (page != 
head) { 25549b04c5feSCong Wang map = kmap_atomic(page) + offset; 2555570a335bSHugh Dickins *map = COUNT_CONTINUED; 25569b04c5feSCong Wang kunmap_atomic(map); 2557570a335bSHugh Dickins page = list_entry(page->lru.prev, struct page, lru); 2558570a335bSHugh Dickins } 2559570a335bSHugh Dickins return true; /* incremented */ 2560570a335bSHugh Dickins 2561570a335bSHugh Dickins } else { /* decrementing */ 2562570a335bSHugh Dickins /* 2563570a335bSHugh Dickins * Think of how you subtract 1 from 1000 2564570a335bSHugh Dickins */ 2565570a335bSHugh Dickins BUG_ON(count != COUNT_CONTINUED); 2566570a335bSHugh Dickins while (*map == COUNT_CONTINUED) { 25679b04c5feSCong Wang kunmap_atomic(map); 2568570a335bSHugh Dickins page = list_entry(page->lru.next, struct page, lru); 2569570a335bSHugh Dickins BUG_ON(page == head); 25709b04c5feSCong Wang map = kmap_atomic(page) + offset; 2571570a335bSHugh Dickins } 2572570a335bSHugh Dickins BUG_ON(*map == 0); 2573570a335bSHugh Dickins *map -= 1; 2574570a335bSHugh Dickins if (*map == 0) 2575570a335bSHugh Dickins count = 0; 25769b04c5feSCong Wang kunmap_atomic(map); 2577570a335bSHugh Dickins page = list_entry(page->lru.prev, struct page, lru); 2578570a335bSHugh Dickins while (page != head) { 25799b04c5feSCong Wang map = kmap_atomic(page) + offset; 2580570a335bSHugh Dickins *map = SWAP_CONT_MAX | count; 2581570a335bSHugh Dickins count = COUNT_CONTINUED; 25829b04c5feSCong Wang kunmap_atomic(map); 2583570a335bSHugh Dickins page = list_entry(page->lru.prev, struct page, lru); 2584570a335bSHugh Dickins } 2585570a335bSHugh Dickins return count == COUNT_CONTINUED; 2586570a335bSHugh Dickins } 2587570a335bSHugh Dickins } 2588570a335bSHugh Dickins 2589570a335bSHugh Dickins /* 2590570a335bSHugh Dickins * free_swap_count_continuations - swapoff free all the continuation pages 2591570a335bSHugh Dickins * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. 2592570a335bSHugh Dickins */ 2593570a335bSHugh Dickins static void free_swap_count_continuations(struct swap_info_struct *si) 2594570a335bSHugh Dickins { 2595570a335bSHugh Dickins pgoff_t offset; 2596570a335bSHugh Dickins 2597570a335bSHugh Dickins for (offset = 0; offset < si->max; offset += PAGE_SIZE) { 2598570a335bSHugh Dickins struct page *head; 2599570a335bSHugh Dickins head = vmalloc_to_page(si->swap_map + offset); 2600570a335bSHugh Dickins if (page_private(head)) { 2601570a335bSHugh Dickins struct list_head *this, *next; 2602570a335bSHugh Dickins list_for_each_safe(this, next, &head->lru) { 2603570a335bSHugh Dickins struct page *page; 2604570a335bSHugh Dickins page = list_entry(this, struct page, lru); 2605570a335bSHugh Dickins list_del(this); 2606570a335bSHugh Dickins __free_page(page); 2607570a335bSHugh Dickins } 2608570a335bSHugh Dickins } 2609570a335bSHugh Dickins } 2610570a335bSHugh Dickins } 2611
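Editor's illustration (not part of swapfile.c, and not kernel code): the carry/borrow scheme implemented by swap_count_continued() above can be read as mixed-radix arithmetic, where swap_map[offset] is the low digit (saturating at SWAP_MAP_MAX) and each chained continuation page holds one further digit at the same offset (saturating at SWAP_CONT_MAX). The small userspace sketch below models only that arithmetic; MODEL_MAP_MAX, MODEL_CONT_MAX and MODEL_DIGITS are illustrative stand-ins rather than the kernel's definitions, and the COUNT_CONTINUED / SWAP_HAS_CACHE flag bits, locking and page chaining are deliberately left out.

/*
 * Editor's sketch: a mixed-radix counter modelling the swap-count
 * continuation arithmetic.  digit[0] stands in for swap_map[offset];
 * digit[1..] stand in for the chained continuation pages.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MODEL_MAP_MAX	0x3e	/* stand-in value for SWAP_MAP_MAX  */
#define MODEL_CONT_MAX	0x7f	/* stand-in value for SWAP_CONT_MAX */
#define MODEL_DIGITS	2	/* low digit plus one continuation "page" */

static unsigned char digit[MODEL_DIGITS];

/* "Think of how you add 1 to 999": carry into the lowest digit with room. */
static bool model_inc(void)
{
	int i;

	for (i = 0; i < MODEL_DIGITS; i++)
		if (digit[i] < (i ? MODEL_CONT_MAX : MODEL_MAP_MAX))
			break;
	if (i == MODEL_DIGITS)
		return false;		/* would need one more continuation page */

	digit[i]++;			/* the +1 lands here ...             */
	while (i-- > 0)
		digit[i] = 0;		/* ... and every lower digit wraps   */
	return true;
}

/* "Think of how you subtract 1 from 1000": borrow from the lowest non-zero digit. */
static bool model_dec(void)
{
	int i;

	for (i = 0; i < MODEL_DIGITS; i++)
		if (digit[i] != 0)
			break;
	if (i == MODEL_DIGITS)
		return false;		/* the count was already zero */

	digit[i]--;			/* the -1 is taken here ...          */
	while (i-- > 0)			/* ... and every lower digit refills */
		digit[i] = i ? MODEL_CONT_MAX : MODEL_MAP_MAX;
	return true;
}

int main(void)
{
	long n = 0;

	while (model_inc())		/* count up until another digit would be needed */
		n++;
	printf("saturated after %ld increments: low 0x%02x, continuation 0x%02x\n",
	       n, digit[0], digit[1]);
	while (model_dec())		/* count all the way back down */
		n--;
	assert(n == 0);			/* every carry was undone by a matching borrow */
	return 0;
}

Running the sketch shows the point at which one more "continuation page" (digit) would be required, and the final assert checks that every carry performed on the way up is undone by a matching borrow on the way down, which is the invariant that lets free_swap_count_continuations() simply free whole continuation pages once the swap_map is quiesced.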