/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/swap_slots.h>
#include <linux/sort.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device**);

DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority = -1;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */
PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by get_swap_page() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but get_swap_page() doesn't need to look at full ones.
 * This uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
 */
static struct plist_head *swap_avail_heads;
static DEFINE_SPINLOCK(swap_avail_lock);

struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);

atomic_t nr_rotate_swap = ATOMIC_INIT(0);

static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
}
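
/*
 * Example of the swap_map byte encoding (illustrative note): a value of
 * (SWAP_HAS_CACHE | 2) means the slot is in the swap cache and has two
 * map references, so swap_count() of it returns 2.
 */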

/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct page *page;
	int ret = 0;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	if (!page)
		return 0;
	/*
	 * This function is called from scan_swap_map(), which is reached
	 * from vmscan.c while it is reclaiming pages, so a page lock may
	 * already be held here. We have to use trylock to avoid deadlock.
	 * This is a special case; in usual operations use try_to_free_swap()
	 * with an explicit lock_page().
	 */
	if (trylock_page(page)) {
		ret = try_to_free_swap(page);
		unlock_page(page);
	}
	put_page(page);
	return ret;
}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
	struct swap_extent *se;
	sector_t start_block;
	sector_t nr_blocks;
	int err = 0;

	/* Do not discard the swap header page! */
	se = &si->first_swap_extent;
	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
	if (nr_blocks) {
		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			return err;
		cond_resched();
	}

	list_for_each_entry(se, &si->first_swap_extent.list, list) {
		start_block = se->start_block << (PAGE_SHIFT - 9);
		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			break;

		cond_resched();
	}
	return err;		/* That will often be -EOPNOTSUPP */
}
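
/*
 * Note on units (illustrative): blkdev_issue_discard() takes 512-byte
 * sectors, so page numbers and counts above are converted with
 * << (PAGE_SHIFT - 9); e.g. with 4K pages that is a shift of 3,
 * i.e. 8 sectors per page.
 */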

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
				 pgoff_t start_page, pgoff_t nr_pages)
{
	struct swap_extent *se = si->curr_swap_extent;
	int found_extent = 0;

	while (nr_pages) {
		if (se->start_page <= start_page &&
		    start_page < se->start_page + se->nr_pages) {
			pgoff_t offset = start_page - se->start_page;
			sector_t start_block = se->start_block + offset;
			sector_t nr_blocks = se->nr_pages - offset;

			if (nr_blocks > nr_pages)
				nr_blocks = nr_pages;
			start_page += nr_blocks;
			nr_pages -= nr_blocks;

			if (!found_extent++)
				si->curr_swap_extent = se;

			start_block <<= PAGE_SHIFT - 9;
			nr_blocks <<= PAGE_SHIFT - 9;
			if (blkdev_issue_discard(si->bdev, start_block,
				    nr_blocks, GFP_NOIO, 0))
				break;
		}

		se = list_next_entry(se, list);
	}
}

#ifdef CONFIG_THP_SWAP
#define SWAPFILE_CLUSTER	HPAGE_PMD_NR

#define swap_entry_size(size)	(size)
#else
#define SWAPFILE_CLUSTER	256

/*
 * Define swap_entry_size() as constant to let the compiler optimize
 * out some code if !CONFIG_THP_SWAP
 */
#define swap_entry_size(size)	1
#endif
#define LATENCY_LIMIT		256

static inline void cluster_set_flag(struct swap_cluster_info *info,
	unsigned int flag)
{
	info->flags = flag;
}

static inline unsigned int cluster_count(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_count(struct swap_cluster_info *info,
				     unsigned int c)
{
	info->data = c;
}

static inline void cluster_set_count_flag(struct swap_cluster_info *info,
					 unsigned int c, unsigned int f)
{
	info->flags = f;
	info->data = c;
}
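
/*
 * Descriptive note: a swap_cluster_info's ->data field is overloaded.
 * For an allocated cluster it holds the usage count (cluster_count()
 * above), while for a cluster sitting on the free or discard list it
 * holds the index of the next cluster (cluster_next() below); ->flags
 * records which interpretation applies.
 */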

static inline unsigned int cluster_next(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_next(struct swap_cluster_info *info,
				    unsigned int n)
{
	info->data = n;
}

static inline void cluster_set_next_flag(struct swap_cluster_info *info,
					 unsigned int n, unsigned int f)
{
	info->flags = f;
	info->data = n;
}

static inline bool cluster_is_free(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_FREE;
}

static inline bool cluster_is_null(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_NEXT_NULL;
}

static inline void cluster_set_null(struct swap_cluster_info *info)
{
	info->flags = CLUSTER_FLAG_NEXT_NULL;
	info->data = 0;
}

static inline bool cluster_is_huge(struct swap_cluster_info *info)
{
	if (IS_ENABLED(CONFIG_THP_SWAP))
		return info->flags & CLUSTER_FLAG_HUGE;
	return false;
}

static inline void cluster_clear_huge(struct swap_cluster_info *info)
{
	info->flags &= ~CLUSTER_FLAG_HUGE;
}

static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
						     unsigned long offset)
{
	struct swap_cluster_info *ci;

	ci = si->cluster_info;
	if (ci) {
		ci += offset / SWAPFILE_CLUSTER;
		spin_lock(&ci->lock);
	}
	return ci;
}

static inline void unlock_cluster(struct swap_cluster_info *ci)
{
	if (ci)
		spin_unlock(&ci->lock);
}
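
/*
 * Typical usage (illustrative): callers bracket swap_map updates with
 * ci = lock_cluster(si, offset); ... unlock_cluster(ci); On devices
 * without cluster_info (non-SSD), lock_cluster() returns NULL and the
 * coarser si->lock must protect the update instead, which is what
 * lock_cluster_or_swap_info() below arranges.
 */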

/*
 * Determine the locking method in use for this device.  Return
 * swap_cluster_info if SSD-style cluster-based locking is in place.
 */
static inline struct swap_cluster_info *lock_cluster_or_swap_info(
	struct swap_info_struct *si, unsigned long offset)
{
	struct swap_cluster_info *ci;

	/* Try to use fine-grained SSD-style locking if available: */
	ci = lock_cluster(si, offset);
	/* Otherwise, fall back to traditional, coarse locking: */
	if (!ci)
		spin_lock(&si->lock);

	return ci;
}

static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
					       struct swap_cluster_info *ci)
{
	if (ci)
		unlock_cluster(ci);
	else
		spin_unlock(&si->lock);
}

static inline bool cluster_list_empty(struct swap_cluster_list *list)
{
	return cluster_is_null(&list->head);
}

static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
{
	return cluster_next(&list->head);
}

static void cluster_list_init(struct swap_cluster_list *list)
{
	cluster_set_null(&list->head);
	cluster_set_null(&list->tail);
}

static void cluster_list_add_tail(struct swap_cluster_list *list,
				  struct swap_cluster_info *ci,
				  unsigned int idx)
{
	if (cluster_list_empty(list)) {
		cluster_set_next_flag(&list->head, idx, 0);
		cluster_set_next_flag(&list->tail, idx, 0);
	} else {
		struct swap_cluster_info *ci_tail;
		unsigned int tail = cluster_next(&list->tail);

		/*
		 * Nested cluster lock, but both cluster locks are
		 * only acquired when we held swap_info_struct->lock
		 */
		ci_tail = ci + tail;
		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
		cluster_set_next(ci_tail, idx);
		spin_unlock(&ci_tail->lock);
		cluster_set_next_flag(&list->tail, idx, 0);
	}
}

static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
					   struct swap_cluster_info *ci)
{
	unsigned int idx;

	idx = cluster_next(&list->head);
	if (cluster_next(&list->tail) == idx) {
		cluster_set_null(&list->head);
		cluster_set_null(&list->tail);
	} else
		cluster_set_next_flag(&list->head,
				      cluster_next(&ci[idx]), 0);

	return idx;
}

/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
		unsigned int idx)
{
	/*
	 * If scan_swap_map() can't find a free cluster, it will check
	 * si->swap_map directly. To make sure the discarding cluster isn't
	 * taken by scan_swap_map(), mark the swap entries bad (occupied).
	 * They will be cleared after the discard.
	 */
	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
			SWAP_MAP_BAD, SWAPFILE_CLUSTER);

	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);

	schedule_work(&si->discard_work);
}

static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	struct swap_cluster_info *ci = si->cluster_info;

	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
	cluster_list_add_tail(&si->free_clusters, ci, idx);
}

/*
 * Actually do the discards. After a cluster discard is finished, the cluster
 * will be added to the free cluster list. Caller should hold si->lock.
 */
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
	struct swap_cluster_info *info, *ci;
	unsigned int idx;

	info = si->cluster_info;

	while (!cluster_list_empty(&si->discard_clusters)) {
		idx = cluster_list_del_first(&si->discard_clusters, info);
		spin_unlock(&si->lock);

		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
				SWAPFILE_CLUSTER);

		spin_lock(&si->lock);
		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
		__free_cluster(si, idx);
		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
				0, SWAPFILE_CLUSTER);
		unlock_cluster(ci);
	}
}

static void swap_discard_work(struct work_struct *work)
{
	struct swap_info_struct *si;

	si = container_of(work, struct swap_info_struct, discard_work);

	spin_lock(&si->lock);
	swap_do_scheduled_discard(si);
	spin_unlock(&si->lock);
}
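
/*
 * Summary of the deferred-discard path above (descriptive note):
 * swap_cluster_schedule_discard() marks the cluster's entries
 * SWAP_MAP_BAD and queues it, swap_discard_work() runs from the
 * workqueue, and swap_do_scheduled_discard() issues the discard,
 * clears the map, and moves the cluster to the free list.
 */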

static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
{
	struct swap_cluster_info *ci = si->cluster_info;

	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
	cluster_list_del_first(&si->free_clusters, ci);
	cluster_set_count_flag(ci + idx, 0, 0);
}

static void free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	struct swap_cluster_info *ci = si->cluster_info + idx;

	VM_BUG_ON(cluster_count(ci) != 0);
	/*
	 * If the swap is discardable, schedule a discard of the cluster
	 * instead of freeing it immediately. The cluster will be freed
	 * after the discard.
	 */
	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
		swap_cluster_schedule_discard(si, idx);
		return;
	}

	__free_cluster(si, idx);
}

/*
 * The cluster corresponding to page_nr will be used. The cluster will be
 * removed from free cluster list and its usage counter will be increased.
 */
static void inc_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;
	if (cluster_is_free(&cluster_info[idx]))
		alloc_cluster(p, idx);

	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) + 1);
}

/*
 * The cluster corresponding to page_nr decreases one usage. If the usage
 * counter becomes 0, which means no page in the cluster is in use, we can
 * optionally discard the cluster and add it to the free cluster list.
 */
static void dec_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;

	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) - 1);

	if (cluster_count(&cluster_info[idx]) == 0)
		free_cluster(p, idx);
}

/*
 * It's possible for scan_swap_map() to use a free cluster in the middle of
 * the free cluster list. Avoid such abuse to prevent list corruption.
 */
static bool
scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
	unsigned long offset)
{
	struct percpu_cluster *percpu_cluster;
	bool conflict;

	offset /= SWAPFILE_CLUSTER;
	conflict = !cluster_list_empty(&si->free_clusters) &&
		offset != cluster_list_first(&si->free_clusters) &&
		cluster_is_free(&si->cluster_info[offset]);

	if (!conflict)
		return false;

	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
	cluster_set_null(&percpu_cluster->index);
	return true;
}
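
/*
 * Walk-through of the per-CPU allocation below (descriptive note): each
 * CPU caches one cluster in si->percpu_cluster and hands out its slots
 * sequentially; when the cluster is exhausted, the CPU grabs the next
 * free cluster, falls back to reclaiming queued discards, and finally
 * gives up and lets the caller do a linear scan.
 */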

/*
 * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
 * might involve allocating a new cluster for current CPU too.
 */
static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
	unsigned long *offset, unsigned long *scan_base)
{
	struct percpu_cluster *cluster;
	struct swap_cluster_info *ci;
	bool found_free;
	unsigned long tmp, max;

new_cluster:
	cluster = this_cpu_ptr(si->percpu_cluster);
	if (cluster_is_null(&cluster->index)) {
		if (!cluster_list_empty(&si->free_clusters)) {
			cluster->index = si->free_clusters.head;
			cluster->next = cluster_next(&cluster->index) *
					SWAPFILE_CLUSTER;
		} else if (!cluster_list_empty(&si->discard_clusters)) {
			/*
			 * we don't have a free cluster, but some clusters are
			 * being discarded; do the discard now and reclaim them
			 */
			swap_do_scheduled_discard(si);
			*scan_base = *offset = si->cluster_next;
			goto new_cluster;
		} else
			return false;
	}

	found_free = false;

	/*
	 * Other CPUs can use our cluster if they can't find a free cluster,
	 * check if there is still a free entry in the cluster
	 */
	tmp = cluster->next;
	max = min_t(unsigned long, si->max,
		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
	if (tmp >= max) {
		cluster_set_null(&cluster->index);
		goto new_cluster;
	}
	ci = lock_cluster(si, tmp);
	while (tmp < max) {
		if (!si->swap_map[tmp]) {
			found_free = true;
			break;
		}
		tmp++;
	}
	unlock_cluster(ci);
	if (!found_free) {
		cluster_set_null(&cluster->index);
		goto new_cluster;
	}
	cluster->next = tmp + 1;
	*offset = tmp;
	*scan_base = tmp;
	return found_free;
}

static void __del_from_avail_list(struct swap_info_struct *p)
{
	int nid;

	for_each_node(nid)
		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
}

static void del_from_avail_list(struct swap_info_struct *p)
{
	spin_lock(&swap_avail_lock);
	__del_from_avail_list(p);
	spin_unlock(&swap_avail_lock);
}

static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
			     unsigned int nr_entries)
{
	unsigned int end = offset + nr_entries - 1;

	if (offset == si->lowest_bit)
		si->lowest_bit += nr_entries;
	if (end == si->highest_bit)
		si->highest_bit -= nr_entries;
	si->inuse_pages += nr_entries;
	if (si->inuse_pages == si->pages) {
		si->lowest_bit = si->max;
		si->highest_bit = 0;
		del_from_avail_list(si);
	}
}

static void add_to_avail_list(struct swap_info_struct *p)
{
	int nid;

	spin_lock(&swap_avail_lock);
	for_each_node(nid) {
		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
	}
	spin_unlock(&swap_avail_lock);
}
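
/*
 * Note: swap_avail_heads is a per-NUMA-node array of priority lists, so
 * a device is visible to allocations from every node; add/del therefore
 * walk all nodes under swap_avail_lock.
 */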

static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
			    unsigned int nr_entries)
{
	unsigned long end = offset + nr_entries - 1;
	void (*swap_slot_free_notify)(struct block_device *, unsigned long);

	if (offset < si->lowest_bit)
		si->lowest_bit = offset;
	if (end > si->highest_bit) {
		bool was_full = !si->highest_bit;

		si->highest_bit = end;
		if (was_full && (si->flags & SWP_WRITEOK))
			add_to_avail_list(si);
	}
	atomic_long_add(nr_entries, &nr_swap_pages);
	si->inuse_pages -= nr_entries;
	if (si->flags & SWP_BLKDEV)
		swap_slot_free_notify =
			si->bdev->bd_disk->fops->swap_slot_free_notify;
	else
		swap_slot_free_notify = NULL;
	while (offset <= end) {
		frontswap_invalidate_page(si->type, offset);
		if (swap_slot_free_notify)
			swap_slot_free_notify(si->bdev, offset);
		offset++;
	}
}

static int scan_swap_map_slots(struct swap_info_struct *si,
			       unsigned char usage, int nr,
			       swp_entry_t slots[])
{
	struct swap_cluster_info *ci;
	unsigned long offset;
	unsigned long scan_base;
	unsigned long last_in_cluster = 0;
	int latency_ration = LATENCY_LIMIT;
	int n_ret = 0;

	if (nr > SWAP_BATCH)
		nr = SWAP_BATCH;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 * And we let swap pages go all over an SSD partition.  Hugh
	 */

	si->flags += SWP_SCANNING;
	scan_base = offset = si->cluster_next;

	/* SSD algorithm */
	if (si->cluster_info) {
		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
			goto checks;
		else
			goto scan;
	}

	if (unlikely(!si->cluster_nr--)) {
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;
			goto checks;
		}

		spin_unlock(&si->lock);

		/*
		 * If seek is expensive, start searching for new cluster from
		 * start of partition, to minimize the span of allocated swap.
		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
		 */
		scan_base = offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= si->highest_bit; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&si->lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = scan_base;
		spin_lock(&si->lock);
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
	}

checks:
	if (si->cluster_info) {
		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
			/* take a break if we already got some slots */
			if (n_ret)
				goto done;
			if (!scan_swap_map_try_ssd_cluster(si, &offset,
							&scan_base))
				goto scan;
		}
	}
	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (offset > si->highest_bit)
		scan_base = offset = si->lowest_bit;

	ci = lock_cluster(si, offset);
	/* reuse swap entry of cache-only swap if not busy. */
	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
		int swap_was_freed;
		unlock_cluster(ci);
		spin_unlock(&si->lock);
		swap_was_freed = __try_to_reclaim_swap(si, offset);
		spin_lock(&si->lock);
		/* entry was freed successfully, try to use this again */
		if (swap_was_freed)
			goto checks;
		goto scan; /* check next one */
	}

	if (si->swap_map[offset]) {
		unlock_cluster(ci);
		if (!n_ret)
			goto scan;
		else
			goto done;
	}
	si->swap_map[offset] = usage;
	inc_cluster_info_page(si, si->cluster_info, offset);
	unlock_cluster(ci);

	swap_range_alloc(si, offset, 1);
	si->cluster_next = offset + 1;
	slots[n_ret++] = swp_entry(si->type, offset);

	/* got enough slots or reach max slots? */
	if ((n_ret == nr) || (offset >= si->highest_bit))
		goto done;

	/* search for next available slot */

	/* time to take a break? */
	if (unlikely(--latency_ration < 0)) {
		if (n_ret)
			goto done;
		spin_unlock(&si->lock);
		cond_resched();
		spin_lock(&si->lock);
		latency_ration = LATENCY_LIMIT;
	}

	/* try to get more slots in cluster */
	if (si->cluster_info) {
		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
			goto checks;
		else
			goto done;
	}
	/* non-ssd case */
	++offset;

	/* non-ssd case, still more slots in cluster? */
	if (si->cluster_nr && !si->swap_map[offset]) {
		--si->cluster_nr;
		goto checks;
	}

done:
	si->flags -= SWP_SCANNING;
	return n_ret;

scan:
	spin_unlock(&si->lock);
	while (++offset <= si->highest_bit) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
	}
	offset = si->lowest_bit;
	while (offset < scan_base) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
		offset++;
	}
	spin_lock(&si->lock);

no_page:
	si->flags -= SWP_SCANNING;
	return n_ret;
}
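
/*
 * scan_swap_map_slots() returns the number of entries it stored in
 * slots[] (at most SWAP_BATCH). A typical call, with si->lock held, is
 * (illustrative):
 *
 *	swp_entry_t slots[SWAP_BATCH];
 *	int got = scan_swap_map_slots(si, SWAP_HAS_CACHE, n, slots);
 *
 * as done from get_swap_pages() below.
 */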

static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
{
	unsigned long idx;
	struct swap_cluster_info *ci;
	unsigned long offset, i;
	unsigned char *map;

	/*
	 * Should not even be attempting cluster allocations when huge
	 * page swap is disabled.  Warn and fail the allocation.
	 */
	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
		VM_WARN_ON_ONCE(1);
		return 0;
	}

	if (cluster_list_empty(&si->free_clusters))
		return 0;

	idx = cluster_list_first(&si->free_clusters);
	offset = idx * SWAPFILE_CLUSTER;
	ci = lock_cluster(si, offset);
	alloc_cluster(si, idx);
	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);

	map = si->swap_map + offset;
	for (i = 0; i < SWAPFILE_CLUSTER; i++)
		map[i] = SWAP_HAS_CACHE;
	unlock_cluster(ci);
	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
	*slot = swp_entry(si->type, offset);

	return 1;
}

static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	unsigned long offset = idx * SWAPFILE_CLUSTER;
	struct swap_cluster_info *ci;

	ci = lock_cluster(si, offset);
	cluster_set_count_flag(ci, 0, 0);
	free_cluster(si, idx);
	unlock_cluster(ci);
	swap_range_free(si, offset, SWAPFILE_CLUSTER);
}

static unsigned long scan_swap_map(struct swap_info_struct *si,
				   unsigned char usage)
{
	swp_entry_t entry;
	int n_ret;

	n_ret = scan_swap_map_slots(si, usage, 1, &entry);

	if (n_ret)
		return swp_offset(entry);
	else
		return 0;

}
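
/*
 * Descriptive note: get_swap_pages() below allocates up to n_goal
 * order-0 entries, or a single SWAPFILE_CLUSTER-sized entry when
 * entry_size indicates a huge page; nr_swap_pages is debited up front
 * and credited back for any shortfall at check_out.
 */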

int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
{
	unsigned long size = swap_entry_size(entry_size);
	struct swap_info_struct *si, *next;
	long avail_pgs;
	int n_ret = 0;
	int node;

	/* Only single cluster request supported */
	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);

	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
	if (avail_pgs <= 0)
		goto noswap;

	if (n_goal > SWAP_BATCH)
		n_goal = SWAP_BATCH;

	if (n_goal > avail_pgs)
		n_goal = avail_pgs;

	atomic_long_sub(n_goal * size, &nr_swap_pages);

	spin_lock(&swap_avail_lock);

start_over:
	node = numa_node_id();
	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
		/* requeue si to after same-priority siblings */
		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
		spin_unlock(&swap_avail_lock);
		spin_lock(&si->lock);
		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
			spin_lock(&swap_avail_lock);
			if (plist_node_empty(&si->avail_lists[node])) {
				spin_unlock(&si->lock);
				goto nextsi;
			}
			WARN(!si->highest_bit,
			     "swap_info %d in list but !highest_bit\n",
			     si->type);
			WARN(!(si->flags & SWP_WRITEOK),
			     "swap_info %d in list but !SWP_WRITEOK\n",
			     si->type);
			__del_from_avail_list(si);
			spin_unlock(&si->lock);
			goto nextsi;
		}
		if (size == SWAPFILE_CLUSTER) {
			if (!(si->flags & SWP_FILE))
				n_ret = swap_alloc_cluster(si, swp_entries);
		} else
			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
						    n_goal, swp_entries);
		spin_unlock(&si->lock);
		if (n_ret || size == SWAPFILE_CLUSTER)
			goto check_out;
		pr_debug("scan_swap_map of si %d failed to find offset\n",
			si->type);

		spin_lock(&swap_avail_lock);
nextsi:
		/*
		 * if we got here, it's likely that si was almost full before,
		 * and since scan_swap_map() can drop the si->lock, multiple
		 * callers probably all tried to get a page from the same si
		 * and it filled up before we could get one; or, the si filled
		 * up between us dropping swap_avail_lock and taking si->lock.
		 * Since we dropped the swap_avail_lock, the swap_avail_head
		 * list may have been modified; so if next is still in the
		 * swap_avail_head list then try it, otherwise start over
		 * if we have not gotten any slots.
		 */
		if (plist_node_empty(&next->avail_lists[node]))
			goto start_over;
	}

	spin_unlock(&swap_avail_lock);

check_out:
	if (n_ret < n_goal)
		atomic_long_add((long)(n_goal - n_ret) * size,
				&nr_swap_pages);
noswap:
	return n_ret;
}

/* The only caller of this function is now suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si;
	pgoff_t offset;

	si = swap_info[type];
	spin_lock(&si->lock);
	if (si && (si->flags & SWP_WRITEOK)) {
		atomic_long_dec(&nr_swap_pages);
		/* This is called for allocating swap entry, not cache */
		offset = scan_swap_map(si, 1);
		if (offset) {
			spin_unlock(&si->lock);
			return swp_entry(type, offset);
		}
		atomic_long_inc(&nr_swap_pages);
	}
	spin_unlock(&si->lock);
	return (swp_entry_t) {0};
}

static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned long offset, type;

	if (!entry.val)
		goto out;
	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_nofile;
	p = swap_info[type];
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	return p;

bad_offset:
	pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}
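
/*
 * Lookup helpers, in increasing strength (descriptive note):
 * __swap_info_get() only validates the entry, _swap_info_get() below
 * additionally checks that the slot is in use, swap_info_get() also
 * takes si->lock, and swap_info_get_cont() hands that lock over between
 * devices when walking a batch of entries.
 */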

static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = __swap_info_get(entry);
	if (!p)
		goto out;
	if (!p->swap_map[swp_offset(entry)])
		goto bad_free;
	return p;

bad_free:
	pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);
	goto out;
out:
	return NULL;
}

static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);
	if (p)
		spin_lock(&p->lock);
	return p;
}

static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
					struct swap_info_struct *q)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);

	if (p != q) {
		if (q != NULL)
			spin_unlock(&q->lock);
		if (p != NULL)
			spin_lock(&p->lock);
	}
	return p;
}

static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
					      unsigned long offset,
					      unsigned char usage)
{
	unsigned char count;
	unsigned char has_cache;

	count = p->swap_map[offset];

	has_cache = count & SWAP_HAS_CACHE;
	count &= ~SWAP_HAS_CACHE;

	if (usage == SWAP_HAS_CACHE) {
		VM_BUG_ON(!has_cache);
		has_cache = 0;
	} else if (count == SWAP_MAP_SHMEM) {
		/*
		 * Or we could insist on shmem.c using a special
		 * swap_shmem_free() and free_shmem_swap_and_cache()...
		 */
		count = 0;
	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
		if (count == COUNT_CONTINUED) {
			if (swap_count_continued(p, offset, count))
				count = SWAP_MAP_MAX | COUNT_CONTINUED;
			else
				count = SWAP_MAP_MAX;
		} else
			count--;
	}

	usage = count | has_cache;
	p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;

	return usage;
}

static unsigned char __swap_entry_free(struct swap_info_struct *p,
				       swp_entry_t entry, unsigned char usage)
{
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);

	ci = lock_cluster_or_swap_info(p, offset);
	usage = __swap_entry_free_locked(p, offset, usage);
	unlock_cluster_or_swap_info(p, ci);

	return usage;
}

static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
{
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);
	unsigned char count;

	ci = lock_cluster(p, offset);
	count = p->swap_map[offset];
	VM_BUG_ON(count != SWAP_HAS_CACHE);
	p->swap_map[offset] = 0;
	dec_cluster_info_page(p, p->cluster_info, offset);
	unlock_cluster(ci);

	mem_cgroup_uncharge_swap(entry, 1);
	swap_range_free(p, offset, 1);
}

/*
 * Caller has made sure that the swap device corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);
	if (p) {
		if (!__swap_entry_free(p, entry, 1))
			free_swap_slot(entry);
	}
}
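
/*
 * Note the pattern above: __swap_entry_free() returning 0 means the
 * last map reference is gone, but the slot is kept as SWAP_HAS_CACHE
 * and handed to free_swap_slot(), which batches the final freeing
 * through swapcache_free_entries() below.
 */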
1210cb4b86baSKAMEZAWA Hiroyuki */ 1211a448f2d0SHuang Ying void put_swap_page(struct page *page, swp_entry_t entry) 121238d8b4e6SHuang Ying { 121338d8b4e6SHuang Ying unsigned long offset = swp_offset(entry); 121438d8b4e6SHuang Ying unsigned long idx = offset / SWAPFILE_CLUSTER; 121538d8b4e6SHuang Ying struct swap_cluster_info *ci; 121638d8b4e6SHuang Ying struct swap_info_struct *si; 121738d8b4e6SHuang Ying unsigned char *map; 1218a3aea839SHuang Ying unsigned int i, free_entries = 0; 1219a3aea839SHuang Ying unsigned char val; 1220a448f2d0SHuang Ying int size = swap_entry_size(hpage_nr_pages(page)); 1221fe5266d5SHuang Ying 1222a3aea839SHuang Ying si = _swap_info_get(entry); 122338d8b4e6SHuang Ying if (!si) 122438d8b4e6SHuang Ying return; 122538d8b4e6SHuang Ying 1226a448f2d0SHuang Ying if (size == SWAPFILE_CLUSTER) { 122738d8b4e6SHuang Ying ci = lock_cluster(si, offset); 1228e0709829SHuang Ying VM_BUG_ON(!cluster_is_huge(ci)); 122938d8b4e6SHuang Ying map = si->swap_map + offset; 123038d8b4e6SHuang Ying for (i = 0; i < SWAPFILE_CLUSTER; i++) { 1231a3aea839SHuang Ying val = map[i]; 1232a3aea839SHuang Ying VM_BUG_ON(!(val & SWAP_HAS_CACHE)); 1233a3aea839SHuang Ying if (val == SWAP_HAS_CACHE) 1234a3aea839SHuang Ying free_entries++; 123538d8b4e6SHuang Ying } 1236a3aea839SHuang Ying if (!free_entries) { 1237a3aea839SHuang Ying for (i = 0; i < SWAPFILE_CLUSTER; i++) 1238a3aea839SHuang Ying map[i] &= ~SWAP_HAS_CACHE; 1239a3aea839SHuang Ying } 1240e0709829SHuang Ying cluster_clear_huge(ci); 1241a3aea839SHuang Ying unlock_cluster(ci); 1242a3aea839SHuang Ying if (free_entries == SWAPFILE_CLUSTER) { 1243a3aea839SHuang Ying spin_lock(&si->lock); 1244a3aea839SHuang Ying ci = lock_cluster(si, offset); 1245a3aea839SHuang Ying memset(map, 0, SWAPFILE_CLUSTER); 124638d8b4e6SHuang Ying unlock_cluster(ci); 124738d8b4e6SHuang Ying mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER); 124838d8b4e6SHuang Ying swap_free_cluster(si, idx); 124938d8b4e6SHuang Ying spin_unlock(&si->lock); 1250a448f2d0SHuang Ying return; 1251a448f2d0SHuang Ying } 1252a448f2d0SHuang Ying } 1253a448f2d0SHuang Ying if (size == 1 || free_entries) { 1254a448f2d0SHuang Ying for (i = 0; i < size; i++, entry.val++) { 1255a3aea839SHuang Ying if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE)) 1256a3aea839SHuang Ying free_swap_slot(entry); 1257a3aea839SHuang Ying } 1258a3aea839SHuang Ying } 125938d8b4e6SHuang Ying } 126059807685SHuang Ying 1261fe5266d5SHuang Ying #ifdef CONFIG_THP_SWAP 126259807685SHuang Ying int split_swap_cluster(swp_entry_t entry) 126359807685SHuang Ying { 126459807685SHuang Ying struct swap_info_struct *si; 126559807685SHuang Ying struct swap_cluster_info *ci; 126659807685SHuang Ying unsigned long offset = swp_offset(entry); 126759807685SHuang Ying 126859807685SHuang Ying si = _swap_info_get(entry); 126959807685SHuang Ying if (!si) 127059807685SHuang Ying return -EBUSY; 127159807685SHuang Ying ci = lock_cluster(si, offset); 127259807685SHuang Ying cluster_clear_huge(ci); 127359807685SHuang Ying unlock_cluster(ci); 127459807685SHuang Ying return 0; 127559807685SHuang Ying } 1276fe5266d5SHuang Ying #endif 127738d8b4e6SHuang Ying 1278155b5f88SHuang Ying static int swp_entry_cmp(const void *ent1, const void *ent2) 1279155b5f88SHuang Ying { 1280155b5f88SHuang Ying const swp_entry_t *e1 = ent1, *e2 = ent2; 1281155b5f88SHuang Ying 1282155b5f88SHuang Ying return (int)swp_type(*e1) - (int)swp_type(*e2); 1283155b5f88SHuang Ying } 1284155b5f88SHuang Ying 12857c00bafeSTim Chen void swapcache_free_entries(swp_entry_t *entries, int n) 
12867c00bafeSTim Chen { 12877c00bafeSTim Chen struct swap_info_struct *p, *prev; 12887c00bafeSTim Chen int i; 12897c00bafeSTim Chen 12907c00bafeSTim Chen if (n <= 0) 12917c00bafeSTim Chen return; 12927c00bafeSTim Chen 12937c00bafeSTim Chen prev = NULL; 12947c00bafeSTim Chen p = NULL; 1295155b5f88SHuang Ying 1296155b5f88SHuang Ying /* 1297155b5f88SHuang Ying * Sort swap entries by swap device, so each lock is only taken once. 1298155b5f88SHuang Ying * nr_swapfiles isn't absolutely correct, but the overhead of sort() is 1299155b5f88SHuang Ying * so low that it isn't necessary to optimize further. 1300155b5f88SHuang Ying */ 1301155b5f88SHuang Ying if (nr_swapfiles > 1) 1302155b5f88SHuang Ying sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL); 13037c00bafeSTim Chen for (i = 0; i < n; ++i) { 13047c00bafeSTim Chen p = swap_info_get_cont(entries[i], prev); 1305235b6217SHuang, Ying if (p) 13067c00bafeSTim Chen swap_entry_free(p, entries[i]); 13077c00bafeSTim Chen prev = p; 13087c00bafeSTim Chen } 13097c00bafeSTim Chen if (p) 13107c00bafeSTim Chen spin_unlock(&p->lock); 1311cb4b86baSKAMEZAWA Hiroyuki } 1312cb4b86baSKAMEZAWA Hiroyuki 1313cb4b86baSKAMEZAWA Hiroyuki /* 1314c475a8abSHugh Dickins * How many references to page are currently swapped out? 1315570a335bSHugh Dickins * This does not give an exact answer when swap count is continued, 1316570a335bSHugh Dickins * but does include the high COUNT_CONTINUED flag to allow for that. 13171da177e4SLinus Torvalds */ 1318bde05d1cSHugh Dickins int page_swapcount(struct page *page) 13191da177e4SLinus Torvalds { 1320c475a8abSHugh Dickins int count = 0; 13211da177e4SLinus Torvalds struct swap_info_struct *p; 1322235b6217SHuang, Ying struct swap_cluster_info *ci; 13231da177e4SLinus Torvalds swp_entry_t entry; 1324235b6217SHuang, Ying unsigned long offset; 13251da177e4SLinus Torvalds 13264c21e2f2SHugh Dickins entry.val = page_private(page); 1327235b6217SHuang, Ying p = _swap_info_get(entry); 13281da177e4SLinus Torvalds if (p) { 1329235b6217SHuang, Ying offset = swp_offset(entry); 1330235b6217SHuang, Ying ci = lock_cluster_or_swap_info(p, offset); 1331235b6217SHuang, Ying count = swap_count(p->swap_map[offset]); 1332235b6217SHuang, Ying unlock_cluster_or_swap_info(p, ci); 13331da177e4SLinus Torvalds } 1334c475a8abSHugh Dickins return count; 13351da177e4SLinus Torvalds } 13361da177e4SLinus Torvalds 1337aa8d22a1SMinchan Kim int __swap_count(struct swap_info_struct *si, swp_entry_t entry) 1338aa8d22a1SMinchan Kim { 1339aa8d22a1SMinchan Kim pgoff_t offset = swp_offset(entry); 1340aa8d22a1SMinchan Kim 1341aa8d22a1SMinchan Kim return swap_count(si->swap_map[offset]); 1342aa8d22a1SMinchan Kim } 1343aa8d22a1SMinchan Kim 1344322b8afeSHuang Ying static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) 1345322b8afeSHuang Ying { 1346322b8afeSHuang Ying int count = 0; 1347322b8afeSHuang Ying pgoff_t offset = swp_offset(entry); 1348322b8afeSHuang Ying struct swap_cluster_info *ci; 1349322b8afeSHuang Ying 1350322b8afeSHuang Ying ci = lock_cluster_or_swap_info(si, offset); 1351322b8afeSHuang Ying count = swap_count(si->swap_map[offset]); 1352322b8afeSHuang Ying unlock_cluster_or_swap_info(si, ci); 1353322b8afeSHuang Ying return count; 1354322b8afeSHuang Ying } 1355322b8afeSHuang Ying 13561da177e4SLinus Torvalds /* 13578334b962SMinchan Kim * How many references to @entry are currently swapped out? 
1358e8c26ab6STim Chen * This does not give an exact answer when swap count is continued, 1359e8c26ab6STim Chen * but does include the high COUNT_CONTINUED flag to allow for that. 1360e8c26ab6STim Chen */ 1361e8c26ab6STim Chen int __swp_swapcount(swp_entry_t entry) 1362e8c26ab6STim Chen { 1363e8c26ab6STim Chen int count = 0; 1364e8c26ab6STim Chen struct swap_info_struct *si; 1365e8c26ab6STim Chen 1366e8c26ab6STim Chen si = __swap_info_get(entry); 1367322b8afeSHuang Ying if (si) 1368322b8afeSHuang Ying count = swap_swapcount(si, entry); 1369e8c26ab6STim Chen return count; 1370e8c26ab6STim Chen } 1371e8c26ab6STim Chen 1372e8c26ab6STim Chen /* 1373e8c26ab6STim Chen * How many references to @entry are currently swapped out? 13748334b962SMinchan Kim * This considers COUNT_CONTINUED so it returns exact answer. 13758334b962SMinchan Kim */ 13768334b962SMinchan Kim int swp_swapcount(swp_entry_t entry) 13778334b962SMinchan Kim { 13788334b962SMinchan Kim int count, tmp_count, n; 13798334b962SMinchan Kim struct swap_info_struct *p; 1380235b6217SHuang, Ying struct swap_cluster_info *ci; 13818334b962SMinchan Kim struct page *page; 13828334b962SMinchan Kim pgoff_t offset; 13838334b962SMinchan Kim unsigned char *map; 13848334b962SMinchan Kim 1385235b6217SHuang, Ying p = _swap_info_get(entry); 13868334b962SMinchan Kim if (!p) 13878334b962SMinchan Kim return 0; 13888334b962SMinchan Kim 1389235b6217SHuang, Ying offset = swp_offset(entry); 1390235b6217SHuang, Ying 1391235b6217SHuang, Ying ci = lock_cluster_or_swap_info(p, offset); 1392235b6217SHuang, Ying 1393235b6217SHuang, Ying count = swap_count(p->swap_map[offset]); 13948334b962SMinchan Kim if (!(count & COUNT_CONTINUED)) 13958334b962SMinchan Kim goto out; 13968334b962SMinchan Kim 13978334b962SMinchan Kim count &= ~COUNT_CONTINUED; 13988334b962SMinchan Kim n = SWAP_MAP_MAX + 1; 13998334b962SMinchan Kim 14008334b962SMinchan Kim page = vmalloc_to_page(p->swap_map + offset); 14018334b962SMinchan Kim offset &= ~PAGE_MASK; 14028334b962SMinchan Kim VM_BUG_ON(page_private(page) != SWP_CONTINUED); 14038334b962SMinchan Kim 14048334b962SMinchan Kim do { 1405a8ae4991SGeliang Tang page = list_next_entry(page, lru); 14068334b962SMinchan Kim map = kmap_atomic(page); 14078334b962SMinchan Kim tmp_count = map[offset]; 14088334b962SMinchan Kim kunmap_atomic(map); 14098334b962SMinchan Kim 14108334b962SMinchan Kim count += (tmp_count & ~COUNT_CONTINUED) * n; 14118334b962SMinchan Kim n *= (SWAP_CONT_MAX + 1); 14128334b962SMinchan Kim } while (tmp_count & COUNT_CONTINUED); 14138334b962SMinchan Kim out: 1414235b6217SHuang, Ying unlock_cluster_or_swap_info(p, ci); 14158334b962SMinchan Kim return count; 14168334b962SMinchan Kim } 14178334b962SMinchan Kim 1418e0709829SHuang Ying static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, 1419e0709829SHuang Ying swp_entry_t entry) 1420e0709829SHuang Ying { 1421e0709829SHuang Ying struct swap_cluster_info *ci; 1422e0709829SHuang Ying unsigned char *map = si->swap_map; 1423e0709829SHuang Ying unsigned long roffset = swp_offset(entry); 1424e0709829SHuang Ying unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER); 1425e0709829SHuang Ying int i; 1426e0709829SHuang Ying bool ret = false; 1427e0709829SHuang Ying 1428e0709829SHuang Ying ci = lock_cluster_or_swap_info(si, offset); 1429e0709829SHuang Ying if (!ci || !cluster_is_huge(ci)) { 1430afa4711eSHuang Ying if (swap_count(map[roffset])) 1431e0709829SHuang Ying ret = true; 1432e0709829SHuang Ying goto unlock_out; 1433e0709829SHuang Ying } 1434e0709829SHuang Ying for (i = 
0; i < SWAPFILE_CLUSTER; i++) { 1435afa4711eSHuang Ying if (swap_count(map[offset + i])) { 1436e0709829SHuang Ying ret = true; 1437e0709829SHuang Ying break; 1438e0709829SHuang Ying } 1439e0709829SHuang Ying } 1440e0709829SHuang Ying unlock_out: 1441e0709829SHuang Ying unlock_cluster_or_swap_info(si, ci); 1442e0709829SHuang Ying return ret; 1443e0709829SHuang Ying } 1444e0709829SHuang Ying 1445e0709829SHuang Ying static bool page_swapped(struct page *page) 1446e0709829SHuang Ying { 1447e0709829SHuang Ying swp_entry_t entry; 1448e0709829SHuang Ying struct swap_info_struct *si; 1449e0709829SHuang Ying 1450fe5266d5SHuang Ying if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) 1451e0709829SHuang Ying return page_swapcount(page) != 0; 1452e0709829SHuang Ying 1453e0709829SHuang Ying page = compound_head(page); 1454e0709829SHuang Ying entry.val = page_private(page); 1455e0709829SHuang Ying si = _swap_info_get(entry); 1456e0709829SHuang Ying if (si) 1457e0709829SHuang Ying return swap_page_trans_huge_swapped(si, entry); 1458e0709829SHuang Ying return false; 1459e0709829SHuang Ying } 1460ba3c4ce6SHuang Ying 1461ba3c4ce6SHuang Ying static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount, 1462ba3c4ce6SHuang Ying int *total_swapcount) 1463ba3c4ce6SHuang Ying { 1464ba3c4ce6SHuang Ying int i, map_swapcount, _total_mapcount, _total_swapcount; 1465ba3c4ce6SHuang Ying unsigned long offset = 0; 1466ba3c4ce6SHuang Ying struct swap_info_struct *si; 1467ba3c4ce6SHuang Ying struct swap_cluster_info *ci = NULL; 1468ba3c4ce6SHuang Ying unsigned char *map = NULL; 1469ba3c4ce6SHuang Ying int mapcount, swapcount = 0; 1470ba3c4ce6SHuang Ying 1471ba3c4ce6SHuang Ying /* hugetlbfs shouldn't call it */ 1472ba3c4ce6SHuang Ying VM_BUG_ON_PAGE(PageHuge(page), page); 1473ba3c4ce6SHuang Ying 1474fe5266d5SHuang Ying if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) { 1475fe5266d5SHuang Ying mapcount = page_trans_huge_mapcount(page, total_mapcount); 1476ba3c4ce6SHuang Ying if (PageSwapCache(page)) 1477ba3c4ce6SHuang Ying swapcount = page_swapcount(page); 1478ba3c4ce6SHuang Ying if (total_swapcount) 1479ba3c4ce6SHuang Ying *total_swapcount = swapcount; 1480ba3c4ce6SHuang Ying return mapcount + swapcount; 1481ba3c4ce6SHuang Ying } 1482ba3c4ce6SHuang Ying 1483ba3c4ce6SHuang Ying page = compound_head(page); 1484ba3c4ce6SHuang Ying 1485ba3c4ce6SHuang Ying _total_mapcount = _total_swapcount = map_swapcount = 0; 1486ba3c4ce6SHuang Ying if (PageSwapCache(page)) { 1487ba3c4ce6SHuang Ying swp_entry_t entry; 1488ba3c4ce6SHuang Ying 1489ba3c4ce6SHuang Ying entry.val = page_private(page); 1490ba3c4ce6SHuang Ying si = _swap_info_get(entry); 1491ba3c4ce6SHuang Ying if (si) { 1492ba3c4ce6SHuang Ying map = si->swap_map; 1493ba3c4ce6SHuang Ying offset = swp_offset(entry); 1494ba3c4ce6SHuang Ying } 1495ba3c4ce6SHuang Ying } 1496ba3c4ce6SHuang Ying if (map) 1497ba3c4ce6SHuang Ying ci = lock_cluster(si, offset); 1498ba3c4ce6SHuang Ying for (i = 0; i < HPAGE_PMD_NR; i++) { 1499ba3c4ce6SHuang Ying mapcount = atomic_read(&page[i]._mapcount) + 1; 1500ba3c4ce6SHuang Ying _total_mapcount += mapcount; 1501ba3c4ce6SHuang Ying if (map) { 1502ba3c4ce6SHuang Ying swapcount = swap_count(map[offset + i]); 1503ba3c4ce6SHuang Ying _total_swapcount += swapcount; 1504ba3c4ce6SHuang Ying } 1505ba3c4ce6SHuang Ying map_swapcount = max(map_swapcount, mapcount + swapcount); 1506ba3c4ce6SHuang Ying } 1507ba3c4ce6SHuang Ying unlock_cluster(ci); 1508ba3c4ce6SHuang Ying if (PageDoubleMap(page)) { 
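		/*
		 * Double-mapped THP: each subpage's _mapcount is
		 * inflated by one while PageDoubleMap is set, so
		 * discount that from the totals computed above.
		 */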
1509ba3c4ce6SHuang Ying map_swapcount -= 1; 1510ba3c4ce6SHuang Ying _total_mapcount -= HPAGE_PMD_NR; 1511ba3c4ce6SHuang Ying } 1512ba3c4ce6SHuang Ying mapcount = compound_mapcount(page); 1513ba3c4ce6SHuang Ying map_swapcount += mapcount; 1514ba3c4ce6SHuang Ying _total_mapcount += mapcount; 1515ba3c4ce6SHuang Ying if (total_mapcount) 1516ba3c4ce6SHuang Ying *total_mapcount = _total_mapcount; 1517ba3c4ce6SHuang Ying if (total_swapcount) 1518ba3c4ce6SHuang Ying *total_swapcount = _total_swapcount; 1519ba3c4ce6SHuang Ying 1520ba3c4ce6SHuang Ying return map_swapcount; 1521ba3c4ce6SHuang Ying } 1522e0709829SHuang Ying 15238334b962SMinchan Kim /* 15247b1fe597SHugh Dickins * We can write to an anon page without COW if there are no other references 15257b1fe597SHugh Dickins * to it. And as a side-effect, free up its swap: because the old content 15267b1fe597SHugh Dickins * on disk will never be read, and seeking back there to write new content 15277b1fe597SHugh Dickins * later would only waste time away from clustering. 15286d0a07edSAndrea Arcangeli * 1529ba3c4ce6SHuang Ying * NOTE: total_map_swapcount should not be relied upon by the caller if 15306d0a07edSAndrea Arcangeli * reuse_swap_page() returns false, but it may be always overwritten 15316d0a07edSAndrea Arcangeli * (see the other implementation for CONFIG_SWAP=n). 15321da177e4SLinus Torvalds */ 1533ba3c4ce6SHuang Ying bool reuse_swap_page(struct page *page, int *total_map_swapcount) 15341da177e4SLinus Torvalds { 1535ba3c4ce6SHuang Ying int count, total_mapcount, total_swapcount; 15361da177e4SLinus Torvalds 1537309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 15385ad64688SHugh Dickins if (unlikely(PageKsm(page))) 15396d0a07edSAndrea Arcangeli return false; 1540ba3c4ce6SHuang Ying count = page_trans_huge_map_swapcount(page, &total_mapcount, 1541ba3c4ce6SHuang Ying &total_swapcount); 1542ba3c4ce6SHuang Ying if (total_map_swapcount) 1543ba3c4ce6SHuang Ying *total_map_swapcount = total_mapcount + total_swapcount; 1544ba3c4ce6SHuang Ying if (count == 1 && PageSwapCache(page) && 1545ba3c4ce6SHuang Ying (likely(!PageTransCompound(page)) || 1546ba3c4ce6SHuang Ying /* The remaining swap count will be freed soon */ 1547ba3c4ce6SHuang Ying total_swapcount == page_swapcount(page))) { 1548f0571429SMinchan Kim if (!PageWriteback(page)) { 1549ba3c4ce6SHuang Ying page = compound_head(page); 15507b1fe597SHugh Dickins delete_from_swap_cache(page); 15517b1fe597SHugh Dickins SetPageDirty(page); 1552f0571429SMinchan Kim } else { 1553f0571429SMinchan Kim swp_entry_t entry; 1554f0571429SMinchan Kim struct swap_info_struct *p; 1555f0571429SMinchan Kim 1556f0571429SMinchan Kim entry.val = page_private(page); 1557f0571429SMinchan Kim p = swap_info_get(entry); 1558f0571429SMinchan Kim if (p->flags & SWP_STABLE_WRITES) { 1559f0571429SMinchan Kim spin_unlock(&p->lock); 1560f0571429SMinchan Kim return false; 1561f0571429SMinchan Kim } 1562f0571429SMinchan Kim spin_unlock(&p->lock); 15637b1fe597SHugh Dickins } 15647b1fe597SHugh Dickins } 1565ba3c4ce6SHuang Ying 15665ad64688SHugh Dickins return count <= 1; 15671da177e4SLinus Torvalds } 15681da177e4SLinus Torvalds 15691da177e4SLinus Torvalds /* 1570a2c43eedSHugh Dickins * If swap is getting full, or if there are no more mappings of this page, 1571a2c43eedSHugh Dickins * then try_to_free_swap is called to free its swap space. 
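 *
 * The page must be locked by the caller. Illustrative sketch only:
 *
 *	lock_page(page);
 *	if (try_to_free_swap(page))
 *		... swap slot freed, page redirtied in memory ...
 *	unlock_page(page);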
15721da177e4SLinus Torvalds */ 1573a2c43eedSHugh Dickins int try_to_free_swap(struct page *page) 15741da177e4SLinus Torvalds { 1575309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 15761da177e4SLinus Torvalds 15771da177e4SLinus Torvalds if (!PageSwapCache(page)) 15781da177e4SLinus Torvalds return 0; 15791da177e4SLinus Torvalds if (PageWriteback(page)) 15801da177e4SLinus Torvalds return 0; 1581e0709829SHuang Ying if (page_swapped(page)) 15821da177e4SLinus Torvalds return 0; 15831da177e4SLinus Torvalds 1584b73d7fceSHugh Dickins /* 1585b73d7fceSHugh Dickins * Once hibernation has begun to create its image of memory, 1586b73d7fceSHugh Dickins * there's a danger that one of the calls to try_to_free_swap() 1587b73d7fceSHugh Dickins * - most probably a call from __try_to_reclaim_swap() while 1588b73d7fceSHugh Dickins * hibernation is allocating its own swap pages for the image, 1589b73d7fceSHugh Dickins * but conceivably even a call from memory reclaim - will free 1590b73d7fceSHugh Dickins * the swap from a page which has already been recorded in the 1591b73d7fceSHugh Dickins * image as a clean swapcache page, and then reuse its swap for 1592b73d7fceSHugh Dickins * another page of the image. On waking from hibernation, the 1593b73d7fceSHugh Dickins * original page might be freed under memory pressure, then 1594b73d7fceSHugh Dickins * later read back in from swap, now with the wrong data. 1595b73d7fceSHugh Dickins * 15962de1a7e4SSeth Jennings * Hibernation suspends storage while it is writing the image 1597f90ac398SMel Gorman * to disk so check that here. 1598b73d7fceSHugh Dickins */ 1599f90ac398SMel Gorman if (pm_suspended_storage()) 1600b73d7fceSHugh Dickins return 0; 1601b73d7fceSHugh Dickins 1602e0709829SHuang Ying page = compound_head(page); 1603a2c43eedSHugh Dickins delete_from_swap_cache(page); 16041da177e4SLinus Torvalds SetPageDirty(page); 1605a2c43eedSHugh Dickins return 1; 160668a22394SRik van Riel } 160768a22394SRik van Riel 160868a22394SRik van Riel /* 16091da177e4SLinus Torvalds * Free the swap entry like above, but also try to 16101da177e4SLinus Torvalds * free the page cache entry if it is the last user. 16111da177e4SLinus Torvalds */ 16122509ef26SHugh Dickins int free_swap_and_cache(swp_entry_t entry) 16131da177e4SLinus Torvalds { 16141da177e4SLinus Torvalds struct swap_info_struct *p; 16151da177e4SLinus Torvalds struct page *page = NULL; 16167c00bafeSTim Chen unsigned char count; 16171da177e4SLinus Torvalds 1618a7420aa5SAndi Kleen if (non_swap_entry(entry)) 16192509ef26SHugh Dickins return 1; 16200697212aSChristoph Lameter 16217c00bafeSTim Chen p = _swap_info_get(entry); 16221da177e4SLinus Torvalds if (p) { 16237c00bafeSTim Chen count = __swap_entry_free(p, entry, 1); 1624e0709829SHuang Ying if (count == SWAP_HAS_CACHE && 1625e0709829SHuang Ying !swap_page_trans_huge_swapped(p, entry)) { 162633806f06SShaohua Li page = find_get_page(swap_address_space(entry), 1627f6ab1f7fSHuang Ying swp_offset(entry)); 16288413ac9dSNick Piggin if (page && !trylock_page(page)) { 162909cbfeafSKirill A. Shutemov put_page(page); 163093fac704SNick Piggin page = NULL; 163193fac704SNick Piggin } 16327c00bafeSTim Chen } else if (!count) 163367afa38eSTim Chen free_swap_slot(entry); 16341da177e4SLinus Torvalds } 16351da177e4SLinus Torvalds if (page) { 1636a2c43eedSHugh Dickins /* 1637a2c43eedSHugh Dickins * Not mapped elsewhere, or swap space full? Free it! 1638a2c43eedSHugh Dickins * Also recheck PageSwapCache now page is locked (above). 
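 * For a THP, also require that no entry in its swap cluster still
 * holds a swap count, per swap_page_trans_huge_swapped() below.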
1639a2c43eedSHugh Dickins */ 164093fac704SNick Piggin if (PageSwapCache(page) && !PageWriteback(page) && 1641322b8afeSHuang Ying (!page_mapped(page) || mem_cgroup_swap_full(page)) && 1642e0709829SHuang Ying !swap_page_trans_huge_swapped(p, entry)) { 1643e0709829SHuang Ying page = compound_head(page); 16441da177e4SLinus Torvalds delete_from_swap_cache(page); 16451da177e4SLinus Torvalds SetPageDirty(page); 16461da177e4SLinus Torvalds } 16471da177e4SLinus Torvalds unlock_page(page); 164809cbfeafSKirill A. Shutemov put_page(page); 16491da177e4SLinus Torvalds } 16502509ef26SHugh Dickins return p != NULL; 16511da177e4SLinus Torvalds } 16521da177e4SLinus Torvalds 1653b0cb1a19SRafael J. Wysocki #ifdef CONFIG_HIBERNATION 1654f577eb30SRafael J. Wysocki /* 1655915bae9eSRafael J. Wysocki * Find the swap type that corresponds to given device (if any). 1656f577eb30SRafael J. Wysocki * 1657915bae9eSRafael J. Wysocki * @offset - number of the PAGE_SIZE-sized block of the device, starting 1658915bae9eSRafael J. Wysocki * from 0, in which the swap header is expected to be located. 1659915bae9eSRafael J. Wysocki * 1660915bae9eSRafael J. Wysocki * This is needed for the suspend to disk (aka swsusp). 1661f577eb30SRafael J. Wysocki */ 16627bf23687SRafael J. Wysocki int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) 1663f577eb30SRafael J. Wysocki { 1664915bae9eSRafael J. Wysocki struct block_device *bdev = NULL; 1665efa90a98SHugh Dickins int type; 1666f577eb30SRafael J. Wysocki 1667915bae9eSRafael J. Wysocki if (device) 1668915bae9eSRafael J. Wysocki bdev = bdget(device); 1669915bae9eSRafael J. Wysocki 1670f577eb30SRafael J. Wysocki spin_lock(&swap_lock); 1671efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) { 1672efa90a98SHugh Dickins struct swap_info_struct *sis = swap_info[type]; 1673f577eb30SRafael J. Wysocki 1674915bae9eSRafael J. Wysocki if (!(sis->flags & SWP_WRITEOK)) 1675f577eb30SRafael J. Wysocki continue; 1676b6b5bce3SRafael J. Wysocki 1677915bae9eSRafael J. Wysocki if (!bdev) { 16787bf23687SRafael J. Wysocki if (bdev_p) 1679dddac6a7SAlan Jenkins *bdev_p = bdgrab(sis->bdev); 16807bf23687SRafael J. Wysocki 16816e1819d6SRafael J. Wysocki spin_unlock(&swap_lock); 1682efa90a98SHugh Dickins return type; 16836e1819d6SRafael J. Wysocki } 1684915bae9eSRafael J. Wysocki if (bdev == sis->bdev) { 16859625a5f2SHugh Dickins struct swap_extent *se = &sis->first_swap_extent; 1686915bae9eSRafael J. Wysocki 1687915bae9eSRafael J. Wysocki if (se->start_block == offset) { 16887bf23687SRafael J. Wysocki if (bdev_p) 1689dddac6a7SAlan Jenkins *bdev_p = bdgrab(sis->bdev); 16907bf23687SRafael J. Wysocki 1691f577eb30SRafael J. Wysocki spin_unlock(&swap_lock); 1692915bae9eSRafael J. Wysocki bdput(bdev); 1693efa90a98SHugh Dickins return type; 1694f577eb30SRafael J. Wysocki } 1695f577eb30SRafael J. Wysocki } 1696915bae9eSRafael J. Wysocki } 1697f577eb30SRafael J. Wysocki spin_unlock(&swap_lock); 1698915bae9eSRafael J. Wysocki if (bdev) 1699915bae9eSRafael J. Wysocki bdput(bdev); 1700915bae9eSRafael J. Wysocki 1701f577eb30SRafael J. Wysocki return -ENODEV; 1702f577eb30SRafael J. Wysocki } 1703f577eb30SRafael J. Wysocki 1704f577eb30SRafael J. Wysocki /* 170573c34b6aSHugh Dickins * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev 170673c34b6aSHugh Dickins * corresponding to given index in swap_info (swap type). 
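 *
 * Illustrative use (cf. hibernation's alloc_swapdev_block()):
 *
 *	sector_t block = swapdev_block(swap_type, offset);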
170773c34b6aSHugh Dickins */ 170873c34b6aSHugh Dickins sector_t swapdev_block(int type, pgoff_t offset) 170973c34b6aSHugh Dickins { 171073c34b6aSHugh Dickins struct block_device *bdev; 171173c34b6aSHugh Dickins 171273c34b6aSHugh Dickins if ((unsigned int)type >= nr_swapfiles) 171373c34b6aSHugh Dickins return 0; 171473c34b6aSHugh Dickins if (!(swap_info[type]->flags & SWP_WRITEOK)) 171573c34b6aSHugh Dickins return 0; 1716d4906e1aSLee Schermerhorn return map_swap_entry(swp_entry(type, offset), &bdev); 171773c34b6aSHugh Dickins } 171873c34b6aSHugh Dickins 171973c34b6aSHugh Dickins /* 1720f577eb30SRafael J. Wysocki * Return either the total number of swap pages of given type, or the number 1721f577eb30SRafael J. Wysocki * of free pages of that type (depending on @free) 1722f577eb30SRafael J. Wysocki * 1723f577eb30SRafael J. Wysocki * This is needed for software suspend 1724f577eb30SRafael J. Wysocki */ 1725f577eb30SRafael J. Wysocki unsigned int count_swap_pages(int type, int free) 1726f577eb30SRafael J. Wysocki { 1727f577eb30SRafael J. Wysocki unsigned int n = 0; 1728f577eb30SRafael J. Wysocki 1729f577eb30SRafael J. Wysocki spin_lock(&swap_lock); 1730efa90a98SHugh Dickins if ((unsigned int)type < nr_swapfiles) { 1731efa90a98SHugh Dickins struct swap_info_struct *sis = swap_info[type]; 1732efa90a98SHugh Dickins 1733ec8acf20SShaohua Li spin_lock(&sis->lock); 1734efa90a98SHugh Dickins if (sis->flags & SWP_WRITEOK) { 1735efa90a98SHugh Dickins n = sis->pages; 1736f577eb30SRafael J. Wysocki if (free) 1737efa90a98SHugh Dickins n -= sis->inuse_pages; 1738efa90a98SHugh Dickins } 1739ec8acf20SShaohua Li spin_unlock(&sis->lock); 1740f577eb30SRafael J. Wysocki } 1741f577eb30SRafael J. Wysocki spin_unlock(&swap_lock); 1742f577eb30SRafael J. Wysocki return n; 1743f577eb30SRafael J. Wysocki } 174473c34b6aSHugh Dickins #endif /* CONFIG_HIBERNATION */ 1745f577eb30SRafael J. Wysocki 17469f8bdb3fSHugh Dickins static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte) 1747179ef71cSCyrill Gorcunov { 17489f8bdb3fSHugh Dickins return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte); 1749179ef71cSCyrill Gorcunov } 1750179ef71cSCyrill Gorcunov 17511da177e4SLinus Torvalds /* 175272866f6fSHugh Dickins * No need to decide whether this PTE shares the swap entry with others, 175372866f6fSHugh Dickins * just let do_wp_page work it out if a write is requested later - to 175472866f6fSHugh Dickins * force COW, vm_page_prot omits write permission from any private vma. 17551da177e4SLinus Torvalds */ 1756044d66c1SHugh Dickins static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, 17571da177e4SLinus Torvalds unsigned long addr, swp_entry_t entry, struct page *page) 17581da177e4SLinus Torvalds { 17599e16b7fbSHugh Dickins struct page *swapcache; 176072835c86SJohannes Weiner struct mem_cgroup *memcg; 1761044d66c1SHugh Dickins spinlock_t *ptl; 1762044d66c1SHugh Dickins pte_t *pte; 1763044d66c1SHugh Dickins int ret = 1; 1764044d66c1SHugh Dickins 17659e16b7fbSHugh Dickins swapcache = page; 17669e16b7fbSHugh Dickins page = ksm_might_need_to_copy(page, vma, addr); 17679e16b7fbSHugh Dickins if (unlikely(!page)) 17689e16b7fbSHugh Dickins return -ENOMEM; 17699e16b7fbSHugh Dickins 1770f627c2f5SKirill A. Shutemov if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, 1771f627c2f5SKirill A. 
Shutemov &memcg, false)) { 1772044d66c1SHugh Dickins ret = -ENOMEM; 177385d9fc89SKAMEZAWA Hiroyuki goto out_nolock; 177485d9fc89SKAMEZAWA Hiroyuki } 1775044d66c1SHugh Dickins 1776044d66c1SHugh Dickins pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 17779f8bdb3fSHugh Dickins if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) { 1778f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 1779044d66c1SHugh Dickins ret = 0; 1780044d66c1SHugh Dickins goto out; 1781044d66c1SHugh Dickins } 17828a9f3ccdSBalbir Singh 1783b084d435SKAMEZAWA Hiroyuki dec_mm_counter(vma->vm_mm, MM_SWAPENTS); 1784d559db08SKAMEZAWA Hiroyuki inc_mm_counter(vma->vm_mm, MM_ANONPAGES); 17851da177e4SLinus Torvalds get_page(page); 17861da177e4SLinus Torvalds set_pte_at(vma->vm_mm, addr, pte, 17871da177e4SLinus Torvalds pte_mkold(mk_pte(page, vma->vm_page_prot))); 178800501b53SJohannes Weiner if (page == swapcache) { 1789d281ee61SKirill A. Shutemov page_add_anon_rmap(page, vma, addr, false); 1790f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, true, false); 179100501b53SJohannes Weiner } else { /* ksm created a completely new copy */ 1792d281ee61SKirill A. Shutemov page_add_new_anon_rmap(page, vma, addr, false); 1793f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, false); 179400501b53SJohannes Weiner lru_cache_add_active_or_unevictable(page, vma); 179500501b53SJohannes Weiner } 17961da177e4SLinus Torvalds swap_free(entry); 17971da177e4SLinus Torvalds /* 17981da177e4SLinus Torvalds * Move the page to the active list so it is not 17991da177e4SLinus Torvalds * immediately swapped out again after swapon. 18001da177e4SLinus Torvalds */ 18011da177e4SLinus Torvalds activate_page(page); 1802044d66c1SHugh Dickins out: 1803044d66c1SHugh Dickins pte_unmap_unlock(pte, ptl); 180485d9fc89SKAMEZAWA Hiroyuki out_nolock: 18059e16b7fbSHugh Dickins if (page != swapcache) { 18069e16b7fbSHugh Dickins unlock_page(page); 18079e16b7fbSHugh Dickins put_page(page); 18089e16b7fbSHugh Dickins } 1809044d66c1SHugh Dickins return ret; 18101da177e4SLinus Torvalds } 18111da177e4SLinus Torvalds 18121da177e4SLinus Torvalds static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 18131da177e4SLinus Torvalds unsigned long addr, unsigned long end, 18141da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 18151da177e4SLinus Torvalds { 18161da177e4SLinus Torvalds pte_t swp_pte = swp_entry_to_pte(entry); 1817705e87c0SHugh Dickins pte_t *pte; 18188a9f3ccdSBalbir Singh int ret = 0; 18191da177e4SLinus Torvalds 1820044d66c1SHugh Dickins /* 1821044d66c1SHugh Dickins * We don't actually need pte lock while scanning for swp_pte: since 1822044d66c1SHugh Dickins * we hold page lock and mmap_sem, swp_pte cannot be inserted into the 1823044d66c1SHugh Dickins * page table while we're scanning; though it could get zapped, and on 1824044d66c1SHugh Dickins * some architectures (e.g. x86_32 with PAE) we might catch a glimpse 1825044d66c1SHugh Dickins * of unmatched parts which look like swp_pte, so unuse_pte must 1826044d66c1SHugh Dickins * recheck under pte lock. Scanning without pte lock lets it be 18272de1a7e4SSeth Jennings * preemptable whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE. 1828044d66c1SHugh Dickins */ 1829044d66c1SHugh Dickins pte = pte_offset_map(pmd, addr); 18301da177e4SLinus Torvalds do { 18311da177e4SLinus Torvalds /* 18321da177e4SLinus Torvalds * swapoff spends a _lot_ of time in this loop! 18331da177e4SLinus Torvalds * Test inline before going to call unuse_pte. 
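 * The inline test is pte_same_as_swp() against the precomputed
 * swp_pte, so the common non-matching case costs no function call.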
18341da177e4SLinus Torvalds */ 18359f8bdb3fSHugh Dickins if (unlikely(pte_same_as_swp(*pte, swp_pte))) { 1836044d66c1SHugh Dickins pte_unmap(pte); 1837044d66c1SHugh Dickins ret = unuse_pte(vma, pmd, addr, entry, page); 1838044d66c1SHugh Dickins if (ret) 1839044d66c1SHugh Dickins goto out; 1840044d66c1SHugh Dickins pte = pte_offset_map(pmd, addr); 18411da177e4SLinus Torvalds } 18421da177e4SLinus Torvalds } while (pte++, addr += PAGE_SIZE, addr != end); 1843044d66c1SHugh Dickins pte_unmap(pte - 1); 1844044d66c1SHugh Dickins out: 18458a9f3ccdSBalbir Singh return ret; 18461da177e4SLinus Torvalds } 18471da177e4SLinus Torvalds 18481da177e4SLinus Torvalds static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, 18491da177e4SLinus Torvalds unsigned long addr, unsigned long end, 18501da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 18511da177e4SLinus Torvalds { 18521da177e4SLinus Torvalds pmd_t *pmd; 18531da177e4SLinus Torvalds unsigned long next; 18548a9f3ccdSBalbir Singh int ret; 18551da177e4SLinus Torvalds 18561da177e4SLinus Torvalds pmd = pmd_offset(pud, addr); 18571da177e4SLinus Torvalds do { 1858dc644a07SHugh Dickins cond_resched(); 18591da177e4SLinus Torvalds next = pmd_addr_end(addr, end); 18601a5a9906SAndrea Arcangeli if (pmd_none_or_trans_huge_or_clear_bad(pmd)) 18611da177e4SLinus Torvalds continue; 18628a9f3ccdSBalbir Singh ret = unuse_pte_range(vma, pmd, addr, next, entry, page); 18638a9f3ccdSBalbir Singh if (ret) 18648a9f3ccdSBalbir Singh return ret; 18651da177e4SLinus Torvalds } while (pmd++, addr = next, addr != end); 18661da177e4SLinus Torvalds return 0; 18671da177e4SLinus Torvalds } 18681da177e4SLinus Torvalds 1869c2febafcSKirill A. Shutemov static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d, 18701da177e4SLinus Torvalds unsigned long addr, unsigned long end, 18711da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 18721da177e4SLinus Torvalds { 18731da177e4SLinus Torvalds pud_t *pud; 18741da177e4SLinus Torvalds unsigned long next; 18758a9f3ccdSBalbir Singh int ret; 18761da177e4SLinus Torvalds 1877c2febafcSKirill A. Shutemov pud = pud_offset(p4d, addr); 18781da177e4SLinus Torvalds do { 18791da177e4SLinus Torvalds next = pud_addr_end(addr, end); 18801da177e4SLinus Torvalds if (pud_none_or_clear_bad(pud)) 18811da177e4SLinus Torvalds continue; 18828a9f3ccdSBalbir Singh ret = unuse_pmd_range(vma, pud, addr, next, entry, page); 18838a9f3ccdSBalbir Singh if (ret) 18848a9f3ccdSBalbir Singh return ret; 18851da177e4SLinus Torvalds } while (pud++, addr = next, addr != end); 18861da177e4SLinus Torvalds return 0; 18871da177e4SLinus Torvalds } 18881da177e4SLinus Torvalds 1889c2febafcSKirill A. Shutemov static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, 1890c2febafcSKirill A. Shutemov unsigned long addr, unsigned long end, 1891c2febafcSKirill A. Shutemov swp_entry_t entry, struct page *page) 1892c2febafcSKirill A. Shutemov { 1893c2febafcSKirill A. Shutemov p4d_t *p4d; 1894c2febafcSKirill A. Shutemov unsigned long next; 1895c2febafcSKirill A. Shutemov int ret; 1896c2febafcSKirill A. Shutemov 1897c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, addr); 1898c2febafcSKirill A. Shutemov do { 1899c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 1900c2febafcSKirill A. Shutemov if (p4d_none_or_clear_bad(p4d)) 1901c2febafcSKirill A. Shutemov continue; 1902c2febafcSKirill A. Shutemov ret = unuse_pud_range(vma, p4d, addr, next, entry, page); 1903c2febafcSKirill A. Shutemov if (ret) 1904c2febafcSKirill A. 
Shutemov return ret; 1905c2febafcSKirill A. Shutemov } while (p4d++, addr = next, addr != end); 1906c2febafcSKirill A. Shutemov return 0; 1907c2febafcSKirill A. Shutemov } 1908c2febafcSKirill A. Shutemov 19091da177e4SLinus Torvalds static int unuse_vma(struct vm_area_struct *vma, 19101da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 19111da177e4SLinus Torvalds { 19121da177e4SLinus Torvalds pgd_t *pgd; 19131da177e4SLinus Torvalds unsigned long addr, end, next; 19148a9f3ccdSBalbir Singh int ret; 19151da177e4SLinus Torvalds 19163ca7b3c5SHugh Dickins if (page_anon_vma(page)) { 19171da177e4SLinus Torvalds addr = page_address_in_vma(page, vma); 19181da177e4SLinus Torvalds if (addr == -EFAULT) 19191da177e4SLinus Torvalds return 0; 19201da177e4SLinus Torvalds else 19211da177e4SLinus Torvalds end = addr + PAGE_SIZE; 19221da177e4SLinus Torvalds } else { 19231da177e4SLinus Torvalds addr = vma->vm_start; 19241da177e4SLinus Torvalds end = vma->vm_end; 19251da177e4SLinus Torvalds } 19261da177e4SLinus Torvalds 19271da177e4SLinus Torvalds pgd = pgd_offset(vma->vm_mm, addr); 19281da177e4SLinus Torvalds do { 19291da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 19301da177e4SLinus Torvalds if (pgd_none_or_clear_bad(pgd)) 19311da177e4SLinus Torvalds continue; 1932c2febafcSKirill A. Shutemov ret = unuse_p4d_range(vma, pgd, addr, next, entry, page); 19338a9f3ccdSBalbir Singh if (ret) 19348a9f3ccdSBalbir Singh return ret; 19351da177e4SLinus Torvalds } while (pgd++, addr = next, addr != end); 19361da177e4SLinus Torvalds return 0; 19371da177e4SLinus Torvalds } 19381da177e4SLinus Torvalds 19391da177e4SLinus Torvalds static int unuse_mm(struct mm_struct *mm, 19401da177e4SLinus Torvalds swp_entry_t entry, struct page *page) 19411da177e4SLinus Torvalds { 19421da177e4SLinus Torvalds struct vm_area_struct *vma; 19438a9f3ccdSBalbir Singh int ret = 0; 19441da177e4SLinus Torvalds 19451da177e4SLinus Torvalds if (!down_read_trylock(&mm->mmap_sem)) { 19461da177e4SLinus Torvalds /* 19477d03431cSFernando Luis Vazquez Cao * Activate page so shrink_inactive_list is unlikely to unmap 19487d03431cSFernando Luis Vazquez Cao * its ptes while lock is dropped, so swapoff can make progress. 19491da177e4SLinus Torvalds */ 1950c475a8abSHugh Dickins activate_page(page); 19511da177e4SLinus Torvalds unlock_page(page); 19521da177e4SLinus Torvalds down_read(&mm->mmap_sem); 19531da177e4SLinus Torvalds lock_page(page); 19541da177e4SLinus Torvalds } 19551da177e4SLinus Torvalds for (vma = mm->mmap; vma; vma = vma->vm_next) { 19568a9f3ccdSBalbir Singh if (vma->anon_vma && (ret = unuse_vma(vma, entry, page))) 19571da177e4SLinus Torvalds break; 1958dc644a07SHugh Dickins cond_resched(); 19591da177e4SLinus Torvalds } 19601da177e4SLinus Torvalds up_read(&mm->mmap_sem); 19618a9f3ccdSBalbir Singh return (ret < 0)? ret: 0; 19621da177e4SLinus Torvalds } 19631da177e4SLinus Torvalds 19641da177e4SLinus Torvalds /* 196538b5faf4SDan Magenheimer * Scan swap_map (or frontswap_map if frontswap parameter is true) 196638b5faf4SDan Magenheimer * from current position to next entry still in use. 19671da177e4SLinus Torvalds * Recycle to start on reaching the end, returning 0 when empty. 
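 *
 * Expected driver loop, as in try_to_unuse() below:
 *
 *	unsigned int i = 0;
 *	while ((i = find_next_to_unuse(si, i, frontswap)) != 0)
 *		... process the swap entry at offset i ...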
19681da177e4SLinus Torvalds */ 19696eb396dcSHugh Dickins static unsigned int find_next_to_unuse(struct swap_info_struct *si, 197038b5faf4SDan Magenheimer unsigned int prev, bool frontswap) 19711da177e4SLinus Torvalds { 19726eb396dcSHugh Dickins unsigned int max = si->max; 19736eb396dcSHugh Dickins unsigned int i = prev; 19748d69aaeeSHugh Dickins unsigned char count; 19751da177e4SLinus Torvalds 19761da177e4SLinus Torvalds /* 19775d337b91SHugh Dickins * No need for swap_lock here: we're just looking 19781da177e4SLinus Torvalds * for whether an entry is in use, not modifying it; false 19791da177e4SLinus Torvalds * hits are okay, and sys_swapoff() has already prevented new 19805d337b91SHugh Dickins * allocations from this area (while holding swap_lock). 19811da177e4SLinus Torvalds */ 19821da177e4SLinus Torvalds for (;;) { 19831da177e4SLinus Torvalds if (++i >= max) { 19841da177e4SLinus Torvalds if (!prev) { 19851da177e4SLinus Torvalds i = 0; 19861da177e4SLinus Torvalds break; 19871da177e4SLinus Torvalds } 19881da177e4SLinus Torvalds /* 19891da177e4SLinus Torvalds * No entries in use at top of swap_map, 19901da177e4SLinus Torvalds * loop back to start and recheck there. 19911da177e4SLinus Torvalds */ 19921da177e4SLinus Torvalds max = prev + 1; 19931da177e4SLinus Torvalds prev = 0; 19941da177e4SLinus Torvalds i = 1; 19951da177e4SLinus Torvalds } 19964db0c3c2SJason Low count = READ_ONCE(si->swap_map[i]); 1997355cfa73SKAMEZAWA Hiroyuki if (count && swap_count(count) != SWAP_MAP_BAD) 1998dc644a07SHugh Dickins if (!frontswap || frontswap_test(si, i)) 19991da177e4SLinus Torvalds break; 2000dc644a07SHugh Dickins if ((i % LATENCY_LIMIT) == 0) 2001dc644a07SHugh Dickins cond_resched(); 20021da177e4SLinus Torvalds } 20031da177e4SLinus Torvalds return i; 20041da177e4SLinus Torvalds } 20051da177e4SLinus Torvalds 20061da177e4SLinus Torvalds /* 20071da177e4SLinus Torvalds * We completely avoid races by reading each swap page in advance, 20081da177e4SLinus Torvalds * and then search for the process using it. All the necessary 20091da177e4SLinus Torvalds * page table adjustments can then be made atomically. 201038b5faf4SDan Magenheimer * 201138b5faf4SDan Magenheimer * if the boolean frontswap is true, only unuse pages_to_unuse pages; 201238b5faf4SDan Magenheimer * pages_to_unuse==0 means all pages; ignored if frontswap is false 20131da177e4SLinus Torvalds */ 201438b5faf4SDan Magenheimer int try_to_unuse(unsigned int type, bool frontswap, 201538b5faf4SDan Magenheimer unsigned long pages_to_unuse) 20161da177e4SLinus Torvalds { 2017efa90a98SHugh Dickins struct swap_info_struct *si = swap_info[type]; 20181da177e4SLinus Torvalds struct mm_struct *start_mm; 2019edfe23daSShaohua Li volatile unsigned char *swap_map; /* swap_map is accessed without 2020edfe23daSShaohua Li * locking. Mark it as volatile 2021edfe23daSShaohua Li * to prevent compiler doing 2022edfe23daSShaohua Li * something odd. 2023edfe23daSShaohua Li */ 20248d69aaeeSHugh Dickins unsigned char swcount; 20251da177e4SLinus Torvalds struct page *page; 20261da177e4SLinus Torvalds swp_entry_t entry; 20276eb396dcSHugh Dickins unsigned int i = 0; 20281da177e4SLinus Torvalds int retval = 0; 20291da177e4SLinus Torvalds 20301da177e4SLinus Torvalds /* 20311da177e4SLinus Torvalds * When searching mms for an entry, a good strategy is to 20321da177e4SLinus Torvalds * start at the first mm we freed the previous entry from 20331da177e4SLinus Torvalds * (though actually we don't notice whether we or coincidence 20341da177e4SLinus Torvalds * freed the entry). 
Initialize this start_mm with a hold. 20351da177e4SLinus Torvalds * 20361da177e4SLinus Torvalds * A simpler strategy would be to start at the last mm we 20371da177e4SLinus Torvalds * freed the previous entry from; but that would take less 20381da177e4SLinus Torvalds * advantage of mmlist ordering, which clusters forked mms 20391da177e4SLinus Torvalds * together, child after parent. If we race with dup_mmap(), we 20401da177e4SLinus Torvalds * prefer to resolve parent before child, lest we miss entries 20411da177e4SLinus Torvalds * duplicated after we scanned child: using last mm would invert 2042570a335bSHugh Dickins * that. 20431da177e4SLinus Torvalds */ 20441da177e4SLinus Torvalds start_mm = &init_mm; 20453fce371bSVegard Nossum mmget(&init_mm); 20461da177e4SLinus Torvalds 20471da177e4SLinus Torvalds /* 20481da177e4SLinus Torvalds * Keep on scanning until all entries have gone. Usually, 20491da177e4SLinus Torvalds * one pass through swap_map is enough, but not necessarily: 20501da177e4SLinus Torvalds * there are races when an instance of an entry might be missed. 20511da177e4SLinus Torvalds */ 205238b5faf4SDan Magenheimer while ((i = find_next_to_unuse(si, i, frontswap)) != 0) { 20531da177e4SLinus Torvalds if (signal_pending(current)) { 20541da177e4SLinus Torvalds retval = -EINTR; 20551da177e4SLinus Torvalds break; 20561da177e4SLinus Torvalds } 20571da177e4SLinus Torvalds 20581da177e4SLinus Torvalds /* 20591da177e4SLinus Torvalds * Get a page for the entry, using the existing swap 20601da177e4SLinus Torvalds * cache page if there is one. Otherwise, get a clean 20611da177e4SLinus Torvalds * page and read the swap into it. 20621da177e4SLinus Torvalds */ 20631da177e4SLinus Torvalds swap_map = &si->swap_map[i]; 20641da177e4SLinus Torvalds entry = swp_entry(type, i); 206502098feaSHugh Dickins page = read_swap_cache_async(entry, 206623955622SShaohua Li GFP_HIGHUSER_MOVABLE, NULL, 0, false); 20671da177e4SLinus Torvalds if (!page) { 20681da177e4SLinus Torvalds /* 20691da177e4SLinus Torvalds * Either swap_duplicate() failed because entry 20701da177e4SLinus Torvalds * has been freed independently, and will not be 20711da177e4SLinus Torvalds * reused since sys_swapoff() already disabled 20721da177e4SLinus Torvalds * allocation from here, or alloc_page() failed. 20731da177e4SLinus Torvalds */ 2074edfe23daSShaohua Li swcount = *swap_map; 2075edfe23daSShaohua Li /* 2076edfe23daSShaohua Li * We don't hold lock here, so the swap entry could be 2077edfe23daSShaohua Li * SWAP_MAP_BAD (when the cluster is discarding). 2078edfe23daSShaohua Li * Instead of fail out, We can just skip the swap 2079edfe23daSShaohua Li * entry because swapoff will wait for discarding 2080edfe23daSShaohua Li * finish anyway. 2081edfe23daSShaohua Li */ 2082edfe23daSShaohua Li if (!swcount || swcount == SWAP_MAP_BAD) 20831da177e4SLinus Torvalds continue; 20841da177e4SLinus Torvalds retval = -ENOMEM; 20851da177e4SLinus Torvalds break; 20861da177e4SLinus Torvalds } 20871da177e4SLinus Torvalds 20881da177e4SLinus Torvalds /* 20891da177e4SLinus Torvalds * Don't hold on to start_mm if it looks like exiting. 20901da177e4SLinus Torvalds */ 20911da177e4SLinus Torvalds if (atomic_read(&start_mm->mm_users) == 1) { 20921da177e4SLinus Torvalds mmput(start_mm); 20931da177e4SLinus Torvalds start_mm = &init_mm; 20943fce371bSVegard Nossum mmget(&init_mm); 20951da177e4SLinus Torvalds } 20961da177e4SLinus Torvalds 20971da177e4SLinus Torvalds /* 20981da177e4SLinus Torvalds * Wait for and lock page. 
When do_swap_page races with 20991da177e4SLinus Torvalds * try_to_unuse, do_swap_page can handle the fault much 21001da177e4SLinus Torvalds * faster than try_to_unuse can locate the entry. This 21011da177e4SLinus Torvalds * apparently redundant "wait_on_page_locked" lets try_to_unuse 21021da177e4SLinus Torvalds * defer to do_swap_page in such a case - in some tests, 21031da177e4SLinus Torvalds * do_swap_page and try_to_unuse repeatedly compete. 21041da177e4SLinus Torvalds */ 21051da177e4SLinus Torvalds wait_on_page_locked(page); 21061da177e4SLinus Torvalds wait_on_page_writeback(page); 21071da177e4SLinus Torvalds lock_page(page); 21081da177e4SLinus Torvalds wait_on_page_writeback(page); 21091da177e4SLinus Torvalds 21101da177e4SLinus Torvalds /* 21111da177e4SLinus Torvalds * Remove all references to entry. 21121da177e4SLinus Torvalds */ 21131da177e4SLinus Torvalds swcount = *swap_map; 2114aaa46865SHugh Dickins if (swap_count(swcount) == SWAP_MAP_SHMEM) { 2115aaa46865SHugh Dickins retval = shmem_unuse(entry, page); 2116aaa46865SHugh Dickins /* page has already been unlocked and released */ 2117aaa46865SHugh Dickins if (retval < 0) 2118aaa46865SHugh Dickins break; 2119aaa46865SHugh Dickins continue; 21201da177e4SLinus Torvalds } 2121aaa46865SHugh Dickins if (swap_count(swcount) && start_mm != &init_mm) 2122aaa46865SHugh Dickins retval = unuse_mm(start_mm, entry, page); 2123aaa46865SHugh Dickins 2124355cfa73SKAMEZAWA Hiroyuki if (swap_count(*swap_map)) { 21251da177e4SLinus Torvalds int set_start_mm = (*swap_map >= swcount); 21261da177e4SLinus Torvalds struct list_head *p = &start_mm->mmlist; 21271da177e4SLinus Torvalds struct mm_struct *new_start_mm = start_mm; 21281da177e4SLinus Torvalds struct mm_struct *prev_mm = start_mm; 21291da177e4SLinus Torvalds struct mm_struct *mm; 21301da177e4SLinus Torvalds 21313fce371bSVegard Nossum mmget(new_start_mm); 21323fce371bSVegard Nossum mmget(prev_mm); 21331da177e4SLinus Torvalds spin_lock(&mmlist_lock); 2134aaa46865SHugh Dickins while (swap_count(*swap_map) && !retval && 21351da177e4SLinus Torvalds (p = p->next) != &start_mm->mmlist) { 21361da177e4SLinus Torvalds mm = list_entry(p, struct mm_struct, mmlist); 2137388f7934SVegard Nossum if (!mmget_not_zero(mm)) 21381da177e4SLinus Torvalds continue; 21391da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 21401da177e4SLinus Torvalds mmput(prev_mm); 21411da177e4SLinus Torvalds prev_mm = mm; 21421da177e4SLinus Torvalds 21431da177e4SLinus Torvalds cond_resched(); 21441da177e4SLinus Torvalds 21451da177e4SLinus Torvalds swcount = *swap_map; 2146355cfa73SKAMEZAWA Hiroyuki if (!swap_count(swcount)) /* any usage ? 
*/ 21471da177e4SLinus Torvalds ; 2148aaa46865SHugh Dickins else if (mm == &init_mm) 21491da177e4SLinus Torvalds set_start_mm = 1; 2150aaa46865SHugh Dickins else 21511da177e4SLinus Torvalds retval = unuse_mm(mm, entry, page); 2152355cfa73SKAMEZAWA Hiroyuki 215332c5fc10SBo Liu if (set_start_mm && *swap_map < swcount) { 21541da177e4SLinus Torvalds mmput(new_start_mm); 21553fce371bSVegard Nossum mmget(mm); 21561da177e4SLinus Torvalds new_start_mm = mm; 21571da177e4SLinus Torvalds set_start_mm = 0; 21581da177e4SLinus Torvalds } 21591da177e4SLinus Torvalds spin_lock(&mmlist_lock); 21601da177e4SLinus Torvalds } 21611da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 21621da177e4SLinus Torvalds mmput(prev_mm); 21631da177e4SLinus Torvalds mmput(start_mm); 21641da177e4SLinus Torvalds start_mm = new_start_mm; 21651da177e4SLinus Torvalds } 21661da177e4SLinus Torvalds if (retval) { 21671da177e4SLinus Torvalds unlock_page(page); 216809cbfeafSKirill A. Shutemov put_page(page); 21691da177e4SLinus Torvalds break; 21701da177e4SLinus Torvalds } 21711da177e4SLinus Torvalds 21721da177e4SLinus Torvalds /* 21731da177e4SLinus Torvalds * If a reference remains (rare), we would like to leave 21741da177e4SLinus Torvalds * the page in the swap cache; but try_to_unmap could 21751da177e4SLinus Torvalds * then re-duplicate the entry once we drop page lock, 21761da177e4SLinus Torvalds * so we might loop indefinitely; also, that page could 21771da177e4SLinus Torvalds * not be swapped out to other storage meanwhile. So: 21781da177e4SLinus Torvalds * delete from cache even if there's another reference, 21791da177e4SLinus Torvalds * after ensuring that the data has been saved to disk - 21801da177e4SLinus Torvalds * since if the reference remains (rarer), it will be 21811da177e4SLinus Torvalds * read from disk into another page. Splitting into two 21821da177e4SLinus Torvalds * pages would be incorrect if swap supported "shared 21831da177e4SLinus Torvalds * private" pages, but they are handled by tmpfs files. 21845ad64688SHugh Dickins * 21855ad64688SHugh Dickins * Given how unuse_vma() targets one particular offset 21865ad64688SHugh Dickins * in an anon_vma, once the anon_vma has been determined, 21875ad64688SHugh Dickins * this splitting happens to be just what is needed to 21885ad64688SHugh Dickins * handle where KSM pages have been swapped out: re-reading 21895ad64688SHugh Dickins * is unnecessarily slow, but we can fix that later on. 21901da177e4SLinus Torvalds */ 2191355cfa73SKAMEZAWA Hiroyuki if (swap_count(*swap_map) && 2192355cfa73SKAMEZAWA Hiroyuki PageDirty(page) && PageSwapCache(page)) { 21931da177e4SLinus Torvalds struct writeback_control wbc = { 21941da177e4SLinus Torvalds .sync_mode = WB_SYNC_NONE, 21951da177e4SLinus Torvalds }; 21961da177e4SLinus Torvalds 2197e0709829SHuang Ying swap_writepage(compound_head(page), &wbc); 21981da177e4SLinus Torvalds lock_page(page); 21991da177e4SLinus Torvalds wait_on_page_writeback(page); 22001da177e4SLinus Torvalds } 220168bdc8d6SHugh Dickins 220268bdc8d6SHugh Dickins /* 220368bdc8d6SHugh Dickins * It is conceivable that a racing task removed this page from 220468bdc8d6SHugh Dickins * swap cache just before we acquired the page lock at the top, 220568bdc8d6SHugh Dickins * or while we dropped it in unuse_mm(). The page might even 220668bdc8d6SHugh Dickins * be back in swap cache on another swap area: that we must not 220768bdc8d6SHugh Dickins * delete, since it may not have been written out to swap yet. 
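 * Hence the page_private(page) == entry.val recheck below before
 * deleting this page from swap cache.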
220868bdc8d6SHugh Dickins */ 220968bdc8d6SHugh Dickins if (PageSwapCache(page) && 2210e0709829SHuang Ying likely(page_private(page) == entry.val) && 2211e0709829SHuang Ying !page_swapped(page)) 2212e0709829SHuang Ying delete_from_swap_cache(compound_head(page)); 22131da177e4SLinus Torvalds 22141da177e4SLinus Torvalds /* 22151da177e4SLinus Torvalds * So we could skip searching mms once swap count went 22161da177e4SLinus Torvalds * to 1, we did not mark any present ptes as dirty: must 22172706a1b8SAnderson Briglia * mark page dirty so shrink_page_list will preserve it. 22181da177e4SLinus Torvalds */ 22191da177e4SLinus Torvalds SetPageDirty(page); 22201da177e4SLinus Torvalds unlock_page(page); 222109cbfeafSKirill A. Shutemov put_page(page); 22221da177e4SLinus Torvalds 22231da177e4SLinus Torvalds /* 22241da177e4SLinus Torvalds * Make sure that we aren't completely killing 22251da177e4SLinus Torvalds * interactive performance. 22261da177e4SLinus Torvalds */ 22271da177e4SLinus Torvalds cond_resched(); 222838b5faf4SDan Magenheimer if (frontswap && pages_to_unuse > 0) { 222938b5faf4SDan Magenheimer if (!--pages_to_unuse) 223038b5faf4SDan Magenheimer break; 223138b5faf4SDan Magenheimer } 22321da177e4SLinus Torvalds } 22331da177e4SLinus Torvalds 22341da177e4SLinus Torvalds mmput(start_mm); 22351da177e4SLinus Torvalds return retval; 22361da177e4SLinus Torvalds } 22371da177e4SLinus Torvalds 22381da177e4SLinus Torvalds /* 22395d337b91SHugh Dickins * After a successful try_to_unuse, if no swap is now in use, we know 22405d337b91SHugh Dickins * we can empty the mmlist. swap_lock must be held on entry and exit. 22415d337b91SHugh Dickins * Note that mmlist_lock nests inside swap_lock, and an mm must be 22421da177e4SLinus Torvalds * added to the mmlist just after page_duplicate - before would be racy. 22431da177e4SLinus Torvalds */ 22441da177e4SLinus Torvalds static void drain_mmlist(void) 22451da177e4SLinus Torvalds { 22461da177e4SLinus Torvalds struct list_head *p, *next; 2247efa90a98SHugh Dickins unsigned int type; 22481da177e4SLinus Torvalds 2249efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) 2250efa90a98SHugh Dickins if (swap_info[type]->inuse_pages) 22511da177e4SLinus Torvalds return; 22521da177e4SLinus Torvalds spin_lock(&mmlist_lock); 22531da177e4SLinus Torvalds list_for_each_safe(p, next, &init_mm.mmlist) 22541da177e4SLinus Torvalds list_del_init(p); 22551da177e4SLinus Torvalds spin_unlock(&mmlist_lock); 22561da177e4SLinus Torvalds } 22571da177e4SLinus Torvalds 22581da177e4SLinus Torvalds /* 22591da177e4SLinus Torvalds * Use this swapdev's extent info to locate the (PAGE_SIZE) block which 2260d4906e1aSLee Schermerhorn * corresponds to page offset for the specified swap entry. 2261d4906e1aSLee Schermerhorn * Note that the type of this function is sector_t, but it returns page offset 2262d4906e1aSLee Schermerhorn * into the bdev, not sector offset. 
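 *
 * Callers needing a 512-byte sector number scale the result, as the
 * bio setup in mm/page_io.c does (sketch):
 *
 *	bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
 *	bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;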
22631da177e4SLinus Torvalds */ 2264d4906e1aSLee Schermerhorn static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev) 22651da177e4SLinus Torvalds { 2266f29ad6a9SHugh Dickins struct swap_info_struct *sis; 2267f29ad6a9SHugh Dickins struct swap_extent *start_se; 2268f29ad6a9SHugh Dickins struct swap_extent *se; 2269f29ad6a9SHugh Dickins pgoff_t offset; 2270f29ad6a9SHugh Dickins 2271efa90a98SHugh Dickins sis = swap_info[swp_type(entry)]; 2272f29ad6a9SHugh Dickins *bdev = sis->bdev; 2273f29ad6a9SHugh Dickins 2274f29ad6a9SHugh Dickins offset = swp_offset(entry); 2275f29ad6a9SHugh Dickins start_se = sis->curr_swap_extent; 2276f29ad6a9SHugh Dickins se = start_se; 22771da177e4SLinus Torvalds 22781da177e4SLinus Torvalds for ( ; ; ) { 22791da177e4SLinus Torvalds if (se->start_page <= offset && 22801da177e4SLinus Torvalds offset < (se->start_page + se->nr_pages)) { 22811da177e4SLinus Torvalds return se->start_block + (offset - se->start_page); 22821da177e4SLinus Torvalds } 2283a8ae4991SGeliang Tang se = list_next_entry(se, list); 22841da177e4SLinus Torvalds sis->curr_swap_extent = se; 22851da177e4SLinus Torvalds BUG_ON(se == start_se); /* It *must* be present */ 22861da177e4SLinus Torvalds } 22871da177e4SLinus Torvalds } 22881da177e4SLinus Torvalds 22891da177e4SLinus Torvalds /* 2290d4906e1aSLee Schermerhorn * Returns the page offset into bdev for the specified page's swap entry. 2291d4906e1aSLee Schermerhorn */ 2292d4906e1aSLee Schermerhorn sector_t map_swap_page(struct page *page, struct block_device **bdev) 2293d4906e1aSLee Schermerhorn { 2294d4906e1aSLee Schermerhorn swp_entry_t entry; 2295d4906e1aSLee Schermerhorn entry.val = page_private(page); 2296d4906e1aSLee Schermerhorn return map_swap_entry(entry, bdev); 2297d4906e1aSLee Schermerhorn } 2298d4906e1aSLee Schermerhorn 2299d4906e1aSLee Schermerhorn /* 23001da177e4SLinus Torvalds * Free all of a swapdev's extent information 23011da177e4SLinus Torvalds */ 23021da177e4SLinus Torvalds static void destroy_swap_extents(struct swap_info_struct *sis) 23031da177e4SLinus Torvalds { 23049625a5f2SHugh Dickins while (!list_empty(&sis->first_swap_extent.list)) { 23051da177e4SLinus Torvalds struct swap_extent *se; 23061da177e4SLinus Torvalds 2307a8ae4991SGeliang Tang se = list_first_entry(&sis->first_swap_extent.list, 23081da177e4SLinus Torvalds struct swap_extent, list); 23091da177e4SLinus Torvalds list_del(&se->list); 23101da177e4SLinus Torvalds kfree(se); 23111da177e4SLinus Torvalds } 231262c230bcSMel Gorman 231362c230bcSMel Gorman if (sis->flags & SWP_FILE) { 231462c230bcSMel Gorman struct file *swap_file = sis->swap_file; 231562c230bcSMel Gorman struct address_space *mapping = swap_file->f_mapping; 231662c230bcSMel Gorman 231762c230bcSMel Gorman sis->flags &= ~SWP_FILE; 231862c230bcSMel Gorman mapping->a_ops->swap_deactivate(swap_file); 231962c230bcSMel Gorman } 23201da177e4SLinus Torvalds } 23211da177e4SLinus Torvalds 23221da177e4SLinus Torvalds /* 23231da177e4SLinus Torvalds * Add a block range (and the corresponding page range) into this swapdev's 232411d31886SHugh Dickins * extent list. The extent list is kept sorted in page order. 23251da177e4SLinus Torvalds * 232611d31886SHugh Dickins * This function rather assumes that it is called in ascending page order. 
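 *
 * Worked example: after add_swap_extent(sis, 0, 100, 500), calling
 * add_swap_extent(sis, 100, 50, 600) merges into one 150-page extent
 * since 500 + 100 == 600; a start_block of 700 would instead append
 * a new extent.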
23271da177e4SLinus Torvalds */ 2328a509bc1aSMel Gorman int 23291da177e4SLinus Torvalds add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 23301da177e4SLinus Torvalds unsigned long nr_pages, sector_t start_block) 23311da177e4SLinus Torvalds { 23321da177e4SLinus Torvalds struct swap_extent *se; 23331da177e4SLinus Torvalds struct swap_extent *new_se; 23341da177e4SLinus Torvalds struct list_head *lh; 23351da177e4SLinus Torvalds 23369625a5f2SHugh Dickins if (start_page == 0) { 23379625a5f2SHugh Dickins se = &sis->first_swap_extent; 23389625a5f2SHugh Dickins sis->curr_swap_extent = se; 23399625a5f2SHugh Dickins se->start_page = 0; 23409625a5f2SHugh Dickins se->nr_pages = nr_pages; 23419625a5f2SHugh Dickins se->start_block = start_block; 23429625a5f2SHugh Dickins return 1; 23439625a5f2SHugh Dickins } else { 23449625a5f2SHugh Dickins lh = sis->first_swap_extent.list.prev; /* Highest extent */ 23451da177e4SLinus Torvalds se = list_entry(lh, struct swap_extent, list); 234611d31886SHugh Dickins BUG_ON(se->start_page + se->nr_pages != start_page); 234711d31886SHugh Dickins if (se->start_block + se->nr_pages == start_block) { 23481da177e4SLinus Torvalds /* Merge it */ 23491da177e4SLinus Torvalds se->nr_pages += nr_pages; 23501da177e4SLinus Torvalds return 0; 23511da177e4SLinus Torvalds } 23521da177e4SLinus Torvalds } 23531da177e4SLinus Torvalds 23541da177e4SLinus Torvalds /* 23551da177e4SLinus Torvalds * No merge. Insert a new extent, preserving ordering. 23561da177e4SLinus Torvalds */ 23571da177e4SLinus Torvalds new_se = kmalloc(sizeof(*se), GFP_KERNEL); 23581da177e4SLinus Torvalds if (new_se == NULL) 23591da177e4SLinus Torvalds return -ENOMEM; 23601da177e4SLinus Torvalds new_se->start_page = start_page; 23611da177e4SLinus Torvalds new_se->nr_pages = nr_pages; 23621da177e4SLinus Torvalds new_se->start_block = start_block; 23631da177e4SLinus Torvalds 23649625a5f2SHugh Dickins list_add_tail(&new_se->list, &sis->first_swap_extent.list); 236553092a74SHugh Dickins return 1; 23661da177e4SLinus Torvalds } 23671da177e4SLinus Torvalds 23681da177e4SLinus Torvalds /* 23691da177e4SLinus Torvalds * A `swap extent' is a simple thing which maps a contiguous range of pages 23701da177e4SLinus Torvalds * onto a contiguous range of disk blocks. An ordered list of swap extents 23711da177e4SLinus Torvalds * is built at swapon time and is then used at swap_writepage/swap_readpage 23721da177e4SLinus Torvalds * time for locating where on disk a page belongs. 23731da177e4SLinus Torvalds * 23741da177e4SLinus Torvalds * If the swapfile is an S_ISBLK block device, a single extent is installed. 23751da177e4SLinus Torvalds * This is done so that the main operating code can treat S_ISBLK and S_ISREG 23761da177e4SLinus Torvalds * swap files identically. 23771da177e4SLinus Torvalds * 23781da177e4SLinus Torvalds * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap 23791da177e4SLinus Torvalds * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK 23801da177e4SLinus Torvalds * swapfiles are handled *identically* after swapon time. 23811da177e4SLinus Torvalds * 23821da177e4SLinus Torvalds * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks 23831da177e4SLinus Torvalds * and will parse them into an ordered extent list, in PAGE_SIZE chunks. 
If 23841da177e4SLinus Torvalds * some stray blocks are found which do not fall within the PAGE_SIZE alignment 23851da177e4SLinus Torvalds * requirements, they are simply tossed out - we will never use those blocks 23861da177e4SLinus Torvalds * for swapping. 23871da177e4SLinus Torvalds * 2388b0d9bcd4SHugh Dickins * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This 23891da177e4SLinus Torvalds * prevents root from shooting her foot off by ftruncating an in-use swapfile, 23901da177e4SLinus Torvalds * which will scribble on the fs. 23911da177e4SLinus Torvalds * 23921da177e4SLinus Torvalds * The amount of disk space which a single swap extent represents varies. 23931da177e4SLinus Torvalds * Typically it is in the 1-4 megabyte range. So we can have hundreds of 23941da177e4SLinus Torvalds * extents in the list. To avoid much list walking, we cache the previous 23951da177e4SLinus Torvalds * search location in `curr_swap_extent', and start new searches from there. 23961da177e4SLinus Torvalds * This is extremely effective. The average number of iterations in 23971da177e4SLinus Torvalds * map_swap_page() has been measured at about 0.3 per page. - akpm. 23981da177e4SLinus Torvalds */ 239953092a74SHugh Dickins static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) 24001da177e4SLinus Torvalds { 240162c230bcSMel Gorman struct file *swap_file = sis->swap_file; 240262c230bcSMel Gorman struct address_space *mapping = swap_file->f_mapping; 240362c230bcSMel Gorman struct inode *inode = mapping->host; 24041da177e4SLinus Torvalds int ret; 24051da177e4SLinus Torvalds 24061da177e4SLinus Torvalds if (S_ISBLK(inode->i_mode)) { 24071da177e4SLinus Torvalds ret = add_swap_extent(sis, 0, sis->max, 0); 240853092a74SHugh Dickins *span = sis->pages; 2409a509bc1aSMel Gorman return ret; 24101da177e4SLinus Torvalds } 24111da177e4SLinus Torvalds 241262c230bcSMel Gorman if (mapping->a_ops->swap_activate) { 2413a509bc1aSMel Gorman ret = mapping->a_ops->swap_activate(sis, swap_file, span); 241462c230bcSMel Gorman if (!ret) { 241562c230bcSMel Gorman sis->flags |= SWP_FILE; 241662c230bcSMel Gorman ret = add_swap_extent(sis, 0, sis->max, 0); 241762c230bcSMel Gorman *span = sis->pages; 241862c230bcSMel Gorman } 24199625a5f2SHugh Dickins return ret; 2420a509bc1aSMel Gorman } 2421a509bc1aSMel Gorman 2422a509bc1aSMel Gorman return generic_swapfile_activate(sis, swap_file, span); 24231da177e4SLinus Torvalds } 24241da177e4SLinus Torvalds 2425a2468cc9SAaron Lu static int swap_node(struct swap_info_struct *p) 2426a2468cc9SAaron Lu { 2427a2468cc9SAaron Lu struct block_device *bdev; 2428a2468cc9SAaron Lu 2429a2468cc9SAaron Lu if (p->bdev) 2430a2468cc9SAaron Lu bdev = p->bdev; 2431a2468cc9SAaron Lu else 2432a2468cc9SAaron Lu bdev = p->swap_file->f_inode->i_sb->s_bdev; 2433a2468cc9SAaron Lu 2434a2468cc9SAaron Lu return bdev ? 
bdev->bd_disk->node_id : NUMA_NO_NODE; 2435a2468cc9SAaron Lu } 2436a2468cc9SAaron Lu 2437cf0cac0aSCesar Eduardo Barros static void _enable_swap_info(struct swap_info_struct *p, int prio, 24382a8f9449SShaohua Li unsigned char *swap_map, 24392a8f9449SShaohua Li struct swap_cluster_info *cluster_info) 244040531542SCesar Eduardo Barros { 2441a2468cc9SAaron Lu int i; 2442a2468cc9SAaron Lu 244340531542SCesar Eduardo Barros if (prio >= 0) 244440531542SCesar Eduardo Barros p->prio = prio; 244540531542SCesar Eduardo Barros else 244640531542SCesar Eduardo Barros p->prio = --least_priority; 244718ab4d4cSDan Streetman /* 244818ab4d4cSDan Streetman * the plist prio is negated because plist ordering is 244918ab4d4cSDan Streetman * low-to-high, while swap ordering is high-to-low 245018ab4d4cSDan Streetman */ 245118ab4d4cSDan Streetman p->list.prio = -p->prio; 2452a2468cc9SAaron Lu for_each_node(i) { 2453a2468cc9SAaron Lu if (p->prio >= 0) 2454a2468cc9SAaron Lu p->avail_lists[i].prio = -p->prio; 2455a2468cc9SAaron Lu else { 2456a2468cc9SAaron Lu if (swap_node(p) == i) 2457a2468cc9SAaron Lu p->avail_lists[i].prio = 1; 2458a2468cc9SAaron Lu else 2459a2468cc9SAaron Lu p->avail_lists[i].prio = -p->prio; 2460a2468cc9SAaron Lu } 2461a2468cc9SAaron Lu } 246240531542SCesar Eduardo Barros p->swap_map = swap_map; 24632a8f9449SShaohua Li p->cluster_info = cluster_info; 246440531542SCesar Eduardo Barros p->flags |= SWP_WRITEOK; 2465ec8acf20SShaohua Li atomic_long_add(p->pages, &nr_swap_pages); 246640531542SCesar Eduardo Barros total_swap_pages += p->pages; 246740531542SCesar Eduardo Barros 2468adfab836SDan Streetman assert_spin_locked(&swap_lock); 2469adfab836SDan Streetman /* 247018ab4d4cSDan Streetman * both lists are plists, and thus priority ordered. 247118ab4d4cSDan Streetman * swap_active_head needs to be priority ordered for swapoff(), 247218ab4d4cSDan Streetman * which on removal of any swap_info_struct with an auto-assigned 247318ab4d4cSDan Streetman * (i.e. negative) priority increments the auto-assigned priority 247418ab4d4cSDan Streetman * of any lower-priority swap_info_structs. 247518ab4d4cSDan Streetman * swap_avail_head needs to be priority ordered for get_swap_page(), 247618ab4d4cSDan Streetman * which allocates swap pages from the highest available priority 247718ab4d4cSDan Streetman * swap_info_struct. 
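 *
 * A worked example (numbers purely illustrative): two areas with swap
 * priorities 10 and 5 get plist prios -10 and -5; plists are walked from
 * the lowest value upwards, so the priority 10 area is tried first:
 *
 *	si_a->prio = 10;	si_a->list.prio = -10;	<- tried first
 *	si_b->prio = 5;		si_b->list.prio = -5;
 *
 * An area with an auto-assigned (negative) priority instead advertises
 * prio 1 on the node its block device sits on and -prio everywhere else,
 * so allocations prefer the node-local device (see swap_node() above).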
2478adfab836SDan Streetman */ 247918ab4d4cSDan Streetman plist_add(&p->list, &swap_active_head); 2480a2468cc9SAaron Lu add_to_avail_list(p); 2481cf0cac0aSCesar Eduardo Barros } 2482cf0cac0aSCesar Eduardo Barros 2483cf0cac0aSCesar Eduardo Barros static void enable_swap_info(struct swap_info_struct *p, int prio, 2484cf0cac0aSCesar Eduardo Barros unsigned char *swap_map, 24852a8f9449SShaohua Li struct swap_cluster_info *cluster_info, 2486cf0cac0aSCesar Eduardo Barros unsigned long *frontswap_map) 2487cf0cac0aSCesar Eduardo Barros { 24884f89849dSMinchan Kim frontswap_init(p->type, frontswap_map); 2489cf0cac0aSCesar Eduardo Barros spin_lock(&swap_lock); 2490ec8acf20SShaohua Li spin_lock(&p->lock); 24912a8f9449SShaohua Li _enable_swap_info(p, prio, swap_map, cluster_info); 2492ec8acf20SShaohua Li spin_unlock(&p->lock); 2493cf0cac0aSCesar Eduardo Barros spin_unlock(&swap_lock); 2494cf0cac0aSCesar Eduardo Barros } 2495cf0cac0aSCesar Eduardo Barros 2496cf0cac0aSCesar Eduardo Barros static void reinsert_swap_info(struct swap_info_struct *p) 2497cf0cac0aSCesar Eduardo Barros { 2498cf0cac0aSCesar Eduardo Barros spin_lock(&swap_lock); 2499ec8acf20SShaohua Li spin_lock(&p->lock); 25002a8f9449SShaohua Li _enable_swap_info(p, p->prio, p->swap_map, p->cluster_info); 2501ec8acf20SShaohua Li spin_unlock(&p->lock); 250240531542SCesar Eduardo Barros spin_unlock(&swap_lock); 250340531542SCesar Eduardo Barros } 250440531542SCesar Eduardo Barros 250567afa38eSTim Chen bool has_usable_swap(void) 250667afa38eSTim Chen { 250767afa38eSTim Chen bool ret = true; 250867afa38eSTim Chen 250967afa38eSTim Chen spin_lock(&swap_lock); 251067afa38eSTim Chen if (plist_head_empty(&swap_active_head)) 251167afa38eSTim Chen ret = false; 251267afa38eSTim Chen spin_unlock(&swap_lock); 251367afa38eSTim Chen return ret; 251467afa38eSTim Chen } 251567afa38eSTim Chen 2516c4ea37c2SHeiko Carstens SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) 25171da177e4SLinus Torvalds { 25181da177e4SLinus Torvalds struct swap_info_struct *p = NULL; 25198d69aaeeSHugh Dickins unsigned char *swap_map; 25202a8f9449SShaohua Li struct swap_cluster_info *cluster_info; 25214f89849dSMinchan Kim unsigned long *frontswap_map; 25221da177e4SLinus Torvalds struct file *swap_file, *victim; 25231da177e4SLinus Torvalds struct address_space *mapping; 25241da177e4SLinus Torvalds struct inode *inode; 252591a27b2aSJeff Layton struct filename *pathname; 2526adfab836SDan Streetman int err, found = 0; 25275b808a23SKrzysztof Kozlowski unsigned int old_block_size; 25281da177e4SLinus Torvalds 25291da177e4SLinus Torvalds if (!capable(CAP_SYS_ADMIN)) 25301da177e4SLinus Torvalds return -EPERM; 25311da177e4SLinus Torvalds 2532191c5424SAl Viro BUG_ON(!current->mm); 2533191c5424SAl Viro 25341da177e4SLinus Torvalds pathname = getname(specialfile); 25351da177e4SLinus Torvalds if (IS_ERR(pathname)) 2536f58b59c1SXiaotian Feng return PTR_ERR(pathname); 25371da177e4SLinus Torvalds 2538669abf4eSJeff Layton victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); 25391da177e4SLinus Torvalds err = PTR_ERR(victim); 25401da177e4SLinus Torvalds if (IS_ERR(victim)) 25411da177e4SLinus Torvalds goto out; 25421da177e4SLinus Torvalds 25431da177e4SLinus Torvalds mapping = victim->f_mapping; 25445d337b91SHugh Dickins spin_lock(&swap_lock); 254518ab4d4cSDan Streetman plist_for_each_entry(p, &swap_active_head, list) { 254622c6f8fdSHugh Dickins if (p->flags & SWP_WRITEOK) { 2547adfab836SDan Streetman if (p->swap_file->f_mapping == mapping) { 2548adfab836SDan Streetman found = 1; 
25491da177e4SLinus Torvalds break; 25501da177e4SLinus Torvalds } 25511da177e4SLinus Torvalds } 2552adfab836SDan Streetman } 2553adfab836SDan Streetman if (!found) { 25541da177e4SLinus Torvalds err = -EINVAL; 25555d337b91SHugh Dickins spin_unlock(&swap_lock); 25561da177e4SLinus Torvalds goto out_dput; 25571da177e4SLinus Torvalds } 2558191c5424SAl Viro if (!security_vm_enough_memory_mm(current->mm, p->pages)) 25591da177e4SLinus Torvalds vm_unacct_memory(p->pages); 25601da177e4SLinus Torvalds else { 25611da177e4SLinus Torvalds err = -ENOMEM; 25625d337b91SHugh Dickins spin_unlock(&swap_lock); 25631da177e4SLinus Torvalds goto out_dput; 25641da177e4SLinus Torvalds } 2565a2468cc9SAaron Lu del_from_avail_list(p); 2566ec8acf20SShaohua Li spin_lock(&p->lock); 256778ecba08SHugh Dickins if (p->prio < 0) { 2568adfab836SDan Streetman struct swap_info_struct *si = p; 2569a2468cc9SAaron Lu int nid; 2570adfab836SDan Streetman 257118ab4d4cSDan Streetman plist_for_each_entry_continue(si, &swap_active_head, list) { 2572adfab836SDan Streetman si->prio++; 257318ab4d4cSDan Streetman si->list.prio--; 2574a2468cc9SAaron Lu for_each_node(nid) { 2575a2468cc9SAaron Lu if (si->avail_lists[nid].prio != 1) 2576a2468cc9SAaron Lu si->avail_lists[nid].prio--; 2577a2468cc9SAaron Lu } 2578adfab836SDan Streetman } 257978ecba08SHugh Dickins least_priority++; 258078ecba08SHugh Dickins } 258118ab4d4cSDan Streetman plist_del(&p->list, &swap_active_head); 2582ec8acf20SShaohua Li atomic_long_sub(p->pages, &nr_swap_pages); 25831da177e4SLinus Torvalds total_swap_pages -= p->pages; 25841da177e4SLinus Torvalds p->flags &= ~SWP_WRITEOK; 2585ec8acf20SShaohua Li spin_unlock(&p->lock); 25865d337b91SHugh Dickins spin_unlock(&swap_lock); 2587fb4f88dcSHugh Dickins 2588039939a6STim Chen disable_swap_slots_cache_lock(); 2589039939a6STim Chen 2590e1e12d2fSDavid Rientjes set_current_oom_origin(); 2591adfab836SDan Streetman err = try_to_unuse(p->type, false, 0); /* force unuse all pages */ 2592e1e12d2fSDavid Rientjes clear_current_oom_origin(); 25931da177e4SLinus Torvalds 25941da177e4SLinus Torvalds if (err) { 25951da177e4SLinus Torvalds /* re-insert swap space back into swap_list */ 2596cf0cac0aSCesar Eduardo Barros reinsert_swap_info(p); 2597039939a6STim Chen reenable_swap_slots_cache_unlock(); 25981da177e4SLinus Torvalds goto out_dput; 25991da177e4SLinus Torvalds } 260052b7efdbSHugh Dickins 2601039939a6STim Chen reenable_swap_slots_cache_unlock(); 2602039939a6STim Chen 2603815c2c54SShaohua Li flush_work(&p->discard_work); 2604815c2c54SShaohua Li 26054cd3bb10SHugh Dickins destroy_swap_extents(p); 2606570a335bSHugh Dickins if (p->flags & SWP_CONTINUED) 2607570a335bSHugh Dickins free_swap_count_continuations(p); 2608570a335bSHugh Dickins 260981a0298bSHuang Ying if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev))) 261081a0298bSHuang Ying atomic_dec(&nr_rotate_swap); 261181a0298bSHuang Ying 2612fc0abb14SIngo Molnar mutex_lock(&swapon_mutex); 26135d337b91SHugh Dickins spin_lock(&swap_lock); 2614ec8acf20SShaohua Li spin_lock(&p->lock); 26151da177e4SLinus Torvalds drain_mmlist(); 26165d337b91SHugh Dickins 26175d337b91SHugh Dickins /* wait for anyone still in scan_swap_map */ 26185d337b91SHugh Dickins p->highest_bit = 0; /* cuts scans short */ 26195d337b91SHugh Dickins while (p->flags >= SWP_SCANNING) { 2620ec8acf20SShaohua Li spin_unlock(&p->lock); 26215d337b91SHugh Dickins spin_unlock(&swap_lock); 262213e4b57fSNishanth Aravamudan schedule_timeout_uninterruptible(1); 26235d337b91SHugh Dickins spin_lock(&swap_lock); 2624ec8acf20SShaohua Li 
spin_lock(&p->lock); 26255d337b91SHugh Dickins } 26265d337b91SHugh Dickins 26271da177e4SLinus Torvalds swap_file = p->swap_file; 26285b808a23SKrzysztof Kozlowski old_block_size = p->old_block_size; 26291da177e4SLinus Torvalds p->swap_file = NULL; 26301da177e4SLinus Torvalds p->max = 0; 26311da177e4SLinus Torvalds swap_map = p->swap_map; 26321da177e4SLinus Torvalds p->swap_map = NULL; 26332a8f9449SShaohua Li cluster_info = p->cluster_info; 26342a8f9449SShaohua Li p->cluster_info = NULL; 26354f89849dSMinchan Kim frontswap_map = frontswap_map_get(p); 2636ec8acf20SShaohua Li spin_unlock(&p->lock); 26375d337b91SHugh Dickins spin_unlock(&swap_lock); 2638adfab836SDan Streetman frontswap_invalidate_area(p->type); 263958e97ba6SKrzysztof Kozlowski frontswap_map_set(p, NULL); 2640fc0abb14SIngo Molnar mutex_unlock(&swapon_mutex); 2641ebc2a1a6SShaohua Li free_percpu(p->percpu_cluster); 2642ebc2a1a6SShaohua Li p->percpu_cluster = NULL; 26431da177e4SLinus Torvalds vfree(swap_map); 264454f180d3SHuang Ying kvfree(cluster_info); 264554f180d3SHuang Ying kvfree(frontswap_map); 26462de1a7e4SSeth Jennings /* Destroy swap account information */ 2647adfab836SDan Streetman swap_cgroup_swapoff(p->type); 26484b3ef9daSHuang, Ying exit_swap_address_space(p->type); 264927a7faa0SKAMEZAWA Hiroyuki 26501da177e4SLinus Torvalds inode = mapping->host; 26511da177e4SLinus Torvalds if (S_ISBLK(inode->i_mode)) { 26521da177e4SLinus Torvalds struct block_device *bdev = I_BDEV(inode); 26535b808a23SKrzysztof Kozlowski set_blocksize(bdev, old_block_size); 2654e525fd89STejun Heo blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 26551da177e4SLinus Torvalds } else { 26565955102cSAl Viro inode_lock(inode); 26571da177e4SLinus Torvalds inode->i_flags &= ~S_SWAPFILE; 26585955102cSAl Viro inode_unlock(inode); 26591da177e4SLinus Torvalds } 26601da177e4SLinus Torvalds filp_close(swap_file, NULL); 2661f893ab41SWeijie Yang 2662f893ab41SWeijie Yang /* 2663f893ab41SWeijie Yang * Clear the SWP_USED flag after all resources are freed so that swapon 2664f893ab41SWeijie Yang * can reuse this swap_info in alloc_swap_info() safely. It is ok to 2665f893ab41SWeijie Yang * not hold p->lock after we cleared its SWP_WRITEOK. 
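 *
 * The swapon side relies on exactly this flag, under the same lock; a
 * condensed sketch of the slot scan in alloc_swap_info() below:
 *
 *	spin_lock(&swap_lock);
 *	for (type = 0; type < nr_swapfiles; type++)
 *		if (!(swap_info[type]->flags & SWP_USED))
 *			break;		<- this slot may be reused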
2666f893ab41SWeijie Yang */ 2667f893ab41SWeijie Yang spin_lock(&swap_lock); 2668f893ab41SWeijie Yang p->flags = 0; 2669f893ab41SWeijie Yang spin_unlock(&swap_lock); 2670f893ab41SWeijie Yang 26711da177e4SLinus Torvalds err = 0; 267266d7dd51SKay Sievers atomic_inc(&proc_poll_event); 267366d7dd51SKay Sievers wake_up_interruptible(&proc_poll_wait); 26741da177e4SLinus Torvalds 26751da177e4SLinus Torvalds out_dput: 26761da177e4SLinus Torvalds filp_close(victim, NULL); 26771da177e4SLinus Torvalds out: 2678f58b59c1SXiaotian Feng putname(pathname); 26791da177e4SLinus Torvalds return err; 26801da177e4SLinus Torvalds } 26811da177e4SLinus Torvalds 26821da177e4SLinus Torvalds #ifdef CONFIG_PROC_FS 26839dd95748SAl Viro static __poll_t swaps_poll(struct file *file, poll_table *wait) 268466d7dd51SKay Sievers { 2685f1514638SKay Sievers struct seq_file *seq = file->private_data; 268666d7dd51SKay Sievers 268766d7dd51SKay Sievers poll_wait(file, &proc_poll_wait, wait); 268866d7dd51SKay Sievers 2689f1514638SKay Sievers if (seq->poll_event != atomic_read(&proc_poll_event)) { 2690f1514638SKay Sievers seq->poll_event = atomic_read(&proc_poll_event); 2691a9a08845SLinus Torvalds return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; 269266d7dd51SKay Sievers } 269366d7dd51SKay Sievers 2694a9a08845SLinus Torvalds return EPOLLIN | EPOLLRDNORM; 269566d7dd51SKay Sievers } 269666d7dd51SKay Sievers 26971da177e4SLinus Torvalds /* iterator */ 26981da177e4SLinus Torvalds static void *swap_start(struct seq_file *swap, loff_t *pos) 26991da177e4SLinus Torvalds { 2700efa90a98SHugh Dickins struct swap_info_struct *si; 2701efa90a98SHugh Dickins int type; 27021da177e4SLinus Torvalds loff_t l = *pos; 27031da177e4SLinus Torvalds 2704fc0abb14SIngo Molnar mutex_lock(&swapon_mutex); 27051da177e4SLinus Torvalds 2706881e4aabSSuleiman Souhlal if (!l) 2707881e4aabSSuleiman Souhlal return SEQ_START_TOKEN; 2708881e4aabSSuleiman Souhlal 2709efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) { 2710efa90a98SHugh Dickins smp_rmb(); /* read nr_swapfiles before swap_info[type] */ 2711efa90a98SHugh Dickins si = swap_info[type]; 2712efa90a98SHugh Dickins if (!(si->flags & SWP_USED) || !si->swap_map) 27131da177e4SLinus Torvalds continue; 2714881e4aabSSuleiman Souhlal if (!--l) 2715efa90a98SHugh Dickins return si; 27161da177e4SLinus Torvalds } 27171da177e4SLinus Torvalds 27181da177e4SLinus Torvalds return NULL; 27191da177e4SLinus Torvalds } 27201da177e4SLinus Torvalds 27211da177e4SLinus Torvalds static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) 27221da177e4SLinus Torvalds { 2723efa90a98SHugh Dickins struct swap_info_struct *si = v; 2724efa90a98SHugh Dickins int type; 27251da177e4SLinus Torvalds 2726881e4aabSSuleiman Souhlal if (v == SEQ_START_TOKEN) 2727efa90a98SHugh Dickins type = 0; 2728efa90a98SHugh Dickins else 2729efa90a98SHugh Dickins type = si->type + 1; 2730881e4aabSSuleiman Souhlal 2731efa90a98SHugh Dickins for (; type < nr_swapfiles; type++) { 2732efa90a98SHugh Dickins smp_rmb(); /* read nr_swapfiles before swap_info[type] */ 2733efa90a98SHugh Dickins si = swap_info[type]; 2734efa90a98SHugh Dickins if (!(si->flags & SWP_USED) || !si->swap_map) 27351da177e4SLinus Torvalds continue; 27361da177e4SLinus Torvalds ++*pos; 2737efa90a98SHugh Dickins return si; 27381da177e4SLinus Torvalds } 27391da177e4SLinus Torvalds 27401da177e4SLinus Torvalds return NULL; 27411da177e4SLinus Torvalds } 27421da177e4SLinus Torvalds 27431da177e4SLinus Torvalds static void swap_stop(struct seq_file *swap, void *v) 27441da177e4SLinus 
Torvalds { 2745fc0abb14SIngo Molnar mutex_unlock(&swapon_mutex); 27461da177e4SLinus Torvalds } 27471da177e4SLinus Torvalds 27481da177e4SLinus Torvalds static int swap_show(struct seq_file *swap, void *v) 27491da177e4SLinus Torvalds { 2750efa90a98SHugh Dickins struct swap_info_struct *si = v; 27511da177e4SLinus Torvalds struct file *file; 27521da177e4SLinus Torvalds int len; 27531da177e4SLinus Torvalds 2754efa90a98SHugh Dickins if (si == SEQ_START_TOKEN) { 27551da177e4SLinus Torvalds seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); 2756881e4aabSSuleiman Souhlal return 0; 2757881e4aabSSuleiman Souhlal } 27581da177e4SLinus Torvalds 2759efa90a98SHugh Dickins file = si->swap_file; 27602726d566SMiklos Szeredi len = seq_file_path(swap, file, " \t\n\\"); 27616eb396dcSHugh Dickins seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", 27621da177e4SLinus Torvalds len < 40 ? 40 - len : 1, " ", 2763496ad9aaSAl Viro S_ISBLK(file_inode(file)->i_mode) ? 27641da177e4SLinus Torvalds "partition" : "file\t", 2765efa90a98SHugh Dickins si->pages << (PAGE_SHIFT - 10), 2766efa90a98SHugh Dickins si->inuse_pages << (PAGE_SHIFT - 10), 2767efa90a98SHugh Dickins si->prio); 27681da177e4SLinus Torvalds return 0; 27691da177e4SLinus Torvalds } 27701da177e4SLinus Torvalds 277115ad7cdcSHelge Deller static const struct seq_operations swaps_op = { 27721da177e4SLinus Torvalds .start = swap_start, 27731da177e4SLinus Torvalds .next = swap_next, 27741da177e4SLinus Torvalds .stop = swap_stop, 27751da177e4SLinus Torvalds .show = swap_show 27761da177e4SLinus Torvalds }; 27771da177e4SLinus Torvalds 27781da177e4SLinus Torvalds static int swaps_open(struct inode *inode, struct file *file) 27791da177e4SLinus Torvalds { 2780f1514638SKay Sievers struct seq_file *seq; 278166d7dd51SKay Sievers int ret; 278266d7dd51SKay Sievers 278366d7dd51SKay Sievers ret = seq_open(file, &swaps_op); 2784f1514638SKay Sievers if (ret) 278566d7dd51SKay Sievers return ret; 278666d7dd51SKay Sievers 2787f1514638SKay Sievers seq = file->private_data; 2788f1514638SKay Sievers seq->poll_event = atomic_read(&proc_poll_event); 2789f1514638SKay Sievers return 0; 27901da177e4SLinus Torvalds } 27911da177e4SLinus Torvalds 279215ad7cdcSHelge Deller static const struct file_operations proc_swaps_operations = { 27931da177e4SLinus Torvalds .open = swaps_open, 27941da177e4SLinus Torvalds .read = seq_read, 27951da177e4SLinus Torvalds .llseek = seq_lseek, 27961da177e4SLinus Torvalds .release = seq_release, 279766d7dd51SKay Sievers .poll = swaps_poll, 27981da177e4SLinus Torvalds }; 27991da177e4SLinus Torvalds 28001da177e4SLinus Torvalds static int __init procswaps_init(void) 28011da177e4SLinus Torvalds { 28023d71f86fSDenis V. 
Lunev proc_create("swaps", 0, NULL, &proc_swaps_operations); 28031da177e4SLinus Torvalds return 0; 28041da177e4SLinus Torvalds } 28051da177e4SLinus Torvalds __initcall(procswaps_init); 28061da177e4SLinus Torvalds #endif /* CONFIG_PROC_FS */ 28071da177e4SLinus Torvalds 28081796316aSJan Beulich #ifdef MAX_SWAPFILES_CHECK 28091796316aSJan Beulich static int __init max_swapfiles_check(void) 28101796316aSJan Beulich { 28111796316aSJan Beulich MAX_SWAPFILES_CHECK(); 28121796316aSJan Beulich return 0; 28131796316aSJan Beulich } 28141796316aSJan Beulich late_initcall(max_swapfiles_check); 28151796316aSJan Beulich #endif 28161796316aSJan Beulich 281753cbb243SCesar Eduardo Barros static struct swap_info_struct *alloc_swap_info(void) 28181da177e4SLinus Torvalds { 28191da177e4SLinus Torvalds struct swap_info_struct *p; 28201da177e4SLinus Torvalds unsigned int type; 2821a2468cc9SAaron Lu int i; 2822efa90a98SHugh Dickins 2823efa90a98SHugh Dickins p = kzalloc(sizeof(*p), GFP_KERNEL); 2824efa90a98SHugh Dickins if (!p) 282553cbb243SCesar Eduardo Barros return ERR_PTR(-ENOMEM); 2826efa90a98SHugh Dickins 28275d337b91SHugh Dickins spin_lock(&swap_lock); 2828efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) { 2829efa90a98SHugh Dickins if (!(swap_info[type]->flags & SWP_USED)) 28301da177e4SLinus Torvalds break; 2831efa90a98SHugh Dickins } 28320697212aSChristoph Lameter if (type >= MAX_SWAPFILES) { 28335d337b91SHugh Dickins spin_unlock(&swap_lock); 2834efa90a98SHugh Dickins kfree(p); 2835730c0581SCesar Eduardo Barros return ERR_PTR(-EPERM); 28361da177e4SLinus Torvalds } 2837efa90a98SHugh Dickins if (type >= nr_swapfiles) { 2838efa90a98SHugh Dickins p->type = type; 2839efa90a98SHugh Dickins swap_info[type] = p; 2840efa90a98SHugh Dickins /* 2841efa90a98SHugh Dickins * Write swap_info[type] before nr_swapfiles, in case a 2842efa90a98SHugh Dickins * racing procfs swap_start() or swap_next() is reading them. 2843efa90a98SHugh Dickins * (We never shrink nr_swapfiles, we never free this entry.) 2844efa90a98SHugh Dickins */ 2845efa90a98SHugh Dickins smp_wmb(); 2846efa90a98SHugh Dickins nr_swapfiles++; 2847efa90a98SHugh Dickins } else { 2848efa90a98SHugh Dickins kfree(p); 2849efa90a98SHugh Dickins p = swap_info[type]; 2850efa90a98SHugh Dickins /* 2851efa90a98SHugh Dickins * Do not memset this entry: a racing procfs swap_next() 2852efa90a98SHugh Dickins * would be relying on p->type to remain valid. 
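 *
 * The ordering against those readers pairs two barriers; a sketch, with
 * the reader half taken from swap_start()/swap_next() above:
 *
 *	writer (here)			reader (procfs)
 *	swap_info[type] = p;		type < nr_swapfiles?
 *	smp_wmb();			smp_rmb();
 *	nr_swapfiles++;			si = swap_info[type];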
2853efa90a98SHugh Dickins */ 2854efa90a98SHugh Dickins } 28559625a5f2SHugh Dickins INIT_LIST_HEAD(&p->first_swap_extent.list); 285618ab4d4cSDan Streetman plist_node_init(&p->list, 0); 2857a2468cc9SAaron Lu for_each_node(i) 2858a2468cc9SAaron Lu plist_node_init(&p->avail_lists[i], 0); 28591da177e4SLinus Torvalds p->flags = SWP_USED; 28605d337b91SHugh Dickins spin_unlock(&swap_lock); 2861ec8acf20SShaohua Li spin_lock_init(&p->lock); 28622628bd6fSHuang Ying spin_lock_init(&p->cont_lock); 2863efa90a98SHugh Dickins 286453cbb243SCesar Eduardo Barros return p; 286553cbb243SCesar Eduardo Barros } 286653cbb243SCesar Eduardo Barros 28674d0e1e10SCesar Eduardo Barros static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) 28684d0e1e10SCesar Eduardo Barros { 28694d0e1e10SCesar Eduardo Barros int error; 28704d0e1e10SCesar Eduardo Barros 28714d0e1e10SCesar Eduardo Barros if (S_ISBLK(inode->i_mode)) { 28724d0e1e10SCesar Eduardo Barros p->bdev = bdgrab(I_BDEV(inode)); 28734d0e1e10SCesar Eduardo Barros error = blkdev_get(p->bdev, 28746f179af8SHugh Dickins FMODE_READ | FMODE_WRITE | FMODE_EXCL, p); 28754d0e1e10SCesar Eduardo Barros if (error < 0) { 28764d0e1e10SCesar Eduardo Barros p->bdev = NULL; 28776f179af8SHugh Dickins return error; 28784d0e1e10SCesar Eduardo Barros } 28794d0e1e10SCesar Eduardo Barros p->old_block_size = block_size(p->bdev); 28804d0e1e10SCesar Eduardo Barros error = set_blocksize(p->bdev, PAGE_SIZE); 28814d0e1e10SCesar Eduardo Barros if (error < 0) 288287ade72aSCesar Eduardo Barros return error; 28834d0e1e10SCesar Eduardo Barros p->flags |= SWP_BLKDEV; 28844d0e1e10SCesar Eduardo Barros } else if (S_ISREG(inode->i_mode)) { 28854d0e1e10SCesar Eduardo Barros p->bdev = inode->i_sb->s_bdev; 28865955102cSAl Viro inode_lock(inode); 288787ade72aSCesar Eduardo Barros if (IS_SWAPFILE(inode)) 288887ade72aSCesar Eduardo Barros return -EBUSY; 288987ade72aSCesar Eduardo Barros } else 289087ade72aSCesar Eduardo Barros return -EINVAL; 28914d0e1e10SCesar Eduardo Barros 28924d0e1e10SCesar Eduardo Barros return 0; 28934d0e1e10SCesar Eduardo Barros } 28944d0e1e10SCesar Eduardo Barros 2895377eeaa8SAndi Kleen 2896377eeaa8SAndi Kleen /* 2897377eeaa8SAndi Kleen * Find out how many pages are allowed for a single swap device. There 2898377eeaa8SAndi Kleen * are two limiting factors: 2899377eeaa8SAndi Kleen * 1) the number of bits for the swap offset in the swp_entry_t type, and 2900377eeaa8SAndi Kleen * 2) the number of bits in the swap pte, as defined by the different 2901377eeaa8SAndi Kleen * architectures. 2902377eeaa8SAndi Kleen * 2903377eeaa8SAndi Kleen * In order to find the largest possible bit mask, a swap entry with 2904377eeaa8SAndi Kleen * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, 2905377eeaa8SAndi Kleen * decoded to a swp_entry_t again, and finally the swap offset is 2906377eeaa8SAndi Kleen * extracted. 2907377eeaa8SAndi Kleen * 2908377eeaa8SAndi Kleen * This will mask all the bits from the initial ~0UL mask that can't 2909377eeaa8SAndi Kleen * be encoded in either the swp_entry_t or the architecture definition 2910377eeaa8SAndi Kleen * of a swap pte. 2911377eeaa8SAndi Kleen */ 2912377eeaa8SAndi Kleen unsigned long generic_max_swapfile_size(void) 2913377eeaa8SAndi Kleen { 2914377eeaa8SAndi Kleen return swp_offset(pte_to_swp_entry( 2915377eeaa8SAndi Kleen swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; 2916377eeaa8SAndi Kleen } 2917377eeaa8SAndi Kleen 2918377eeaa8SAndi Kleen /* Can be overridden by an architecture for additional checks. 
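 *
 * The definition below is __weak, so an architecture can ship a strong
 * version that clamps the generic limit. A rough sketch of such an
 * override (arch_swap_pfn_limit() is a made-up name, for illustration):
 *
 *	unsigned long max_swapfile_size(void)
 *	{
 *		return min(generic_max_swapfile_size(),
 *			   arch_swap_pfn_limit());
 *	}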
*/
2919377eeaa8SAndi Kleen __weak unsigned long max_swapfile_size(void)
2920377eeaa8SAndi Kleen {
2921377eeaa8SAndi Kleen return generic_max_swapfile_size();
2922377eeaa8SAndi Kleen }
2923377eeaa8SAndi Kleen
2924ca8bd38bSCesar Eduardo Barros static unsigned long read_swap_header(struct swap_info_struct *p,
2925ca8bd38bSCesar Eduardo Barros union swap_header *swap_header,
2926ca8bd38bSCesar Eduardo Barros struct inode *inode)
2927ca8bd38bSCesar Eduardo Barros {
2928ca8bd38bSCesar Eduardo Barros int i;
2929ca8bd38bSCesar Eduardo Barros unsigned long maxpages;
2930ca8bd38bSCesar Eduardo Barros unsigned long swapfilepages;
2931d6bbbd29SRaymond Jennings unsigned long last_page;
2932ca8bd38bSCesar Eduardo Barros
2933ca8bd38bSCesar Eduardo Barros if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2934465c47fdSAndrew Morton pr_err("Unable to find swap-space signature\n");
293538719025SCesar Eduardo Barros return 0;
2936ca8bd38bSCesar Eduardo Barros }
2937ca8bd38bSCesar Eduardo Barros
2938ca8bd38bSCesar Eduardo Barros /* swap partition endianness hack... */
2939ca8bd38bSCesar Eduardo Barros if (swab32(swap_header->info.version) == 1) {
2940ca8bd38bSCesar Eduardo Barros swab32s(&swap_header->info.version);
2941ca8bd38bSCesar Eduardo Barros swab32s(&swap_header->info.last_page);
2942ca8bd38bSCesar Eduardo Barros swab32s(&swap_header->info.nr_badpages);
2943dd111be6SJann Horn if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2944dd111be6SJann Horn return 0;
2945ca8bd38bSCesar Eduardo Barros for (i = 0; i < swap_header->info.nr_badpages; i++)
2946ca8bd38bSCesar Eduardo Barros swab32s(&swap_header->info.badpages[i]);
2947ca8bd38bSCesar Eduardo Barros }
2948ca8bd38bSCesar Eduardo Barros /* Check the swap header's sub-version */
2949ca8bd38bSCesar Eduardo Barros if (swap_header->info.version != 1) {
2950465c47fdSAndrew Morton pr_warn("Unable to handle swap header version %d\n",
2951ca8bd38bSCesar Eduardo Barros swap_header->info.version);
295238719025SCesar Eduardo Barros return 0;
2953ca8bd38bSCesar Eduardo Barros }
2954ca8bd38bSCesar Eduardo Barros
2955ca8bd38bSCesar Eduardo Barros p->lowest_bit = 1;
2956ca8bd38bSCesar Eduardo Barros p->cluster_next = 1;
2957ca8bd38bSCesar Eduardo Barros p->cluster_nr = 0;
2958ca8bd38bSCesar Eduardo Barros
2959377eeaa8SAndi Kleen maxpages = max_swapfile_size();
2960d6bbbd29SRaymond Jennings last_page = swap_header->info.last_page;
2961a06ad633STom Abraham if (!last_page) {
2962a06ad633STom Abraham pr_warn("Empty swap-file\n");
2963a06ad633STom Abraham return 0;
2964a06ad633STom Abraham }
2965d6bbbd29SRaymond Jennings if (last_page > maxpages) {
2966465c47fdSAndrew Morton pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2967d6bbbd29SRaymond Jennings maxpages << (PAGE_SHIFT - 10),
2968d6bbbd29SRaymond Jennings last_page << (PAGE_SHIFT - 10));
2969d6bbbd29SRaymond Jennings }
2970d6bbbd29SRaymond Jennings if (maxpages > last_page) {
2971d6bbbd29SRaymond Jennings maxpages = last_page + 1;
2972ca8bd38bSCesar Eduardo Barros /* p->max is an unsigned int: don't overflow it */
2973ca8bd38bSCesar Eduardo Barros if ((unsigned int)maxpages == 0)
2974ca8bd38bSCesar Eduardo Barros maxpages = UINT_MAX;
2975ca8bd38bSCesar Eduardo Barros }
2976ca8bd38bSCesar Eduardo Barros p->highest_bit = maxpages - 1;
2977ca8bd38bSCesar Eduardo Barros
2978ca8bd38bSCesar Eduardo Barros if (!maxpages)
297938719025SCesar Eduardo Barros return 0;
2980ca8bd38bSCesar Eduardo Barros swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2981ca8bd38bSCesar Eduardo Barros if (swapfilepages && maxpages > swapfilepages) {
2982465c47fdSAndrew Morton pr_warn("Swap area shorter than signature indicates\n");
298338719025SCesar Eduardo Barros return 0;
2984ca8bd38bSCesar Eduardo Barros }
2985ca8bd38bSCesar Eduardo Barros if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
298638719025SCesar Eduardo Barros return 0;
2987ca8bd38bSCesar Eduardo Barros if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
298838719025SCesar Eduardo Barros return 0;
2989ca8bd38bSCesar Eduardo Barros
2990ca8bd38bSCesar Eduardo Barros return maxpages;
2991ca8bd38bSCesar Eduardo Barros }
2992ca8bd38bSCesar Eduardo Barros
29934b3ef9daSHuang, Ying #define SWAP_CLUSTER_INFO_COLS \
2994235b6217SHuang, Ying DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
29954b3ef9daSHuang, Ying #define SWAP_CLUSTER_SPACE_COLS \
29964b3ef9daSHuang, Ying DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
29974b3ef9daSHuang, Ying #define SWAP_CLUSTER_COLS \
29984b3ef9daSHuang, Ying max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
2999235b6217SHuang, Ying
3000915d4d7bSCesar Eduardo Barros static int setup_swap_map_and_extents(struct swap_info_struct *p,
3001915d4d7bSCesar Eduardo Barros union swap_header *swap_header,
3002915d4d7bSCesar Eduardo Barros unsigned char *swap_map,
30032a8f9449SShaohua Li struct swap_cluster_info *cluster_info,
3004915d4d7bSCesar Eduardo Barros unsigned long maxpages,
3005915d4d7bSCesar Eduardo Barros sector_t *span)
3006915d4d7bSCesar Eduardo Barros {
3007235b6217SHuang, Ying unsigned int j, k;
3008915d4d7bSCesar Eduardo Barros unsigned int nr_good_pages;
3009915d4d7bSCesar Eduardo Barros int nr_extents;
30102a8f9449SShaohua Li unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3011235b6217SHuang, Ying unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
3012235b6217SHuang, Ying unsigned long i, idx;
3013915d4d7bSCesar Eduardo Barros
3014915d4d7bSCesar Eduardo Barros nr_good_pages = maxpages - 1; /* omit header page */
3015915d4d7bSCesar Eduardo Barros
30166b534915SHuang Ying cluster_list_init(&p->free_clusters);
30176b534915SHuang Ying cluster_list_init(&p->discard_clusters);
30182a8f9449SShaohua Li
3019915d4d7bSCesar Eduardo Barros for (i = 0; i < swap_header->info.nr_badpages; i++) {
3020915d4d7bSCesar Eduardo Barros unsigned int page_nr = swap_header->info.badpages[i];
3021bdb8e3f6SCesar Eduardo Barros if (page_nr == 0 || page_nr > swap_header->info.last_page)
3022bdb8e3f6SCesar Eduardo Barros return -EINVAL;
3023915d4d7bSCesar Eduardo Barros if (page_nr < maxpages) {
3024915d4d7bSCesar Eduardo Barros swap_map[page_nr] = SWAP_MAP_BAD;
3025915d4d7bSCesar Eduardo Barros nr_good_pages--;
30262a8f9449SShaohua Li /*
30272a8f9449SShaohua Li * Haven't marked the cluster free yet, no list
30282a8f9449SShaohua Li * operation involved
30292a8f9449SShaohua Li */
30302a8f9449SShaohua Li inc_cluster_info_page(p, cluster_info, page_nr);
3031915d4d7bSCesar Eduardo Barros }
3032915d4d7bSCesar Eduardo Barros }
3033915d4d7bSCesar Eduardo Barros
30342a8f9449SShaohua Li /* Haven't marked the cluster free yet, no list operation involved */
30352a8f9449SShaohua Li for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
30362a8f9449SShaohua Li inc_cluster_info_page(p, cluster_info, i);
30372a8f9449SShaohua Li
3038915d4d7bSCesar Eduardo Barros if (nr_good_pages) {
3039915d4d7bSCesar Eduardo Barros swap_map[0] = SWAP_MAP_BAD;
30402a8f9449SShaohua Li /*
30412a8f9449SShaohua Li * Haven't marked the cluster free yet, no list
30422a8f9449SShaohua Li * operation involved
30432a8f9449SShaohua Li */
30442a8f9449SShaohua Li inc_cluster_info_page(p, cluster_info, 0);
3045915d4d7bSCesar Eduardo Barros p->max = maxpages;
3046915d4d7bSCesar Eduardo Barros p->pages = nr_good_pages;
3047915d4d7bSCesar Eduardo Barros nr_extents = setup_swap_extents(p, span);
3048bdb8e3f6SCesar Eduardo Barros if (nr_extents < 0)
3049bdb8e3f6SCesar Eduardo Barros return nr_extents;
3050915d4d7bSCesar Eduardo Barros nr_good_pages = p->pages;
3051915d4d7bSCesar Eduardo Barros }
3052915d4d7bSCesar Eduardo Barros if (!nr_good_pages) {
3053465c47fdSAndrew Morton pr_warn("Empty swap-file\n");
3054bdb8e3f6SCesar Eduardo Barros return -EINVAL;
3055915d4d7bSCesar Eduardo Barros }
3056915d4d7bSCesar Eduardo Barros
30572a8f9449SShaohua Li if (!cluster_info)
30582a8f9449SShaohua Li return nr_extents;
30592a8f9449SShaohua Li
3060235b6217SHuang, Ying
30614b3ef9daSHuang, Ying /*
30624b3ef9daSHuang, Ying * Build the free cluster list in column-interleaved order, to reduce false
30634b3ef9daSHuang, Ying * cache line sharing in cluster_info and across swap address spaces.
30644b3ef9daSHuang, Ying */
3065235b6217SHuang, Ying for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3066235b6217SHuang, Ying j = (k + col) % SWAP_CLUSTER_COLS;
3067235b6217SHuang, Ying for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3068235b6217SHuang, Ying idx = i * SWAP_CLUSTER_COLS + j;
3069235b6217SHuang, Ying if (idx >= nr_clusters)
3070235b6217SHuang, Ying continue;
3071235b6217SHuang, Ying if (cluster_count(&cluster_info[idx]))
3072235b6217SHuang, Ying continue;
30732a8f9449SShaohua Li cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
30746b534915SHuang Ying cluster_list_add_tail(&p->free_clusters, cluster_info,
30756b534915SHuang Ying idx);
30762a8f9449SShaohua Li }
30772a8f9449SShaohua Li }
3078915d4d7bSCesar Eduardo Barros return nr_extents;
3079915d4d7bSCesar Eduardo Barros }
3080915d4d7bSCesar Eduardo Barros
3081dcf6b7ddSRafael Aquini /*
3082dcf6b7ddSRafael Aquini * Helper for sys_swapon to determine whether a given swap
3083dcf6b7ddSRafael Aquini * backing device queue supports DISCARD operations.
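 *
 * Used by sys_swapon() below, roughly as:
 *
 *	if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) &&
 *	    swap_discardable(p))
 *		p->flags |= SWP_DISCARDABLE | SWP_AREA_DISCARD |
 *			    SWP_PAGE_DISCARD;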
3084dcf6b7ddSRafael Aquini */ 3085dcf6b7ddSRafael Aquini static bool swap_discardable(struct swap_info_struct *si) 3086dcf6b7ddSRafael Aquini { 3087dcf6b7ddSRafael Aquini struct request_queue *q = bdev_get_queue(si->bdev); 3088dcf6b7ddSRafael Aquini 3089dcf6b7ddSRafael Aquini if (!q || !blk_queue_discard(q)) 3090dcf6b7ddSRafael Aquini return false; 3091dcf6b7ddSRafael Aquini 3092dcf6b7ddSRafael Aquini return true; 3093dcf6b7ddSRafael Aquini } 3094dcf6b7ddSRafael Aquini 309553cbb243SCesar Eduardo Barros SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) 309653cbb243SCesar Eduardo Barros { 309753cbb243SCesar Eduardo Barros struct swap_info_struct *p; 309891a27b2aSJeff Layton struct filename *name; 309953cbb243SCesar Eduardo Barros struct file *swap_file = NULL; 310053cbb243SCesar Eduardo Barros struct address_space *mapping; 310140531542SCesar Eduardo Barros int prio; 310253cbb243SCesar Eduardo Barros int error; 310353cbb243SCesar Eduardo Barros union swap_header *swap_header; 3104915d4d7bSCesar Eduardo Barros int nr_extents; 310553cbb243SCesar Eduardo Barros sector_t span; 310653cbb243SCesar Eduardo Barros unsigned long maxpages; 310753cbb243SCesar Eduardo Barros unsigned char *swap_map = NULL; 31082a8f9449SShaohua Li struct swap_cluster_info *cluster_info = NULL; 310938b5faf4SDan Magenheimer unsigned long *frontswap_map = NULL; 311053cbb243SCesar Eduardo Barros struct page *page = NULL; 311153cbb243SCesar Eduardo Barros struct inode *inode = NULL; 31127cbf3192SOmar Sandoval bool inced_nr_rotate_swap = false; 311353cbb243SCesar Eduardo Barros 3114d15cab97SHugh Dickins if (swap_flags & ~SWAP_FLAGS_VALID) 3115d15cab97SHugh Dickins return -EINVAL; 3116d15cab97SHugh Dickins 311753cbb243SCesar Eduardo Barros if (!capable(CAP_SYS_ADMIN)) 311853cbb243SCesar Eduardo Barros return -EPERM; 311953cbb243SCesar Eduardo Barros 3120a2468cc9SAaron Lu if (!swap_avail_heads) 3121a2468cc9SAaron Lu return -ENOMEM; 3122a2468cc9SAaron Lu 312353cbb243SCesar Eduardo Barros p = alloc_swap_info(); 31242542e513SCesar Eduardo Barros if (IS_ERR(p)) 31252542e513SCesar Eduardo Barros return PTR_ERR(p); 312653cbb243SCesar Eduardo Barros 3127815c2c54SShaohua Li INIT_WORK(&p->discard_work, swap_discard_work); 3128815c2c54SShaohua Li 31291da177e4SLinus Torvalds name = getname(specialfile); 31301da177e4SLinus Torvalds if (IS_ERR(name)) { 31317de7fb6bSCesar Eduardo Barros error = PTR_ERR(name); 31321da177e4SLinus Torvalds name = NULL; 3133bd69010bSCesar Eduardo Barros goto bad_swap; 31341da177e4SLinus Torvalds } 3135669abf4eSJeff Layton swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0); 31361da177e4SLinus Torvalds if (IS_ERR(swap_file)) { 31377de7fb6bSCesar Eduardo Barros error = PTR_ERR(swap_file); 31381da177e4SLinus Torvalds swap_file = NULL; 3139bd69010bSCesar Eduardo Barros goto bad_swap; 31401da177e4SLinus Torvalds } 31411da177e4SLinus Torvalds 31421da177e4SLinus Torvalds p->swap_file = swap_file; 31431da177e4SLinus Torvalds mapping = swap_file->f_mapping; 31442130781eSCesar Eduardo Barros inode = mapping->host; 31456f179af8SHugh Dickins 31465955102cSAl Viro /* If S_ISREG(inode->i_mode) will do inode_lock(inode); */ 31474d0e1e10SCesar Eduardo Barros error = claim_swapfile(p, inode); 31484d0e1e10SCesar Eduardo Barros if (unlikely(error)) 31491da177e4SLinus Torvalds goto bad_swap; 31501da177e4SLinus Torvalds 31511da177e4SLinus Torvalds /* 31521da177e4SLinus Torvalds * Read the swap header. 
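 *
 * The header lives in page 0 of the area (which is why
 * setup_swap_map_and_extents() above marks swap_map[0] SWAP_MAP_BAD).
 * Its validation boils down to this check in read_swap_header() above:
 *
 *	swap_header = kmap(page);
 *	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10))
 *		return 0;	<- no v2 signature: reject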
31531da177e4SLinus Torvalds */
31541da177e4SLinus Torvalds if (!mapping->a_ops->readpage) {
31551da177e4SLinus Torvalds error = -EINVAL;
31561da177e4SLinus Torvalds goto bad_swap;
31571da177e4SLinus Torvalds }
3158090d2b18SPekka Enberg page = read_mapping_page(mapping, 0, swap_file);
31591da177e4SLinus Torvalds if (IS_ERR(page)) {
31601da177e4SLinus Torvalds error = PTR_ERR(page);
31611da177e4SLinus Torvalds goto bad_swap;
31621da177e4SLinus Torvalds }
316381e33971SHugh Dickins swap_header = kmap(page);
31641da177e4SLinus Torvalds
3165ca8bd38bSCesar Eduardo Barros maxpages = read_swap_header(p, swap_header, inode);
3166ca8bd38bSCesar Eduardo Barros if (unlikely(!maxpages)) {
31671da177e4SLinus Torvalds error = -EINVAL;
31681da177e4SLinus Torvalds goto bad_swap;
31691da177e4SLinus Torvalds }
31701da177e4SLinus Torvalds
31711da177e4SLinus Torvalds /* OK, set up the swap map and apply the bad block list */
3172803d0c83SCesar Eduardo Barros swap_map = vzalloc(maxpages);
317378ecba08SHugh Dickins if (!swap_map) {
31741da177e4SLinus Torvalds error = -ENOMEM;
31751da177e4SLinus Torvalds goto bad_swap;
31761da177e4SLinus Torvalds }
3177f0571429SMinchan Kim
3178f0571429SMinchan Kim if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
3179f0571429SMinchan Kim p->flags |= SWP_STABLE_WRITES;
3180f0571429SMinchan Kim
3181539a6feaSMinchan Kim if (bdi_cap_synchronous_io(inode_to_bdi(inode)))
3182539a6feaSMinchan Kim p->flags |= SWP_SYNCHRONOUS_IO;
3183539a6feaSMinchan Kim
31842a8f9449SShaohua Li if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
31856f179af8SHugh Dickins int cpu;
3186235b6217SHuang, Ying unsigned long ci, nr_cluster;
31876f179af8SHugh Dickins
31882a8f9449SShaohua Li p->flags |= SWP_SOLIDSTATE;
31892a8f9449SShaohua Li /*
31902a8f9449SShaohua Li * Select a random position to start with, to help with wear
31912a8f9449SShaohua Li * leveling on the SSD.
31922a8f9449SShaohua Li */
31932a8f9449SShaohua Li p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
3194235b6217SHuang, Ying nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
31952a8f9449SShaohua Li
3196778e1cddSKees Cook cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
319754f180d3SHuang Ying GFP_KERNEL);
31982a8f9449SShaohua Li if (!cluster_info) {
31992a8f9449SShaohua Li error = -ENOMEM;
32002a8f9449SShaohua Li goto bad_swap;
32012a8f9449SShaohua Li }
3202235b6217SHuang, Ying
3203235b6217SHuang, Ying for (ci = 0; ci < nr_cluster; ci++)
3204235b6217SHuang, Ying spin_lock_init(&((cluster_info + ci)->lock));
3205235b6217SHuang, Ying
3206ebc2a1a6SShaohua Li p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3207ebc2a1a6SShaohua Li if (!p->percpu_cluster) {
3208ebc2a1a6SShaohua Li error = -ENOMEM;
3209ebc2a1a6SShaohua Li goto bad_swap;
3210ebc2a1a6SShaohua Li }
32116f179af8SHugh Dickins for_each_possible_cpu(cpu) {
3212ebc2a1a6SShaohua Li struct percpu_cluster *cluster;
32136f179af8SHugh Dickins cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3214ebc2a1a6SShaohua Li cluster_set_null(&cluster->index);
3215ebc2a1a6SShaohua Li }
32167cbf3192SOmar Sandoval } else {
321781a0298bSHuang Ying atomic_inc(&nr_rotate_swap);
32187cbf3192SOmar Sandoval inced_nr_rotate_swap = true;
32197cbf3192SOmar Sandoval }
32201da177e4SLinus Torvalds
32211421ef3cSCesar Eduardo Barros error = swap_cgroup_swapon(p->type, maxpages);
32221421ef3cSCesar Eduardo Barros if (error)
32231421ef3cSCesar Eduardo Barros goto bad_swap;
32241421ef3cSCesar Eduardo Barros
3225915d4d7bSCesar Eduardo Barros nr_extents = setup_swap_map_and_extents(p, swap_header,
swap_map, 32262a8f9449SShaohua Li cluster_info, maxpages, &span); 3227915d4d7bSCesar Eduardo Barros if (unlikely(nr_extents < 0)) { 322853092a74SHugh Dickins error = nr_extents; 3229e2244ec2SHugh Dickins goto bad_swap; 323053092a74SHugh Dickins } 323138b5faf4SDan Magenheimer /* frontswap enabled? set up bit-per-page map for frontswap */ 32328ea1d2a1SVlastimil Babka if (IS_ENABLED(CONFIG_FRONTSWAP)) 3233778e1cddSKees Cook frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages), 3234778e1cddSKees Cook sizeof(long), 323554f180d3SHuang Ying GFP_KERNEL); 32361da177e4SLinus Torvalds 32372a8f9449SShaohua Li if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { 3238dcf6b7ddSRafael Aquini /* 3239dcf6b7ddSRafael Aquini * When discard is enabled for swap with no particular 3240dcf6b7ddSRafael Aquini * policy flagged, we set all swap discard flags here in 3241dcf6b7ddSRafael Aquini * order to sustain backward compatibility with older 3242dcf6b7ddSRafael Aquini * swapon(8) releases. 3243dcf6b7ddSRafael Aquini */ 3244dcf6b7ddSRafael Aquini p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | 3245dcf6b7ddSRafael Aquini SWP_PAGE_DISCARD); 3246dcf6b7ddSRafael Aquini 3247dcf6b7ddSRafael Aquini /* 3248dcf6b7ddSRafael Aquini * By flagging sys_swapon, a sysadmin can tell us to 3249dcf6b7ddSRafael Aquini * either do single-time area discards only, or to just 3250dcf6b7ddSRafael Aquini * perform discards for released swap page-clusters. 3251dcf6b7ddSRafael Aquini * Now it's time to adjust the p->flags accordingly. 3252dcf6b7ddSRafael Aquini */ 3253dcf6b7ddSRafael Aquini if (swap_flags & SWAP_FLAG_DISCARD_ONCE) 3254dcf6b7ddSRafael Aquini p->flags &= ~SWP_PAGE_DISCARD; 3255dcf6b7ddSRafael Aquini else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) 3256dcf6b7ddSRafael Aquini p->flags &= ~SWP_AREA_DISCARD; 3257dcf6b7ddSRafael Aquini 3258dcf6b7ddSRafael Aquini /* issue a swapon-time discard if it's still required */ 3259dcf6b7ddSRafael Aquini if (p->flags & SWP_AREA_DISCARD) { 3260dcf6b7ddSRafael Aquini int err = discard_swap(p); 3261dcf6b7ddSRafael Aquini if (unlikely(err)) 3262465c47fdSAndrew Morton pr_err("swapon: discard_swap(%p): %d\n", 3263dcf6b7ddSRafael Aquini p, err); 3264dcf6b7ddSRafael Aquini } 3265dcf6b7ddSRafael Aquini } 32666a6ba831SHugh Dickins 32674b3ef9daSHuang, Ying error = init_swap_address_space(p->type, maxpages); 32684b3ef9daSHuang, Ying if (error) 32694b3ef9daSHuang, Ying goto bad_swap; 32704b3ef9daSHuang, Ying 3271fc0abb14SIngo Molnar mutex_lock(&swapon_mutex); 327240531542SCesar Eduardo Barros prio = -1; 327378ecba08SHugh Dickins if (swap_flags & SWAP_FLAG_PREFER) 327440531542SCesar Eduardo Barros prio = 327578ecba08SHugh Dickins (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; 32762a8f9449SShaohua Li enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map); 3277c69dbfb8SCesar Eduardo Barros 3278756a025fSJoe Perches pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n", 327991a27b2aSJeff Layton p->pages<<(PAGE_SHIFT-10), name->name, p->prio, 3280c69dbfb8SCesar Eduardo Barros nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), 3281c69dbfb8SCesar Eduardo Barros (p->flags & SWP_SOLIDSTATE) ? "SS" : "", 328238b5faf4SDan Magenheimer (p->flags & SWP_DISCARDABLE) ? "D" : "", 3283dcf6b7ddSRafael Aquini (p->flags & SWP_AREA_DISCARD) ? "s" : "", 3284dcf6b7ddSRafael Aquini (p->flags & SWP_PAGE_DISCARD) ? "c" : "", 328538b5faf4SDan Magenheimer (frontswap_map) ? 
"FS" : ""); 3286c69dbfb8SCesar Eduardo Barros 3287fc0abb14SIngo Molnar mutex_unlock(&swapon_mutex); 328866d7dd51SKay Sievers atomic_inc(&proc_poll_event); 328966d7dd51SKay Sievers wake_up_interruptible(&proc_poll_wait); 329066d7dd51SKay Sievers 32919b01c350SCesar Eduardo Barros if (S_ISREG(inode->i_mode)) 32929b01c350SCesar Eduardo Barros inode->i_flags |= S_SWAPFILE; 32931da177e4SLinus Torvalds error = 0; 32941da177e4SLinus Torvalds goto out; 32951da177e4SLinus Torvalds bad_swap: 3296ebc2a1a6SShaohua Li free_percpu(p->percpu_cluster); 3297ebc2a1a6SShaohua Li p->percpu_cluster = NULL; 3298bd69010bSCesar Eduardo Barros if (inode && S_ISBLK(inode->i_mode) && p->bdev) { 3299f2090d2dSCesar Eduardo Barros set_blocksize(p->bdev, p->old_block_size); 3300f2090d2dSCesar Eduardo Barros blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 33011da177e4SLinus Torvalds } 33024cd3bb10SHugh Dickins destroy_swap_extents(p); 3303e8e6c2ecSCesar Eduardo Barros swap_cgroup_swapoff(p->type); 33045d337b91SHugh Dickins spin_lock(&swap_lock); 33051da177e4SLinus Torvalds p->swap_file = NULL; 33061da177e4SLinus Torvalds p->flags = 0; 33075d337b91SHugh Dickins spin_unlock(&swap_lock); 33081da177e4SLinus Torvalds vfree(swap_map); 33098606a1a9SDarrick J. Wong kvfree(cluster_info); 3310b6b1fd2aSDavid Rientjes kvfree(frontswap_map); 33117cbf3192SOmar Sandoval if (inced_nr_rotate_swap) 33127cbf3192SOmar Sandoval atomic_dec(&nr_rotate_swap); 331352c50567SMel Gorman if (swap_file) { 33142130781eSCesar Eduardo Barros if (inode && S_ISREG(inode->i_mode)) { 33155955102cSAl Viro inode_unlock(inode); 33162130781eSCesar Eduardo Barros inode = NULL; 33172130781eSCesar Eduardo Barros } 33181da177e4SLinus Torvalds filp_close(swap_file, NULL); 331952c50567SMel Gorman } 33201da177e4SLinus Torvalds out: 33211da177e4SLinus Torvalds if (page && !IS_ERR(page)) { 33221da177e4SLinus Torvalds kunmap(page); 332309cbfeafSKirill A. Shutemov put_page(page); 33241da177e4SLinus Torvalds } 33251da177e4SLinus Torvalds if (name) 33261da177e4SLinus Torvalds putname(name); 33279b01c350SCesar Eduardo Barros if (inode && S_ISREG(inode->i_mode)) 33285955102cSAl Viro inode_unlock(inode); 3329039939a6STim Chen if (!error) 3330039939a6STim Chen enable_swap_slots_cache(); 33311da177e4SLinus Torvalds return error; 33321da177e4SLinus Torvalds } 33331da177e4SLinus Torvalds 33341da177e4SLinus Torvalds void si_swapinfo(struct sysinfo *val) 33351da177e4SLinus Torvalds { 3336efa90a98SHugh Dickins unsigned int type; 33371da177e4SLinus Torvalds unsigned long nr_to_be_unused = 0; 33381da177e4SLinus Torvalds 33395d337b91SHugh Dickins spin_lock(&swap_lock); 3340efa90a98SHugh Dickins for (type = 0; type < nr_swapfiles; type++) { 3341efa90a98SHugh Dickins struct swap_info_struct *si = swap_info[type]; 3342efa90a98SHugh Dickins 3343efa90a98SHugh Dickins if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) 3344efa90a98SHugh Dickins nr_to_be_unused += si->inuse_pages; 33451da177e4SLinus Torvalds } 3346ec8acf20SShaohua Li val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; 33471da177e4SLinus Torvalds val->totalswap = total_swap_pages + nr_to_be_unused; 33485d337b91SHugh Dickins spin_unlock(&swap_lock); 33491da177e4SLinus Torvalds } 33501da177e4SLinus Torvalds 33511da177e4SLinus Torvalds /* 33521da177e4SLinus Torvalds * Verify that a swap entry is valid and increment its swap map count. 33531da177e4SLinus Torvalds * 3354355cfa73SKAMEZAWA Hiroyuki * Returns error code in following case. 
3355355cfa73SKAMEZAWA Hiroyuki * - success -> 0 3356355cfa73SKAMEZAWA Hiroyuki * - swp_entry is invalid -> EINVAL 3357355cfa73SKAMEZAWA Hiroyuki * - swp_entry is migration entry -> EINVAL 3358355cfa73SKAMEZAWA Hiroyuki * - swap-cache reference is requested but there is already one. -> EEXIST 3359355cfa73SKAMEZAWA Hiroyuki * - swap-cache reference is requested but the entry is not used. -> ENOENT 3360570a335bSHugh Dickins * - swap-mapped reference requested but needs continued swap count. -> ENOMEM 33611da177e4SLinus Torvalds */ 33628d69aaeeSHugh Dickins static int __swap_duplicate(swp_entry_t entry, unsigned char usage) 33631da177e4SLinus Torvalds { 33641da177e4SLinus Torvalds struct swap_info_struct *p; 3365235b6217SHuang, Ying struct swap_cluster_info *ci; 33661da177e4SLinus Torvalds unsigned long offset, type; 33678d69aaeeSHugh Dickins unsigned char count; 33688d69aaeeSHugh Dickins unsigned char has_cache; 3369253d553bSHugh Dickins int err = -EINVAL; 33701da177e4SLinus Torvalds 3371a7420aa5SAndi Kleen if (non_swap_entry(entry)) 3372253d553bSHugh Dickins goto out; 33730697212aSChristoph Lameter 33741da177e4SLinus Torvalds type = swp_type(entry); 33751da177e4SLinus Torvalds if (type >= nr_swapfiles) 33761da177e4SLinus Torvalds goto bad_file; 3377efa90a98SHugh Dickins p = swap_info[type]; 33781da177e4SLinus Torvalds offset = swp_offset(entry); 3379355cfa73SKAMEZAWA Hiroyuki if (unlikely(offset >= p->max)) 3380235b6217SHuang, Ying goto out; 3381235b6217SHuang, Ying 3382235b6217SHuang, Ying ci = lock_cluster_or_swap_info(p, offset); 3383355cfa73SKAMEZAWA Hiroyuki 3384253d553bSHugh Dickins count = p->swap_map[offset]; 3385edfe23daSShaohua Li 3386edfe23daSShaohua Li /* 3387edfe23daSShaohua Li * swapin_readahead() doesn't check if a swap entry is valid, so the 3388edfe23daSShaohua Li * swap entry could be SWAP_MAP_BAD. Check here with lock held. 
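 *
 * The usage argument distinguishes the three wrappers defined below:
 *
 *	__swap_duplicate(entry, 1)			<- swap_duplicate()
 *	__swap_duplicate(entry, SWAP_HAS_CACHE)		<- swapcache_prepare()
 *	__swap_duplicate(entry, SWAP_MAP_SHMEM)		<- swap_shmem_alloc()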
3389edfe23daSShaohua Li */ 3390edfe23daSShaohua Li if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { 3391edfe23daSShaohua Li err = -ENOENT; 3392edfe23daSShaohua Li goto unlock_out; 3393edfe23daSShaohua Li } 3394edfe23daSShaohua Li 3395253d553bSHugh Dickins has_cache = count & SWAP_HAS_CACHE; 3396253d553bSHugh Dickins count &= ~SWAP_HAS_CACHE; 3397253d553bSHugh Dickins err = 0; 3398355cfa73SKAMEZAWA Hiroyuki 3399253d553bSHugh Dickins if (usage == SWAP_HAS_CACHE) { 3400355cfa73SKAMEZAWA Hiroyuki 3401355cfa73SKAMEZAWA Hiroyuki /* set SWAP_HAS_CACHE if there is no cache and entry is used */ 3402253d553bSHugh Dickins if (!has_cache && count) 3403253d553bSHugh Dickins has_cache = SWAP_HAS_CACHE; 3404253d553bSHugh Dickins else if (has_cache) /* someone else added cache */ 3405253d553bSHugh Dickins err = -EEXIST; 3406253d553bSHugh Dickins else /* no users remaining */ 3407253d553bSHugh Dickins err = -ENOENT; 3408355cfa73SKAMEZAWA Hiroyuki 3409355cfa73SKAMEZAWA Hiroyuki } else if (count || has_cache) { 3410253d553bSHugh Dickins 3411570a335bSHugh Dickins if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) 3412570a335bSHugh Dickins count += usage; 3413570a335bSHugh Dickins else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) 3414253d553bSHugh Dickins err = -EINVAL; 3415570a335bSHugh Dickins else if (swap_count_continued(p, offset, count)) 3416570a335bSHugh Dickins count = COUNT_CONTINUED; 3417570a335bSHugh Dickins else 3418570a335bSHugh Dickins err = -ENOMEM; 3419253d553bSHugh Dickins } else 3420253d553bSHugh Dickins err = -ENOENT; /* unused swap entry */ 3421253d553bSHugh Dickins 3422253d553bSHugh Dickins p->swap_map[offset] = count | has_cache; 3423253d553bSHugh Dickins 3424355cfa73SKAMEZAWA Hiroyuki unlock_out: 3425235b6217SHuang, Ying unlock_cluster_or_swap_info(p, ci); 34261da177e4SLinus Torvalds out: 3427253d553bSHugh Dickins return err; 34281da177e4SLinus Torvalds 34291da177e4SLinus Torvalds bad_file: 3430465c47fdSAndrew Morton pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val); 34311da177e4SLinus Torvalds goto out; 34321da177e4SLinus Torvalds } 3433253d553bSHugh Dickins 3434355cfa73SKAMEZAWA Hiroyuki /* 3435aaa46865SHugh Dickins * Help swapoff by noting that swap entry belongs to shmem/tmpfs 3436aaa46865SHugh Dickins * (in which case its reference count is never incremented). 3437aaa46865SHugh Dickins */ 3438aaa46865SHugh Dickins void swap_shmem_alloc(swp_entry_t entry) 3439aaa46865SHugh Dickins { 3440aaa46865SHugh Dickins __swap_duplicate(entry, SWAP_MAP_SHMEM); 3441aaa46865SHugh Dickins } 3442aaa46865SHugh Dickins 3443aaa46865SHugh Dickins /* 344408259d58SHugh Dickins * Increase reference count of swap entry by 1. 344508259d58SHugh Dickins * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required 344608259d58SHugh Dickins * but could not be atomically allocated. Returns 0, just as if it succeeded, 344708259d58SHugh Dickins * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which 344808259d58SHugh Dickins * might occur if a page table entry has got corrupted. 
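 *
 * A typical caller that cannot sleep handles the -ENOMEM by dropping its
 * page table lock and retrying; a sketch of that pattern:
 *
 *	if (swap_duplicate(entry) < 0) {
 *		...drop the page table lock...
 *		if (add_swap_count_continuation(entry, GFP_KERNEL))
 *			return -ENOMEM;
 *		...retake the lock and retry the copy...
 *	}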
3449355cfa73SKAMEZAWA Hiroyuki */ 3450570a335bSHugh Dickins int swap_duplicate(swp_entry_t entry) 3451355cfa73SKAMEZAWA Hiroyuki { 3452570a335bSHugh Dickins int err = 0; 3453570a335bSHugh Dickins 3454570a335bSHugh Dickins while (!err && __swap_duplicate(entry, 1) == -ENOMEM) 3455570a335bSHugh Dickins err = add_swap_count_continuation(entry, GFP_ATOMIC); 3456570a335bSHugh Dickins return err; 3457355cfa73SKAMEZAWA Hiroyuki } 34581da177e4SLinus Torvalds 3459cb4b86baSKAMEZAWA Hiroyuki /* 3460355cfa73SKAMEZAWA Hiroyuki * @entry: swap entry for which we allocate swap cache. 3461355cfa73SKAMEZAWA Hiroyuki * 346273c34b6aSHugh Dickins * Called when allocating swap cache for an existing swap entry. 3463355cfa73SKAMEZAWA Hiroyuki * This can return error codes. Returns 0 on success. 3464355cfa73SKAMEZAWA Hiroyuki * -EEXIST means there is already a swap cache. 3465355cfa73SKAMEZAWA Hiroyuki * Note: return code is different from swap_duplicate(). 3466cb4b86baSKAMEZAWA Hiroyuki */ 3467cb4b86baSKAMEZAWA Hiroyuki int swapcache_prepare(swp_entry_t entry) 3468cb4b86baSKAMEZAWA Hiroyuki { 3469253d553bSHugh Dickins return __swap_duplicate(entry, SWAP_HAS_CACHE); 3470cb4b86baSKAMEZAWA Hiroyuki } 3471cb4b86baSKAMEZAWA Hiroyuki 34720bcac06fSMinchan Kim struct swap_info_struct *swp_swap_info(swp_entry_t entry) 34730bcac06fSMinchan Kim { 34740bcac06fSMinchan Kim return swap_info[swp_type(entry)]; 34750bcac06fSMinchan Kim } 34760bcac06fSMinchan Kim 3477f981c595SMel Gorman struct swap_info_struct *page_swap_info(struct page *page) 3478f981c595SMel Gorman { 34790bcac06fSMinchan Kim swp_entry_t entry = { .val = page_private(page) }; 34800bcac06fSMinchan Kim return swp_swap_info(entry); 3481f981c595SMel Gorman } 3482f981c595SMel Gorman 3483f981c595SMel Gorman /* 3484f981c595SMel Gorman * out-of-line __page_file_ methods to avoid include hell. 3485f981c595SMel Gorman */ 3486f981c595SMel Gorman struct address_space *__page_file_mapping(struct page *page) 3487f981c595SMel Gorman { 3488f981c595SMel Gorman return page_swap_info(page)->swap_file->f_mapping; 3489f981c595SMel Gorman } 3490f981c595SMel Gorman EXPORT_SYMBOL_GPL(__page_file_mapping); 3491f981c595SMel Gorman 3492f981c595SMel Gorman pgoff_t __page_file_index(struct page *page) 3493f981c595SMel Gorman { 3494f981c595SMel Gorman swp_entry_t swap = { .val = page_private(page) }; 3495f981c595SMel Gorman return swp_offset(swap); 3496f981c595SMel Gorman } 3497f981c595SMel Gorman EXPORT_SYMBOL_GPL(__page_file_index); 3498f981c595SMel Gorman 34991da177e4SLinus Torvalds /* 3500570a335bSHugh Dickins * add_swap_count_continuation - called when a swap count is duplicated 3501570a335bSHugh Dickins * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's 3502570a335bSHugh Dickins * page of the original vmalloc'ed swap_map, to hold the continuation count 3503570a335bSHugh Dickins * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called 3504570a335bSHugh Dickins * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. 3505570a335bSHugh Dickins * 3506570a335bSHugh Dickins * These continuation pages are seldom referenced: the common paths all work 3507570a335bSHugh Dickins * on the original swap_map, only referring to a continuation page when the 3508570a335bSHugh Dickins * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
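 *
 * The resulting layout: the swap_map page containing this entry becomes
 * the list head, with page_private() set to SWP_CONTINUED and its ->lru
 * linking the continuation pages; each continuation page holds one count
 * byte at the same page offset as the original swap_map entry.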
3509570a335bSHugh Dickins * 3510570a335bSHugh Dickins * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding 3511570a335bSHugh Dickins * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) 3512570a335bSHugh Dickins * can be called after dropping locks. 3513570a335bSHugh Dickins */ 3514570a335bSHugh Dickins int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) 3515570a335bSHugh Dickins { 3516570a335bSHugh Dickins struct swap_info_struct *si; 3517235b6217SHuang, Ying struct swap_cluster_info *ci; 3518570a335bSHugh Dickins struct page *head; 3519570a335bSHugh Dickins struct page *page; 3520570a335bSHugh Dickins struct page *list_page; 3521570a335bSHugh Dickins pgoff_t offset; 3522570a335bSHugh Dickins unsigned char count; 3523570a335bSHugh Dickins 3524570a335bSHugh Dickins /* 3525570a335bSHugh Dickins * When debugging, it's easier to use __GFP_ZERO here; but it's better 3526570a335bSHugh Dickins * for latency not to zero a page while GFP_ATOMIC and holding locks. 3527570a335bSHugh Dickins */ 3528570a335bSHugh Dickins page = alloc_page(gfp_mask | __GFP_HIGHMEM); 3529570a335bSHugh Dickins 3530570a335bSHugh Dickins si = swap_info_get(entry); 3531570a335bSHugh Dickins if (!si) { 3532570a335bSHugh Dickins /* 3533570a335bSHugh Dickins * An acceptable race has occurred since the failing 3534570a335bSHugh Dickins * __swap_duplicate(): the swap entry has been freed, 3535570a335bSHugh Dickins * perhaps even the whole swap_map cleared for swapoff. 3536570a335bSHugh Dickins */ 3537570a335bSHugh Dickins goto outer; 3538570a335bSHugh Dickins } 3539570a335bSHugh Dickins 3540570a335bSHugh Dickins offset = swp_offset(entry); 3541235b6217SHuang, Ying 3542235b6217SHuang, Ying ci = lock_cluster(si, offset); 3543235b6217SHuang, Ying 3544570a335bSHugh Dickins count = si->swap_map[offset] & ~SWAP_HAS_CACHE; 3545570a335bSHugh Dickins 3546570a335bSHugh Dickins if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { 3547570a335bSHugh Dickins /* 3548570a335bSHugh Dickins * The higher the swap count, the more likely it is that tasks 3549570a335bSHugh Dickins * will race to add swap count continuation: we need to avoid 3550570a335bSHugh Dickins * over-provisioning. 3551570a335bSHugh Dickins */ 3552570a335bSHugh Dickins goto out; 3553570a335bSHugh Dickins } 3554570a335bSHugh Dickins 3555570a335bSHugh Dickins if (!page) { 3556235b6217SHuang, Ying unlock_cluster(ci); 3557ec8acf20SShaohua Li spin_unlock(&si->lock); 3558570a335bSHugh Dickins return -ENOMEM; 3559570a335bSHugh Dickins } 3560570a335bSHugh Dickins 3561570a335bSHugh Dickins /* 3562570a335bSHugh Dickins * We are fortunate that although vmalloc_to_page uses pte_offset_map, 3563570a335bSHugh Dickins * no architecture is using highmem pages for kernel page tables: so it 3564570a335bSHugh Dickins * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps. 3565570a335bSHugh Dickins */ 3566570a335bSHugh Dickins head = vmalloc_to_page(si->swap_map + offset); 3567570a335bSHugh Dickins offset &= ~PAGE_MASK; 3568570a335bSHugh Dickins 35692628bd6fSHuang Ying spin_lock(&si->cont_lock); 3570570a335bSHugh Dickins /* 3571570a335bSHugh Dickins * Page allocation does not initialize the page's lru field, 3572570a335bSHugh Dickins * but it does always reset its private field. 
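 * A zero page_private() here therefore reliably means "no continuation
 * list yet", so the head's lru list is initialized exactly once.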
3573570a335bSHugh Dickins */ 3574570a335bSHugh Dickins if (!page_private(head)) { 3575570a335bSHugh Dickins BUG_ON(count & COUNT_CONTINUED); 3576570a335bSHugh Dickins INIT_LIST_HEAD(&head->lru); 3577570a335bSHugh Dickins set_page_private(head, SWP_CONTINUED); 3578570a335bSHugh Dickins si->flags |= SWP_CONTINUED; 3579570a335bSHugh Dickins } 3580570a335bSHugh Dickins 3581570a335bSHugh Dickins list_for_each_entry(list_page, &head->lru, lru) { 3582570a335bSHugh Dickins unsigned char *map; 3583570a335bSHugh Dickins 3584570a335bSHugh Dickins /* 3585570a335bSHugh Dickins * If the previous map said no continuation, but we've found 3586570a335bSHugh Dickins * a continuation page, free our allocation and use this one. 3587570a335bSHugh Dickins */ 3588570a335bSHugh Dickins if (!(count & COUNT_CONTINUED)) 35892628bd6fSHuang Ying goto out_unlock_cont; 3590570a335bSHugh Dickins 35919b04c5feSCong Wang map = kmap_atomic(list_page) + offset; 3592570a335bSHugh Dickins count = *map; 35939b04c5feSCong Wang kunmap_atomic(map); 3594570a335bSHugh Dickins 3595570a335bSHugh Dickins /* 3596570a335bSHugh Dickins * If this continuation count now has some space in it, 3597570a335bSHugh Dickins * free our allocation and use this one. 3598570a335bSHugh Dickins */ 3599570a335bSHugh Dickins if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) 36002628bd6fSHuang Ying goto out_unlock_cont; 3601570a335bSHugh Dickins } 3602570a335bSHugh Dickins 3603570a335bSHugh Dickins list_add_tail(&page->lru, &head->lru); 3604570a335bSHugh Dickins page = NULL; /* now it's attached, don't free it */ 36052628bd6fSHuang Ying out_unlock_cont: 36062628bd6fSHuang Ying spin_unlock(&si->cont_lock); 3607570a335bSHugh Dickins out: 3608235b6217SHuang, Ying unlock_cluster(ci); 3609ec8acf20SShaohua Li spin_unlock(&si->lock); 3610570a335bSHugh Dickins outer: 3611570a335bSHugh Dickins if (page) 3612570a335bSHugh Dickins __free_page(page); 3613570a335bSHugh Dickins return 0; 3614570a335bSHugh Dickins } 3615570a335bSHugh Dickins 3616570a335bSHugh Dickins /* 3617570a335bSHugh Dickins * swap_count_continued - when the original swap_map count is incremented 3618570a335bSHugh Dickins * from SWAP_MAP_MAX, check if there is already a continuation page to carry 3619570a335bSHugh Dickins * into, carry if so, or else fail until a new continuation page is allocated; 3620570a335bSHugh Dickins * when the original swap_map count is decremented from 0 with continuation, 3621570a335bSHugh Dickins * borrow from the continuation and report whether it still holds more. 3622235b6217SHuang, Ying * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster 3623235b6217SHuang, Ying * lock. 
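 *
 * A worked example of the carry, under the assumption that SWAP_MAP_MAX
 * and SWAP_CONT_MAX play the role of "9" for the low digit and for each
 * continuation digit respectively: incrementing a count whose digits are
 * all at their maximum behaves like 999 + 1 in decimal, zeroing every
 * full digit as the carry moves one continuation page further from the
 * head, until a digit with room absorbs it (or the list runs out and a
 * new continuation page must be added).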
3624570a335bSHugh Dickins */ 3625570a335bSHugh Dickins static bool swap_count_continued(struct swap_info_struct *si, 3626570a335bSHugh Dickins pgoff_t offset, unsigned char count) 3627570a335bSHugh Dickins { 3628570a335bSHugh Dickins struct page *head; 3629570a335bSHugh Dickins struct page *page; 3630570a335bSHugh Dickins unsigned char *map; 36312628bd6fSHuang Ying bool ret; 3632570a335bSHugh Dickins 3633570a335bSHugh Dickins head = vmalloc_to_page(si->swap_map + offset); 3634570a335bSHugh Dickins if (page_private(head) != SWP_CONTINUED) { 3635570a335bSHugh Dickins BUG_ON(count & COUNT_CONTINUED); 3636570a335bSHugh Dickins return false; /* need to add count continuation */ 3637570a335bSHugh Dickins } 3638570a335bSHugh Dickins 36392628bd6fSHuang Ying spin_lock(&si->cont_lock); 3640570a335bSHugh Dickins offset &= ~PAGE_MASK; 3641570a335bSHugh Dickins page = list_entry(head->lru.next, struct page, lru); 36429b04c5feSCong Wang map = kmap_atomic(page) + offset; 3643570a335bSHugh Dickins 3644570a335bSHugh Dickins if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ 3645570a335bSHugh Dickins goto init_map; /* jump over SWAP_CONT_MAX checks */ 3646570a335bSHugh Dickins 3647570a335bSHugh Dickins if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ 3648570a335bSHugh Dickins /* 3649570a335bSHugh Dickins * Think of how you add 1 to 999 3650570a335bSHugh Dickins */ 3651570a335bSHugh Dickins while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { 36529b04c5feSCong Wang kunmap_atomic(map); 3653570a335bSHugh Dickins page = list_entry(page->lru.next, struct page, lru); 3654570a335bSHugh Dickins BUG_ON(page == head); 36559b04c5feSCong Wang map = kmap_atomic(page) + offset; 3656570a335bSHugh Dickins } 3657570a335bSHugh Dickins if (*map == SWAP_CONT_MAX) { 36589b04c5feSCong Wang kunmap_atomic(map); 3659570a335bSHugh Dickins page = list_entry(page->lru.next, struct page, lru); 36602628bd6fSHuang Ying if (page == head) { 36612628bd6fSHuang Ying ret = false; /* add count continuation */ 36622628bd6fSHuang Ying goto out; 36632628bd6fSHuang Ying } 36649b04c5feSCong Wang map = kmap_atomic(page) + offset; 3665570a335bSHugh Dickins init_map: *map = 0; /* we didn't zero the page */ 3666570a335bSHugh Dickins } 3667570a335bSHugh Dickins *map += 1; 36689b04c5feSCong Wang kunmap_atomic(map); 3669570a335bSHugh Dickins page = list_entry(page->lru.prev, struct page, lru); 3670570a335bSHugh Dickins while (page != head) { 36719b04c5feSCong Wang map = kmap_atomic(page) + offset; 3672570a335bSHugh Dickins *map = COUNT_CONTINUED; 36739b04c5feSCong Wang kunmap_atomic(map); 3674570a335bSHugh Dickins page = list_entry(page->lru.prev, struct page, lru); 3675570a335bSHugh Dickins } 36762628bd6fSHuang Ying ret = true; /* incremented */ 3677570a335bSHugh Dickins 3678570a335bSHugh Dickins } else { /* decrementing */ 3679570a335bSHugh Dickins /* 3680570a335bSHugh Dickins * Think of how you subtract 1 from 1000 3681570a335bSHugh Dickins */ 3682570a335bSHugh Dickins BUG_ON(count != COUNT_CONTINUED); 3683570a335bSHugh Dickins while (*map == COUNT_CONTINUED) { 36849b04c5feSCong Wang kunmap_atomic(map); 3685570a335bSHugh Dickins page = list_entry(page->lru.next, struct page, lru); 3686570a335bSHugh Dickins BUG_ON(page == head); 36879b04c5feSCong Wang map = kmap_atomic(page) + offset; 3688570a335bSHugh Dickins } 3689570a335bSHugh Dickins BUG_ON(*map == 0); 3690570a335bSHugh Dickins *map -= 1; 3691570a335bSHugh Dickins if (*map == 0) 3692570a335bSHugh Dickins count = 0; 36939b04c5feSCong Wang kunmap_atomic(map); 
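		/*
		 * Walk back towards the head, refilling each lower digit
		 * with SWAP_CONT_MAX (think 1000 - 1 = 999: the borrow
		 * turns every zero digit that was passed into a full one);
		 * count then tells the caller whether the entry still has
		 * any continuation left.
		 */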
3694570a335bSHugh Dickins page = list_entry(page->lru.prev, struct page, lru); 3695570a335bSHugh Dickins while (page != head) { 36969b04c5feSCong Wang map = kmap_atomic(page) + offset; 3697570a335bSHugh Dickins *map = SWAP_CONT_MAX | count; 3698570a335bSHugh Dickins count = COUNT_CONTINUED; 36999b04c5feSCong Wang kunmap_atomic(map); 3700570a335bSHugh Dickins page = list_entry(page->lru.prev, struct page, lru); 3701570a335bSHugh Dickins } 37022628bd6fSHuang Ying ret = count == COUNT_CONTINUED; 3703570a335bSHugh Dickins } 37042628bd6fSHuang Ying out: 37052628bd6fSHuang Ying spin_unlock(&si->cont_lock); 37062628bd6fSHuang Ying return ret; 3707570a335bSHugh Dickins } 3708570a335bSHugh Dickins 3709570a335bSHugh Dickins /* 3710570a335bSHugh Dickins * free_swap_count_continuations - called by swapoff to free all continuation 3711570a335bSHugh Dickins * pages appended to the swap_map, after swap_map is quiesced, before vfree'ing it. 3712570a335bSHugh Dickins */ 3713570a335bSHugh Dickins static void free_swap_count_continuations(struct swap_info_struct *si) 3714570a335bSHugh Dickins { 3715570a335bSHugh Dickins pgoff_t offset; 3716570a335bSHugh Dickins 3717570a335bSHugh Dickins for (offset = 0; offset < si->max; offset += PAGE_SIZE) { 3718570a335bSHugh Dickins struct page *head; 3719570a335bSHugh Dickins head = vmalloc_to_page(si->swap_map + offset); 3720570a335bSHugh Dickins if (page_private(head)) { 37210d576d20SGeliang Tang struct page *page, *next; 37220d576d20SGeliang Tang 37230d576d20SGeliang Tang list_for_each_entry_safe(page, next, &head->lru, lru) { 37240d576d20SGeliang Tang list_del(&page->lru); 3725570a335bSHugh Dickins __free_page(page); 3726570a335bSHugh Dickins } 3727570a335bSHugh Dickins } 3728570a335bSHugh Dickins } 3729570a335bSHugh Dickins } 3730a2468cc9SAaron Lu 37312cf85583STejun Heo #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) 37322cf85583STejun Heo void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node, 37332cf85583STejun Heo gfp_t gfp_mask) 37342cf85583STejun Heo { 37352cf85583STejun Heo struct swap_info_struct *si, *next; 37362cf85583STejun Heo if (!(gfp_mask & __GFP_IO) || !memcg) 37372cf85583STejun Heo return; 37382cf85583STejun Heo 37392cf85583STejun Heo if (!blk_cgroup_congested()) 37402cf85583STejun Heo return; 37412cf85583STejun Heo 37422cf85583STejun Heo /* 37432cf85583STejun Heo * We've already scheduled a throttle, avoid taking the global swap 37442cf85583STejun Heo * lock.
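	 * (current->throttle_queue is set by blkcg_schedule_throttle(), so a
	 * non-NULL value means a throttle is already pending for this task.)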
37452cf85583STejun Heo */ 37462cf85583STejun Heo if (current->throttle_queue) 37472cf85583STejun Heo return; 37482cf85583STejun Heo 37492cf85583STejun Heo spin_lock(&swap_avail_lock); 37502cf85583STejun Heo plist_for_each_entry_safe(si, next, &swap_avail_heads[node], 37512cf85583STejun Heo avail_lists[node]) { 37522cf85583STejun Heo if (si->bdev) { 37532cf85583STejun Heo blkcg_schedule_throttle(bdev_get_queue(si->bdev), 37542cf85583STejun Heo true); 37552cf85583STejun Heo break; 37562cf85583STejun Heo } 37572cf85583STejun Heo } 37582cf85583STejun Heo spin_unlock(&swap_avail_lock); 37592cf85583STejun Heo } 37602cf85583STejun Heo #endif 37612cf85583STejun Heo 3762a2468cc9SAaron Lu static int __init swapfile_init(void) 3763a2468cc9SAaron Lu { 3764a2468cc9SAaron Lu int nid; 3765a2468cc9SAaron Lu 3766a2468cc9SAaron Lu swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head), 3767a2468cc9SAaron Lu GFP_KERNEL); 3768a2468cc9SAaron Lu if (!swap_avail_heads) { 3769a2468cc9SAaron Lu pr_emerg("Not enough memory for swap heads, swap is disabled\n"); 3770a2468cc9SAaron Lu return -ENOMEM; 3771a2468cc9SAaron Lu } 3772a2468cc9SAaron Lu 3773a2468cc9SAaron Lu for_each_node(nid) 3774a2468cc9SAaron Lu plist_head_init(&swap_avail_heads[nid]); 3775a2468cc9SAaron Lu 3776a2468cc9SAaron Lu return 0; 3777a2468cc9SAaron Lu } 3778a2468cc9SAaron Lu subsys_initcall(swapfile_init); 3779
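/*
 * subsys_initcall runs long before userspace can issue swapon(2), so by
 * the time the first swap device could be activated, swap_avail_heads is
 * either initialized or, on allocation failure, swap has been disabled,
 * as the pr_emerg message above notes.
 */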