/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		page_cache_get(page);
		SetPageSwapCache(page);
		set_page_private(page, entry.val);

		spin_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (likely(!error)) {
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		spin_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();

		if (unlikely(error)) {
			set_page_private(page, 0UL);
			ClearPageSwapCache(page);
			page_cache_release(page);
		}
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	BUG_ON(!PageLocked(page));
	BUG_ON(!PageUptodate(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageDirty(page);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * exclusive_swap_page() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && trylock_page(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Look up a swap entry in the swap cache.  A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means either that the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		if (!swap_duplicate(entry))
			break;

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-EEXIST) if there is already a page associated
		 * with this entry in the swap cache: added by a racing
		 * read_swap_cache_async, or add_to_swap or shmem_writepage
		 * re-using the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		set_page_locked(new_page);
		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
		if (likely(!err)) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
		clear_page_locked(new_page);
		swap_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code.  We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area.  This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
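
/*
 * Illustrative sketch only, not part of the original file: roughly how a
 * fault handler such as do_swap_page() in mm/memory.c is expected to use
 * the helpers above - probe the swap cache first, then fall back to
 * readahead-driven swap-in on a miss.  The name example_swapin and the
 * GFP_HIGHUSER_MOVABLE choice are assumptions for illustration; locking,
 * pte revalidation and error handling are deliberately omitted.
 */
#if 0	/* example only, never compiled */
static struct page *example_swapin(swp_entry_t entry,
				   struct vm_area_struct *vma,
				   unsigned long addr)
{
	struct page *page;

	/* Fast path: the page may already be in the swap cache. */
	page = lookup_swap_cache(entry);
	if (page)
		return page;

	/*
	 * Miss: read the target page, plus an aligned cluster of its
	 * neighbours, from the swap area.  Returns NULL if the entry was
	 * freed in the meantime or if page allocation failed.
	 */
	return swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, addr);
}
#endif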