/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= swap_unplug_io_fn,
};

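/*
 * The single address_space backing every page in the swap cache.
 * Pages here are indexed in the radix tree by their swp_entry_t value
 * (kept in page_private()) rather than by a file offset.
 */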
struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

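/*
 * Rough swap-cache statistics.  INC_CACHE_INFO() is a plain, unlocked
 * increment, so concurrent updates may be lost; the counters are only
 * used for the diagnostics printed by show_swap_cache_info().
 */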
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context that has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so add_to_swap_cache()
		 * never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}

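/*
 * add_to_swap_cache() preloads radix-tree nodes with @gfp_mask before
 * __add_to_swap_cache() takes swapper_space.tree_lock, so the insertion
 * itself never has to allocate memory while the lock is held.
 */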
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.  The caller must
 * hold swapper_space.tree_lock with interrupts disabled.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 if no swap slot could be allocated or the
 * page could not be added to the swap cache.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

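	/* Allocate a swap slot; entry.val == 0 means no slot was available. */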
	entry = get_swap_page();
	if (!entry.val)
		return 0;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * prevents these allocations from dipping into the emergency
	 * reserves.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page onto the free list:
 * the caller holds a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
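	/*
	 * Work through the array in batches of at most PAGEVEC_SIZE pages,
	 * dropping the swap cache for each page before handing the batch
	 * to release_pages().
	 */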
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
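
/*
 * A sketch of how the fault path typically uses these helpers (the real
 * caller is do_swap_page(); details such as the gfp mask may differ):
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 *	if (!page)
 *		... the entry was freed, or we are out of memory ...
 */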

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

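	/*
	 * We may race with other tasks faulting in the same entry and with
	 * the entry itself being freed, so keep retrying until we either
	 * find the page in the swap cache, insert our own, or discover the
	 * entry has become invalid.
	 */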
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can still wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			/*
			 * Another task is already bringing this entry into
			 * the swap cache: retry from the cache lookup.
			 */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the mmap_sem of vma->vm_mm if vma is
 * not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
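	/*
	 * valid_swaphandles() picks the readahead window around @entry:
	 * it sets *offset to the first swap slot to read and returns how
	 * many slots are worth reading, which may be zero.
	 */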
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
398