/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

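/*
 * One swapper address_space per possible swap type: swap cache pages are
 * keyed by swp_entry_t.val in the per-type radix tree, and insertions are
 * done under tree_lock, hence the GFP_ATOMIC tree gfp mask.  In this tree,
 * swap_address_space(entry) in <linux/swap.h> selects
 * swapper_spaces[swp_type(entry)].
 */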
struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
	}
};

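/*
 * Lifetime swap cache statistics; INC_CACHE_INFO() bumps one of the
 * counters below, and show_swap_cache_info() reports them.
 */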
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

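/* Sum the page counts of every per-type swapper address_space. */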
unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

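/*
 * Readahead hits consumed by swapin_nr_pages() when sizing the next
 * readahead window; bumped in lookup_swap_cache() on a PageReadahead hit.
 */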
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

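/* Dump the swap cache counters and free/total swap sizes to the kernel log. */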
void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	get_page(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_node_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set SWAP_HAS_CACHE can call
		 * add_to_swap_cache(), so add_to_swap_cache() never
		 * returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		put_page(page);
	}

	return error;
}

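/*
 * Preload the radix tree (which may sleep if gfp_mask allows it), then do
 * the actual insertion under tree_lock in __add_to_swap_cache().
 */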
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list passed to split_huge_page_to_list() if @page is transparent huge
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 on failure (no swap slot, failed memcg swap
 * charge, failed THP split, or failed swap cache insertion).
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry)) {
		swapcache_free(entry);
		return 0;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (is_huge_zero_page(page))
		put_huge_zero_page();
	else
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock keeping page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

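/*
 * Look up @entry in the swap cache, or allocate a new page, insert it into
 * the cache and claim the entry's SWAP_HAS_CACHE slot.  *new_page_allocated
 * is set to true only when a freshly allocated page was added, in which case
 * the caller is responsible for starting the read, as read_swap_cache_async()
 * below does with swap_readpage().
 */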
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_maybe_preload() while we can still wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}

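/*
 * Size the next readahead window from the hits seen since the last fault:
 * roughly hits + 2, rounded up to a power of two, capped at 1 << page_cluster
 * and never shrunk below half of the previous window in one step.  For
 * example, 3 recent hits give 3 + 2 = 5, which rounds up to a window of 8
 * pages if page_cluster allows it.
 */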
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on vma->vm_mm->mmap_sem if vma is not NULL.
 */
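/*
 * Illustrative example: with page_cluster == 3 and a full window of 8 pages,
 * mask == 7, so a fault on offset 21 reads offsets 16..23 (start_offset is
 * only bumped past the swap header when the window starts at offset 0).
 * A typical caller (sketch, loosely modelled on do_swap_page()) does:
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 */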
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}