xref: /openbmc/linux/mm/swap_state.c (revision 8bdc2a19)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* The initial readahead hit count is 4, to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
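
/*
 * Illustrative sketch (not part of the original file): how the per-VMA
 * readahead state above packs into a single long.  Assuming PAGE_SHIFT
 * is 12, SWAP_RA_WIN_SHIFT is 6, so bits 5:0 hold the hit count,
 * bits 11:6 hold the window size, and the remaining high bits hold the
 * page-aligned fault address.  For example:
 *
 *	unsigned long v = SWAP_RA_VAL(0x7f0000001000UL, 8, 3);
 *
 *	SWAP_RA_ADDR(v) == 0x7f0000001000UL
 *	SWAP_RA_WIN(v)  == 8
 *	SWAP_RA_HITS(v) == 3
 */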

#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}
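
/*
 * Editorial note (not part of the original file): a "shadow" here is an
 * XArray value entry (xa_is_value()) left in place of the page by
 * __delete_from_swap_cache().  It encodes workingset eviction
 * information rather than a page pointer, and is consumed by
 * workingset_refault() when the entry is swapped back in (see
 * __read_swap_cache_async() below).
 */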

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}
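
/*
 * Editorial note (not part of the original file): for a THP, nr above is
 * the number of subpages (e.g. 512 for a 2 MiB THP with 4 KiB pages).
 * Every index idx .. idx + nr - 1 in the swap address space is made to
 * point at the same compound page, while page_private(page + i) records
 * entry.val + i so each subpage knows its own swap slot.
 */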

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(&folio->page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is an MADV_FREE folio: its
	 * pte could have the dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because clearing the dirty bit
	 * and the SwapBacked flag is not protected by a lock. Unmap will
	 * not set the dirty bit for such a folio, so folio reclaim will
	 * not write the folio out. This can cause data corruption when
	 * the folio is swapped in later. Always setting the dirty flag
	 * for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_page(&folio->page, entry);
	return false;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* move on to the next swap address space until we pass end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
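
/*
 * Editorial note (not part of the original file): the swap cache is
 * split into one address_space per SWAP_ADDRESS_SPACE_PAGES slots, and
 * swap_address_space() selects the chunk by offset.  Assuming
 * SWAP_ADDRESS_SPACE_SHIFT is 14 (16384 slots, i.e. 64 MiB of 4 KiB
 * pages per address_space), a call such as
 *
 *	clear_shadow_from_swap_cache(type, 20000, 40000);
 *
 * first walks the address_space covering offsets 16384..32767 starting
 * at 20000, then advances curr to 32768 and finishes in the next
 * address_space (offsets 32768..49151), stopping once curr passes end.
 */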

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a put_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Look up a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = pagecache_get_page(mapping, index,
						FGP_ENTRY | FGP_HEAD, 0);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled,
		 * we have to handle the race between putting the
		 * swap entry into the swap cache and marking the swap
		 * slot as SWAP_HAS_CACHE.  That is done later in this
		 * function; otherwise swapoff would be aborted if we
		 * returned NULL here.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(page_folio(page), shadow);

	/* Caller will initiate read into locked page */
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	put_swap_page(page, entry);
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr, bool do_poll,
				   struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll, plug);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
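
/*
 * Illustrative examples (not part of the original file) of the window
 * heuristic above, before the max_pages and prev_win clamps:
 *
 *	hits == 0, offset not adjacent to prev_offset -> 1 page
 *	hits == 0, offset adjacent to prev_offset     -> 2 pages
 *	hits == 1 -> 3, rounded up to 4 pages
 *	hits == 3 -> 5, rounded up to 8 pages
 *
 * The result is then clamped to max_pages and never allowed to drop
 * below half of the previous window (prev_win / 2).
 */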

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}
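
/*
 * Illustrative example (not part of the original file): with a window of
 * 8 pages, mask is 7, so a fault on swap offset 0x1234 reads the aligned
 * cluster of offsets 0x1230 .. 0x1237.  start_offset is bumped to 1 if
 * the cluster would include offset 0, which holds the swap header, and
 * end_offset is clamped to the last valid slot (si->max - 1).
 */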

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}
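
/*
 * Illustrative example (not part of the original file): assuming
 * SWAP_ADDRESS_SPACE_PAGES is 16384 (64 MiB of swap per address_space
 * with 4 KiB pages), a 1 GiB swap device with 262144 slots gets
 * DIV_ROUND_UP(262144, 16384) == 16 address spaces, spreading swap
 * cache lookups and insertions across several xarray locks.
 */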

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}
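
/*
 * Illustrative example (not part of the original file): with win == 8,
 * the readahead window chosen above is placed as follows (in units of
 * virtual page frames, before clamping to the VMA and the enclosing PMD):
 *
 *	fpfn == pfn + 1 (faulting forward):  [fpfn, fpfn + 8)
 *	pfn == fpfn + 1 (faulting backward): [fpfn - 7, fpfn + 1)
 *	otherwise (no obvious direction):    [fpfn - 3, fpfn + 5)
 *
 * i.e. the window either extends ahead of, trails behind, or is roughly
 * centred on the faulting address.
 */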

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead blocks using either cluster-based
 * (i.e. physical, disk-offset based) or VMA-based (i.e. virtual
 * addresses around the faulting address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
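
/*
 * Usage sketch (not part of the original file): the attribute above is
 * registered under the "swap" kobject created from mm_kobj below, so it
 * is expected to appear as /sys/kernel/mm/swap/vma_ra_enabled and can be
 * toggled at run time, e.g.:
 *
 *	echo false > /sys/kernel/mm/swap/vma_ra_enabled
 */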

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif