xref: /openbmc/linux/mm/swap.c (revision cb325ddd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/swap.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  */
7 
8 /*
9  * This file contains the default values for the operation of the
10  * Linux VM subsystem. Fine-tuning documentation can be found in
11  * Documentation/admin-guide/sysctl/vm.rst.
12  * Started 18.12.91
13  * Swap aging added 23.2.95, Stephen Tweedie.
14  * Buffermem limits added 12.3.98, Rik van Riel.
15  */
16 
17 #include <linux/mm.h>
18 #include <linux/sched.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/swap.h>
21 #include <linux/mman.h>
22 #include <linux/pagemap.h>
23 #include <linux/pagevec.h>
24 #include <linux/init.h>
25 #include <linux/export.h>
26 #include <linux/mm_inline.h>
27 #include <linux/percpu_counter.h>
28 #include <linux/memremap.h>
29 #include <linux/percpu.h>
30 #include <linux/cpu.h>
31 #include <linux/notifier.h>
32 #include <linux/backing-dev.h>
33 #include <linux/memcontrol.h>
34 #include <linux/gfp.h>
35 #include <linux/uio.h>
36 #include <linux/hugetlb.h>
37 #include <linux/page_idle.h>
38 #include <linux/local_lock.h>
39 #include <linux/buffer_head.h>
40 
41 #include "internal.h"
42 
43 #define CREATE_TRACE_POINTS
44 #include <trace/events/pagemap.h>
45 
46 /* How many pages do we try to swap or page in/out together? */
47 int page_cluster;
48 
49 /* Protects only lru_rotate.pvec, which requires disabling interrupts */
50 struct lru_rotate {
51 	local_lock_t lock;
52 	struct pagevec pvec;
53 };
54 static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
55 	.lock = INIT_LOCAL_LOCK(lock),
56 };
57 
58 /*
59  * The following struct pagevecs are grouped together because they are protected
60  * by disabling preemption (and interrupts remain enabled).
61  */
62 struct lru_pvecs {
63 	local_lock_t lock;
64 	struct pagevec lru_add;
65 	struct pagevec lru_deactivate_file;
66 	struct pagevec lru_deactivate;
67 	struct pagevec lru_lazyfree;
68 #ifdef CONFIG_SMP
69 	struct pagevec activate_page;
70 #endif
71 };
72 static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
73 	.lock = INIT_LOCAL_LOCK(lock),
74 };
75 
76 /*
77  * This path almost never happens for VM activity - pages are normally
78  * freed via pagevecs.  But it gets used by networking.
79  */
80 static void __page_cache_release(struct page *page)
81 {
82 	if (PageLRU(page)) {
83 		struct folio *folio = page_folio(page);
84 		struct lruvec *lruvec;
85 		unsigned long flags;
86 
87 		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
88 		del_page_from_lru_list(page, lruvec);
89 		__clear_page_lru_flags(page);
90 		unlock_page_lruvec_irqrestore(lruvec, flags);
91 	}
92 	__ClearPageWaiters(page);
93 }
94 
95 static void __put_single_page(struct page *page)
96 {
97 	__page_cache_release(page);
98 	mem_cgroup_uncharge(page_folio(page));
99 	free_unref_page(page, 0);
100 }
101 
102 static void __put_compound_page(struct page *page)
103 {
104 	/*
105 	 * __page_cache_release() is supposed to be called for thp, not for
106 	 * hugetlb. This is because a hugetlb page never has PageLRU set
107 	 * (it is never put on any LRU list) and no memcg routines should
108 	 * be called for hugetlb (it has a separate hugetlb_cgroup).
109 	 */
110 	if (!PageHuge(page))
111 		__page_cache_release(page);
112 	destroy_compound_page(page);
113 }
114 
115 void __put_page(struct page *page)
116 {
117 	if (is_zone_device_page(page)) {
118 		put_dev_pagemap(page->pgmap);
119 
120 		/*
121 		 * The page belongs to the device that created pgmap. Do
122 		 * not return it to page allocator.
123 		 */
124 		return;
125 	}
126 
127 	if (unlikely(PageCompound(page)))
128 		__put_compound_page(page);
129 	else
130 		__put_single_page(page);
131 }
132 EXPORT_SYMBOL(__put_page);
133 
134 /**
135  * put_pages_list() - release a list of pages
136  * @pages: list of pages threaded on page->lru
137  *
138  * Release a list of pages which are strung together via page->lru.
139  */
140 void put_pages_list(struct list_head *pages)
141 {
142 	struct page *page, *next;
143 
144 	list_for_each_entry_safe(page, next, pages, lru) {
145 		if (!put_page_testzero(page)) {
146 			list_del(&page->lru);
147 			continue;
148 		}
149 		if (PageHead(page)) {
150 			list_del(&page->lru);
151 			__put_compound_page(page);
152 			continue;
153 		}
154 		/* Cannot be PageLRU because it's passed to us using the lru */
155 		__ClearPageWaiters(page);
156 	}
157 
158 	free_unref_page_list(pages);
159 	INIT_LIST_HEAD(pages);
160 }
161 EXPORT_SYMBOL(put_pages_list);
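
/*
 * Example usage (illustrative sketch; collect_referenced_pages() is a
 * hypothetical helper that takes a reference on each page it adds to the
 * list): a caller that has gathered referenced pages on a private list
 * threaded through page->lru can drop all of those references in one batch.
 *
 *	LIST_HEAD(pages);
 *
 *	collect_referenced_pages(&pages);
 *	put_pages_list(&pages);		refs dropped, list reinitialised
 */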
162 
163 /*
164  * get_kernel_pages() - pin kernel pages in memory
165  * @kiov:	An array of struct kvec structures
166  * @nr_segs:	number of segments to pin
167  * @write:	pinning for read/write, currently ignored
168  * @pages:	array that receives pointers to the pages pinned.
169  *		Should be at least nr_segs long.
170  *
171  * Returns the number of pages pinned. This may be fewer than the number
172  * requested if a segment is not exactly one page long. If @nr_segs is 0
173  * or negative, returns 0. Each page returned must be released with a
174  * put_page() call when the caller is finished with it.
175  */
176 int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
177 		struct page **pages)
178 {
179 	int seg;
180 
181 	for (seg = 0; seg < nr_segs; seg++) {
182 		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
183 			return seg;
184 
185 		pages[seg] = kmap_to_page(kiov[seg].iov_base);
186 		get_page(pages[seg]);
187 	}
188 
189 	return seg;
190 }
191 EXPORT_SYMBOL_GPL(get_kernel_pages);
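
/*
 * Example usage (illustrative sketch): pinning the pages backing two
 * page-sized, page-aligned kernel buffers.  buf0 and buf1 are assumed to be
 * existing kernel mappings of exactly PAGE_SIZE bytes each.
 *
 *	struct kvec kiov[2] = {
 *		{ .iov_base = buf0, .iov_len = PAGE_SIZE },
 *		{ .iov_base = buf1, .iov_len = PAGE_SIZE },
 *	};
 *	struct page *pages[2];
 *	int pinned = get_kernel_pages(kiov, 2, 0, pages);
 *
 *	... use pages[0..pinned-1] ...
 *	while (pinned--)
 *		put_page(pages[pinned]);
 */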
192 
193 static void pagevec_lru_move_fn(struct pagevec *pvec,
194 	void (*move_fn)(struct page *page, struct lruvec *lruvec))
195 {
196 	int i;
197 	struct lruvec *lruvec = NULL;
198 	unsigned long flags = 0;
199 
200 	for (i = 0; i < pagevec_count(pvec); i++) {
201 		struct page *page = pvec->pages[i];
202 		struct folio *folio = page_folio(page);
203 
204 		/* block memcg migration while the page moves between LRUs */
205 		if (!TestClearPageLRU(page))
206 			continue;
207 
208 		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
209 		(*move_fn)(page, lruvec);
210 
211 		SetPageLRU(page);
212 	}
213 	if (lruvec)
214 		unlock_page_lruvec_irqrestore(lruvec, flags);
215 	release_pages(pvec->pages, pvec->nr);
216 	pagevec_reinit(pvec);
217 }
218 
219 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
220 {
221 	struct folio *folio = page_folio(page);
222 
223 	if (!folio_test_unevictable(folio)) {
224 		lruvec_del_folio(lruvec, folio);
225 		folio_clear_active(folio);
226 		lruvec_add_folio_tail(lruvec, folio);
227 		__count_vm_events(PGROTATED, folio_nr_pages(folio));
228 	}
229 }
230 
231 /* return true if the pagevec needs to be drained */
232 static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
233 {
234 	bool ret = false;
235 
236 	if (!pagevec_add(pvec, page) || PageCompound(page) ||
237 			lru_cache_disabled())
238 		ret = true;
239 
240 	return ret;
241 }
242 
243 /*
244  * Writeback is about to end against a folio which has been marked for
245  * immediate reclaim.  If it still appears to be reclaimable, move it
246  * to the tail of the inactive list.
247  *
248  * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
249  */
250 void folio_rotate_reclaimable(struct folio *folio)
251 {
252 	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
253 	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
254 		struct pagevec *pvec;
255 		unsigned long flags;
256 
257 		folio_get(folio);
258 		local_lock_irqsave(&lru_rotate.lock, flags);
259 		pvec = this_cpu_ptr(&lru_rotate.pvec);
260 		if (pagevec_add_and_need_flush(pvec, &folio->page))
261 			pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
262 		local_unlock_irqrestore(&lru_rotate.lock, flags);
263 	}
264 }
265 
266 void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
267 {
268 	do {
269 		unsigned long lrusize;
270 
271 		/*
272 		 * Holding lruvec->lru_lock is safe here, since we are either
273 		 * 1) holding a pinned lruvec during reclaim, or
274 		 * 2) coming from a pre-LRU page during refault (which also holds
275 		 *    the rcu lock, so would be safe even if the page was on the
276 		 *    LRU and could move simultaneously to a new lruvec).
277 		 */
278 		spin_lock_irq(&lruvec->lru_lock);
279 		/* Record cost event */
280 		if (file)
281 			lruvec->file_cost += nr_pages;
282 		else
283 			lruvec->anon_cost += nr_pages;
284 
285 		/*
286 		 * Decay previous events
287 		 *
288 		 * Because workloads change over time (and to avoid
289 		 * overflow) we keep these statistics as a floating
290 		 * average, which ends up weighing recent refaults
291 		 * more than old ones.
292 		 */
293 		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
294 			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
295 			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
296 			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);
297 
298 		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
299 			lruvec->file_cost /= 2;
300 			lruvec->anon_cost /= 2;
301 		}
302 		spin_unlock_irq(&lruvec->lru_lock);
303 	} while ((lruvec = parent_lruvec(lruvec)));
304 }
305 
306 void lru_note_cost_folio(struct folio *folio)
307 {
308 	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
309 			folio_nr_pages(folio));
310 }
311 
312 static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
313 {
314 	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
315 		long nr_pages = folio_nr_pages(folio);
316 
317 		lruvec_del_folio(lruvec, folio);
318 		folio_set_active(folio);
319 		lruvec_add_folio(lruvec, folio);
320 		trace_mm_lru_activate(folio);
321 
322 		__count_vm_events(PGACTIVATE, nr_pages);
323 		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
324 				     nr_pages);
325 	}
326 }
327 
328 #ifdef CONFIG_SMP
329 static void __activate_page(struct page *page, struct lruvec *lruvec)
330 {
331 	return __folio_activate(page_folio(page), lruvec);
332 }
333 
334 static void activate_page_drain(int cpu)
335 {
336 	struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
337 
338 	if (pagevec_count(pvec))
339 		pagevec_lru_move_fn(pvec, __activate_page);
340 }
341 
342 static bool need_activate_page_drain(int cpu)
343 {
344 	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
345 }
346 
347 static void folio_activate(struct folio *folio)
348 {
349 	if (folio_test_lru(folio) && !folio_test_active(folio) &&
350 	    !folio_test_unevictable(folio)) {
351 		struct pagevec *pvec;
352 
353 		folio_get(folio);
354 		local_lock(&lru_pvecs.lock);
355 		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
356 		if (pagevec_add_and_need_flush(pvec, &folio->page))
357 			pagevec_lru_move_fn(pvec, __activate_page);
358 		local_unlock(&lru_pvecs.lock);
359 	}
360 }
361 
362 #else
363 static inline void activate_page_drain(int cpu)
364 {
365 }
366 
367 static void folio_activate(struct folio *folio)
368 {
369 	struct lruvec *lruvec;
370 
371 	if (folio_test_clear_lru(folio)) {
372 		lruvec = folio_lruvec_lock_irq(folio);
373 		__folio_activate(folio, lruvec);
374 		unlock_page_lruvec_irq(lruvec);
375 		folio_set_lru(folio);
376 	}
377 }
378 #endif
379 
380 static void __lru_cache_activate_folio(struct folio *folio)
381 {
382 	struct pagevec *pvec;
383 	int i;
384 
385 	local_lock(&lru_pvecs.lock);
386 	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
387 
388 	/*
389 	 * Search backwards on the optimistic assumption that the page being
390 	 * activated has just been added to this pagevec. Note that only
391 	 * the local pagevec is examined as a !PageLRU page could be in the
392 	 * process of being released, reclaimed, migrated or on a remote
393 	 * pagevec that is currently being drained. Furthermore, marking
394 	 * a remote pagevec's page PageActive potentially hits a race where
395 	 * a page is marked PageActive just after it is added to the inactive
396 	 * list causing accounting errors and BUG_ON checks to trigger.
397 	 */
398 	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
399 		struct page *pagevec_page = pvec->pages[i];
400 
401 		if (pagevec_page == &folio->page) {
402 			folio_set_active(folio);
403 			break;
404 		}
405 	}
406 
407 	local_unlock(&lru_pvecs.lock);
408 }
409 
410 /*
411  * Mark a page as having seen activity.
412  *
413  * inactive,unreferenced	->	inactive,referenced
414  * inactive,referenced		->	active,unreferenced
415  * active,unreferenced		->	active,referenced
416  *
417  * When a newly allocated page is not yet visible (so non-atomic ops are safe),
418  * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
419  */
420 void folio_mark_accessed(struct folio *folio)
421 {
422 	if (!folio_test_referenced(folio)) {
423 		folio_set_referenced(folio);
424 	} else if (folio_test_unevictable(folio)) {
425 		/*
426 		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
427 		 * this list is never rotated or maintained, so marking an
428 		 * unevictable page accessed has no effect.
429 		 */
430 	} else if (!folio_test_active(folio)) {
431 		/*
432 		 * If the page is on the LRU, queue it for activation via
433 		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
434 		 * pagevec, mark it active and it'll be moved to the active
435 		 * LRU on the next drain.
436 		 */
437 		if (folio_test_lru(folio))
438 			folio_activate(folio);
439 		else
440 			__lru_cache_activate_folio(folio);
441 		folio_clear_referenced(folio);
442 		workingset_activation(folio);
443 	}
444 	if (folio_test_idle(folio))
445 		folio_clear_idle(folio);
446 }
447 EXPORT_SYMBOL(folio_mark_accessed);
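
/*
 * Example usage (illustrative sketch; lookup_cached_folio() and
 * consume_folio_data() are hypothetical helpers): a read path telling the VM
 * that a cached folio was actually used, so repeated accesses will
 * eventually promote it to the active list.
 *
 *	struct folio *folio = lookup_cached_folio(mapping, index);
 *
 *	if (folio) {
 *		consume_folio_data(folio);
 *		folio_mark_accessed(folio);
 *		folio_put(folio);
 *	}
 */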
448 
449 /**
450  * folio_add_lru - Add a folio to an LRU list.
451  * @folio: The folio to be added to the LRU.
452  *
453  * Queue the folio for addition to the LRU. The decision on whether
454  * to add the folio to the [in]active [file|anon] list is deferred until the
455  * pagevec is drained. This gives a chance for the caller of folio_add_lru()
456  * to have the folio added to the active list using folio_mark_accessed().
457  */
458 void folio_add_lru(struct folio *folio)
459 {
460 	struct pagevec *pvec;
461 
462 	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
463 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
464 
465 	folio_get(folio);
466 	local_lock(&lru_pvecs.lock);
467 	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
468 	if (pagevec_add_and_need_flush(pvec, &folio->page))
469 		__pagevec_lru_add(pvec);
470 	local_unlock(&lru_pvecs.lock);
471 }
472 EXPORT_SYMBOL(folio_add_lru);
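
/*
 * Example usage (illustrative sketch): queueing a freshly allocated folio
 * for the LRU.  The charging and mapping steps a real caller performs first
 * are elided; the actual LRU placement is deferred until the pagevec drains.
 *
 *	struct folio *folio = folio_alloc(GFP_HIGHUSER_MOVABLE, 0);
 *
 *	if (folio) {
 *		... charge the folio and install it where it belongs ...
 *		folio_add_lru(folio);
 *	}
 */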
473 
474 /**
475  * lru_cache_add_inactive_or_unevictable
476  * @page:  the page to be added to LRU
477  * @vma:   vma in which page is mapped for determining reclaimability
478  *
479  * Place @page on the inactive or unevictable LRU list, depending on its
480  * evictability.
481  */
482 void lru_cache_add_inactive_or_unevictable(struct page *page,
483 					 struct vm_area_struct *vma)
484 {
485 	bool unevictable;
486 
487 	VM_BUG_ON_PAGE(PageLRU(page), page);
488 
489 	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
490 	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
491 		int nr_pages = thp_nr_pages(page);
492 		/*
493 		 * We use the irq-unsafe __mod_zone_page_state because this
494 		 * counter is not modified from interrupt context, and the pte
495 		 * lock is held (a spinlock), which implies preemption is disabled.
496 		 */
497 		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
498 		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
499 	}
500 	lru_cache_add(page);
501 }
502 
503 /*
504  * If the page cannot be invalidated, it is moved to the
505  * inactive list to speed up its reclaim.  It is moved to the
506  * head of the list, rather than the tail, to give the flusher
507  * threads some time to write it out, as this is much more
508  * effective than the single-page writeout from reclaim.
509  *
510  * If the page isn't mapped but is dirty or under writeback, it can be
511  * reclaimed ASAP with the help of PG_reclaim.
512  *
513  * 1. active, mapped page -> none
514  * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
515  * 3. inactive, mapped page -> none
516  * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
517  * 5. inactive, clean -> inactive, tail
518  * 6. Others -> none
519  *
520  * In case 4, the page is moved to the head of the inactive list because the
521  * VM expects flusher threads to write it out, as this is much more effective
522  * than the single-page writeout from reclaim.
523  */
524 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
525 {
526 	bool active = PageActive(page);
527 	int nr_pages = thp_nr_pages(page);
528 
529 	if (PageUnevictable(page))
530 		return;
531 
532 	/* Some processes are using the page */
533 	if (page_mapped(page))
534 		return;
535 
536 	del_page_from_lru_list(page, lruvec);
537 	ClearPageActive(page);
538 	ClearPageReferenced(page);
539 
540 	if (PageWriteback(page) || PageDirty(page)) {
541 		/*
542 		 * Setting PG_reclaim can race with end_page_writeback(),
543 		 * which can confuse readahead.  But the race window is
544 		 * _really_ small and it's a non-critical problem.
545 		 */
546 		add_page_to_lru_list(page, lruvec);
547 		SetPageReclaim(page);
548 	} else {
549 		/*
550 		 * The page's writeback ended while it sat in the pagevec,
551 		 * so move it to the tail of the inactive list.
552 		 */
553 		add_page_to_lru_list_tail(page, lruvec);
554 		__count_vm_events(PGROTATED, nr_pages);
555 	}
556 
557 	if (active) {
558 		__count_vm_events(PGDEACTIVATE, nr_pages);
559 		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
560 				     nr_pages);
561 	}
562 }
563 
564 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
565 {
566 	if (PageActive(page) && !PageUnevictable(page)) {
567 		int nr_pages = thp_nr_pages(page);
568 
569 		del_page_from_lru_list(page, lruvec);
570 		ClearPageActive(page);
571 		ClearPageReferenced(page);
572 		add_page_to_lru_list(page, lruvec);
573 
574 		__count_vm_events(PGDEACTIVATE, nr_pages);
575 		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
576 				     nr_pages);
577 	}
578 }
579 
580 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
581 {
582 	if (PageAnon(page) && PageSwapBacked(page) &&
583 	    !PageSwapCache(page) && !PageUnevictable(page)) {
584 		int nr_pages = thp_nr_pages(page);
585 
586 		del_page_from_lru_list(page, lruvec);
587 		ClearPageActive(page);
588 		ClearPageReferenced(page);
589 		/*
590 		 * Lazyfree pages are clean anonymous pages.  They have the
591 		 * PG_swapbacked flag cleared, to distinguish them from normal
592 		 * anonymous pages.
593 		 */
594 		ClearPageSwapBacked(page);
595 		add_page_to_lru_list(page, lruvec);
596 
597 		__count_vm_events(PGLAZYFREE, nr_pages);
598 		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
599 				     nr_pages);
600 	}
601 }
602 
603 /*
604  * Drain pages out of the cpu's pagevecs.
605  * Either "cpu" is the current CPU, and preemption has already been
606  * disabled; or "cpu" is being hot-unplugged, and is already dead.
607  */
608 void lru_add_drain_cpu(int cpu)
609 {
610 	struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);
611 
612 	if (pagevec_count(pvec))
613 		__pagevec_lru_add(pvec);
614 
615 	pvec = &per_cpu(lru_rotate.pvec, cpu);
616 	/* Disabling interrupts below acts as a compiler barrier. */
617 	if (data_race(pagevec_count(pvec))) {
618 		unsigned long flags;
619 
620 		/* No harm done if a racing interrupt already did this */
621 		local_lock_irqsave(&lru_rotate.lock, flags);
622 		pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
623 		local_unlock_irqrestore(&lru_rotate.lock, flags);
624 	}
625 
626 	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
627 	if (pagevec_count(pvec))
628 		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
629 
630 	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
631 	if (pagevec_count(pvec))
632 		pagevec_lru_move_fn(pvec, lru_deactivate_fn);
633 
634 	pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
635 	if (pagevec_count(pvec))
636 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
637 
638 	activate_page_drain(cpu);
639 }
640 
641 /**
642  * deactivate_file_page - forcefully deactivate a file page
643  * @page: page to deactivate
644  *
645  * This function hints the VM that @page is a good reclaim candidate,
646  * for example if its invalidation fails due to the page being dirty
647  * or under writeback.
648  */
649 void deactivate_file_page(struct page *page)
650 {
651 	/*
652 	 * In a workload with many unevictable pages (such as an mprotect-heavy
653 	 * one), deactivating unevictable pages to accelerate reclaim is pointless.
654 	 */
655 	if (PageUnevictable(page))
656 		return;
657 
658 	if (likely(get_page_unless_zero(page))) {
659 		struct pagevec *pvec;
660 
661 		local_lock(&lru_pvecs.lock);
662 		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
663 
664 		if (pagevec_add_and_need_flush(pvec, page))
665 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
666 		local_unlock(&lru_pvecs.lock);
667 	}
668 }
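
/*
 * Example usage (illustrative sketch; try_to_drop_page() is a hypothetical
 * helper): an invalidation path that failed to drop a page, e.g. because it
 * is still dirty or under writeback, hints that the page remains a good
 * reclaim candidate.
 *
 *	if (!try_to_drop_page(page))
 *		deactivate_file_page(page);
 */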
669 
670 /*
671  * deactivate_page - deactivate a page
672  * @page: page to deactivate
673  *
674  * deactivate_page() moves @page to the inactive list if @page was on the active
675  * list and was not an unevictable page.  This is done to accelerate the reclaim
676  * of @page.
677  */
678 void deactivate_page(struct page *page)
679 {
680 	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
681 		struct pagevec *pvec;
682 
683 		local_lock(&lru_pvecs.lock);
684 		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
685 		get_page(page);
686 		if (pagevec_add_and_need_flush(pvec, page))
687 			pagevec_lru_move_fn(pvec, lru_deactivate_fn);
688 		local_unlock(&lru_pvecs.lock);
689 	}
690 }
691 
692 /**
693  * mark_page_lazyfree - make an anon page lazyfree
694  * @page: page to deactivate
695  *
696  * mark_page_lazyfree() moves @page to the inactive file list.
697  * This is done to accelerate the reclaim of @page.
698  */
699 void mark_page_lazyfree(struct page *page)
700 {
701 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
702 	    !PageSwapCache(page) && !PageUnevictable(page)) {
703 		struct pagevec *pvec;
704 
705 		local_lock(&lru_pvecs.lock);
706 		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
707 		get_page(page);
708 		if (pagevec_add_and_need_flush(pvec, page))
709 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
710 		local_unlock(&lru_pvecs.lock);
711 	}
712 }
713 
714 void lru_add_drain(void)
715 {
716 	local_lock(&lru_pvecs.lock);
717 	lru_add_drain_cpu(smp_processor_id());
718 	local_unlock(&lru_pvecs.lock);
719 }
720 
721 /*
722  * In the SMP case this is called from per-cpu workqueue context, so
723  * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same CPU.
724  * It isn't a problem in the !SMP case either, since there is only one
725  * CPU and the locks disable preemption.
726  */
727 static void lru_add_and_bh_lrus_drain(void)
728 {
729 	local_lock(&lru_pvecs.lock);
730 	lru_add_drain_cpu(smp_processor_id());
731 	local_unlock(&lru_pvecs.lock);
732 	invalidate_bh_lrus_cpu();
733 }
734 
735 void lru_add_drain_cpu_zone(struct zone *zone)
736 {
737 	local_lock(&lru_pvecs.lock);
738 	lru_add_drain_cpu(smp_processor_id());
739 	drain_local_pages(zone);
740 	local_unlock(&lru_pvecs.lock);
741 }
742 
743 #ifdef CONFIG_SMP
744 
745 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
746 
747 static void lru_add_drain_per_cpu(struct work_struct *dummy)
748 {
749 	lru_add_and_bh_lrus_drain();
750 }
751 
752 /*
753  * Doesn't need any cpu hotplug locking because we rely on per-cpu
754  * kworkers being shut down before our page_alloc_cpu_dead callback is
755  * executed on the offlined cpu.
756  * Calling this function with cpu hotplug locks held can actually lead
757  * to obscure indirect dependencies via WQ context.
758  */
759 inline void __lru_add_drain_all(bool force_all_cpus)
760 {
761 	/*
762 	 * lru_drain_gen - Global pages generation number
763 	 *
764 	 * (A) Definition: global lru_drain_gen = x implies that all generations
765 	 *     0 < n <= x are already *scheduled* for draining.
766 	 *
767 	 * This is an optimization for the highly-contended use case where a
768 	 * user space workload keeps constantly generating a flow of pages for
769 	 * each CPU.
770 	 */
771 	static unsigned int lru_drain_gen;
772 	static struct cpumask has_work;
773 	static DEFINE_MUTEX(lock);
774 	unsigned cpu, this_gen;
775 
776 	/*
777 	 * Make sure nobody triggers this path before mm_percpu_wq is fully
778 	 * initialized.
779 	 */
780 	if (WARN_ON(!mm_percpu_wq))
781 		return;
782 
783 	/*
784 	 * Guarantee that pagevec counter stores visible to this CPU are
785 	 * visible to other CPUs before loading the current drain generation.
786 	 */
787 	smp_mb();
788 
789 	/*
790 	 * (B) Locally cache global LRU draining generation number
791 	 *
792 	 * The read barrier ensures that the counter is loaded before the mutex
793 	 * is taken. It pairs with smp_mb() inside the mutex critical section
794 	 * at (D).
795 	 */
796 	this_gen = smp_load_acquire(&lru_drain_gen);
797 
798 	mutex_lock(&lock);
799 
800 	/*
801 	 * (C) Exit the draining operation if a newer generation, from another
802 	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
803 	 */
804 	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
805 		goto done;
806 
807 	/*
808 	 * (D) Increment global generation number
809 	 *
810 	 * Pairs with smp_load_acquire() at (B), outside of the critical
811 	 * section. Use a full memory barrier to guarantee that the new global
812 	 * drain generation number is stored before loading pagevec counters.
813 	 *
814 	 * This pairing must be done here, before the for_each_online_cpu loop
815 	 * below which drains the page vectors.
816 	 *
817 	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
818 	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
819 	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
820 	 * along, adds some pages to its per-cpu vectors, then calls
821 	 * lru_add_drain_all().
822 	 *
823 	 * If the paired barrier is done at any later step, e.g. after the
824 	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
825 	 * added pages.
826 	 */
827 	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
828 	smp_mb();
829 
830 	cpumask_clear(&has_work);
831 	for_each_online_cpu(cpu) {
832 		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
833 
834 		if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
835 		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
836 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
837 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
838 		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
839 		    need_activate_page_drain(cpu) ||
840 		    has_bh_in_lru(cpu, NULL)) {
841 			INIT_WORK(work, lru_add_drain_per_cpu);
842 			queue_work_on(cpu, mm_percpu_wq, work);
843 			__cpumask_set_cpu(cpu, &has_work);
844 		}
845 	}
846 
847 	for_each_cpu(cpu, &has_work)
848 		flush_work(&per_cpu(lru_add_drain_work, cpu));
849 
850 done:
851 	mutex_unlock(&lock);
852 }
853 
854 void lru_add_drain_all(void)
855 {
856 	__lru_add_drain_all(false);
857 }
858 #else
859 void lru_add_drain_all(void)
860 {
861 	lru_add_drain();
862 }
863 #endif /* CONFIG_SMP */
864 
865 atomic_t lru_disable_count = ATOMIC_INIT(0);
866 
867 /*
868  * lru_cache_disable() needs to be called before we start compiling
869  * a list of pages to be migrated using isolate_lru_page().
870  * It drains the pages in the LRU caches and then disables them on all
871  * cpus until lru_cache_enable() is called.
872  *
873  * Must be paired with a call to lru_cache_enable().
874  */
875 void lru_cache_disable(void)
876 {
877 	atomic_inc(&lru_disable_count);
878 	/*
879 	 * Readers of lru_disable_count are protected by either disabling
880 	 * preemption or rcu_read_lock:
881 	 *
882 	 * preempt_disable, local_irq_disable  [bh_lru_lock()]
883 	 * rcu_read_lock		       [rt_spin_lock CONFIG_PREEMPT_RT]
884 	 * preempt_disable		       [local_lock !CONFIG_PREEMPT_RT]
885 	 *
886 	 * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
887 	 * preempt_disable() regions of code. So any CPU which sees
888 	 * lru_disable_count = 0 will have exited the critical
889 	 * section when synchronize_rcu() returns.
890 	 */
891 	synchronize_rcu();
892 #ifdef CONFIG_SMP
893 	__lru_add_drain_all(true);
894 #else
895 	lru_add_and_bh_lrus_drain();
896 #endif
897 }
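
/*
 * Example usage (illustrative sketch; build_migration_list(), which would
 * use isolate_lru_page() internally, is a hypothetical helper): bracketing
 * LRU isolation for migration so that pages cannot linger in per-cpu
 * pagevecs while the list is built.
 *
 *	LIST_HEAD(migratepages);
 *
 *	lru_cache_disable();
 *	build_migration_list(&migratepages);
 *	... migrate the pages ...
 *	lru_cache_enable();
 */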
898 
899 /**
900  * release_pages - batched put_page()
901  * @pages: array of pages to release
902  * @nr: number of pages
903  *
904  * Decrement the reference count on all the pages in @pages.  If it
905  * falls to zero, remove the page from the LRU and free it.
906  */
907 void release_pages(struct page **pages, int nr)
908 {
909 	int i;
910 	LIST_HEAD(pages_to_free);
911 	struct lruvec *lruvec = NULL;
912 	unsigned long flags = 0;
913 	unsigned int lock_batch;
914 
915 	for (i = 0; i < nr; i++) {
916 		struct page *page = pages[i];
917 		struct folio *folio = page_folio(page);
918 
919 		/*
920 		 * Make sure the IRQ-safe lock-holding time does not get
921 		 * excessive with a continuous string of pages from the
922 		 * same lruvec. The lock is held only if lruvec != NULL.
923 		 */
924 		if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
925 			unlock_page_lruvec_irqrestore(lruvec, flags);
926 			lruvec = NULL;
927 		}
928 
929 		page = &folio->page;
930 		if (is_huge_zero_page(page))
931 			continue;
932 
933 		if (is_zone_device_page(page)) {
934 			if (lruvec) {
935 				unlock_page_lruvec_irqrestore(lruvec, flags);
936 				lruvec = NULL;
937 			}
938 			/*
939 			 * ZONE_DEVICE pages that return 'false' from
940 			 * page_is_devmap_managed() do not require special
941 			 * processing, and instead, expect a call to
942 			 * put_page_testzero().
943 			 */
944 			if (page_is_devmap_managed(page)) {
945 				put_devmap_managed_page(page);
946 				continue;
947 			}
948 			if (put_page_testzero(page))
949 				put_dev_pagemap(page->pgmap);
950 			continue;
951 		}
952 
953 		if (!put_page_testzero(page))
954 			continue;
955 
956 		if (PageCompound(page)) {
957 			if (lruvec) {
958 				unlock_page_lruvec_irqrestore(lruvec, flags);
959 				lruvec = NULL;
960 			}
961 			__put_compound_page(page);
962 			continue;
963 		}
964 
965 		if (PageLRU(page)) {
966 			struct lruvec *prev_lruvec = lruvec;
967 
968 			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
969 									&flags);
970 			if (prev_lruvec != lruvec)
971 				lock_batch = 0;
972 
973 			del_page_from_lru_list(page, lruvec);
974 			__clear_page_lru_flags(page);
975 		}
976 
977 		__ClearPageWaiters(page);
978 
979 		list_add(&page->lru, &pages_to_free);
980 	}
981 	if (lruvec)
982 		unlock_page_lruvec_irqrestore(lruvec, flags);
983 
984 	mem_cgroup_uncharge_list(&pages_to_free);
985 	free_unref_page_list(&pages_to_free);
986 }
987 EXPORT_SYMBOL(release_pages);
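
/*
 * Example usage (illustrative sketch; fill_page_array() is a hypothetical
 * helper that takes a reference on each page it stores): dropping the
 * references held on a small batch of pages in one call, e.g. pages
 * previously pinned with get_kernel_pages().
 *
 *	struct page *pages[16];
 *	int nr = fill_page_array(pages, ARRAY_SIZE(pages));
 *
 *	... use the pages ...
 *	release_pages(pages, nr);
 */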
988 
989 /*
990  * The pages which we're about to release may be in the deferred lru-addition
991  * queues.  That would prevent them from really being freed right now.  That's
992  * OK from a correctness point of view but is inefficient - those pages may be
993  * cache-warm and we want to give them back to the page allocator ASAP.
994  *
995  * So __pagevec_release() will drain those queues here.
996  * __pagevec_lru_add() calls release_pages() directly to avoid
997  * mutual recursion.
998  */
999 void __pagevec_release(struct pagevec *pvec)
1000 {
1001 	if (!pvec->percpu_pvec_drained) {
1002 		lru_add_drain();
1003 		pvec->percpu_pvec_drained = true;
1004 	}
1005 	release_pages(pvec->pages, pagevec_count(pvec));
1006 	pagevec_reinit(pvec);
1007 }
1008 EXPORT_SYMBOL(__pagevec_release);
1009 
1010 static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
1011 {
1012 	int was_unevictable = folio_test_clear_unevictable(folio);
1013 	long nr_pages = folio_nr_pages(folio);
1014 
1015 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
1016 
1017 	/*
1018 	 * A folio becomes evictable in two ways:
1019 	 * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
1020 	 * 2) Before acquiring LRU lock to put the folio on the correct LRU
1021 	 *    and then
1022 	 *   a) do PageLRU check with lock [check_move_unevictable_pages]
1023 	 *   b) do PageLRU check before lock [clear_page_mlock]
1024 	 *
1025 	 * (1) & (2a) are ok as the LRU lock will serialize them. For (2b), we
1026 	 * need the following strict ordering:
1027 	 *
1028 	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
1029 	 *
1030 	 * folio_set_lru()			folio_test_clear_mlocked()
1031 	 * smp_mb() // explicit ordering	// above provides strict
1032 	 *					// ordering
1033 	 * folio_test_mlocked()			folio_test_lru()
1034 	 *
1035 	 *
1036 	 * if '#1' does not observe setting of PG_lru by '#0' and
1037 	 * fails isolation, the explicit barrier will make sure that
1038 	 * folio_evictable check will put the folio on the correct
1039 	 * LRU. Without smp_mb(), folio_set_lru() can be reordered
1040 	 * after folio_test_mlocked() check and can make '#1' fail the
1041 	 * isolation of the folio whose mlocked bit is cleared (#0 is
1042 	 * also looking at the same folio) and the evictable folio will
1043 	 * be stranded on an unevictable LRU.
1044 	 */
1045 	folio_set_lru(folio);
1046 	smp_mb__after_atomic();
1047 
1048 	if (folio_evictable(folio)) {
1049 		if (was_unevictable)
1050 			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
1051 	} else {
1052 		folio_clear_active(folio);
1053 		folio_set_unevictable(folio);
1054 		if (!was_unevictable)
1055 			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
1056 	}
1057 
1058 	lruvec_add_folio(lruvec, folio);
1059 	trace_mm_lru_insertion(folio);
1060 }
1061 
1062 /*
1063  * Add the passed pages to the LRU, then drop the caller's refcount
1064  * on them.  Reinitialises the caller's pagevec.
1065  */
1066 void __pagevec_lru_add(struct pagevec *pvec)
1067 {
1068 	int i;
1069 	struct lruvec *lruvec = NULL;
1070 	unsigned long flags = 0;
1071 
1072 	for (i = 0; i < pagevec_count(pvec); i++) {
1073 		struct folio *folio = page_folio(pvec->pages[i]);
1074 
1075 		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
1076 		__pagevec_lru_add_fn(folio, lruvec);
1077 	}
1078 	if (lruvec)
1079 		unlock_page_lruvec_irqrestore(lruvec, flags);
1080 	release_pages(pvec->pages, pvec->nr);
1081 	pagevec_reinit(pvec);
1082 }
1083 
1084 /**
1085  * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
1086  * @fbatch: The batch to prune
1087  *
1088  * find_get_entries() fills a batch with both folios and shadow/swap/DAX
1089  * entries.  This function prunes all the non-folio entries from @fbatch
1090  * without leaving holes, so that it can be passed on to folio-only batch
1091  * operations.
1092  */
1093 void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
1094 {
1095 	unsigned int i, j;
1096 
1097 	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
1098 		struct folio *folio = fbatch->folios[i];
1099 		if (!xa_is_value(folio))
1100 			fbatch->folios[j++] = folio;
1101 	}
1102 	fbatch->nr = j;
1103 }
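
/*
 * Example usage (illustrative sketch; the call that fills @fbatch, such as
 * find_get_entries(), is elided and process_folio() is hypothetical):
 * pruning value entries from a batch before iterating over real folios only.
 *
 *	struct folio_batch fbatch;
 *	unsigned int i;
 *
 *	folio_batch_init(&fbatch);
 *	... fill fbatch with folios and shadow/swap/DAX entries ...
 *	folio_batch_remove_exceptionals(&fbatch);
 *	for (i = 0; i < folio_batch_count(&fbatch); i++)
 *		process_folio(fbatch.folios[i]);
 */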
1104 
1105 /**
1106  * pagevec_lookup_range - gang pagecache lookup
1107  * @pvec:	Where the resulting pages are placed
1108  * @mapping:	The address_space to search
1109  * @start:	The starting page index
1110  * @end:	The final page index
1111  *
1112  * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
1113  * pages in the mapping starting from index @start and up to index @end
1114  * (inclusive).  The pages are placed in @pvec.  pagevec_lookup() takes a
1115  * reference against the pages in @pvec.
1116  *
1117  * The search returns a group of mapping-contiguous pages with ascending
1118  * indexes.  There may be holes in the indices due to not-present pages. We
1119  * also update @start to index the next page for the traversal.
1120  *
1121  * pagevec_lookup_range() returns the number of pages which were found. If this
1122  * number is smaller than PAGEVEC_SIZE, the end of specified range has been
1123  * reached.
1124  */
1125 unsigned pagevec_lookup_range(struct pagevec *pvec,
1126 		struct address_space *mapping, pgoff_t *start, pgoff_t end)
1127 {
1128 	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
1129 					pvec->pages);
1130 	return pagevec_count(pvec);
1131 }
1132 EXPORT_SYMBOL(pagevec_lookup_range);
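
/*
 * Example usage (illustrative sketch; process_page() is a hypothetical
 * per-page helper): walking a range of an address_space in PAGEVEC_SIZE
 * chunks, dropping the batch's references after each chunk.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	unsigned int i, nr;
 *
 *	pagevec_init(&pvec);
 *	while ((nr = pagevec_lookup_range(&pvec, mapping, &index, end))) {
 *		for (i = 0; i < nr; i++)
 *			process_page(pvec.pages[i]);
 *		pagevec_release(&pvec);
 *	}
 */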
1133 
1134 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
1135 		struct address_space *mapping, pgoff_t *index, pgoff_t end,
1136 		xa_mark_t tag)
1137 {
1138 	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
1139 					PAGEVEC_SIZE, pvec->pages);
1140 	return pagevec_count(pvec);
1141 }
1142 EXPORT_SYMBOL(pagevec_lookup_range_tag);
1143 
1144 /*
1145  * Perform any setup for the swap system
1146  */
1147 void __init swap_setup(void)
1148 {
1149 	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
1150 
1151 	/* Use a smaller cluster for small-memory machines */
1152 	if (megs < 16)
1153 		page_cluster = 2;
1154 	else
1155 		page_cluster = 3;
1156 	/*
1157 	 * Right now other parts of the system mean that we
1158 	 * _really_ don't want to cluster much more.
1159 	 */
1160 }
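
/*
 * Worked example: swapin readahead operates on clusters of 1 << page_cluster
 * pages, so the defaults above give 8-page (32KB with 4KB pages) clusters on
 * machines with 16MB of RAM or more, and 4-page (16KB) clusters below that.
 */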
1161 
1162 #ifdef CONFIG_DEV_PAGEMAP_OPS
1163 void put_devmap_managed_page(struct page *page)
1164 {
1165 	int count;
1166 
1167 	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
1168 		return;
1169 
1170 	count = page_ref_dec_return(page);
1171 
1172 	/*
1173 	 * devmap page refcounts are 1-based, rather than 0-based: if
1174 	 * refcount is 1, then the page is free and the refcount is
1175 	 * stable because nobody holds a reference on the page.
1176 	 */
1177 	if (count == 1)
1178 		free_devmap_managed_page(page);
1179 	else if (!count)
1180 		__put_page(page);
1181 }
1182 EXPORT_SYMBOL(put_devmap_managed_page);
1183 #endif
1184