/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;
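
/*
 * Illustrative note (not in the original source): swap readahead operates
 * on 1 << page_cluster pages at a time, so for example:
 *
 *	page_cluster == 2  ->  4-page clusters
 *	page_cluster == 3  ->  8-page clusters
 *
 * The value is tunable at run time via /proc/sys/vm/page-cluster.
 */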

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_cold_page(page, 0);
}

static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {
		compound_page_dtor *dtor;

		dtor = get_compound_page_dtor(page);
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);
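
/*
 * Usage sketch (illustrative only): put_page() pairs with get_page() or
 * any other reference-taking primitive:
 *
 *	get_page(page);
 *	... use the page ...
 *	put_page(page);
 *
 * Dropping the last reference frees the page, so it must not be touched
 * after the call.
 */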

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
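
/*
 * Usage sketch (illustrative only): a caller that collected pages on a
 * local list can drop all of its references in one call:
 *
 *	LIST_HEAD(pages);
 *
 *	... thread pages onto &pages via page->lru ...
 *	put_pages_list(&pages);
 */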

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
			int lru = page_lru_base_type(page);
			list_move_tail(&page->lru, &zone->lru[lru].list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}
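
/*
 * For reference (a hedged sketch, assuming the writeback path of this
 * era): end_page_writeback() is the expected caller, roughly:
 *
 *	if (TestClearPageReclaim(page))
 *		rotate_reclaimable_page(page);
 */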

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
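
/*
 * Usage sketch (illustrative only): file read paths call this once for
 * each page they touch, so that a second access promotes the page:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		mark_page_accessed(page);
 *		... copy data out ...
 *		page_cache_release(page);
 *	}
 */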

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to an LRU list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}
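
/*
 * For reference (assumed from include/linux/swap.h of this era): most
 * callers do not name an LRU list directly but use thin wrappers along
 * these lines:
 *
 *	static inline void lru_cache_add_file(struct page *page)
 *	{
 *		__lru_cache_add(page, LRU_INACTIVE_FILE);
 *	}
 */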

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through e.g. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
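
/*
 * Note (hedged): schedule_on_each_cpu() queues the work on every CPU and
 * waits for all of it to complete, so lru_add_drain_all() can sleep and
 * must only be called from process context.
 */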

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  ____pagevec_lru_add()
 * calls release_pages() directly to avoid mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	int i;
	struct zone *zone = NULL;

	VM_BUG_ON(is_unevictable_lru(lru));

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);
		int file;
		int active;

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(PageUnevictable(page));
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		active = is_active_lru(lru);
		file = is_file_lru(lru);
		if (active)
			SetPageActive(page);
		update_page_reclaim_stat(zone, page, file, active);
		add_page_to_lru_list(zone, page, lru);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (page_has_private(page) && trylock_page(page)) {
			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
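
/*
 * Usage sketch (illustrative only): the canonical pattern walks a mapping
 * in pagevec-sized chunks, dropping the page references after each batch:
 *
 *	pgoff_t index = 0;
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *
 *			... process page ...
 *			index = page->index + 1;
 *		}
 *		pagevec_release(&pvec);
 *	}
 */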

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
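
/*
 * Worked example (illustrative): with 512MB of RAM and 4KB pages,
 * totalram_pages is about 131072, so megs = 131072 >> 8 = 512 and
 * page_cluster ends up 3 (eight-page clusters); only machines with
 * less than 16MB of memory get the smaller value of 2.
 */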