linux/mm/vmscan.c: old bc57e00f5e0b2480ef222c775c49552d3a930db7 vs. new 7d3579e8e61937cbba268ea9b218d006b6d64221
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 37 unchanged lines hidden ---

46
47#include <linux/swapops.h>
48
49#include "internal.h"
50
51#define CREATE_TRACE_POINTS
52#include <trace/events/vmscan.h>
53
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 37 unchanged lines hidden ---

46
47#include <linux/swapops.h>
48
49#include "internal.h"
50
51#define CREATE_TRACE_POINTS
52#include <trace/events/vmscan.h>
53
54enum lumpy_mode {
55 LUMPY_MODE_NONE,
56 LUMPY_MODE_ASYNC,
57 LUMPY_MODE_SYNC,
58};
59
54struct scan_control {
55 /* Incremented by the number of inactive pages that were scanned */
56 unsigned long nr_scanned;
57
58 /* Number of pages freed so far during a call to shrink_zones() */
59 unsigned long nr_reclaimed;
60
61 /* How many pages shrink_list() should reclaim */

--- 15 unchanged lines hidden ---

77 int swappiness;
78
79 int order;
80
81 /*
82 * Intend to reclaim enough contiguous memory rather than just
83 * enough memory, i.e. the mode for high-order allocations.
84 */
60struct scan_control {
61 /* Incremented by the number of inactive pages that were scanned */
62 unsigned long nr_scanned;
63
64 /* Number of pages freed so far during a call to shrink_zones() */
65 unsigned long nr_reclaimed;
66
67 /* How many pages shrink_list() should reclaim */

--- 15 unchanged lines hidden ---

83 int swappiness;
84
85 int order;
86
87 /*
88 * Intend to reclaim enough contiguous memory rather than just
89 * enough memory, i.e. the mode for high-order allocations.
90 */
85 bool lumpy_reclaim_mode;
91 enum lumpy_mode lumpy_reclaim_mode;
86
87 /* Which cgroup do we reclaim from */
88 struct mem_cgroup *mem_cgroup;
89
90 /*
91 * Nodemask of nodes allowed by the caller. If NULL, all nodes
92 * are scanned.
93 */

--- 166 unchanged lines hidden ---

260 }
261
262 shrinker->nr += total_scan;
263 }
264 up_read(&shrinker_rwsem);
265 return ret;
266}
267
92
93 /* Which cgroup do we reclaim from */
94 struct mem_cgroup *mem_cgroup;
95
96 /*
97 * Nodemask of nodes allowed by the caller. If NULL, all nodes
98 * are scanned.
99 */

--- 166 unchanged lines hidden ---

266 }
267
268 shrinker->nr += total_scan;
269 }
270 up_read(&shrinker_rwsem);
271 return ret;
272}
273
274static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
275 bool sync)
276{
277 enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
278
279 /*
280 * Some reclaim has already failed. It is not worth trying
281 * synchronous lumpy reclaim.
282 */
283 if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
284 return;
285
286 /*
287 * If we need a large contiguous chunk of memory, or have
288 * trouble getting a small set of contiguous pages, we
289 * will reclaim both active and inactive pages.
290 */
291 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
292 sc->lumpy_reclaim_mode = mode;
293 else if (sc->order && priority < DEF_PRIORITY - 2)
294 sc->lumpy_reclaim_mode = mode;
295 else
296 sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
297}
298
299static void disable_lumpy_reclaim_mode(struct scan_control *sc)
300{
301 sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
302}
303
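
To make the new mode handling easier to follow outside the kernel tree, here is a minimal standalone C sketch of the same decision logic. This is a userspace model, not kernel code: scan_control_model, set_mode() and main() are illustrative, and the PAGE_ALLOC_COSTLY_ORDER/DEF_PRIORITY values (3 and 12) match the kernel headers of this era.

#include <stdbool.h>
#include <stdio.h>

/* Simplified standalone model of the new lumpy reclaim mode handling. */
enum lumpy_mode { LUMPY_MODE_NONE, LUMPY_MODE_ASYNC, LUMPY_MODE_SYNC };

#define PAGE_ALLOC_COSTLY_ORDER 3        /* as in the kernel headers */
#define DEF_PRIORITY            12       /* as in mm/vmscan.c */

struct scan_control_model {
        int order;                       /* allocation order being reclaimed for */
        enum lumpy_mode lumpy_reclaim_mode;
};

static void set_mode(int priority, struct scan_control_model *sc, bool sync)
{
        enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;

        /* Once lumpy reclaim has been disabled, do not upgrade to sync. */
        if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
                return;

        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
                sc->lumpy_reclaim_mode = mode;
        else if (sc->order && priority < DEF_PRIORITY - 2)
                sc->lumpy_reclaim_mode = mode;
        else
                sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
}

int main(void)
{
        struct scan_control_model sc = { .order = 9 };  /* e.g. a hugepage */

        set_mode(DEF_PRIORITY, &sc, false);
        printf("first pass:  mode %d\n", sc.lumpy_reclaim_mode);  /* ASYNC */
        set_mode(DEF_PRIORITY, &sc, true);
        printf("stall retry: mode %d\n", sc.lumpy_reclaim_mode);  /* SYNC */
        return 0;
}

An order-0 request always lands in LUMPY_MODE_NONE, which is why disable_lumpy_reclaim_mode() can simply store that value unconditionally.
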
268static inline int is_page_cache_freeable(struct page *page)
269{
270 /*
271 * A freeable page cache page is referenced only by the caller
272 * that isolated the page, the page cache radix tree and
273 * optional buffer heads at page->private.
274 */
275 return page_count(page) - page_has_private(page) == 2;
276}
277
304static inline int is_page_cache_freeable(struct page *page)
305{
306 /*
307 * A freeable page cache page is referenced only by the caller
308 * that isolated the page, the page cache radix tree and
309 * optional buffer heads at page->private.
310 */
311 return page_count(page) - page_has_private(page) == 2;
312}
313
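
The "== 2" above is reference-count arithmetic: one reference is held by the caller that isolated the page, one by the page cache radix tree, and buffer heads (page_has_private()) contribute one more that is subtracted out before the comparison. A tiny standalone model of that arithmetic, with illustrative names rather than the real page APIs:

#include <assert.h>
#include <stdbool.h>

/* Freeable means: only the isolating caller and the radix tree still
 * hold references, once any buffer-head reference is discounted. */
static bool freeable_model(int page_count, bool has_private)
{
        return page_count - (has_private ? 1 : 0) == 2;
}

int main(void)
{
        assert(freeable_model(2, false));   /* caller + radix tree */
        assert(freeable_model(3, true));    /* plus buffer heads only */
        assert(!freeable_model(3, false));  /* someone else holds a reference */
        return 0;
}
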
278static int may_write_to_queue(struct backing_dev_info *bdi)
314static int may_write_to_queue(struct backing_dev_info *bdi,
315 struct scan_control *sc)
279{
280 if (current->flags & PF_SWAPWRITE)
281 return 1;
282 if (!bdi_write_congested(bdi))
283 return 1;
284 if (bdi == current->backing_dev_info)
285 return 1;
316{
317 if (current->flags & PF_SWAPWRITE)
318 return 1;
319 if (!bdi_write_congested(bdi))
320 return 1;
321 if (bdi == current->backing_dev_info)
322 return 1;
323
324 /* lumpy reclaim for hugepages often needs a lot of writes */
325 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
326 return 1;
286 return 0;
287}
288
289/*
290 * We detected a synchronous write error writing a page out. Probably
291 * -ENOSPC. We need to propagate that into the address_space for a subsequent
292 * fsync(), msync() or close().
293 *

--- 8 unchanged lines hidden ---

302 struct page *page, int error)
303{
304 lock_page_nosync(page);
305 if (page_mapping(page) == mapping)
306 mapping_set_error(mapping, error);
307 unlock_page(page);
308}
309
327 return 0;
328}
329
330/*
331 * We detected a synchronous write error writing a page out. Probably
332 * -ENOSPC. We need to propagate that into the address_space for a subsequent
333 * fsync(), msync() or close().
334 *

--- 8 unchanged lines hidden ---

343 struct page *page, int error)
344{
345 lock_page_nosync(page);
346 if (page_mapping(page) == mapping)
347 mapping_set_error(mapping, error);
348 unlock_page(page);
349}
350
310/* Request for sync pageout. */
311enum pageout_io {
312 PAGEOUT_IO_ASYNC,
313 PAGEOUT_IO_SYNC,
314};
315
316/* possible outcome of pageout() */
317typedef enum {
318 /* failed to write page out, page is locked */
319 PAGE_KEEP,
320 /* move page to the active list, page is locked */
321 PAGE_ACTIVATE,
322 /* page has been sent to the disk successfully, page is unlocked */
323 PAGE_SUCCESS,
324 /* page is clean and locked */
325 PAGE_CLEAN,
326} pageout_t;
327
328/*
329 * pageout is called by shrink_page_list() for each dirty page.
330 * Calls ->writepage().
331 */
332static pageout_t pageout(struct page *page, struct address_space *mapping,
351/* possible outcome of pageout() */
352typedef enum {
353 /* failed to write page out, page is locked */
354 PAGE_KEEP,
355 /* move page to the active list, page is locked */
356 PAGE_ACTIVATE,
357 /* page has been sent to the disk successfully, page is unlocked */
358 PAGE_SUCCESS,
359 /* page is clean and locked */
360 PAGE_CLEAN,
361} pageout_t;
362
363/*
364 * pageout is called by shrink_page_list() for each dirty page.
365 * Calls ->writepage().
366 */
367static pageout_t pageout(struct page *page, struct address_space *mapping,
333 enum pageout_io sync_writeback)
368 struct scan_control *sc)
334{
335 /*
336 * If the page is dirty, only perform writeback if that write
337 * will be non-blocking, to prevent this allocation from being
338 * stalled by pagecache activity. But note that there may be
339 * stalls if we need to run get_block(). We could test
340 * PagePrivate for that.
341 *

--- 19 unchanged lines hidden ---

361 printk("%s: orphaned page\n", __func__);
362 return PAGE_CLEAN;
363 }
364 }
365 return PAGE_KEEP;
366 }
367 if (mapping->a_ops->writepage == NULL)
368 return PAGE_ACTIVATE;
369{
370 /*
371 * If the page is dirty, only perform writeback if that write
372 * will be non-blocking, to prevent this allocation from being
373 * stalled by pagecache activity. But note that there may be
374 * stalls if we need to run get_block(). We could test
375 * PagePrivate for that.
376 *

--- 19 unchanged lines hidden ---

396 printk("%s: orphaned page\n", __func__);
397 return PAGE_CLEAN;
398 }
399 }
400 return PAGE_KEEP;
401 }
402 if (mapping->a_ops->writepage == NULL)
403 return PAGE_ACTIVATE;
369 if (!may_write_to_queue(mapping->backing_dev_info))
404 if (!may_write_to_queue(mapping->backing_dev_info, sc)) {
405 disable_lumpy_reclaim_mode(sc);
370 return PAGE_KEEP;
406 return PAGE_KEEP;
407 }
371
372 if (clear_page_dirty_for_io(page)) {
373 int res;
374 struct writeback_control wbc = {
375 .sync_mode = WB_SYNC_NONE,
376 .nr_to_write = SWAP_CLUSTER_MAX,
377 .range_start = 0,
378 .range_end = LLONG_MAX,

--- 9 unchanged lines hidden ---

388 return PAGE_ACTIVATE;
389 }
390
391 /*
392 * Wait on writeback if requested to. This happens when
393 * direct reclaiming a large contiguous area and the
394 * first attempt to free a range of pages fails.
395 */
408
409 if (clear_page_dirty_for_io(page)) {
410 int res;
411 struct writeback_control wbc = {
412 .sync_mode = WB_SYNC_NONE,
413 .nr_to_write = SWAP_CLUSTER_MAX,
414 .range_start = 0,
415 .range_end = LLONG_MAX,

--- 9 unchanged lines hidden ---

425 return PAGE_ACTIVATE;
426 }
427
428 /*
429 * Wait on writeback if requested to. This happens when
430 * direct reclaiming a large contiguous area and the
431 * first attempt to free a range of pages fails.
432 */
396 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
433 if (PageWriteback(page) &&
434 sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
397 wait_on_page_writeback(page);
398
399 if (!PageWriteback(page)) {
400 /* synchronous write or broken a_ops? */
401 ClearPageReclaim(page);
402 }
403 trace_mm_vmscan_writepage(page,
435 wait_on_page_writeback(page);
436
437 if (!PageWriteback(page)) {
438 /* synchronous write or broken a_ops? */
439 ClearPageReclaim(page);
440 }
441 trace_mm_vmscan_writepage(page,
404 trace_reclaim_flags(page, sync_writeback));
442 trace_reclaim_flags(page, sc->lumpy_reclaim_mode));
405 inc_zone_page_state(page, NR_VMSCAN_WRITE);
406 return PAGE_SUCCESS;
407 }
408
409 return PAGE_CLEAN;
410}
411
412/*

--- 161 unchanged lines hidden ---

574{
575 int referenced_ptes, referenced_page;
576 unsigned long vm_flags;
577
578 referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
579 referenced_page = TestClearPageReferenced(page);
580
581 /* Lumpy reclaim - ignore references */
443 inc_zone_page_state(page, NR_VMSCAN_WRITE);
444 return PAGE_SUCCESS;
445 }
446
447 return PAGE_CLEAN;
448}
449
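
With pageout() now taking the scan_control, the interesting part is how shrink_page_list() reacts to each pageout_t value: after this change, a PAGE_SUCCESS page that is still under writeback is kept via the new keep_lumpy label instead of keep, so the lumpy mode survives. Below is a minimal standalone sketch of that dispatch; next_action() and its string results are illustrative stand-ins for the gotos in shrink_page_list(), not kernel code.

#include <stdbool.h>
#include <stdio.h>

typedef enum { PAGE_KEEP, PAGE_ACTIVATE, PAGE_SUCCESS, PAGE_CLEAN } pageout_t;

/* Standalone model of dispatching on pageout()'s result. */
static const char *next_action(pageout_t res, bool writeback, bool dirty)
{
        switch (res) {
        case PAGE_KEEP:         /* write failed, page still locked */
                return "keep_locked";
        case PAGE_ACTIVATE:     /* e.g. no ->writepage */
                return "activate_locked";
        case PAGE_SUCCESS:      /* write was started, page already unlocked */
                if (writeback)
                        return "keep_lumpy";  /* async I/O still in flight; keep
                                                 the page, keep the lumpy mode */
                if (dirty)
                        return "keep";        /* redirtied, try again later */
                return "relock and keep reclaiming";  /* synchronous write done */
        case PAGE_CLEAN:        /* nothing to write */
        default:
                return "continue toward freeing";
        }
}

int main(void)
{
        printf("%s\n", next_action(PAGE_SUCCESS, true, false));
        printf("%s\n", next_action(PAGE_SUCCESS, false, true));
        return 0;
}
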
450/*

--- 161 unchanged lines hidden ---

612{
613 int referenced_ptes, referenced_page;
614 unsigned long vm_flags;
615
616 referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
617 referenced_page = TestClearPageReferenced(page);
618
619 /* Lumpy reclaim - ignore references */
582 if (sc->lumpy_reclaim_mode)
620 if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
583 return PAGEREF_RECLAIM;
584
585 /*
586 * Mlock lost the isolation race with us. Let try_to_unmap()
587 * move the page to the unevictable list.
588 */
589 if (vm_flags & VM_LOCKED)
590 return PAGEREF_RECLAIM;

--- 47 unchanged lines hidden ---

638
639 pagevec_free(&freed_pvec);
640}
641
642/*
643 * shrink_page_list() returns the number of reclaimed pages
644 */
645static unsigned long shrink_page_list(struct list_head *page_list,
621 return PAGEREF_RECLAIM;
622
623 /*
624 * Mlock lost the isolation race with us. Let try_to_unmap()
625 * move the page to the unevictable list.
626 */
627 if (vm_flags & VM_LOCKED)
628 return PAGEREF_RECLAIM;

--- 47 unchanged lines hidden ---

676
677 pagevec_free(&freed_pvec);
678}
679
680/*
681 * shrink_page_list() returns the number of reclaimed pages
682 */
683static unsigned long shrink_page_list(struct list_head *page_list,
646 struct scan_control *sc,
647 enum pageout_io sync_writeback)
684 struct scan_control *sc)
648{
649 LIST_HEAD(ret_pages);
650 LIST_HEAD(free_pages);
651 int pgactivate = 0;
652 unsigned long nr_reclaimed = 0;
653
654 cond_resched();
655

--- 32 unchanged lines hidden ---

688 /*
689 * Synchronous reclaim is performed in two passes,
690 * first an asynchronous pass over the list to
691 * start parallel writeback, and a second synchronous
692 * pass to wait for the IO to complete. Wait here
693 * for any page for which writeback has already
694 * started.
695 */
685{
686 LIST_HEAD(ret_pages);
687 LIST_HEAD(free_pages);
688 int pgactivate = 0;
689 unsigned long nr_reclaimed = 0;
690
691 cond_resched();
692

--- 32 unchanged lines hidden ---

725 /*
726 * Synchronous reclaim is performed in two passes,
727 * first an asynchronous pass over the list to
728 * start parallel writeback, and a second synchronous
729 * pass to wait for the IO to complete. Wait here
730 * for any page for which writeback has already
731 * started.
732 */
696 if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
733 if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
734 may_enter_fs)
697 wait_on_page_writeback(page);
735 wait_on_page_writeback(page);
698 else
699 goto keep_locked;
736 else {
737 unlock_page(page);
738 goto keep_lumpy;
739 }
700 }
701
702 references = page_check_references(page, sc);
703 switch (references) {
704 case PAGEREF_ACTIVATE:
705 goto activate_locked;
706 case PAGEREF_KEEP:
707 goto keep_locked;

--- 37 unchanged lines hidden ---

745 if (references == PAGEREF_RECLAIM_CLEAN)
746 goto keep_locked;
747 if (!may_enter_fs)
748 goto keep_locked;
749 if (!sc->may_writepage)
750 goto keep_locked;
751
752 /* Page is dirty, try to write it out here */
740 }
741
742 references = page_check_references(page, sc);
743 switch (references) {
744 case PAGEREF_ACTIVATE:
745 goto activate_locked;
746 case PAGEREF_KEEP:
747 goto keep_locked;

--- 37 unchanged lines hidden ---

785 if (references == PAGEREF_RECLAIM_CLEAN)
786 goto keep_locked;
787 if (!may_enter_fs)
788 goto keep_locked;
789 if (!sc->may_writepage)
790 goto keep_locked;
791
792 /* Page is dirty, try to write it out here */
753 switch (pageout(page, mapping, sync_writeback)) {
793 switch (pageout(page, mapping, sc)) {
754 case PAGE_KEEP:
755 goto keep_locked;
756 case PAGE_ACTIVATE:
757 goto activate_locked;
758 case PAGE_SUCCESS:
794 case PAGE_KEEP:
795 goto keep_locked;
796 case PAGE_ACTIVATE:
797 goto activate_locked;
798 case PAGE_SUCCESS:
759 if (PageWriteback(page) || PageDirty(page))
799 if (PageWriteback(page))
800 goto keep_lumpy;
801 if (PageDirty(page))
760 goto keep;
802 goto keep;
803
761 /*
762 * A synchronous write - probably a ramdisk. Go
763 * ahead and try to reclaim the page.
764 */
765 if (!trylock_page(page))
766 goto keep;
767 if (PageDirty(page) || PageWriteback(page))
768 goto keep_locked;

--- 66 unchanged lines hidden ---

835 list_add(&page->lru, &free_pages);
836 continue;
837
838cull_mlocked:
839 if (PageSwapCache(page))
840 try_to_free_swap(page);
841 unlock_page(page);
842 putback_lru_page(page);
804 /*
805 * A synchronous write - probably a ramdisk. Go
806 * ahead and try to reclaim the page.
807 */
808 if (!trylock_page(page))
809 goto keep;
810 if (PageDirty(page) || PageWriteback(page))
811 goto keep_locked;

--- 66 unchanged lines hidden ---

878 list_add(&page->lru, &free_pages);
879 continue;
880
881cull_mlocked:
882 if (PageSwapCache(page))
883 try_to_free_swap(page);
884 unlock_page(page);
885 putback_lru_page(page);
886 disable_lumpy_reclaim_mode(sc);
843 continue;
844
845activate_locked:
846 /* Not a candidate for swapping, so reclaim swap space. */
847 if (PageSwapCache(page) && vm_swap_full())
848 try_to_free_swap(page);
849 VM_BUG_ON(PageActive(page));
850 SetPageActive(page);
851 pgactivate++;
852keep_locked:
853 unlock_page(page);
854keep:
887 continue;
888
889activate_locked:
890 /* Not a candidate for swapping, so reclaim swap space. */
891 if (PageSwapCache(page) && vm_swap_full())
892 try_to_free_swap(page);
893 VM_BUG_ON(PageActive(page));
894 SetPageActive(page);
895 pgactivate++;
896keep_locked:
897 unlock_page(page);
898keep:
899 disable_lumpy_reclaim_mode(sc);
900keep_lumpy:
855 list_add(&page->lru, &ret_pages);
856 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
857 }
858
859 free_page_list(&free_pages);
860
861 list_splice(&ret_pages, page_list);
862 count_vm_events(PGACTIVATE, pgactivate);

--- 384 unchanged lines hidden ---

1247{
1248 int lumpy_stall_priority;
1249
1250 /* kswapd should not stall on sync IO */
1251 if (current_is_kswapd())
1252 return false;
1253
1254 /* Only stall on lumpy reclaim */
901 list_add(&page->lru, &ret_pages);
902 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
903 }
904
905 free_page_list(&free_pages);
906
907 list_splice(&ret_pages, page_list);
908 count_vm_events(PGACTIVATE, pgactivate);

--- 384 unchanged lines hidden ---

1293{
1294 int lumpy_stall_priority;
1295
1296 /* kswapd should not stall on sync IO */
1297 if (current_is_kswapd())
1298 return false;
1299
1300 /* Only stall on lumpy reclaim */
1255 if (!sc->lumpy_reclaim_mode)
1301 if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
1256 return false;
1257
1258 /* If we have reclaimed everything on the isolated list, no stall */
1259 if (nr_freed == nr_taken)
1260 return false;
1261
1262 /*
1263 * For high-order allocations, there are two stall thresholds.

--- 28 unchanged lines hidden ---

1292 while (unlikely(too_many_isolated(zone, file, sc))) {
1293 congestion_wait(BLK_RW_ASYNC, HZ/10);
1294
1295 /* We are about to die and free our memory. Return now. */
1296 if (fatal_signal_pending(current))
1297 return SWAP_CLUSTER_MAX;
1298 }
1299
1302 return false;
1303
1304 /* If we have reclaimed everything on the isolated list, no stall */
1305 if (nr_freed == nr_taken)
1306 return false;
1307
1308 /*
1309 * For high-order allocations, there are two stall thresholds.

--- 28 unchanged lines hidden ---

1338 while (unlikely(too_many_isolated(zone, file, sc))) {
1339 congestion_wait(BLK_RW_ASYNC, HZ/10);
1340
1341 /* We are about to die and free our memory. Return now. */
1342 if (fatal_signal_pending(current))
1343 return SWAP_CLUSTER_MAX;
1344 }
1345
1300
1346 set_lumpy_reclaim_mode(priority, sc, false);
1301 lru_add_drain();
1302 spin_lock_irq(&zone->lru_lock);
1303
1304 if (scanning_global_lru(sc)) {
1305 nr_taken = isolate_pages_global(nr_to_scan,
1306 &page_list, &nr_scanned, sc->order,
1347 lru_add_drain();
1348 spin_lock_irq(&zone->lru_lock);
1349
1350 if (scanning_global_lru(sc)) {
1351 nr_taken = isolate_pages_global(nr_to_scan,
1352 &page_list, &nr_scanned, sc->order,
1307 sc->lumpy_reclaim_mode ?
1308 ISOLATE_BOTH : ISOLATE_INACTIVE,
1353 sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
1354 ISOLATE_INACTIVE : ISOLATE_BOTH,
1309 zone, 0, file);
1310 zone->pages_scanned += nr_scanned;
1311 if (current_is_kswapd())
1312 __count_zone_vm_events(PGSCAN_KSWAPD, zone,
1313 nr_scanned);
1314 else
1315 __count_zone_vm_events(PGSCAN_DIRECT, zone,
1316 nr_scanned);
1317 } else {
1318 nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
1319 &page_list, &nr_scanned, sc->order,
1355 zone, 0, file);
1356 zone->pages_scanned += nr_scanned;
1357 if (current_is_kswapd())
1358 __count_zone_vm_events(PGSCAN_KSWAPD, zone,
1359 nr_scanned);
1360 else
1361 __count_zone_vm_events(PGSCAN_DIRECT, zone,
1362 nr_scanned);
1363 } else {
1364 nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
1365 &page_list, &nr_scanned, sc->order,
1320 sc->lumpy_reclaim_mode ?
1321 ISOLATE_BOTH : ISOLATE_INACTIVE,
1366 sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
1367 ISOLATE_INACTIVE : ISOLATE_BOTH,
1322 zone, sc->mem_cgroup,
1323 0, file);
1324 /*
1325 * mem_cgroup_isolate_pages() keeps track of
1326 * scanned pages on its own.
1327 */
1328 }
1329
1330 if (nr_taken == 0) {
1331 spin_unlock_irq(&zone->lru_lock);
1332 return 0;
1333 }
1334
1335 update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
1336
1337 spin_unlock_irq(&zone->lru_lock);
1338
1368 zone, sc->mem_cgroup,
1369 0, file);
1370 /*
1371 * mem_cgroup_isolate_pages() keeps track of
1372 * scanned pages on its own.
1373 */
1374 }
1375
1376 if (nr_taken == 0) {
1377 spin_unlock_irq(&zone->lru_lock);
1378 return 0;
1379 }
1380
1381 update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
1382
1383 spin_unlock_irq(&zone->lru_lock);
1384
1339 nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
1385 nr_reclaimed = shrink_page_list(&page_list, sc);
1340
1341 /* Check if we should synchronously wait for writeback */
1342 if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
1343 /*
1344 * The attempt at pageout may have made some of the
1345 * pages active; mark them inactive again.
1346 */
1347 nr_active = clear_active_flags(&page_list, NULL);
1348 count_vm_events(PGDEACTIVATE, nr_active);
1349
1386
1387 /* Check if we should synchronously wait for writeback */
1388 if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
1389 /*
1390 * The attempt at pageout may have made some of the
1391 * pages active; mark them inactive again.
1392 */
1393 nr_active = clear_active_flags(&page_list, NULL);
1394 count_vm_events(PGDEACTIVATE, nr_active);
1395
1350 nr_reclaimed += shrink_page_list(&page_list, sc, PAGEOUT_IO_SYNC);
1396 set_lumpy_reclaim_mode(priority, sc, true);
1397 nr_reclaimed += shrink_page_list(&page_list, sc);
1351 }
1352
1353 local_irq_disable();
1354 if (current_is_kswapd())
1355 __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
1356 __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
1357
1358 putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);

--- 375 unchanged lines hidden ---

1734 scan >>= priority;
1735 scan = div64_u64(scan * fraction[file], denominator);
1736 }
1737 nr[l] = nr_scan_try_batch(scan,
1738 &reclaim_stat->nr_saved_scan[l]);
1739 }
1740}
1741
1398 }
1399
1400 local_irq_disable();
1401 if (current_is_kswapd())
1402 __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
1403 __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
1404
1405 putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
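
The hunk above amounts to a two-pass pattern: shrink_page_list() first runs in the asynchronous mode chosen by set_lumpy_reclaim_mode(priority, sc, false), and only if should_reclaim_stall() fires is the mode upgraded to synchronous for a second pass over the leftover pages. The following is a minimal standalone C sketch of that control flow; the *_model helpers and the simplified mode choice are illustrative, not the kernel functions.

#include <stdbool.h>
#include <stdio.h>

enum lumpy_mode { LUMPY_MODE_NONE, LUMPY_MODE_ASYNC, LUMPY_MODE_SYNC };

struct sc_model { int order; enum lumpy_mode lumpy_reclaim_mode; };

/* Stand-in for shrink_page_list(): pretend half of the pages get freed. */
static unsigned long shrink_list_model(struct sc_model *sc, unsigned long npages)
{
        printf("shrink pass in mode %d over %lu pages\n",
               sc->lumpy_reclaim_mode, npages);
        return npages / 2;
}

/* Stand-in for should_reclaim_stall(): stall if anything was left behind. */
static bool should_stall_model(unsigned long taken, unsigned long freed)
{
        return freed < taken;
}

static unsigned long two_pass_reclaim(struct sc_model *sc, unsigned long taken)
{
        unsigned long freed;

        /* Simplified mode choice: any high-order request starts async. */
        sc->lumpy_reclaim_mode = sc->order ? LUMPY_MODE_ASYNC : LUMPY_MODE_NONE;
        freed = shrink_list_model(sc, taken);

        if (should_stall_model(taken, freed)) {
                /* Upgrade to synchronous lumpy reclaim and retry the rest. */
                if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
                        sc->lumpy_reclaim_mode = LUMPY_MODE_SYNC;
                freed += shrink_list_model(sc, taken - freed);
        }
        return freed;
}

int main(void)
{
        struct sc_model sc = { .order = 4 };

        printf("freed %lu pages\n", two_pass_reclaim(&sc, 32));
        return 0;
}
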

--- 375 unchanged lines hidden ---

1781 scan >>= priority;
1782 scan = div64_u64(scan * fraction[file], denominator);
1783 }
1784 nr[l] = nr_scan_try_batch(scan,
1785 &reclaim_stat->nr_saved_scan[l]);
1786 }
1787}
1788
1742static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
1743{
1744 /*
1745 * If we need a large contiguous chunk of memory, or have
1746 * trouble getting a small set of contiguous pages, we
1747 * will reclaim both active and inactive pages.
1748 */
1749 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1750 sc->lumpy_reclaim_mode = 1;
1751 else if (sc->order && priority < DEF_PRIORITY - 2)
1752 sc->lumpy_reclaim_mode = 1;
1753 else
1754 sc->lumpy_reclaim_mode = 0;
1755}
1756
1757/*
1758 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1759 */
1760static void shrink_zone(int priority, struct zone *zone,
1761 struct scan_control *sc)
1762{
1763 unsigned long nr[NR_LRU_LISTS];
1764 unsigned long nr_to_scan;
1765 enum lru_list l;
1766 unsigned long nr_reclaimed = sc->nr_reclaimed;
1767 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1768
1769 get_scan_count(zone, sc, nr, priority);
1770
1789/*
1790 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1791 */
1792static void shrink_zone(int priority, struct zone *zone,
1793 struct scan_control *sc)
1794{
1795 unsigned long nr[NR_LRU_LISTS];
1796 unsigned long nr_to_scan;
1797 enum lru_list l;
1798 unsigned long nr_reclaimed = sc->nr_reclaimed;
1799 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1800
1801 get_scan_count(zone, sc, nr, priority);
1802
1771 set_lumpy_reclaim_mode(priority, sc);
1772
1773 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1774 nr[LRU_INACTIVE_FILE]) {
1775 for_each_evictable_lru(l) {
1776 if (nr[l]) {
1777 nr_to_scan = min_t(unsigned long,
1778 nr[l], SWAP_CLUSTER_MAX);
1779 nr[l] -= nr_to_scan;
1780

--- 1265 unchanged lines hidden ---
1803 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1804 nr[LRU_INACTIVE_FILE]) {
1805 for_each_evictable_lru(l) {
1806 if (nr[l]) {
1807 nr_to_scan = min_t(unsigned long,
1808 nr[l], SWAP_CLUSTER_MAX);
1809 nr[l] -= nr_to_scan;
1810

--- 1265 unchanged lines hidden ---
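
The tail of the diff shows shrink_zone()'s batching loop, which round-robins over the evictable LRU lists in SWAP_CLUSTER_MAX-sized chunks until the lists it cares about run out of scan budget. A standalone sketch of that batching follows (SWAP_CLUSTER_MAX is 32 as in linux/swap.h; the budgets and the printout are illustrative):

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL    /* as in linux/swap.h */

enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
                LRU_INACTIVE_FILE, LRU_ACTIVE_FILE, NR_EVICTABLE };

int main(void)
{
        /* Per-list scan budgets, as get_scan_count() would fill them in. */
        unsigned long nr[NR_EVICTABLE] = { 100, 0, 70, 10 };

        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
               nr[LRU_INACTIVE_FILE]) {
                for (int l = 0; l < NR_EVICTABLE; l++) {
                        if (!nr[l])
                                continue;
                        unsigned long nr_to_scan = nr[l] < SWAP_CLUSTER_MAX ?
                                                   nr[l] : SWAP_CLUSTER_MAX;
                        nr[l] -= nr_to_scan;
                        /* shrink_list(l, nr_to_scan, zone, sc, ...) runs here */
                        printf("scan list %d: %lu pages\n", l, nr_to_scan);
                }
        }
        return 0;
}
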