mm/vmscan.c: af5d440365894b5ca51f29866c1a01496dce52c4 → 98879b3b9edc1604f2d1a6686576ef4d08ed3310
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/vmscan.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96 sct

--- 1104 unchanged lines hidden ---

@@ -1113,29 +1113,33 @@
 	cond_resched();
 
 	while (!list_empty(page_list)) {
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
 		enum page_references references = PAGEREF_RECLAIM_CLEAN;
 		bool dirty, writeback;
+		unsigned int nr_pages;
 
 		cond_resched();
 
 		page = lru_to_page(page_list);
 		list_del(&page->lru);
 
 		if (!trylock_page(page))
 			goto keep;
 
 		VM_BUG_ON_PAGE(PageActive(page), page);
 
-		sc->nr_scanned++;
+		nr_pages = 1 << compound_order(page);
+
+		/* Account the number of base pages even though THP */
+		sc->nr_scanned += nr_pages;
 
 		if (unlikely(!page_evictable(page)))
 			goto activate_locked;
 
 		if (!sc->may_unmap && page_mapped(page))
 			goto keep_locked;
 
 		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
 			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

--- 103 unchanged lines hidden ---
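The change that anchors this whole patch is visible in the hunk above: scan accounting switches from one count per page to one count per base page, using 1 << compound_order(page). A minimal userspace sketch of that arithmetic (illustrative only; base_pages() is a hypothetical stand-in, not a kernel helper):

	#include <assert.h>

	/* Mirrors "1 << compound_order(page)": an order-n compound page
	 * spans 2^n base pages. */
	static unsigned int base_pages(unsigned int order)
	{
		return 1u << order;
	}

	int main(void)
	{
		assert(base_pages(0) == 1);	/* ordinary base page */
		assert(base_pages(9) == 512);	/* 2 MiB THP on x86-64 */
		return 0;
	}

So sc->nr_scanned advances by 512 for a single 2 MiB THP, which keeps the scan budget honest when large pages dominate the list.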

@@ -1245,17 +1249,17 @@
 
 		if (!force_reclaim)
 			references = page_check_references(page, sc);
 
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
 		case PAGEREF_KEEP:
-			stat->nr_ref_keep++;
+			stat->nr_ref_keep += nr_pages;
 			goto keep_locked;
 		case PAGEREF_RECLAIM:
 		case PAGEREF_RECLAIM_CLEAN:
 			; /* try to reclaim the page below */
 		}
 
 		/*
 		 * Anonymous process memory has backing store?

--- 15 unchanged lines hidden ---
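The same one-character fix pattern repeats for every per-page reclaim counter: ++ becomes += nr_pages so a kept or failed THP is weighted by its base-page count. A hedged sketch of the bookkeeping (the field names follow the kernel's struct reclaim_stat of this era, but the definition here is illustrative, not the real one):

	struct reclaim_stat_sketch {
		unsigned long nr_ref_keep;	/* kept due to references */
		unsigned long nr_unmap_fail;	/* try_to_unmap() failed */
		unsigned long nr_activate[2];	/* [anon, file] activations */
	};

	/* was: stat->nr_ref_keep++; now weighted by base pages */
	static void account_ref_keep(struct reclaim_stat_sketch *stat,
				     unsigned int nr_pages)
	{
		stat->nr_ref_keep += nr_pages;
	}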

@@ -1277,50 +1281,62 @@
 				 */
 				if (!compound_mapcount(page) &&
 				    split_huge_page_to_list(page,
 							    page_list))
 					goto activate_locked;
 			}
 			if (!add_to_swap(page)) {
 				if (!PageTransHuge(page))
-					goto activate_locked;
+					goto activate_locked_split;
 				/* Fallback to swap normal pages */
 				if (split_huge_page_to_list(page,
 							    page_list))
 					goto activate_locked;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 				count_vm_event(THP_SWPOUT_FALLBACK);
 #endif
 				if (!add_to_swap(page))
-					goto activate_locked;
+					goto activate_locked_split;
 			}
 
 			may_enter_fs = 1;
 
 			/* Adding to swap updated mapping */
 			mapping = page_mapping(page);
 		}
 	} else if (unlikely(PageTransHuge(page))) {
 		/* Split file THP */
 		if (split_huge_page_to_list(page, page_list))
 			goto keep_locked;
 	}
 
+		/*
+		 * THP may get split above, need minus tail pages and update
+		 * nr_pages to avoid accounting tail pages twice.
+		 *
+		 * The tail pages that are added into swap cache successfully
+		 * reach here.
+		 */
+		if ((nr_pages > 1) && !PageTransHuge(page)) {
+			sc->nr_scanned -= (nr_pages - 1);
+			nr_pages = 1;
+		}
+
 		/*
 		 * The page is mapped into the page tables of one or more
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page)) {
 			enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
 
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 			if (!try_to_unmap(page, flags)) {
-				stat->nr_unmap_fail++;
+				stat->nr_unmap_fail += nr_pages;
 				goto activate_locked;
 			}
 		}
 
 		if (PageDirty(page)) {
 			/*
 			 * Only kswapd can writeback filesystem pages
 			 * to avoid risk of stack overflow. But avoid

--- 110 unchanged lines hidden ---
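Two things happen in the hunk above. First, the add_to_swap() failure paths for base pages now jump to a new activate_locked_split label (shown in a later hunk) so the upfront nr_pages charge can be undone. Second, when a THP was split successfully, its tail pages were put back on page_list and will be scanned as individual pages, so the head page gives back the tail portion of nr_scanned. A compact restatement of that fixup (illustrative; the kernel does this inline, not through a helper):

	/* After a successful split, the page at hand is a base page and
	 * its former tail pages will re-enter the loop on their own, so
	 * drop the tail-page share of the upfront nr_scanned charge. */
	static void fixup_after_split(unsigned long *nr_scanned,
				      unsigned int *nr_pages, int still_huge)
	{
		if (*nr_pages > 1 && !still_huge) {
			*nr_scanned -= (*nr_pages - 1);
			*nr_pages = 1;
		}
	}

Without this, each tail page would be counted twice: once as part of the head page's nr_pages and once when scanned on its own.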

@@ -1437,21 +1453,25 @@
 
 			count_vm_event(PGLAZYFREED);
 			count_memcg_page_event(page, PGLAZYFREED);
 		} else if (!mapping || !__remove_mapping(mapping, page, true))
 			goto keep_locked;
 
 		unlock_page(page);
 free_it:
-		nr_reclaimed++;
+		/*
+		 * THP may get swapped out in a whole, need account
+		 * all base pages.
+		 */
+		nr_reclaimed += nr_pages;
 
 		/*
 		 * Is there need to periodically free_page_list? It would
 		 * appear not as the counts should be low
 		 */
 		if (unlikely(PageTransHuge(page))) {
 			mem_cgroup_uncharge(page);
 			(*get_compound_page_dtor(page))(page);
 		} else
 			list_add(&page->lru, &free_pages);
 		continue;
 
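The free_it change is the headline fix: a THP that is swapped out in one piece frees nr_pages base pages, but the old code advanced nr_reclaimed by only 1, so shrink_page_list() could keep scanning long after its target was met. A toy before/after comparison (not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long old_counter = 0, new_counter = 0;
		unsigned int nr_pages = 512;	/* one order-9 THP */

		old_counter += 1;		/* old: nr_reclaimed++ */
		new_counter += nr_pages;	/* new: nr_reclaimed += nr_pages */
		printf("old=%lu new=%lu\n", old_counter, new_counter);
		return 0;
	}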
@@ -1458,28 +1478,38 @@
+activate_locked_split:
+		/*
+		 * The tail pages that are failed to add into swap cache
+		 * reach here. Fixup nr_scanned and nr_pages.
+		 */
+		if (nr_pages > 1) {
+			sc->nr_scanned -= (nr_pages - 1);
+			nr_pages = 1;
+		}
 activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
 		if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
 						PageMlocked(page)))
 			try_to_free_swap(page);
 		VM_BUG_ON_PAGE(PageActive(page), page);
 		if (!PageMlocked(page)) {
 			int type = page_is_file_cache(page);
 			SetPageActive(page);
-			pgactivate++;
-			stat->nr_activate[type] += hpage_nr_pages(page);
+			stat->nr_activate[type] += nr_pages;
 			count_memcg_page_event(page, PGACTIVATE);
 		}
 keep_locked:
 		unlock_page(page);
 keep:
 		list_add(&page->lru, &ret_pages);
 		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
 	}
 
+	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
+
 	mem_cgroup_uncharge_list(&free_pages);
 	try_to_unmap_flush();
 	free_unref_page_list(&free_pages);
 
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
 
 	return nr_reclaimed;

--- 155 unchanged lines hidden ---
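The new activate_locked_split label undoes the upfront charge for tail pages that failed to enter the swap cache, then falls through to the ordinary activate_locked path. pgactivate is no longer a separate ++ counter; it is derived after the loop from the per-type activation stats, so the PGACTIVATE vm event is also expressed in base pages. A trivial restatement of that derivation (illustrative helper, not the kernel's code):

	/* PGACTIVATE = anon activations + file activations, both already
	 * accumulated in base pages via "+= nr_pages" in the loop. */
	static unsigned long total_activated(const unsigned long nr_activate[2])
	{
		return nr_activate[0] + nr_activate[1];
	}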

@@ -1641,43 +1671,48 @@
 	unsigned long nr_taken = 0;
 	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
 	unsigned long skipped = 0;
 	unsigned long scan, total_scan, nr_pages;
 	LIST_HEAD(pages_skipped);
 	isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
 
+	total_scan = 0;
 	scan = 0;
-	for (total_scan = 0;
-	     scan < nr_to_scan && nr_taken < nr_to_scan && !list_empty(src);
-	     total_scan++) {
+	while (scan < nr_to_scan && !list_empty(src)) {
 		struct page *page;
 
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
 		VM_BUG_ON_PAGE(!PageLRU(page), page);
 
+		nr_pages = 1 << compound_order(page);
+		total_scan += nr_pages;
+
 		if (page_zonenum(page) > sc->reclaim_idx) {
 			list_move(&page->lru, &pages_skipped);
-			nr_skipped[page_zonenum(page)]++;
+			nr_skipped[page_zonenum(page)] += nr_pages;
 			continue;
 		}
 
 		/*
 		 * Do not count skipped pages because that makes the function
 		 * return with no isolated pages if the LRU mostly contains
 		 * ineligible pages. This causes the VM to not reclaim any
 		 * pages, triggering a premature OOM.
+		 *
+		 * Account all tail pages of THP. This would not cause
+		 * premature OOM since __isolate_lru_page() returns -EBUSY
+		 * only when the page is being freed somewhere else.
 		 */
-		scan++;
+		scan += nr_pages;
 		switch (__isolate_lru_page(page, mode)) {
 		case 0:
-			nr_pages = hpage_nr_pages(page);
 			nr_taken += nr_pages;
 			nr_zone_taken[page_zonenum(page)] += nr_pages;
 			list_move(&page->lru, dst);
 			break;
 
 		case -EBUSY:
 			/* else it is being freed elsewhere */
 			list_move(&page->lru, src);

--- 2562 unchanged lines hidden ---
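In isolate_lru_pages(), the for loop that bumped total_scan once per list entry becomes a while loop in which both scan and total_scan advance by the compound page's base-page count, and the nr_taken < nr_to_scan clause is dropped since scan itself is now measured in base pages. A simplified model of the reworked loop (hypothetical types; list handling and __isolate_lru_page() elided):

	struct sketch_page { unsigned int order; };

	static unsigned long isolate_sketch(const struct sketch_page *lru,
					    unsigned long lru_len,
					    unsigned long nr_to_scan)
	{
		unsigned long scan = 0, total_scan = 0, nr_taken = 0;
		unsigned long i = 0;

		while (scan < nr_to_scan && i < lru_len) {
			/* one order-n entry costs 2^n units of budget */
			unsigned int nr_pages = 1u << lru[i].order;

			total_scan += nr_pages;
			scan += nr_pages;
			nr_taken += nr_pages;	/* assume isolation succeeds */
			i++;
		}
		return nr_taken;
	}

As the added comment notes, charging tail pages to scan cannot trigger a premature OOM, because __isolate_lru_page() returns -EBUSY only for pages already being freed elsewhere.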