diff of mm/vmscan.c
--- mm/vmscan.c (commit 4f98a2fee8acdb4ac84545df98cccecfd130f8db)
+++ mm/vmscan.c (commit 556adecba110bf5f1db6c6b56416cfab5bcab698)
 /*
  * linux/mm/vmscan.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  *
  * Swap reorganised 29.12.95, Stephen Tweedie.
  * kswapd added: 7.1.96  sct
  * Removed kswapd_ctl limits, and swap out as many pages as needed
--- 1076 unchanged lines hidden ---
         }
 
         if (file)
                 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
         else
                 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
         spin_unlock_irq(&zone->lru_lock);
 
+        pgmoved = 0;
         while (!list_empty(&l_hold)) {
                 cond_resched();
                 page = lru_to_page(&l_hold);
                 list_del(&page->lru);
                 list_add(&page->lru, &l_inactive);
         }
 
         /*
+         * Count the referenced pages as rotated, even when they are moved
+         * to the inactive list. This helps balance scan pressure between
+         * file and anonymous pages in get_scan_ratio.
+         */
+        zone->recent_rotated[!!file] += pgmoved;
+
+        /*
          * Now put the pages back on the appropriate [file or anon] inactive
          * and active lists.
          */
         pagevec_init(&pvec, 1);
         pgmoved = 0;
         lru = LRU_BASE + file * LRU_FILE;
         spin_lock_irq(&zone->lru_lock);
         while (!list_empty(&l_inactive)) {
--- 44 unchanged lines hidden ---
                         spin_unlock_irq(&zone->lru_lock);
                         if (vm_swap_full())
                                 pagevec_swap_free(&pvec);
                         __pagevec_release(&pvec);
                         spin_lock_irq(&zone->lru_lock);
                 }
         }
         __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-        zone->recent_rotated[!!file] += pgmoved;
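Moving the accounting up means pages that were referenced but still demoted now count as rotated, which is what the new comment says: the rotation statistics feed get_scan_ratio(), where the rotated/scanned ratio of each list decides how scan pressure is split between anon and file pages. Below is a minimal standalone sketch of that arithmetic; struct lru_stats and scan_ratio_sketch() are hypothetical stand-ins for the zone counters and the real get_scan_ratio(), which additionally weights the ratios by swappiness.

/*
 * Illustrative only: a list whose scanned pages keep getting rotated
 * (referenced) looks "hot", so it receives a smaller share of the scan.
 */
struct lru_stats {
        unsigned long recent_scanned[2];        /* [0] = anon, [1] = file */
        unsigned long recent_rotated[2];
};

static void scan_ratio_sketch(const struct lru_stats *s,
                              unsigned long percent[2])
{
        /* scanned/rotated ratio per list; +1 avoids division by zero */
        unsigned long ap = (s->recent_scanned[0] + 1) * 100 /
                           (s->recent_rotated[0] + 1);
        unsigned long fp = (s->recent_scanned[1] + 1) * 100 /
                           (s->recent_rotated[1] + 1);

        percent[0] = ap * 100 / (ap + fp + 1);  /* anon scan share */
        percent[1] = 100 - percent[0];          /* file scan share */
}

A list whose pages keep coming back referenced accumulates a large recent_rotated count and therefore ends up with a small scan share.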
 
         __count_zone_vm_events(PGREFILL, zone, pgscanned);
         __count_vm_events(PGDEACTIVATE, pgdeactivate);
         spin_unlock_irq(&zone->lru_lock);
         if (vm_swap_full())
                 pagevec_swap_free(&pvec);
 
         pagevec_release(&pvec);
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
         struct zone *zone, struct scan_control *sc, int priority)
 {
         int file = is_file_lru(lru);
 
-        if (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE) {
+        if (lru == LRU_ACTIVE_FILE) {
                 shrink_active_list(nr_to_scan, zone, sc, priority, file);
                 return 0;
         }
+
+        if (lru == LRU_ACTIVE_ANON &&
+            (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
+                shrink_active_list(nr_to_scan, zone, sc, priority, file);
+                return 0;
+        }
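The new branch stops treating active anon like active file: active anon pages are deactivated only when the inactive anon list has shrunk too far relative to the active one, or when a memory controller rather than the global LRU is being scanned. The shape of that size test as a standalone sketch; struct zone_sketch and inactive_anon_is_low_sketch() are hypothetical stand-ins for the zone counters, and inactive_ratio is assumed to be precomputed from the zone size.

/*
 * Sketch of the low-inactive test that gates active-anon shrinking.
 * inactive_ratio is assumed precomputed elsewhere; in the kernel's
 * design it grows roughly with the square root of the zone size.
 */
struct zone_sketch {
        unsigned long nr_active_anon;
        unsigned long nr_inactive_anon;
        unsigned int inactive_ratio;
};

static int inactive_anon_is_low_sketch(const struct zone_sketch *z)
{
        /* "low" when active outweighs inactive by more than the ratio */
        return z->nr_inactive_anon * z->inactive_ratio < z->nr_active_anon;
}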
         return shrink_inactive_list(nr_to_scan, zone, sc, file);
 }
 
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
  * by looking at the fraction of the pages scanned we did rotate back
  * onto the active list instead of evict.
--- 116 unchanged lines hidden ---
                          * but because memory controller hits its limit.
                          * Don't modify zone reclaim related data.
                          */
                         nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
                                                         priority, l);
                 }
         }
 
-        while (nr[LRU_ACTIVE_ANON] || nr[LRU_INACTIVE_ANON] ||
-                nr[LRU_ACTIVE_FILE] || nr[LRU_INACTIVE_FILE]) {
+        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+                                        nr[LRU_INACTIVE_FILE]) {
                 for_each_lru(l) {
                         if (nr[l]) {
                                 nr_to_scan = min(nr[l],
                                         (unsigned long)sc->swap_cluster_max);
                                 nr[l] -= nr_to_scan;
 
                                 nr_reclaimed += shrink_list(l, nr_to_scan,
                                                         zone, sc, priority);
                         }
                 }
         }
 
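The loop above drains the per-list scan targets nr[l] in swap_cluster_max-sized chunks, cycling through the lists so that one large list cannot monopolize reclaim; note the changed termination condition no longer waits on nr[LRU_ACTIVE_ANON], since active anon is rebalanced separately below. A condensed sketch of that chunked round-robin pattern follows; drain_sketch(), the SK_* names, and the shrink callback are hypothetical, and unlike the kernel loop this version simply drains every list it is given.

enum { SK_INACTIVE_ANON, SK_ACTIVE_ANON, SK_INACTIVE_FILE,
       SK_ACTIVE_FILE, SK_NR_LISTS };

/* Drain scan targets chunk-by-chunk, round-robin across the lists. */
static unsigned long drain_sketch(unsigned long nr[SK_NR_LISTS],
                                  unsigned long chunk,
                                  unsigned long (*shrink)(int lru,
                                                          unsigned long n))
{
        unsigned long reclaimed = 0;
        int l, more = 1;

        while (more) {
                more = 0;
                for (l = 0; l < SK_NR_LISTS; l++) {
                        unsigned long step;

                        if (!nr[l])
                                continue;
                        step = nr[l] < chunk ? nr[l] : chunk;
                        nr[l] -= step;
                        reclaimed += shrink(l, step);
                        if (nr[l])
                                more = 1;
                }
        }
        return reclaimed;
}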
+        /*
+         * Even if we did not try to evict anon pages at all, we want to
+         * rebalance the anon lru active/inactive ratio.
+         */
+        if (!scan_global_lru(sc) || inactive_anon_is_low(zone))
+                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
+        else if (!scan_global_lru(sc))
+                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
+
         throttle_vm_writeout(sc->gfp_mask);
         return nr_reclaimed;
 }
 
 /*
  * This is the direct reclaim path, for page-allocating processes.  We only
  * try to reclaim pages from zones which will satisfy the caller's allocation
  * request.
--- 277 unchanged lines hidden ---
 
                 if (!populated_zone(zone))
                         continue;
 
                 if (zone_is_all_unreclaimable(zone) &&
                     priority != DEF_PRIORITY)
                         continue;
 
+                /*
+                 * Do some background aging of the anon list, to give
+                 * pages a chance to be referenced before reclaiming.
+                 */
+                if (inactive_anon_is_low(zone))
+                        shrink_active_list(SWAP_CLUSTER_MAX, zone,
+                                           &sc, priority, 0);
+
                 if (!zone_watermark_ok(zone, order, zone->pages_high,
                                        0, 0)) {
                         end_zone = i;
                         break;
                 }
         }
         if (i < 0)
                 goto out;
--- 565 unchanged lines hidden ---