vmscan.c: f0958906cd2bf3730cd7938b8af80a1c23e8ac06 (old) → fd538803731e50367b7c59ce4ad3454426a3d671 (new)
```diff
 /*
  *  linux/mm/vmscan.c
  *
  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  *
  *  Swap reorganised 29.12.95, Stephen Tweedie.
  *  kswapd added: 7.1.96  sct
  *  Removed kswapd_ctl limits, and swap out as many pages as needed
```
--- 220 unchanged lines hidden ---
```diff
 }
 
 bool pgdat_reclaimable(struct pglist_data *pgdat)
 {
 	return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
 		pgdat_reclaimable_pages(pgdat) * 6;
 }
 
```
```diff
-unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
+/**
+ * lruvec_lru_size - Returns the number of pages on the given LRU list.
+ * @lruvec: lru vector
+ * @lru: lru to use
+ * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
+ */
+unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
 {
+	unsigned long lru_size;
+	int zid;
+
 	if (!mem_cgroup_disabled())
-		return mem_cgroup_get_lru_size(lruvec, lru);
+		lru_size = mem_cgroup_get_lru_size(lruvec, lru);
+	else
+		lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
 
-	return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
-}
+	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
+		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
+		unsigned long size;
 
-unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru,
-				   int zone_idx)
-{
-	if (!mem_cgroup_disabled())
-		return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx);
+		if (!managed_zone(zone))
+			continue;
 
-	return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx],
-			       NR_ZONE_LRU_BASE + lru);
+		if (!mem_cgroup_disabled())
+			size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
+		else
+			size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
+				       NR_ZONE_LRU_BASE + lru);
+		lru_size -= min(size, lru_size);
+	}
+
+	return lru_size;
+
 }
```
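After this hunk, the zone filtering that callers used to do by hand lives in lruvec_lru_size() itself: the helper starts from the node-wide (or memcg) LRU count and subtracts every zone above @zone_idx, so passing MAX_NR_ZONES yields the whole list and passing a reclaim index yields only the pages in eligible zones. A minimal userspace sketch of that subtraction logic, with made-up per-zone counts standing in for the kernel's counters (zone_lru_pages and lru_size_upto are illustrative names, not kernel API):

```c
#include <stdio.h>

#define MAX_NR_ZONES 4	/* e.g. ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE */

/* made-up per-zone page counts for one LRU list */
static unsigned long zone_lru_pages[MAX_NR_ZONES] = { 100, 2000, 30000, 5000 };

static unsigned long lru_size_upto(int zone_idx)
{
	unsigned long lru_size = 0;
	int zid;

	/* start from the node-wide total, as the kernel does */
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		lru_size += zone_lru_pages[zid];

	/*
	 * Subtract every zone above the cutoff. The per-zone and per-node
	 * counters are maintained separately in the kernel, so the clamp
	 * (lru_size -= min(size, lru_size)) guards against transient
	 * underflow; a plain conditional models it here.
	 */
	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
		unsigned long size = zone_lru_pages[zid];

		lru_size -= size < lru_size ? size : lru_size;
	}

	return lru_size;
}

int main(void)
{
	/* whole list, equivalent to passing MAX_NR_ZONES */
	printf("whole list:       %lu\n", lru_size_upto(MAX_NR_ZONES));
	/* only zones 0..2, as with sc->reclaim_idx == 2 */
	printf("eligible (idx 2): %lu\n", lru_size_upto(2));
	return 0;
}
```

Computing the total first and subtracting the ineligible zones, rather than summing the eligible ones, keeps the memcg case (which only tracks a node-wide size plus per-zone sizes) on the same code path.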
```diff
 
 /*
  * Add a shrinker callback to be called from the vm.
  */
 int register_shrinker(struct shrinker *shrinker)
 {
 	size_t size = sizeof(*shrinker->nr_deferred);
```
--- 1783 unchanged lines hidden ---
```diff
  *   100GB     31           3GB
  *     1TB    101          10GB
  *    10TB    320          32GB
  */
 static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 						struct scan_control *sc, bool trace)
 {
 	unsigned long inactive_ratio;
-	unsigned long total_inactive, inactive;
-	unsigned long total_active, active;
+	unsigned long inactive, active;
+	enum lru_list inactive_lru = file * LRU_FILE;
+	enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
 	unsigned long gb;
-	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
-	int zid;
 
 	/*
 	 * If we don't have swap space, anonymous page deactivation
 	 * is pointless.
 	 */
 	if (!file && !total_swap_pages)
 		return false;
 
-	total_inactive = inactive = lruvec_lru_size(lruvec, file * LRU_FILE);
-	total_active = active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
-
-	/*
-	 * For zone-constrained allocations, it is necessary to check if
-	 * deactivations are required for lowmem to be reclaimed. This
-	 * calculates the inactive/active pages available in eligible zones.
-	 */
-	for (zid = sc->reclaim_idx + 1; zid < MAX_NR_ZONES; zid++) {
-		struct zone *zone = &pgdat->node_zones[zid];
-		unsigned long inactive_zone, active_zone;
-
-		if (!managed_zone(zone))
-			continue;
-
-		inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid);
-		active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid);
-
-		inactive -= min(inactive, inactive_zone);
-		active -= min(active, active_zone);
-	}
+	inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
+	active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
 	if (gb)
 		inactive_ratio = int_sqrt(10 * gb);
 	else
 		inactive_ratio = 1;
 
 	if (trace)
-		trace_mm_vmscan_inactive_list_is_low(pgdat->node_id,
+		trace_mm_vmscan_inactive_list_is_low(lruvec_pgdat(lruvec)->node_id,
 				sc->reclaim_idx,
-				total_inactive, inactive,
-				total_active, active, inactive_ratio, file);
+				lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
+				lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
+				inactive_ratio, file);
+
 	return inactive * inactive_ratio < active;
 }
```
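The ratio table in the comment above the function follows directly from inactive_ratio = int_sqrt(10 * gb) together with the final check: inactive * ratio < active is equivalent to inactive < total / (ratio + 1), so the target inactive share is total/(ratio + 1). A throwaway userspace program to reproduce the table rows visible in this hunk (int_sqrt() is modeled with a naive loop; the kernel's real implementation lives in lib/int_sqrt.c):

```c
#include <stdio.h>

/* naive stand-in for the kernel's int_sqrt() */
static unsigned long int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	/* total LRU sizes from the comment's table, in GB */
	unsigned long gbs[] = { 100, 1024, 10240 };

	for (int i = 0; i < 3; i++) {
		unsigned long gb = gbs[i];
		unsigned long ratio = gb ? int_sqrt(10 * gb) : 1;

		/* inactive is kept at roughly total / (ratio + 1) */
		printf("%6luGB: ratio %3lu, inactive target ~%luMB\n",
		       gb, ratio, gb * 1024 / (ratio + 1));
	}
	return 0;
}
```

This prints ratios 31, 101 and 320 with targets of roughly 3GB, 10GB and 32GB, matching the table: bigger machines keep a smaller fraction (but larger absolute amount) of their pages inactive.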
```diff
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 				 struct lruvec *lruvec, struct scan_control *sc)
 {
 	if (is_active_lru(lru)) {
 		if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
```
--- 123 unchanged lines hidden ---
```diff
 	 * inactive list is greater than that of the active list *and* the
 	 * inactive list actually has some pages to scan on this priority, we
 	 * do not reclaim anything from the anonymous working set right now.
 	 * Without the second condition we could end up never scanning an
 	 * lruvec even if it has plenty of old anonymous pages unless the
 	 * system is under heavy pressure.
 	 */
 	if (!inactive_list_is_low(lruvec, true, sc, false) &&
-	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
+	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES) >> sc->priority) {
 		scan_balance = SCAN_FILE;
 		goto out;
 	}
 
 	scan_balance = SCAN_FRACT;
 
 	/*
 	 * With swappiness at 100, anonymous and file have the same priority.
```
--- 9 unchanged lines hidden ---
```diff
 	 *
 	 * Because workloads change over time (and to avoid overflow)
 	 * we keep these statistics as a floating average, which ends
 	 * up weighing recent references more than old ones.
 	 *
 	 * anon in [0], file in [1]
 	 */
 
-	anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) +
-		lruvec_lru_size(lruvec, LRU_INACTIVE_ANON);
-	file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
-		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
+	anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
+		lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
+	file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
+		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
 
 	spin_lock_irq(&pgdat->lru_lock);
 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
 		reclaim_stat->recent_scanned[0] /= 2;
 		reclaim_stat->recent_rotated[0] /= 2;
 	}
 
 	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
```
--- 21 unchanged lines hidden ---
```diff
 	/* Only use force_scan on second pass. */
 	for (pass = 0; !some_scanned && pass < 2; pass++) {
 		*lru_pages = 0;
 		for_each_evictable_lru(lru) {
 			int file = is_file_lru(lru);
 			unsigned long size;
 			unsigned long scan;
 
-			size = lruvec_lru_size(lruvec, lru);
+			size = lruvec_lru_size(lruvec, lru, MAX_NR_ZONES);
 			scan = size >> sc->priority;
 
 			if (!scan && pass && force_scan)
 				scan = min(size, SWAP_CLUSTER_MAX);
 
 			switch (scan_balance) {
 			case SCAN_EQUAL:
 				/* Scan lists relative to size */
```
--- 1600 unchanged lines hidden ---
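For the scan goal computed at the end of the last hunk: each list contributes size >> sc->priority pages per pass, and on the second pass force_scan can floor that at SWAP_CLUSTER_MAX so a small list still makes progress. A quick model with an assumed 1000-page list (DEF_PRIORITY is 12 and SWAP_CLUSTER_MAX is 32UL in this kernel; the list size is made up):

```c
#include <stdio.h>

#define DEF_PRIORITY	 12	/* reclaim starts at this priority */
#define SWAP_CLUSTER_MAX 32UL

int main(void)
{
	unsigned long size = 1000;	/* assumed LRU list size in pages */

	for (int priority = DEF_PRIORITY; priority >= 0; priority--) {
		unsigned long scan = size >> priority;

		/* models the force_scan fallback on the second pass */
		if (!scan)
			scan = size < SWAP_CLUSTER_MAX ? size : SWAP_CLUSTER_MAX;

		printf("priority %2d: scan %4lu of %lu pages\n",
		       priority, scan, size);
	}
	return 0;
}
```

At priorities 10 through 12 the shift yields zero for this list, so the SWAP_CLUSTER_MAX floor is what keeps the list from being skipped entirely; only as priority drops does the goal grow toward the full list size.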