--- mm/vmscan.c (a09ed5e00084448453c8bada4dcd31e5fbfc2f21)
+++ mm/vmscan.c (1495f230fa7750479c79e3656286b9183d662077)

 /*
  *  linux/mm/vmscan.c
  *
  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  *
  *  Swap reorganised 29.12.95, Stephen Tweedie.
  *  kswapd added: 7.1.96  sct
  *  Removed kswapd_ctl limits, and swap out as many pages as needed

--- 188 unchanged lines hidden ---

 void unregister_shrinker(struct shrinker *shrinker)
 {
 	down_write(&shrinker_rwsem);
 	list_del(&shrinker->list);
 	up_write(&shrinker_rwsem);
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+static inline int do_shrinker_shrink(struct shrinker *shrinker,
+				     struct shrink_control *sc,
+				     unsigned long nr_to_scan)
+{
+	sc->nr_to_scan = nr_to_scan;
+	return (*shrinker->shrink)(shrinker, sc);
+}
+
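The new helper above drives every ->shrink callback through a struct
shrink_control instead of separate nr_to_scan/gfp_mask arguments. A minimal
sketch of a shrinker ported to the new signature follows; the my_cache_*
names and lock are hypothetical, but the protocol is the one visible in this
file: a call with sc->nr_to_scan == 0 only queries the freeable-object count,
a nonzero value asks the shrinker to scan that many objects, and -1 signals
"cannot shrink right now".

static int my_cache_shrink(struct shrinker *shrinker,
			   struct shrink_control *sc)
{
	if (sc->nr_to_scan) {
		/* Shrinkers run under memory pressure: back off rather
		 * than block if the cache lock is contended. */
		if (!mutex_trylock(&my_cache_lock))
			return -1;
		my_cache_prune(sc->nr_to_scan, sc->gfp_mask);
		mutex_unlock(&my_cache_lock);
	}
	return my_cache_object_count();	/* current freeable count */
}

static struct shrinker my_cache_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,	/* from include/linux/mm.h */
};

Such a shrinker would be registered once with
register_shrinker(&my_cache_shrinker) and torn down with
unregister_shrinker(), as in the context above.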
 #define SHRINK_BATCH 128
 /*
  * Call the shrink functions to age shrinkable caches
  *
  * Here we assume it costs one seek to replace a lru page and that it also
  * takes a seek to recreate a cache object.  With this in mind we age equal
  * percentages of the lru and ageable caches.  This should balance the seeks
  * generated by these structures.

--- 5 unchanged lines hidden ---

  *
  * `lru_pages' represents the number of on-LRU pages in all the zones which
  * are eligible for the caller's allocation attempt.  It is used for balancing
  * slab reclaim versus page reclaim.
  *
  * Returns the number of slab objects which we shrunk.
  */
 unsigned long shrink_slab(struct shrink_control *shrink,
+			  unsigned long nr_pages_scanned,
 			  unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
 	unsigned long ret = 0;
-	unsigned long scanned = shrink->nr_scanned;
-	gfp_t gfp_mask = shrink->gfp_mask;
 
-	if (scanned == 0)
-		scanned = SWAP_CLUSTER_MAX;
+	if (nr_pages_scanned == 0)
+		nr_pages_scanned = SWAP_CLUSTER_MAX;
 
 	if (!down_read_trylock(&shrinker_rwsem)) {
 		/* Assume we'll be able to shrink next time */
 		ret = 1;
 		goto out;
 	}
 
 	list_for_each_entry(shrinker, &shrinker_list, list) {
 		unsigned long long delta;
 		unsigned long total_scan;
 		unsigned long max_pass;
 
-		max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-		delta = (4 * scanned) / shrinker->seeks;
+		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+		delta = (4 * nr_pages_scanned) / shrinker->seeks;
 		delta *= max_pass;
 		do_div(delta, lru_pages + 1);
 		shrinker->nr += delta;
 		if (shrinker->nr < 0) {
 			printk(KERN_ERR "shrink_slab: %pF negative objects to "
 			       "delete nr=%ld\n",
 			       shrinker->shrink, shrinker->nr);
 			shrinker->nr = max_pass;

--- 10 unchanged lines hidden ---

 		total_scan = shrinker->nr;
 		shrinker->nr = 0;
 
 		while (total_scan >= SHRINK_BATCH) {
 			long this_scan = SHRINK_BATCH;
 			int shrink_ret;
 			int nr_before;
 
-			nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-			shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
-							 gfp_mask);
+			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
+			shrink_ret = do_shrinker_shrink(shrinker, shrink,
+							this_scan);
 			if (shrink_ret == -1)
 				break;
 			if (shrink_ret < nr_before)
 				ret += nr_before - shrink_ret;
 			count_vm_events(SLABS_SCANNED, this_scan);
 			total_scan -= this_scan;
 
 			cond_resched();

--- 1781 unchanged lines hidden ---
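The comment block above compresses the aging policy into one formula:
delta = (4 * nr_pages_scanned / shrinker->seeks) * max_pass / (lru_pages + 1).
A standalone arithmetic sketch (userspace C, values invented; DEFAULT_SEEKS
is 2 in this era's include/linux/mm.h) shows the proportions it produces:

#include <stdio.h>

int main(void)
{
	unsigned long nr_pages_scanned = 1000;	/* page reclaim scanned 1%... */
	unsigned long lru_pages = 100000;	/* ...of this many LRU pages */
	unsigned long max_pass = 50000;		/* shrinker's freeable objects */
	unsigned int seeks = 2;			/* DEFAULT_SEEKS */
	unsigned long long delta;

	delta = (4ULL * nr_pages_scanned) / seeks;	/* 2000 */
	delta *= max_pass;				/* 100,000,000 */
	delta /= lru_pages + 1;				/* ~999 objects */

	printf("scan %llu of %lu objects (~%.1f%%)\n",
	       delta, max_pass, 100.0 * delta / max_pass);
	return 0;
}

So scanning 1% of the LRU asks a DEFAULT_SEEKS shrinker to age roughly 2% of
its cache; raising shrinker->seeks makes a cache's objects proportionally
more expensive to give up.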
2075 shrink->nr_scanned = sc->nr_scanned; 2076 shrink_slab(shrink, lru_pages); | 2082 shrink_slab(shrink, sc->nr_scanned, lru_pages); |
2077 if (reclaim_state) { 2078 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 2079 reclaim_state->reclaimed_slab = 0; 2080 } 2081 } 2082 total_scanned += sc->nr_scanned; 2083 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 2084 goto out; --- 366 unchanged lines hidden (view full) --- 2451 (zone->present_pages + 2452 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 2453 KSWAPD_ZONE_BALANCE_GAP_RATIO); 2454 if (!zone_watermark_ok_safe(zone, order, 2455 high_wmark_pages(zone) + balance_gap, 2456 end_zone, 0)) 2457 shrink_zone(priority, zone, &sc); 2458 reclaim_state->reclaimed_slab = 0; | 2083 if (reclaim_state) { 2084 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 2085 reclaim_state->reclaimed_slab = 0; 2086 } 2087 } 2088 total_scanned += sc->nr_scanned; 2089 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 2090 goto out; --- 366 unchanged lines hidden (view full) --- 2457 (zone->present_pages + 2458 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 2459 KSWAPD_ZONE_BALANCE_GAP_RATIO); 2460 if (!zone_watermark_ok_safe(zone, order, 2461 high_wmark_pages(zone) + balance_gap, 2462 end_zone, 0)) 2463 shrink_zone(priority, zone, &sc); 2464 reclaim_state->reclaimed_slab = 0; |
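The caller changes above and below show the data-flow side of this commit:
the page-reclaim scan count no longer makes a round trip through the
shrink_control. Reconstructed from how the fields are used in this file (the
real definitions live in include/linux/mm.h, so treat this as a sketch), the
struct goes from carrying the caller's scan count to carrying the per-call
batch size:

/* Before (a09ed5e...): callers stash their scan count in the struct. */
struct shrink_control {
	gfp_t gfp_mask;
	unsigned long nr_scanned;	/* pages scanned by page reclaim */
};

/* After (1495f230...): the scan count is an explicit shrink_slab()
 * argument, and the struct instead holds the batch size that
 * do_shrinker_shrink() fills in before invoking ->shrink(). */
struct shrink_control {
	gfp_t gfp_mask;
	unsigned long nr_to_scan;	/* objects the shrinker should scan */
};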
 				(zone->present_pages +
 					KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
 				KSWAPD_ZONE_BALANCE_GAP_RATIO);
 			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone) + balance_gap,
 					end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
-			shrink.nr_scanned = sc.nr_scanned;
-			nr_slab = shrink_slab(&shrink, lru_pages);
+			nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 
 			if (zone->all_unreclaimable)
 				continue;
 			if (nr_slab == 0 &&
 			    !zone_reclaimable(zone))
 				zone->all_unreclaimable = 1;

--- 551 unchanged lines hidden ---

 		priority = ZONE_RECLAIM_PRIORITY;
 		do {
 			shrink_zone(priority, zone, &sc);
 			priority--;
 		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
 	}
 
 	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
-	shrink.nr_scanned = sc.nr_scanned;
 	if (nr_slab_pages0 > zone->min_slab_pages) {
 		/*
 		 * shrink_slab() does not currently allow us to determine how
 		 * many pages were freed in this zone. So we take the current
 		 * number of slab pages and shake the slab until it is reduced
 		 * by the same nr_pages that we used for reclaiming unmapped
 		 * pages.
 		 *
 		 * Note that shrink_slab will free memory on all zones and may
 		 * take a long time.
 		 */
 		for (;;) {
 			unsigned long lru_pages = zone_reclaimable_pages(zone);
 
 			/* No reclaimable slab or very low memory pressure */
-			if (!shrink_slab(&shrink, lru_pages))
+			if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
 				break;
 
 			/* Freed enough memory */
 			nr_slab_pages1 = zone_page_state(zone,
 							NR_SLAB_RECLAIMABLE);
 			if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
 				break;
 		}

--- 314 unchanged lines hidden ---