page_alloc.c (old: 6bb154504f8b496780ec53ec81aba957a12981fa) → page_alloc.c (new: a921444382b49cc7fdeca3fba3e278bc09484a27)
   1 /*
   2  * linux/mm/page_alloc.c
   3  *
   4  * Manages the free list, the system allocates free pages here.
   5  * Note that kmalloc() lives in slab.c
   6  *
   7  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
   8  * Swap reorganised 29.12.95, Stephen Tweedie

--- 3362 unchanged lines hidden ---

3371                          */
3372                         local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3373                         if (zone_to_nid(zone) != local_nid) {
3374                                 alloc_flags &= ~ALLOC_NOFRAGMENT;
3375                                 goto retry;
3376                         }
3377                 }
3378
-3379                 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+3379                 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3380                 if (!zone_watermark_fast(zone, order, mark,
3381                                        ac_classzone_idx(ac), alloc_flags)) {
3382                         int ret;
3383
3384 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3385                         /*
3386                          * Watermark failed for this zone, but see if we can
3387                          * grow this zone if it contains deferred pages.

--- 1400 unchanged lines hidden ---
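The change at line 3379 above is the core of this diff: the allocator fast path no longer reads zone->watermark[] directly but calls the wmark_pages() accessor, so every watermark lookup funnels through one definition. As a rough sketch of the accessor layer these call sites assume (the real definitions belong in include/linux/mmzone.h, outside this file, and are not shown in this diff), the wrappers presumably reduce to plain reads of the renamed _watermark[] array:

    /*
     * Sketch only: an accessor layer consistent with the call sites in this
     * diff. The enum matches the kernel's zone_watermarks values; the struct
     * is a stand-in, not the real struct zone.
     */
    enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

    struct zone_sketch {
            unsigned long _watermark[NR_WMARK];     /* renamed from watermark[] */
    };

    #define min_wmark_pages(z)      ((z)->_watermark[WMARK_MIN])
    #define low_wmark_pages(z)      ((z)->_watermark[WMARK_LOW])
    #define high_wmark_pages(z)     ((z)->_watermark[WMARK_HIGH])
    #define wmark_pages(z, i)       ((z)->_watermark[i])

Routing every reader through accessors like these means a later change to how a watermark value is derived only has to touch one spot rather than every call site.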

4788         unsigned long reclaimable;
4789         struct zone *zone;
4790         int lru;
4791
4792         for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
4793                 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
4794
4795         for_each_zone(zone)
-4796                 wmark_low += zone->watermark[WMARK_LOW];
+4796                 wmark_low += low_wmark_pages(zone);
4797
4798         /*
4799          * Estimate the amount of memory available for userspace allocations,
4800          * without causing swapping.
4801          */
4802         available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
4803
4804         /*

--- 2621 unchanged lines hidden ---
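The hunk above applies the same substitution to the available-memory estimate: at line 4796 the per-zone loop now sums low_wmark_pages(zone) instead of reading zone->watermark[WMARK_LOW] by hand. The toy userspace model below mirrors only the visible arithmetic, with an invented two-zone layout and made-up page counts (the function clearly continues past the lines shown, and that part is not modelled here):

    #include <stdio.h>

    enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

    /* Stand-in for struct zone: only the renamed watermark array matters here. */
    struct zone {
            unsigned long _watermark[NR_WMARK];
    };

    #define low_wmark_pages(z)      ((z)->_watermark[WMARK_LOW])

    int main(void)
    {
            /* Hypothetical machine: two zones, all values in pages. */
            struct zone zones[2] = {
                    { ._watermark = {  1024,  1280,  1536 } },  /* e.g. a small DMA32 zone */
                    { ._watermark = {  8192, 10240, 12288 } },  /* e.g. the Normal zone */
            };
            unsigned long free_pages = 100000;          /* global free-page counter */
            unsigned long totalreserve_pages = 12000;
            unsigned long wmark_low = 0;
            unsigned long available;
            int i;

            for (i = 0; i < 2; i++)
                    wmark_low += low_wmark_pages(&zones[i]);

            /* First cut of the estimate, as in line 4802 above: free pages
             * minus the reserves userspace is not allowed to dip into. */
            available = free_pages - totalreserve_pages;

            printf("wmark_low = %lu pages, available (first cut) = %lu pages\n",
                   wmark_low, available);
            return 0;
    }

With these figures the loop yields wmark_low = 1280 + 10240 = 11520 pages and the first cut of available is 100000 - 12000 = 88000 pages.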

7426                          * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
7427                          * deltas control asynch page reclaim, and so should
7428                          * not be capped for highmem.
7429                          */
7430                         unsigned long min_pages;
7431
7432                         min_pages = zone_managed_pages(zone) / 1024;
7433                         min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
-7434                         zone->watermark[WMARK_MIN] = min_pages;
+7434                         zone->_watermark[WMARK_MIN] = min_pages;
7435                 } else {
7436                         /*
7437                          * If it's a lowmem zone, reserve a number of pages
7438                          * proportionate to the zone's size.
7439                          */
-7440                         zone->watermark[WMARK_MIN] = tmp;
+7440                         zone->_watermark[WMARK_MIN] = tmp;
7441                 }
7442
7443                 /*
7444                  * Set the kswapd watermarks distance according to the
7445                  * scale factor in proportion to available memory, but
7446                  * ensure a minimum size on small systems.
7447                  */
7448                 tmp = max_t(u64, tmp >> 2,
7449                             mult_frac(zone_managed_pages(zone),
7450                                       watermark_scale_factor, 10000));
7451
-7452                 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
-7453                 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
+7452                 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
+7453                 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7454
7455                 spin_unlock_irqrestore(&zone->lock, flags);
7456         }
7457
7458         /* update totalreserve_pages */
7459         calculate_totalreserve_pages();
7460 }
7461

--- 858 unchanged lines hidden ---
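To make the watermark arithmetic of the last hunk concrete, here is a small standalone program that follows the visible formulas with invented inputs; the zone size, the starting value of tmp (which the function computes earlier, in lines hidden here), and the helper names are all hypothetical. Per lines 7430-7440, a lowmem zone stores tmp as WMARK_MIN while a highmem zone uses zone_managed_pages()/1024 clamped to [SWAP_CLUSTER_MAX, 128]; per lines 7448-7453, the low and high marks then sit one and two kswapd distances above WMARK_MIN, where the distance is the larger of tmp/4 and managed_pages * watermark_scale_factor / 10000.

    #include <stdio.h>

    #define SWAP_CLUSTER_MAX 32UL   /* kernel value */

    static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* Same result as the kernel's mult_frac() for these small inputs. */
    static unsigned long mult_frac_ul(unsigned long x, unsigned long num, unsigned long den)
    {
            return (x / den) * num + (x % den) * num / den;
    }

    int main(void)
    {
            unsigned long managed = 1048576;            /* hypothetical 4 GiB zone, 4 KiB pages */
            unsigned long watermark_scale_factor = 10;  /* default sysctl value */
            unsigned long tmp = 8192;                   /* assumed lowmem reserve, in pages */
            unsigned long wmark_min, wmark_low, wmark_high, scaled;

            /* Lowmem branch (line 7440): WMARK_MIN is the reserve itself. */
            wmark_min = tmp;

            /* Highmem branch (lines 7432-7434) for comparison:
             * 1048576 / 1024 = 1024, clamped to [32, 128] -> 128 pages. */
            printf("highmem WMARK_MIN would be %lu pages\n",
                   clamp_ul(managed / 1024, SWAP_CLUSTER_MAX, 128UL));

            /* kswapd distance (lines 7448-7450): max(tmp / 4, managed * 10 / 10000). */
            scaled = mult_frac_ul(managed, watermark_scale_factor, 10000);  /* 1048 */
            tmp >>= 2;                                                      /* 2048 */
            if (scaled > tmp)
                    tmp = scaled;

            /* Lines 7452-7453. */
            wmark_low  = wmark_min + tmp;           /* 8192 + 2048 = 10240 */
            wmark_high = wmark_min + tmp * 2;       /* 8192 + 4096 = 12288 */

            printf("min=%lu low=%lu high=%lu pages\n", wmark_min, wmark_low, wmark_high);
            return 0;
    }

With these inputs the zone ends up with min/low/high watermarks of 8192/10240/12288 pages, i.e. 32/40/48 MiB with 4 KiB pages.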