Lines matching refs:zone in mm/page_alloc.c

79  * shuffle the whole zone).
88 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
292 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
305 static bool cond_accept_memory(struct zone *zone, unsigned int order);
331 _deferred_grow_zone(struct zone *zone, unsigned int order)
333 return deferred_grow_zone(zone, order);
441 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
449 seq = zone_span_seqbegin(zone);
450 start_pfn = zone->zone_start_pfn;
451 sp = zone->spanned_pages;
452 ret = !zone_spans_pfn(zone, pfn);
453 } while (zone_span_seqretry(zone, seq));
456 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
457 pfn, zone_to_nid(zone), zone->name,
464 * Temporary debugging check for pages not lying within a given zone.
466 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
468 if (page_outside_zone_boundaries(zone, page))
470 if (zone != page_zone(page))
476 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
615 static inline struct capture_control *task_capc(struct zone *zone)
622 capc->cc->zone == zone ? capc : NULL;
651 static inline struct capture_control *task_capc(struct zone *zone)
665 static inline void add_to_free_list(struct page *page, struct zone *zone,
668 struct free_area *area = &zone->free_area[order];
675 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
678 struct free_area *area = &zone->free_area[order];
689 static inline void move_to_free_list(struct page *page, struct zone *zone,
692 struct free_area *area = &zone->free_area[order];
697 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
707 zone->free_area[order].nr_free--;
768 struct zone *zone, unsigned int order,
771 struct capture_control *capc = task_capc(zone);
777 VM_BUG_ON(!zone_is_initialized(zone));
782 __mod_zone_freepage_state(zone, 1 << order, migratetype);
785 VM_BUG_ON_PAGE(bad_range(zone, page), page);
789 __mod_zone_freepage_state(zone, -(1 << order),
818 clear_page_guard(zone, buddy, order, migratetype);
820 del_page_from_free_list(buddy, zone, order);
838 add_to_free_list_tail(page, zone, order, migratetype);
840 add_to_free_list(page, zone, order, migratetype);
863 struct zone *zone = page_zone(free_page);
874 spin_lock_irqsave(&zone->lock, flags);
883 __mod_zone_freepage_state(zone, -(1UL << order), mt);
885 del_page_from_free_list(free_page, zone, order);
893 __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
902 spin_unlock_irqrestore(&zone->lock, flags);
1199 * Assumes all pages on list are in same zone.
1202 static void free_pcppages_bulk(struct zone *zone, int count,
1220 spin_lock_irqsave(&zone->lock, flags);
1221 isolated_pageblocks = has_isolate_pageblock(zone);
1253 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1258 spin_unlock_irqrestore(&zone->lock, flags);
1261 static void free_one_page(struct zone *zone,
1268 spin_lock_irqsave(&zone->lock, flags);
1269 if (unlikely(has_isolate_pageblock(zone) ||
1273 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1274 spin_unlock_irqrestore(&zone->lock, flags);
1283 struct zone *zone = page_zone(page);
1295 spin_lock_irqsave(&zone->lock, flags);
1296 if (unlikely(has_isolate_pageblock(zone) ||
1300 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1301 spin_unlock_irqrestore(&zone->lock, flags);
1344 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1351 * belong to a single zone. We assume that a border between node0 and node1
1367 unsigned long end_pfn, struct zone *zone)
1382 if (page_zone(start_page) != zone)
1408 static inline void expand(struct zone *zone, struct page *page,
1416 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1424 if (set_page_guard(zone, &page[size], high, migratetype))
1427 add_to_free_list(&page[size], zone, high, migratetype);
1584 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1593 area = &(zone->free_area[current_order]);
1597 del_page_from_free_list(page, zone, current_order);
1598 expand(zone, page, order, current_order, migratetype);
1623 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1626 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1629 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1638 static int move_freepages(struct zone *zone,
1663 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1664 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1667 move_to_free_list(page, zone, order, migratetype);
1675 int move_freepages_block(struct zone *zone, struct page *page,
1687 /* Do not cross zone boundaries */
1688 if (!zone_spans_pfn(zone, start_pfn))
1690 if (!zone_spans_pfn(zone, end_pfn))
1693 return move_freepages(zone, start_pfn, end_pfn, migratetype,
1741 static inline bool boost_watermark(struct zone *zone)
1753 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
1756 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
1772 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
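
The boost_watermark() fragments above cap the temporary watermark boost at a multiple of the zone's high watermark and grow it by one pageblock per fallback event. A small userspace sketch of that clamping arithmetic follows; mult_frac() is re-implemented here, and the 150% boost factor, pageblock size and watermark value are illustrative assumptions, not values taken from this listing.

/* Standalone model of the watermark-boost clamp in boost_watermark() above.
 * mult_frac() is re-implemented; the 150% factor, pageblock size and
 * watermark are illustrative assumptions, not values from this listing. */
#include <stdio.h>

static unsigned long mult_frac(unsigned long x, unsigned long num,
                               unsigned long den)
{
        /* same shape as the kernel macro: split to avoid overflowing x * num */
        unsigned long q = x / den;
        unsigned long r = x % den;

        return q * num + r * num / den;
}

int main(void)
{
        unsigned long wmark_high = 12288;        /* pages, hypothetical zone */
        unsigned long boost_factor = 15000;      /* assumed 150% default */
        unsigned long pageblock_nr_pages = 512;  /* 2MB blocks with 4K pages */
        unsigned long watermark_boost = 0;
        unsigned long max_boost = mult_frac(wmark_high, boost_factor, 10000);

        /* each fallback event adds one pageblock, clamped to max_boost */
        for (int event = 0; event < 50; event++) {
                watermark_boost += pageblock_nr_pages;
                if (watermark_boost > max_boost)
                        watermark_boost = max_boost;
        }
        printf("max_boost=%lu boost=%lu\n", max_boost, watermark_boost);
        return 0;
}
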
1786 static void steal_suitable_fallback(struct zone *zone, struct page *page,
1813 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
1814 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
1820 free_pages = move_freepages_block(zone, page, start_type,
1822 /* moving whole block can fail due to zone boundary conditions */
1858 move_to_free_list(page, zone, current_order, start_type);
1899 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
1905 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
1908 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
1909 if (zone->nr_reserved_highatomic >= max_managed)
1912 spin_lock_irqsave(&zone->lock, flags);
1915 if (zone->nr_reserved_highatomic >= max_managed)
1922 zone->nr_reserved_highatomic += pageblock_nr_pages;
1924 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
1928 spin_unlock_irqrestore(&zone->lock, flags);
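
reserve_highatomic_pageblock() above limits the highatomic reserve to roughly 1% of the zone's managed pages plus one pageblock. A rough model of how that cap bounds the number of reserved pageblocks; all figures are hypothetical.

/* Rough model of the cap in reserve_highatomic_pageblock() above:
 * roughly 1% of managed pages plus one pageblock of slack. */
#include <stdio.h>

int main(void)
{
        unsigned long managed_pages = 1048576;  /* 4GB zone at 4K pages */
        unsigned long pageblock_nr_pages = 512;
        unsigned long max_managed = managed_pages / 100 + pageblock_nr_pages;
        unsigned long reserved = 0;

        /* reserve one pageblock per call until the cap is reached */
        while (reserved < max_managed)
                reserved += pageblock_nr_pages;

        printf("cap=%lu reserved=%lu pageblocks=%lu\n",
               max_managed, reserved, reserved / pageblock_nr_pages);
        return 0;
}
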
1946 struct zone *zone;
1951 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
1957 if (!force && zone->nr_reserved_highatomic <=
1961 spin_lock_irqsave(&zone->lock, flags);
1963 struct free_area *area = &(zone->free_area[order]);
1984 zone->nr_reserved_highatomic -= min(
1986 zone->nr_reserved_highatomic);
1999 ret = move_freepages_block(zone, page, ac->migratetype,
2002 spin_unlock_irqrestore(&zone->lock, flags);
2006 spin_unlock_irqrestore(&zone->lock, flags);
2023 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2048 area = &(zone->free_area[current_order]);
2073 area = &(zone->free_area[current_order]);
2089 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2101 * Call me with the zone->lock already held.
2104 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2112 * allocating from CMA when over half of the zone's free memory
2116 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2117 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2118 page = __rmqueue_cma_fallback(zone, order);
2124 page = __rmqueue_smallest(zone, order, migratetype);
2127 page = __rmqueue_cma_fallback(zone, order);
2129 if (!page && __rmqueue_fallback(zone, order, migratetype,
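
The __rmqueue() fragments above balance movable allocations between regular and CMA areas by trying CMA first once free CMA pages exceed half of the zone's free memory. A minimal model of that predicate; the helper name below is hypothetical.

/* Minimal model of the CMA-first heuristic in __rmqueue() above. */
#include <stdbool.h>
#include <stdio.h>

static bool use_cma_first(unsigned long nr_free_cma, unsigned long nr_free)
{
        /* go to CMA once it holds more than half of the zone's free memory */
        return nr_free_cma > nr_free / 2;
}

int main(void)
{
        printf("%d\n", use_cma_first(3000, 5000)); /* 1: CMA dominates free memory */
        printf("%d\n", use_cma_first(1000, 5000)); /* 0: regular freelists first */
        return 0;
}
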
2141 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2148 spin_lock_irqsave(&zone->lock, flags);
2150 struct page *page = __rmqueue(zone, order, migratetype,
2167 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2171 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2172 spin_unlock_irqrestore(&zone->lock, flags);
2183 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2191 free_pcppages_bulk(zone, to_drain, pcp, 0);
2198 * Drain pcplists of the indicated processor and zone.
2200 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2202 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2212 free_pcppages_bulk(zone, to_drain, pcp, 0);
2224 struct zone *zone;
2226 for_each_populated_zone(zone) {
2227 drain_pages_zone(cpu, zone);
2234 void drain_local_pages(struct zone *zone)
2238 if (zone)
2239 drain_pages_zone(cpu, zone);
2254 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
2266 * a zone. Such callers are primarily CMA and memory hotplug and need
2270 if (!zone)
2283 struct zone *z;
2292 } else if (zone) {
2293 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2313 if (zone)
2314 drain_pages_zone(cpu, zone);
2325 * When zone parameter is non-NULL, spill just the single zone's pages.
2327 void drain_all_pages(struct zone *zone)
2329 __drain_all_pages(zone, false);
2374 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2382 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
2392 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
2413 high = nr_pcp_high(pcp, zone, free_high);
2415 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex);
2426 struct zone *zone;
2449 zone = page_zone(page);
2451 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2453 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
2456 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
2469 struct zone *locked_zone = NULL;
2494 struct zone *zone = page_zone(page);
2500 * Either different zone requiring a different pcp lock or
2504 if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
2517 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2520 free_one_page(zone, page, page_to_pfn(page),
2525 locked_zone = zone;
2536 free_unref_page_commit(zone, pcp, page, migratetype, 0);
2570 struct zone *zone = page_zone(page);
2581 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
2582 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2585 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2588 del_page_from_free_list(page, zone, order);
2622 struct zone *zone = page_zone(page);
2624 /* zone lock should be held when this function is called */
2625 lockdep_assert_held(&zone->lock);
2628 __free_one_page(page, page_to_pfn(page), zone, order, mt,
2635 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2659 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
2668 spin_lock_irqsave(&zone->lock, flags);
2670 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2672 page = __rmqueue(zone, order, migratetype, alloc_flags);
2681 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2684 spin_unlock_irqrestore(&zone->lock, flags);
2688 __mod_zone_freepage_state(zone, -(1 << order),
2690 spin_unlock_irqrestore(&zone->lock, flags);
2694 zone_statistics(preferred_zone, zone, 1);
2701 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
2723 alloced = rmqueue_bulk(zone, order,
2741 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2742 struct zone *zone, unsigned int order,
2752 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2765 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
2770 zone_statistics(preferred_zone, zone, 1);
2776 * Allocate a page from the given zone.
2788 struct page *rmqueue(struct zone *preferred_zone,
2789 struct zone *zone, unsigned int order,
2802 page = rmqueue_pcplist(preferred_zone, zone, order,
2808 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
2814 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
2815 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2816 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
2819 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
2829 static inline long __zone_watermark_unusable_free(struct zone *z,
2854 * one free page of a suitable size. Checking now avoids taking the zone lock
2857 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2935 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2942 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2987 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3002 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3004 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3008 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3015 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3016 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3017 * premature use of a lower zone may cause lowmem pressure problems that
3018 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3023 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3034 if (!zone)
3037 if (zone_idx(zone) != ZONE_NORMAL)
3042 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3046 if (nr_online_nodes > 1 && !populated_zone(--zone))
3074 struct zone *zone;
3081 * Scan zonelist, looking for a zone with enough free.
3086 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3093 !__cpuset_zone_allowed(zone, gfp_mask))
3115 if (last_pgdat != zone->zone_pgdat) {
3116 last_pgdat = zone->zone_pgdat;
3117 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3125 zone != ac->preferred_zoneref->zone) {
3133 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3134 if (zone_to_nid(zone) != local_nid) {
3140 cond_accept_memory(zone, order);
3142 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3143 if (!zone_watermark_fast(zone, order, mark,
3148 if (cond_accept_memory(zone, order))
3153 * Watermark failed for this zone, but see if we can
3154 * grow this zone if it contains deferred pages.
3157 if (_deferred_grow_zone(zone, order))
3167 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3170 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3180 if (zone_watermark_ok(zone, order, mark,
3189 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3199 reserve_highatomic_pageblock(page, zone);
3203 if (cond_accept_memory(zone, order))
3207 /* Try again if zone has deferred pages */
3209 if (_deferred_grow_zone(zone, order))
3413 * At least in one zone compaction wasn't deferred or skipped, so let's
3427 struct zone *zone = page_zone(page);
3429 zone->compact_blockskip_flush = false;
3430 compaction_defer_reset(zone, order, true);
3527 struct zone *zone;
3539 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3541 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3697 struct zone *zone;
3701 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
3703 if (!managed_zone(zone))
3705 if (last_pgdat != zone->zone_pgdat) {
3706 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
3707 last_pgdat = zone->zone_pgdat;
3818 struct zone *zone;
3842 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3846 unsigned long min_wmark = min_wmark_pages(zone);
3849 available = reclaimable = zone_reclaimable_pages(zone);
3850 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3856 wmark = __zone_watermark_ok(zone, order, min_wmark,
3959 if (!ac->preferred_zoneref->zone)
3964 * any suitable zone to satisfy the request - e.g. non-movable
3971 if (!z->zone)
4227 /* Dirty zone balancing only done in the fast path */
4231 * The preferred zone is used for statistics but crucially it is
4268 struct zone *zone;
4319 /* Find an allowed local zone that meets the low watermark. */
4321 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
4325 !__cpuset_zone_allowed(zone, gfp)) {
4329 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
4330 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
4334 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4335 if (zone_watermark_fast(zone, 0, mark,
4346 if (unlikely(!zone))
4351 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
4365 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
4388 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
4389 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
4447 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
4774 * @offset: The zone index of the highest zone
4777 * high watermark within all zones at or below a given zone index. For each
4778 * zone, the number of pages is calculated as:
4787 struct zone *zone;
4794 for_each_zone_zonelist(zone, z, zonelist, offset) {
4795 unsigned long size = zone_managed_pages(zone);
4796 unsigned long high = high_wmark_pages(zone);
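
The nr_free_zone_pages() fragments above count, for every zone at or below the given index, the pages beyond the high watermark; the accumulation itself does not contain "zone" and so is not shown in this listing. A userspace sketch of the calculation, using a stand-in zone array instead of the kernel's zonelist iterator.

/* Userspace sketch of the nr_free_zone_pages() sum documented above. */
#include <stdio.h>

struct fake_zone {
        unsigned long managed_pages;
        unsigned long high_wmark;
};

static unsigned long nr_free_zone_pages_model(const struct fake_zone *zones,
                                              int nr)
{
        unsigned long sum = 0;

        for (int i = 0; i < nr; i++) {
                unsigned long size = zones[i].managed_pages;
                unsigned long high = zones[i].high_wmark;

                /* count only the pages beyond the high watermark */
                if (size > high)
                        sum += size - high;
        }
        return sum;
}

int main(void)
{
        struct fake_zone zones[] = {
                { .managed_pages = 4096,   .high_wmark = 128  },
                { .managed_pages = 262144, .high_wmark = 4096 },
        };

        printf("%lu\n", nr_free_zone_pages_model(zones, 2));
        return 0;
}
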
4819 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4821 zoneref->zone = zone;
4822 zoneref->zone_idx = zone_idx(zone);
4826 * Builds allocation fallback zone lists.
4832 struct zone *zone;
4838 zone = pgdat->node_zones + zone_type;
4839 if (populated_zone(zone)) {
4840 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
4942 * This results in maximum locality--normal zone overflows into local
4943 * DMA zone, if any--but risks exhausting DMA zone.
4961 zonerefs->zone = NULL;
4976 zonerefs->zone = NULL;
4981 * Build zonelists ordered by zone and nodes within zones.
4982 * This results in conserving DMA zone[s] until all Normal memory is
4984 * may still exist in local DMA zone.
5024 * I.e., first node id of first zone in arg node's generic zonelist.
5035 return zone_to_nid(z->zone);
5076 zonerefs->zone = NULL;
5095 * Other parts of the kernel may not check if the zone is available.
5148 * i.e., the node of the first zone in the generic zonelist.
5173 * each zone will be allocated later when the per cpu
5211 * more accurate, but expensive to check per-zone. This check is
5225 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5229 static int zone_batchsize(struct zone *zone)
5236 * of the zone or 1MB, whichever is smaller. The batch
5238 * and zone lock contention.
5240 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
5278 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
5287 * By default, the high value of the pcp is based on the zone
5291 total_pages = low_wmark_pages(zone);
5296 * zone.
5298 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
5302 * Split the high value across all online CPUs local to the zone. Note
5309 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
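
The zone_batchsize()/zone_highsize() fragments above size the per-cpu pagesets: the batch is about 0.1% of the zone capped at 1MB worth of pages, and the high limit is a page total (by default the low watermark) split across the CPUs local to the zone. A back-of-the-envelope model with hypothetical inputs; the later rounding steps of zone_batchsize() and the percpu_pagelist_high_fraction path are left out.

/* Back-of-the-envelope model of the pcp sizing above. */
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define SZ_1M       (1024UL * 1024UL)

int main(void)
{
        unsigned long managed_pages = 2000000;  /* ~7.6GB zone, hypothetical */
        unsigned long low_wmark = 16384;        /* low watermark, in pages */
        unsigned long nr_local_cpus = 8;        /* CPUs on the zone's node */

        /* batch: ~0.1% of the zone, but no more than 1MB worth of pages */
        unsigned long batch = managed_pages >> 10;
        if (batch > SZ_1M / PAGE_SIZE)
                batch = SZ_1M / PAGE_SIZE;

        /* high: by default the low watermark, split across local CPUs */
        unsigned long high = low_wmark / nr_local_cpus;

        printf("batch=%lu high=%lu\n", batch, high);
        return 0;
}
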
5371 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
5378 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5385 * zone based on the zone's size.
5387 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
5391 new_batch = max(1, zone_batchsize(zone));
5392 new_high = zone_highsize(zone, new_batch, cpu_online);
5394 if (zone->pageset_high == new_high &&
5395 zone->pageset_batch == new_batch)
5398 zone->pageset_high = new_high;
5399 zone->pageset_batch = new_batch;
5401 __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
5404 void __meminit setup_zone_pageset(struct zone *zone)
5410 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
5412 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
5417 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5418 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
5422 zone_set_pageset_high_and_batch(zone, 0);
5426 * The zone indicated has a new number of managed_pages; batch sizes and percpu
5429 static void zone_pcp_update(struct zone *zone, int cpu_online)
5432 zone_set_pageset_high_and_batch(zone, cpu_online);
5443 struct zone *zone;
5446 for_each_populated_zone(zone)
5447 setup_zone_pageset(zone);
5468 __meminit void zone_pcp_init(struct zone *zone)
5475 zone->per_cpu_pageset = &boot_pageset;
5476 zone->per_cpu_zonestats = &boot_zonestats;
5477 zone->pageset_high = BOOT_PAGESET_HIGH;
5478 zone->pageset_batch = BOOT_PAGESET_BATCH;
5480 if (populated_zone(zone))
5481 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
5482 zone->present_pages, zone_batchsize(zone));
5534 struct zone *zone;
5557 for_each_populated_zone(zone)
5558 zone_pcp_update(zone, 0);
5565 struct zone *zone;
5567 for_each_populated_zone(zone)
5568 zone_pcp_update(zone, 1);
5598 struct zone *zone = pgdat->node_zones + i;
5600 unsigned long managed_pages = zone_managed_pages(zone);
5602 /* Find valid and maximum lowmem_reserve in the zone */
5604 if (zone->lowmem_reserve[j] > max)
5605 max = zone->lowmem_reserve[j];
5609 max += high_wmark_pages(zone);
5624 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
5626 * pages are left in the zone after a successful __alloc_pages().
5635 struct zone *zone = &pgdat->node_zones[i];
5637 bool clear = !ratio || !zone_managed_pages(zone);
5641 struct zone *upper_zone = &pgdat->node_zones[j];
5646 zone->lowmem_reserve[j] = 0;
5648 zone->lowmem_reserve[j] = managed_pages / ratio;
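
setup_per_zone_lowmem_reserve() above computes, for each zone, how many pages to hold back from allocations that could have been satisfied by a higher zone: the managed pages of the zones above it divided by that zone's reserve ratio. A worked model with two zones and illustrative default ratios.

/* Worked model of the lowmem_reserve calculation above with two zones
 * (DMA32, Normal) and illustrative default ratios. */
#include <stdio.h>

#define NR_ZONES 2

int main(void)
{
        unsigned long managed[NR_ZONES] = { 262144, 1835008 }; /* pages */
        unsigned long ratio[NR_ZONES]   = { 256, 32 };         /* reserve ratios */
        unsigned long reserve[NR_ZONES][NR_ZONES] = { { 0 } };

        for (int i = 0; i < NR_ZONES; i++) {
                unsigned long upper_managed = 0;

                /* accumulate the pages of every higher zone and divide by
                 * this zone's ratio to get the reserve against allocations
                 * that could have used that higher zone */
                for (int j = i + 1; j < NR_ZONES; j++) {
                        upper_managed += managed[j];
                        reserve[i][j] = ratio[i] ? upper_managed / ratio[i] : 0;
                }
        }
        printf("DMA32 holds back %lu pages from Normal-capable allocations\n",
               reserve[0][1]);
        return 0;
}
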
5661 struct zone *zone;
5665 for_each_zone(zone) {
5666 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
5667 lowmem_pages += zone_managed_pages(zone);
5670 for_each_zone(zone) {
5673 spin_lock_irqsave(&zone->lock, flags);
5674 tmp = (u64)pages_min * zone_managed_pages(zone);
5676 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
5688 min_pages = zone_managed_pages(zone) / 1024;
5690 zone->_watermark[WMARK_MIN] = min_pages;
5693 * If it's a lowmem zone, reserve a number of pages
5694 * proportionate to the zone's size.
5696 zone->_watermark[WMARK_MIN] = tmp;
5705 mult_frac(zone_managed_pages(zone),
5708 zone->watermark_boost = 0;
5709 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
5710 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
5711 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
5713 spin_unlock_irqrestore(&zone->lock, flags);
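
The __setup_per_zone_wmarks() fragments above derive each zone's watermarks: WMARK_MIN is the zone's proportional share of the global minimum, and LOW/HIGH/PROMO are spaced by the larger of min/4 and watermark_scale_factor ten-thousandths of the zone. A standalone model of that arithmetic for a lowmem zone, with hypothetical inputs and the default scale factor assumed.

/* Standalone model of the watermark arithmetic above for a lowmem zone. */
#include <stdio.h>

int main(void)
{
        unsigned long pages_min = 16384;      /* min_free_kbytes, in pages */
        unsigned long lowmem_pages = 4000000; /* sum of all non-highmem zones */
        unsigned long managed = 2000000;      /* this zone */
        unsigned long scale_factor = 10;      /* assumed default */

        /* min: the zone's proportional share of the global minimum */
        unsigned long wmark_min = (unsigned long)
                ((unsigned long long)pages_min * managed / lowmem_pages);

        /* spacing: the larger of min/4 and scale_factor/10000 of the zone */
        unsigned long step = managed * scale_factor / 10000;
        if (step < wmark_min / 4)
                step = wmark_min / 4;

        unsigned long wmark_low = wmark_min + step;
        unsigned long wmark_high = wmark_low + step;
        unsigned long wmark_promo = wmark_high + step;

        printf("min=%lu low=%lu high=%lu promo=%lu\n",
               wmark_min, wmark_low, wmark_high, wmark_promo);
        return 0;
}
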
5724 * Ensures that the watermark[min,low,high] values for each zone are set
5729 struct zone *zone;
5740 for_each_zone(zone)
5741 zone_pcp_update(zone, 0);
5842 struct zone *zone;
5847 for_each_zone(zone)
5848 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
5870 struct zone *zone;
5875 for_each_zone(zone)
5876 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
5902 * if in function of the boot time zone sizes.
5921 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
5922 * cpu. It is the fraction of total pages in each zone that a hot per cpu
5928 struct zone *zone;
5951 for_each_populated_zone(zone)
5952 zone_set_pageset_high_and_batch(zone, 0);
6049 /* [start, end) must belong to a single zone. */
6059 .nid = zone_to_nid(cc->zone),
6083 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6119 * belong to a single zone.
6139 .zone = page_zone(pfn_to_page(start)),
6173 drain_all_pages(cc.zone);
6203 * We don't have to hold zone->lock here because the pages are
6264 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
6287 static bool zone_spans_last_pfn(const struct zone *zone,
6292 return zone_spans_pfn(zone, last_pfn);
6321 struct zone *zone;
6325 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6327 spin_lock_irqsave(&zone->lock, flags);
6329 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
6330 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
6331 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
6333 * We release the zone lock here because
6334 * alloc_contig_range() will also lock the zone
6339 spin_unlock_irqrestore(&zone->lock, flags);
6344 spin_lock_irqsave(&zone->lock, flags);
6348 spin_unlock_irqrestore(&zone->lock, flags);
6369 * Effectively disable pcplists for the zone by setting the high limit to 0
6376 void zone_pcp_disable(struct zone *zone)
6379 __zone_set_pageset_high_and_batch(zone, 0, 1);
6380 __drain_all_pages(zone, true);
6383 void zone_pcp_enable(struct zone *zone)
6385 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
6389 void zone_pcp_reset(struct zone *zone)
6394 if (zone->per_cpu_pageset != &boot_pageset) {
6396 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6397 drain_zonestat(zone, pzstats);
6399 free_percpu(zone->per_cpu_pageset);
6400 zone->per_cpu_pageset = &boot_pageset;
6401 if (zone->per_cpu_zonestats != &boot_zonestats) {
6402 free_percpu(zone->per_cpu_zonestats);
6403 zone->per_cpu_zonestats = &boot_zonestats;
6410 * All pages in the range must be in a single zone, must not contain holes,
6417 struct zone *zone;
6422 zone = page_zone(pfn_to_page(pfn));
6423 spin_lock_irqsave(&zone->lock, flags);
6448 del_page_from_free_list(page, zone, order);
6451 spin_unlock_irqrestore(&zone->lock, flags);
6456 * This function returns a stable result only if called under zone lock.
6480 static void break_down_buddy_pages(struct zone *zone, struct page *page,
6500 if (set_page_guard(zone, current_buddy, high, migratetype))
6504 add_to_free_list(current_buddy, zone, high, migratetype);
6515 struct zone *zone = page_zone(page);
6521 spin_lock_irqsave(&zone->lock, flags);
6531 del_page_from_free_list(page_head, zone, page_order);
6532 break_down_buddy_pages(zone, page_head, page, 0,
6536 __mod_zone_freepage_state(zone, -1, migratetype);
6543 spin_unlock_irqrestore(&zone->lock, flags);
6552 struct zone *zone = page_zone(page);
6558 spin_lock_irqsave(&zone->lock, flags);
6561 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
6566 spin_unlock_irqrestore(&zone->lock, flags);
6578 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
6580 if (managed_zone(zone))
6623 static bool try_to_accept_memory_one(struct zone *zone)
6629 spin_lock_irqsave(&zone->lock, flags);
6630 page = list_first_entry_or_null(&zone->unaccepted_pages,
6633 spin_unlock_irqrestore(&zone->lock, flags);
6638 last = list_empty(&zone->unaccepted_pages);
6640 __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
6641 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
6642 spin_unlock_irqrestore(&zone->lock, flags);
6654 static bool cond_accept_memory(struct zone *zone, unsigned int order)
6662 if (list_empty(&zone->unaccepted_pages))
6666 to_accept = high_wmark_pages(zone) -
6667 (zone_page_state(zone, NR_FREE_PAGES) -
6668 __zone_watermark_unusable_free(zone, order, 0) -
6669 zone_page_state(zone, NR_UNACCEPTED));
6672 if (!try_to_accept_memory_one(zone))
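
cond_accept_memory() above accepts deferred (unaccepted) memory until the usable free pages, i.e. free pages minus the unusable and still-unaccepted ones, reach the high watermark. A simple model of that budget; the chunk size and all counters are hypothetical.

/* Simple model of the acceptance budget computed above. */
#include <stdio.h>

int main(void)
{
        long high_wmark = 8192;
        long nr_free = 32768;        /* includes still-unaccepted pages */
        long unusable = 1024;        /* reserves etc. unusable for this request */
        long nr_unaccepted = 28672;  /* pages still behind the unaccepted list */
        long chunk = 1024;           /* pages accepted per step (config-dependent) */

        long to_accept = high_wmark - (nr_free - unusable - nr_unaccepted);

        while (to_accept > 0 && nr_unaccepted > 0) {
                nr_unaccepted -= chunk;  /* one block leaves the unaccepted list */
                to_accept -= chunk;      /* ...and now counts as usable free memory */
        }
        printf("to_accept done, unaccepted left=%ld\n", nr_unaccepted);
        return 0;
}
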
6688 struct zone *zone = page_zone(page);
6695 spin_lock_irqsave(&zone->lock, flags);
6696 first = list_empty(&zone->unaccepted_pages);
6697 list_add_tail(&page->lru, &zone->unaccepted_pages);
6698 __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
6699 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
6700 spin_unlock_irqrestore(&zone->lock, flags);
6719 static bool cond_accept_memory(struct zone *zone, unsigned int order)