Lines matching refs: zone

59  * the "fragmentation score" of a node/zone.
153 static void defer_compaction(struct zone *zone, int order)
155 zone->compact_considered = 0;
156 zone->compact_defer_shift++;
158 if (order < zone->compact_order_failed)
159 zone->compact_order_failed = order;
161 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
162 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
164 trace_mm_compaction_defer_compaction(zone, order);
168 static bool compaction_deferred(struct zone *zone, int order)
170 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
172 if (order < zone->compact_order_failed)
176 if (++zone->compact_considered >= defer_limit) {
177 zone->compact_considered = defer_limit;
181 trace_mm_compaction_deferred(zone, order);
191 void compaction_defer_reset(struct zone *zone, int order,
195 zone->compact_considered = 0;
196 zone->compact_defer_shift = 0;
198 if (order >= zone->compact_order_failed)
199 zone->compact_order_failed = order + 1;
201 trace_mm_compaction_defer_reset(zone, order);
205 static bool compaction_restarting(struct zone *zone, int order)
207 if (order < zone->compact_order_failed)
210 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
211 zone->compact_considered >= 1UL << zone->compact_defer_shift;
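The four helpers above (defer_compaction, compaction_deferred, compaction_defer_reset, compaction_restarting) together implement exponential backoff: each failure widens the defer window to 1 << compact_defer_shift attempts, capped at COMPACT_MAX_DEFER_SHIFT, and attempts inside the window are skipped. A minimal stand-alone model of that bookkeeping, with a simplified struct, an assumed cap of 6 and an invented driver loop, not the kernel's code:

/* Minimal stand-alone model of the defer bookkeeping shown above. The
 * struct, the cap and the driver loop are invented for the demo; only the
 * shape of the logic mirrors the fragments. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEFER_SHIFT 6       /* assumed stand-in for COMPACT_MAX_DEFER_SHIFT */

struct zone_model {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
};

/* Failure path: widen the defer window (1 << shift), capped. */
static void model_defer(struct zone_model *z, int order)
{
        z->compact_considered = 0;
        if (++z->compact_defer_shift > MAX_DEFER_SHIFT)
                z->compact_defer_shift = MAX_DEFER_SHIFT;
        if (order < z->compact_order_failed)
                z->compact_order_failed = order;
}

/* Pre-check: true means "still inside the defer window, skip this attempt". */
static bool model_deferred(struct zone_model *z, int order)
{
        unsigned long limit = 1UL << z->compact_defer_shift;

        if (order < z->compact_order_failed)
                return false;                   /* lower orders are never deferred */
        if (++z->compact_considered >= limit) {
                z->compact_considered = limit;
                return false;                   /* window elapsed, try again */
        }
        return true;
}

int main(void)
{
        struct zone_model z = { 0, 0, 0 };
        int runs = 0;

        /* Pretend every run fails: gaps between runs grow 1, 2, 4, ... 64. */
        for (int attempt = 0; attempt < 200; attempt++) {
                if (model_deferred(&z, 9))
                        continue;
                runs++;
                model_defer(&z, 9);
        }
        printf("200 attempts -> %d actual compaction runs\n", runs);
        return 0;
}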
224 static void reset_cached_positions(struct zone *zone)
226 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
227 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
228 zone->compact_cached_free_pfn =
229 pageblock_start_pfn(zone_end_pfn(zone) - 1);
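reset_cached_positions() restarts the two scanners at opposite ends of the zone: the migrate scanner at zone_start_pfn and the free scanner at the start of the zone's last pageblock. A toy illustration of that rounding, assuming a pageblock order of 9; the pfn values and helper are invented for the demo:

/* Toy illustration of the cached-scanner reset above. */
#include <stdio.h>

#define PAGEBLOCK_ORDER 9
#define PAGEBLOCK_NR    (1UL << PAGEBLOCK_ORDER)

static unsigned long pageblock_start(unsigned long pfn)
{
        return pfn & ~(PAGEBLOCK_NR - 1);       /* round down to a pageblock boundary */
}

int main(void)
{
        unsigned long zone_start_pfn = 0x1000;
        unsigned long zone_end_pfn   = 0x9a77;  /* zone may end mid-pageblock */

        unsigned long migrate_pfn = zone_start_pfn;
        unsigned long free_pfn    = pageblock_start(zone_end_pfn - 1);

        printf("migrate scanner restarts at %#lx\n", migrate_pfn);
        printf("free scanner restarts at    %#lx\n", free_pfn);
        return 0;
}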
303 __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
313 if (zone != page_zone(page))
333 /* Ensure the start of the pageblock or zone is online and valid */
335 block_pfn = max(block_pfn, zone->zone_start_pfn);
342 /* Ensure the end of the pageblock or zone is online and valid */
344 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
376 static void __reset_isolation_suitable(struct zone *zone)
378 unsigned long migrate_pfn = zone->zone_start_pfn;
379 unsigned long free_pfn = zone_end_pfn(zone) - 1;
385 if (!zone->compact_blockskip_flush)
388 zone->compact_blockskip_flush = false;
391 * Walk the zone and update pageblock skip information. Source looks
401 if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
405 zone->compact_init_migrate_pfn = reset_migrate;
406 zone->compact_cached_migrate_pfn[0] = reset_migrate;
407 zone->compact_cached_migrate_pfn[1] = reset_migrate;
411 if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
415 zone->compact_init_free_pfn = reset_free;
416 zone->compact_cached_free_pfn = reset_free;
422 zone->compact_cached_migrate_pfn[0] = migrate_pfn;
423 zone->compact_cached_migrate_pfn[1] = migrate_pfn;
424 zone->compact_cached_free_pfn = free_pfn;
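__reset_isolation_suitable() clears the per-pageblock skip hints by walking the zone from both ends at once; the first usable pageblock found from each end becomes the new compact_init_migrate_pfn / compact_init_free_pfn and cached scanner position, falling back to the zone boundaries otherwise. A rough stand-alone model of that two-ended walk, where suitable() is an invented stand-in for the real __reset_isolation_pfn() checks and the sizes are arbitrary:

/* Rough stand-alone model of the two-ended skip-reset walk above. */
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR 512UL

static bool suitable(unsigned long pfn)
{
        return (pfn / PAGEBLOCK_NR) % 3 == 0;   /* arbitrary pattern for the demo */
}

int main(void)
{
        unsigned long zone_start = 0, zone_end = 16 * PAGEBLOCK_NR;
        unsigned long migrate_pfn = zone_start;
        unsigned long free_pfn = zone_end - PAGEBLOCK_NR;
        unsigned long reset_migrate = free_pfn, reset_free = zone_start;
        bool source_set = false, free_set = false;

        while (migrate_pfn < free_pfn) {
                if (!source_set && suitable(migrate_pfn)) {
                        source_set = true;
                        reset_migrate = migrate_pfn;    /* new cached migrate start */
                }
                if (!free_set && suitable(free_pfn)) {
                        free_set = true;
                        reset_free = free_pfn;          /* new cached free start */
                }
                migrate_pfn += PAGEBLOCK_NR;
                free_pfn -= PAGEBLOCK_NR;
        }
        printf("cached migrate pfn -> %lu, cached free pfn -> %lu\n",
               reset_migrate, reset_free);
        return 0;
}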
433 struct zone *zone = &pgdat->node_zones[zoneid];
434 if (!populated_zone(zone))
438 if (zone->compact_blockskip_flush)
439 __reset_isolation_suitable(zone);
464 struct zone *zone = cc->zone;
473 if (pfn > zone->compact_cached_migrate_pfn[0])
474 zone->compact_cached_migrate_pfn[0] = pfn;
476 pfn > zone->compact_cached_migrate_pfn[1])
477 zone->compact_cached_migrate_pfn[1] = pfn;
487 struct zone *zone = cc->zone;
494 if (pfn < zone->compact_cached_free_pfn)
495 zone->compact_cached_free_pfn = pfn;
614 && compact_unlock_should_abort(&cc->zone->lock, flags,
642 locked = compact_lock_irqsave(&cc->zone->lock,
678 spin_unlock_irqrestore(&cc->zone->lock, flags);
713 * Non-free pages, invalid PFNs, or zone boundaries within the
730 if (block_start_pfn < cc->zone->zone_start_pfn)
731 block_start_pfn = cc->zone->zone_start_pfn;
753 block_end_pfn, cc->zone))
790 pg_data_t *pgdat = cc->zone->zone_pgdat;
841 pg_data_t *pgdat = cc->zone->zone_pgdat;
941 low_pfn == cc->zone->zone_start_pfn)) {
990 * Skip if free. We read page order here without zone lock
1295 if (block_start_pfn < cc->zone->zone_start_pfn)
1296 block_start_pfn = cc->zone->zone_start_pfn;
1306 block_end_pfn, cc->zone))
1351 * We are checking page_order without zone->lock taken. But
1436 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
1437 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
1439 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
1487 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
1512 struct free_area *area = &cc->zone->free_area[order];
1522 spin_lock_irqsave(&cc->zone->lock, flags);
1533 cc->zone->zone_start_pfn);
1581 spin_unlock_irqrestore(&cc->zone->lock, flags);
1613 zone_end_pfn(cc->zone)),
1614 cc->zone);
1621 if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1623 cc->zone->compact_cached_free_pfn = highest;
1640 struct zone *zone = cc->zone;
1656 * successfully isolated from, zone-cached value, or the end of the
1657 * zone when isolating for the first time. For looping we also need
1661 * zone which ends in the middle of a pageblock.
1668 zone_end_pfn(zone));
1684 * This can iterate a massively long zone without finding any
1691 zone);
1784 * freelist. All pages on the freelist are from the same zone, so there is no
1867 * If the migrate_pfn is not at the start of a zone or the start
1871 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
1898 if (cc->migrate_pfn != cc->zone->zone_start_pfn)
1905 struct free_area *area = &cc->zone->free_area[order];
1913 spin_lock_irqsave(&cc->zone->lock, flags);
1939 if (pfn < cc->zone->zone_start_pfn)
1940 pfn = cc->zone->zone_start_pfn;
1946 spin_unlock_irqrestore(&cc->zone->lock, flags);
1979 * Start at where we last stopped, or beginning of the zone as
1985 if (block_start_pfn < cc->zone->zone_start_pfn)
1986 block_start_pfn = cc->zone->zone_start_pfn;
2009 * This can potentially iterate a massively long zone with
2017 block_end_pfn, cc->zone);
2035 low_pfn == cc->zone->zone_start_pfn) &&
2095 * A zone's fragmentation score is the external fragmentation wrt the
2098 static unsigned int fragmentation_score_zone(struct zone *zone)
2100 return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
2104 * A weighted zone's fragmentation score is the external fragmentation
2105 * wrt the COMPACTION_HPAGE_ORDER scaled by the zone's size. It
2113 static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
2117 score = zone->present_pages * fragmentation_score_zone(zone);
2118 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
2134 struct zone *zone;
2136 zone = &pgdat->node_zones[zoneid];
2137 if (!populated_zone(zone))
2139 score += fragmentation_score_zone_weighted(zone);
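Putting the scoring fragments together: fragmentation_score_zone() is the external fragmentation at COMPACTION_HPAGE_ORDER, fragmentation_score_zone_weighted() scales that by the zone's share of the node's present pages, and the node score sums the weighted values over populated zones. A back-of-the-envelope model with made-up sizes, where the extfrag field stands in for extfrag_for_order():

/* Back-of-the-envelope model of the node fragmentation score above. */
#include <stdio.h>

struct zone_model {
        unsigned long present_pages;
        unsigned int extfrag;           /* 0..100 external fragmentation */
};

int main(void)
{
        struct zone_model zones[] = {
                { 1UL << 18, 60 },      /* small zone, badly fragmented */
                { 1UL << 22, 10 },      /* large zone, mostly fine */
        };
        unsigned long node_present = (1UL << 18) + (1UL << 22);
        unsigned long node_score = 0;

        for (int i = 0; i < 2; i++) {
                unsigned long long s =
                        (unsigned long long)zones[i].present_pages * zones[i].extfrag;
                node_score += s / (node_present + 1);   /* weighted per-zone score */
        }
        /* The large zone dominates: the node score lands near 10, not 60. */
        printf("node fragmentation score = %lu\n", node_score);
        return 0;
}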
2178 reset_cached_positions(cc->zone);
2187 cc->zone->compact_blockskip_flush = true;
2199 pgdat = cc->zone->zone_pgdat;
2203 score = fragmentation_score_zone(cc->zone);
2229 struct free_area *area = &cc->zone->free_area[order];
2271 trace_mm_compaction_finished(cc->zone, cc->order, ret);
2278 static bool __compaction_suitable(struct zone *zone, int order,
2298 low_wmark_pages(zone) : min_wmark_pages(zone);
2300 return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
2305 * compaction_suitable: Is this suitable to run compaction on this zone now?
2307 bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
2312 suitable = __compaction_suitable(zone, order, highest_zoneidx,
2313 zone_page_state(zone, NR_FREE_PAGES));
2333 int fragindex = fragmentation_index(zone, order);
2345 trace_mm_compaction_suitable(zone, order, compact_result);
2353 struct zone *zone;
2357 * Make sure at least one zone would pass __compaction_suitable if we continue
2360 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2370 available = zone_reclaimable_pages(zone) / order;
2371 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
2372 if (__compaction_suitable(zone, order, ac->highest_zoneidx,
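The zonelist check above estimates whether compaction could succeed after some reclaim: current free pages plus reclaimable pages divided by the requested order, fed into the same watermark test __compaction_suitable() uses. A simplified sketch with invented numbers and a crude headroom term; the real code uses __zone_watermark_ok() against the zone's actual counters:

/* Simplified sketch of the "would compaction help after some reclaim?" estimate. */
#include <stdbool.h>
#include <stdio.h>

static bool watermark_ok(unsigned long pages, unsigned long watermark,
                         unsigned long headroom)
{
        return pages > watermark + headroom;    /* crude order-0 style check */
}

int main(void)
{
        int order = 9;                          /* e.g. a THP-sized request */
        unsigned long free = 2000, reclaimable = 30000, watermark = 4000;
        unsigned long available = free + reclaimable / order;

        printf("estimated available pages: %lu -> compaction %s worth trying\n",
               available,
               watermark_ok(available, watermark, 1UL << order) ? "is" : "is not");
        return 0;
}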
2384 unsigned long start_pfn = cc->zone->zone_start_pfn;
2385 unsigned long end_pfn = zone_end_pfn(cc->zone);
2392 * These counters track activities during zone compaction. Initialize
2393 * them before compacting a new zone.
2408 watermark = wmark_pages(cc->zone,
2410 if (zone_watermark_ok(cc->zone, cc->order, watermark,
2415 if (!compaction_suitable(cc->zone, cc->order,
2424 if (compaction_restarting(cc->zone, cc->order))
2425 __reset_isolation_suitable(cc->zone);
2428 * Setup to move all movable pages to the end of the zone. Use cached
2430 * want to compact the whole zone), but check that it is initialised
2431 * by ensuring the values are within zone boundaries.
2438 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2439 cc->free_pfn = cc->zone->compact_cached_free_pfn;
2442 cc->zone->compact_cached_free_pfn = cc->free_pfn;
2446 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2447 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2450 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2465 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2499 cc->zone->compact_cached_migrate_pfn[1] =
2500 cc->zone->compact_cached_migrate_pfn[0];
2511 last_migrated_pfn = max(cc->zone->zone_start_pfn,
2580 lru_add_drain_cpu_zone(cc->zone);
2601 * already reset to zone end in compact_finished()
2603 if (free_pfn > cc->zone->compact_cached_free_pfn)
2604 cc->zone->compact_cached_free_pfn = free_pfn;
2618 static enum compact_result compact_zone_order(struct zone *zone, int order,
2628 .zone = zone,
2688 struct zone *zone;
2696 /* Compact each zone in the list */
2697 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2702 && compaction_deferred(zone, order)) {
2707 status = compact_zone_order(zone, order, gfp_mask, prio,
2714 * We think the allocation will succeed in this zone,
2717 * succeeds in this zone.
2719 compaction_defer_reset(zone, order, false);
2727 * We think that allocation won't succeed in this zone
2731 defer_compaction(zone, order);
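The per-zone loop above ties the deferral machinery back in: zones still inside their defer window are skipped (under an additional priority condition not shown in these matches), a zone where compaction reports that the allocation should now succeed gets compaction_defer_reset(), and an outright failure gets defer_compaction(). A stand-alone caricature of that flow, with all types and the success condition invented for the demo:

/* Caricature of the per-zone try/defer/reset flow above. */
#include <stdbool.h>
#include <stdio.h>

enum result { SKIPPED, SUCCESS, FAILED };

static enum result try_zone(int zone_id, bool deferred)
{
        if (deferred)
                return SKIPPED;
        /* Pretend only the last zone has enough free space to succeed. */
        return zone_id == 2 ? SUCCESS : FAILED;
}

int main(void)
{
        bool deferred[3] = { false, true, false };

        for (int i = 0; i < 3; i++) {
                switch (try_zone(i, deferred[i])) {
                case SUCCESS:
                        deferred[i] = false;    /* compaction_defer_reset() */
                        printf("zone %d: compacted, allocation should succeed\n", i);
                        break;
                case FAILED:
                        deferred[i] = true;     /* defer_compaction() */
                        printf("zone %d: failed, deferring future attempts\n", i);
                        break;
                case SKIPPED:
                        printf("zone %d: deferred, skipped\n", i);
                        break;
                }
        }
        return 0;
}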
2747 * Compact all zones within a node till each zone's fragmentation score
2753 * per-zone locks.
2758 struct zone *zone;
2769 zone = &pgdat->node_zones[zoneid];
2770 if (!populated_zone(zone))
2773 cc.zone = zone;
2789 struct zone *zone;
2801 zone = &pgdat->node_zones[zoneid];
2802 if (!populated_zone(zone))
2805 cc.zone = zone;
2909 struct zone *zone;
2913 zone = &pgdat->node_zones[zoneid];
2915 if (!populated_zone(zone))
2919 if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
2920 min_wmark_pages(zone),
2924 if (compaction_suitable(zone, pgdat->kcompactd_max_order,
2939 struct zone *zone;
2955 zone = &pgdat->node_zones[zoneid];
2956 if (!populated_zone(zone))
2959 if (compaction_deferred(zone, cc.order))
2963 if (zone_watermark_ok(zone, cc.order,
2964 min_wmark_pages(zone), zoneid, 0))
2967 if (!compaction_suitable(zone, cc.order, zoneid))
2973 cc.zone = zone;
2977 compaction_defer_reset(zone, cc.order, false);
2981 * otherwise coalesce on the zone's free area for
2985 drain_all_pages(zone);
2991 defer_compaction(zone, cc.order);
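kcompactd's per-zone work loop applies the same gatekeeping in sequence: skip a zone whose compaction is deferred, skip it if the min watermark is already met at the requested order, skip it if compaction_suitable() says there is not enough free memory to migrate into, and only then compact. A condensed decision-flow sketch with boolean stand-ins for the real checks:

/* Condensed decision flow for the kcompactd per-zone loop above. */
#include <stdbool.h>
#include <stdio.h>

enum verdict { SKIP_DEFERRED, SKIP_WMARK_OK, SKIP_UNSUITABLE, COMPACT };

static enum verdict should_compact(bool deferred, bool wmark_ok, bool suitable)
{
        if (deferred)
                return SKIP_DEFERRED;   /* recent failures, back off */
        if (wmark_ok)
                return SKIP_WMARK_OK;   /* min watermark already met at cc.order */
        if (!suitable)
                return SKIP_UNSUITABLE; /* not enough order-0 pages to migrate to */
        return COMPACT;
}

int main(void)
{
        printf("%d\n", should_compact(false, false, true));    /* COMPACT */
        return 0;
}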
3079 * as the condition of the zone changing substantially