// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */


#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}
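
/*
 * Illustrative sketch (not part of this file): these two helpers back
 * the for_each_online_pgdat() iterator in <linux/mmzone.h>, roughly:
 *
 *	#define for_each_online_pgdat(pgdat)		\
 *		for (pgdat = first_online_pgdat();	\
 *		     pgdat;				\
 *		     pgdat = next_online_pgdat(pgdat))
 */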

/*
 * next_zone - helper for for_each_zone(): advance to the next zone,
 * moving to the first zone of the next online pgdat once the current
 * node's zones are exhausted.
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}
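
/*
 * Illustrative sketch (not part of this file): for_each_zone() in
 * <linux/mmzone.h> walks every zone of every online node via the
 * helper above, roughly:
 *
 *	#define for_each_zone(zone)				\
 *		for (zone = (first_online_pgdat())->node_zones;	\
 *		     zone;					\
 *		     zone = next_zone(zone))
 */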

/* Returns 1 if @zref's node is allowed by @nodes (always true on !NUMA) */
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on the nodemask if one is set.
	 */
	if (unlikely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}
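
/*
 * Illustrative sketch (not part of this file): callers normally go
 * through the inline next_zones_zonelist() wrapper in <linux/mmzone.h>,
 * so the common case (no nodemask, zone index already low enough)
 * avoids the out-of-line call, roughly:
 *
 *	static __always_inline struct zoneref *
 *	next_zones_zonelist(struct zoneref *z,
 *			    enum zone_type highest_zoneidx,
 *			    nodemask_t *nodes)
 *	{
 *		if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
 *			return z;
 *		return __next_zones_zonelist(z, highest_zoneidx, nodes);
 *	}
 */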

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));
	spin_lock_init(&lruvec->lru_lock);

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
	/*
	 * The "Unevictable LRU" is imaginary: though its size is maintained,
	 * it is never scanned, and unevictable pages are not threaded on it
	 * (so that their lru fields can be reused to hold mlock_count).
	 * Poison its list head, so that any operations on it would crash.
	 */
	list_del(&lruvec->lists[LRU_UNEVICTABLE]);

	lru_gen_init_lruvec(lruvec);
}

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
/*
 * Atomically record @cpupid as the most recent cpu/pid to touch @page
 * in the LAST_CPUPID field of page->flags, returning the previous
 * value.  try_cmpxchg() refreshes old_flags on failure, so the loop
 * retries until no concurrent page->flags update races with us.
 */
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	old_flags = READ_ONCE(page->flags);
	do {
		flags = old_flags;
		last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));

	return last_cpupid;
}
#endif
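
/*
 * Illustrative sketch (not part of this file): the NUMA hinting fault
 * path records where a page was last accessed and inspects the prior
 * value, roughly (helpers such as cpu_pid_to_cpupid() and
 * cpupid_to_nid() live in <linux/mm.h>):
 *
 *	int last = page_cpupid_xchg_last(page,
 *					 cpu_pid_to_cpupid(cpu, pid));
 *	if (cpupid_to_nid(last) != page_to_nid(page))
 *		...the previous access came from another node...
 */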