// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* Return the pgdat of the first online node, for node iteration. */
struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

/* Return the pgdat of the next online node, or NULL after the last one. */
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}
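
/*
 * These two helpers back the for_each_online_pgdat() iterator in
 * include/linux/mmzone.h. A minimal usage sketch (walk_online_nodes()
 * is hypothetical, not part of the kernel):
 *
 *	static void walk_online_nodes(void)
 *	{
 *		struct pglist_data *pgdat;
 *
 *		for_each_online_pgdat(pgdat)
 *			pr_info("node %d: %lu spanned pages\n",
 *				pgdat->node_id, pgdat->node_spanned_pages);
 *	}
 */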

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}
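
/*
 * for_each_zone() in include/linux/mmzone.h is built on next_zone() and
 * expands to roughly:
 *
 *	for (zone = (first_online_pgdat())->node_zones;
 *	     zone;
 *	     zone = next_zone(zone))
 *
 * Note that this walks every zone of every online node, including zones
 * with no pages; callers typically skip those with populated_zone().
 */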

/* Is the node of @zref set in @nodes? Trivially true on !CONFIG_NUMA. */
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set.
	 */
	if (unlikely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}
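
/*
 * Callers normally go through next_zones_zonelist() in
 * include/linux/mmzone.h, which inlines the common case and falls back
 * here otherwise, roughly:
 *
 *	static __always_inline struct zoneref *
 *	next_zones_zonelist(struct zoneref *z,
 *			enum zone_type highest_zoneidx, nodemask_t *nodes)
 *	{
 *		if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
 *			return z;
 *		return __next_zones_zonelist(z, highest_zoneidx, nodes);
 *	}
 */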

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));
	spin_lock_init(&lruvec->lru_lock);

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
	/*
	 * The "Unevictable LRU" is imaginary: though its size is maintained,
	 * it is never scanned, and unevictable pages are not threaded on it
	 * (so that their lru fields can be reused to hold mlock_count).
	 * Poison its list head, so that any operations on it would crash.
	 */
	list_del(&lruvec->lists[LRU_UNEVICTABLE]);
}
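
/*
 * The list_del() above poisons the unevictable list head: ->next becomes
 * LIST_POISON1 and ->prev becomes LIST_POISON2 (include/linux/poison.h),
 * so e.g. a stray list_add() to lruvec->lists[LRU_UNEVICTABLE] would
 * fault on the poison pointer instead of silently corrupting state.
 */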

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
/*
 * Atomically record the cpupid (packed cpu+pid) that last accessed the
 * page in its page->flags and return the previous value.
 */
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	old_flags = READ_ONCE(page->flags);
	do {
		flags = old_flags;
		last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));

	return last_cpupid;
}
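
/*
 * Usage sketch (hypothetical caller): on a NUMA hinting fault, swap in
 * the current cpu/pid and fetch the previous accessor for the placement
 * heuristics; cpu_pid_to_cpupid() is the packing helper from
 * include/linux/mm.h:
 *
 *	int this_cpupid = cpu_pid_to_cpupid(raw_smp_processor_id(),
 *					    current->pid);
 *	int last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
 *
 * The try_cmpxchg() loop retries if another CPU changed page->flags
 * between the read and the exchange, so unrelated flag bits are never
 * clobbered.
 */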
#endif