#ifndef __LINUX_NODEMASK_H
#define __LINUX_NODEMASK_H

/*
 * Nodemasks provide a bitmap suitable for representing the
 * set of Nodes in a system, one bit position per Node number.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these nodemasks are based.
 *
 * For details of nodemask_parse_user(), see bitmap_parse_user() in
 * lib/bitmap.c.  For details of nodelist_parse(), see bitmap_parselist(),
 * also in lib/bitmap.c.  For details of node_remap(), see bitmap_bitremap in
 * lib/bitmap.c.  For details of nodes_remap(), see bitmap_remap in
 * lib/bitmap.c.  For details of nodes_onto(), see bitmap_onto in
 * lib/bitmap.c.  For details of nodes_fold(), see bitmap_fold in
 * lib/bitmap.c.
 *
 * The available nodemask operations are:
 *
 * void node_set(node, mask)		turn on bit 'node' in mask
 * void node_clear(node, mask)		turn off bit 'node' in mask
 * void nodes_setall(mask)		set all bits
 * void nodes_clear(mask)		clear all bits
 * int node_isset(node, mask)		true iff bit 'node' set in mask
 * int node_test_and_set(node, mask)	test and set bit 'node' in mask
 *
 * void nodes_and(dst, src1, src2)	dst = src1 & src2  [intersection]
 * void nodes_or(dst, src1, src2)	dst = src1 | src2  [union]
 * void nodes_xor(dst, src1, src2)	dst = src1 ^ src2
 * void nodes_andnot(dst, src1, src2)	dst = src1 & ~src2
 * void nodes_complement(dst, src)	dst = ~src
 *
 * int nodes_equal(mask1, mask2)	Does mask1 == mask2?
 * int nodes_intersects(mask1, mask2)	Do mask1 and mask2 intersect?
 * int nodes_subset(mask1, mask2)	Is mask1 a subset of mask2?
 * int nodes_empty(mask)		Is mask empty (no bits set)?
 * int nodes_full(mask)			Is mask full (all bits set)?
 * int nodes_weight(mask)		Hamming weight - number of set bits
 *
 * void nodes_shift_right(dst, src, n)	Shift right
 * void nodes_shift_left(dst, src, n)	Shift left
 *
 * int first_node(mask)			Number lowest set bit, or MAX_NUMNODES
 * int next_node(node, mask)		Next node past 'node', or MAX_NUMNODES
 * int next_node_in(node, mask)		Next node past 'node', or wrap to first,
 *					or MAX_NUMNODES
 * int first_unset_node(mask)		First node not set in mask, or
 *					MAX_NUMNODES
 *
 * nodemask_t nodemask_of_node(node)	Return nodemask with bit 'node' set
 * NODE_MASK_ALL			Initializer - all bits set
 * NODE_MASK_NONE			Initializer - no bits set
 * unsigned long *nodes_addr(mask)	Array of unsigned long's in mask
 *
 * int nodemask_parse_user(ubuf, ulen, mask)	Parse ascii string as nodemask
 * int nodelist_parse(buf, map)		Parse ascii string as nodelist
 * int node_remap(oldbit, old, new)	newbit = map(old, new)(oldbit)
 * void nodes_remap(dst, src, old, new)	*dst = map(old, new)(src)
 * void nodes_onto(dst, orig, relmap)	*dst = orig relative to relmap
 * void nodes_fold(dst, orig, sz)	dst bits = orig bits mod sz
 *
 * for_each_node_mask(node, mask)	for-loop node over mask
 *
 * int num_online_nodes()		Number of online Nodes
 * int num_possible_nodes()		Number of all possible Nodes
 *
 * int node_random(mask)		Random node with set bit in mask
 *
 * int node_online(node)		Is node 'node' online?
 * int node_possible(node)		Is node 'node' possible?
 *
 * node_set_online(node)		set bit 'node' in node_online_map
 * node_set_offline(node)		clear bit 'node' in node_online_map
 *
 * for_each_node(node)			for-loop node over node_possible_map
 * for_each_online_node(node)		for-loop node over node_online_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  So use a simple one-line #define
 *    for node_isset(), instead of wrapping an inline inside a macro, the
 *    way we do the other calls.
 *
 * NODEMASK_SCRATCH
 * When doing the above logical AND, OR, XOR and remap operations, callers
 * tend to need temporary nodemask_t's on the stack.  But if NODES_SHIFT is
 * large, nodemask_t's consume too much stack space.  NODEMASK_SCRATCH is a
 * helper for such situations.  See below and CPUMASK_ALLOC also.
 */
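
/*
 * A minimal usage sketch (illustrative only; node numbers are arbitrary):
 *
 *	nodemask_t mask = NODE_MASK_NONE;
 *	int node;
 *
 *	node_set(0, mask);
 *	node_set(2, mask);
 *	if (node_isset(2, mask))
 *		pr_debug("node 2 is set\n");
 *	for_each_node_mask(node, mask)
 *		pr_debug("visiting node %d\n", node);
 */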

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/numa.h>

typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;

/**
 * nodemask_pr_args - printf args to output a nodemask
 * @maskp: nodemask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
 */
#define nodemask_pr_args(maskp)		MAX_NUMNODES, (maskp)->bits
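
/*
 * For example (illustrative), the '%*pb' and '%*pbl' printf extensions
 * print the mask as a hex bitmap or as a node list respectively:
 *
 *	pr_info("online nodes: %*pbl\n", nodemask_pr_args(&node_online_map));
 */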

/*
 * The inline keyword gives the compiler room to decide to inline, or
 * not inline a function as it sees best.  However, as these functions
 * are called in both __init and non-__init functions, if they are not
 * inlined we will end up with a section mismatch error (of the type of
 * freeable items not being freed).  So we must use __always_inline here
 * to fix the problem.  If other functions in the future also end up in
 * this situation they will also need to be annotated as __always_inline.
 */
#define node_set(node, dst) __node_set((node), &(dst))
static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
{
	set_bit(node, dstp->bits);
}

#define node_clear(node, dst) __node_clear((node), &(dst))
static inline void __node_clear(int node, volatile nodemask_t *dstp)
{
	clear_bit(node, dstp->bits);
}

#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_fill(dstp->bits, nbits);
}

#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)

#define node_test_and_set(node, nodemask) \
			__node_test_and_set((node), &(nodemask))
static inline int __node_test_and_set(int node, nodemask_t *addr)
{
	return test_and_set_bit(node, addr->bits);
}

#define nodes_and(dst, src1, src2) \
			__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_or(dst, src1, src2) \
			__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_xor(dst, src1, src2) \
			__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_andnot(dst, src1, src2) \
			__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_complement(dst, src) \
			__nodes_complement(&(dst), &(src), MAX_NUMNODES)
static inline void __nodes_complement(nodemask_t *dstp,
					const nodemask_t *srcp, unsigned int nbits)
{
	bitmap_complement(dstp->bits, srcp->bits, nbits);
}

#define nodes_equal(src1, src2) \
			__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_equal(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define nodes_intersects(src1, src2) \
			__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_intersects(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define nodes_subset(src1, src2) \
			__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_subset(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_empty(srcp->bits, nbits);
}

#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_full(srcp->bits, nbits);
}

#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_weight(srcp->bits, nbits);
}
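
/*
 * The logical ops above combine whole masks.  A sketch (illustrative only;
 * assumes 'a' and 'b' have been initialized by the caller):
 *
 *	nodemask_t a, b, both;
 *
 *	nodes_and(both, a, b);
 *	if (!nodes_empty(both))
 *		pr_debug("%d nodes in common\n", nodes_weight(both));
 */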

#define nodes_shift_right(dst, src, n) \
			__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
static inline void __nodes_shift_right(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define nodes_shift_left(dst, src, n) \
			__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
static inline void __nodes_shift_left(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

/* FIXME: better would be to fix all architectures to never return
          > MAX_NUMNODES, then the silly min_ts could be dropped. */

#define first_node(src) __first_node(&(src))
static inline int __first_node(const nodemask_t *srcp)
{
	return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}

#define next_node(n, src) __next_node((n), &(src))
static inline int __next_node(int n, const nodemask_t *srcp)
{
	return min_t(int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n + 1));
}

/*
 * Find the next present node in src, starting after node n, wrapping around to
 * the first node in src if needed.  Returns MAX_NUMNODES if src is empty.
 */
#define next_node_in(n, src) __next_node_in((n), &(src))
int __next_node_in(int node, const nodemask_t *srcp);
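
/*
 * The wrap-around makes next_node_in() a natural fit for round-robin node
 * selection.  A sketch (illustrative; 'prev' is a hypothetical per-caller
 * cursor):
 *
 *	prev = next_node_in(prev, node_online_map);
 *	if (prev < MAX_NUMNODES)
 *		... allocate from node 'prev' ...
 */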

static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
	nodes_clear(*mask);
	node_set(node, *mask);
}

#define nodemask_of_node(node)						\
({									\
	typeof(_unused_nodemask_arg_) m;				\
	if (sizeof(m) == sizeof(unsigned long)) {			\
		m.bits[0] = 1UL << (node);				\
	} else {							\
		init_nodemask_of_node(&m, (node));			\
	}								\
	m;								\
})
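
/*
 * Example (illustrative; 'nid' is a hypothetical node id):
 *
 *	nodemask_t one = nodemask_of_node(nid);
 *
 *	WARN_ON(!node_isset(nid, one));
 */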

#define first_unset_node(mask) __first_unset_node(&(mask))
static inline int __first_unset_node(const nodemask_t *maskp)
{
	return min_t(int, MAX_NUMNODES,
			find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}

#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)

#if MAX_NUMNODES <= BITS_PER_LONG

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#else

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL,			\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#endif

#define NODE_MASK_NONE							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL			\
} })
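
/*
 * NODE_MASK_ALL and NODE_MASK_NONE are compile-time initializers, e.g.
 * (variable names are illustrative):
 *
 *	static nodemask_t nodes_allowed = NODE_MASK_ALL;
 *	nodemask_t seen = NODE_MASK_NONE;
 */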

#define nodes_addr(src) ((src).bits)

#define nodemask_parse_user(ubuf, ulen, dst) \
		__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
static inline int __nodemask_parse_user(const char __user *buf, int len,
					nodemask_t *dstp, int nbits)
{
	return bitmap_parse_user(buf, len, dstp->bits, nbits);
}

#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
	return bitmap_parselist(buf, dstp->bits, nbits);
}
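
/*
 * nodelist_parse() accepts the usual bitmap list format, e.g. "0-3,5".
 * A sketch (illustrative; 'buf' is a hypothetical NUL-terminated string):
 *
 *	nodemask_t mask;
 *	int err = nodelist_parse(buf, mask);
 *
 *	if (err)
 *		return err;
 */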

#define node_remap(oldbit, old, new) \
		__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
static inline int __node_remap(int oldbit,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define nodes_remap(dst, src, old, new) \
		__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#define nodes_onto(dst, orig, relmap) \
		__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
		const nodemask_t *relmapp, int nbits)
{
	bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}

#define nodes_fold(dst, orig, sz) \
		__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
		int sz, int nbits)
{
	bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}

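/*
 * Remap semantics follow bitmap_bitremap(): the position held by the n-th
 * set bit in 'old' maps to the position of the n-th set bit in 'new'.
 * For example (illustrative values): if 'old' has nodes {1,3} set and
 * 'new' has {5,7}, node_remap(1, old, new) returns 5 and
 * node_remap(3, old, new) returns 7.
 */
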
#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask)			\
	for ((node) = first_node(mask);			\
		(node) < MAX_NUMNODES;			\
		(node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
#define for_each_node_mask(node, mask)			\
	if (!nodes_empty(mask))				\
		for ((node) = 0; (node) < 1; (node)++)
#endif /* MAX_NUMNODES */

/*
 * Bitmasks that are kept for all the nodes.
 */
enum node_states {
	N_POSSIBLE,		/* The node could become online at some point */
	N_ONLINE,		/* The node is online */
	N_NORMAL_MEMORY,	/* The node has regular memory */
#ifdef CONFIG_HIGHMEM
	N_HIGH_MEMORY,		/* The node has regular or high memory */
#else
	N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
#ifdef CONFIG_MOVABLE_NODE
	N_MEMORY,		/* The node has memory (regular, high, movable) */
#else
	N_MEMORY = N_HIGH_MEMORY,
#endif
	N_CPU,			/* The node has one or more cpus */
	NR_NODE_STATES
};

/*
 * The following particular system nodemasks and operations
 * on them manage all possible and online nodes.
 */

extern nodemask_t node_states[NR_NODE_STATES];

#if MAX_NUMNODES > 1
static inline int node_state(int node, enum node_states state)
{
	return node_isset(node, node_states[state]);
}

static inline void node_set_state(int node, enum node_states state)
{
	__node_set(node, &node_states[state]);
}

static inline void node_clear_state(int node, enum node_states state)
{
	__node_clear(node, &node_states[state]);
}

static inline int num_node_state(enum node_states state)
{
	return nodes_weight(node_states[state]);
}

#define for_each_node_state(__node, __state) \
	for_each_node_mask((__node), node_states[__state])

#define first_online_node	first_node(node_states[N_ONLINE])
#define first_memory_node	first_node(node_states[N_MEMORY])
static inline int next_online_node(int nid)
{
	return next_node(nid, node_states[N_ONLINE]);
}
static inline int next_memory_node(int nid)
{
	return next_node(nid, node_states[N_MEMORY]);
}

extern int nr_node_ids;
extern int nr_online_nodes;

static inline void node_set_online(int nid)
{
	node_set_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

static inline void node_set_offline(int nid)
{
	node_clear_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

#else

static inline int node_state(int node, enum node_states state)
{
	return node == 0;
}

static inline void node_set_state(int node, enum node_states state)
{
}

static inline void node_clear_state(int node, enum node_states state)
{
}

static inline int num_node_state(enum node_states state)
{
	return 1;
}

#define for_each_node_state(node, __state) \
	for ((node) = 0; (node) == 0; (node) = 1)

#define first_online_node	0
#define first_memory_node	0
#define next_online_node(nid)	(MAX_NUMNODES)
#define nr_node_ids		1
#define nr_online_nodes		1

#define node_set_online(node)	   node_set_state((node), N_ONLINE)
#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)

#endif

#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
extern int node_random(const nodemask_t *maskp);
#else
static inline int node_random(const nodemask_t *mask)
{
	return 0;
}
#endif

#define node_online_map		node_states[N_ONLINE]
#define node_possible_map	node_states[N_POSSIBLE]

#define num_online_nodes()	num_node_state(N_ONLINE)
#define num_possible_nodes()	num_node_state(N_POSSIBLE)
#define node_online(node)	node_state((node), N_ONLINE)
#define node_possible(node)	node_state((node), N_POSSIBLE)

#define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
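
/*
 * Example (illustrative): walk every online node and report whether it
 * has cpus:
 *
 *	int nid;
 *
 *	for_each_online_node(nid)
 *		pr_debug("node %d: has cpu? %d\n", nid, node_state(nid, N_CPU));
 */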

/*
 * For a nodemask scratch area.
 * NODEMASK_ALLOC(type, name, gfp_flags) allocates an object of the given
 * type and name.
 */
#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m)			kfree(m)
#else
#define NODEMASK_ALLOC(type, name, gfp_flags)	type _##name, *name = &_##name
#define NODEMASK_FREE(m)			do {} while (0)
#endif
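
/*
 * Usage sketch (illustrative): callers should check for NULL, since the
 * NODES_SHIFT > 8 variant really allocates:
 *
 *	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL);
 *
 *	if (!nodes_allowed)
 *		return -ENOMEM;
 *	...
 *	NODEMASK_FREE(nodes_allowed);
 */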

/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
	nodemask_t	mask1;
	nodemask_t	mask2;
};

#define NODEMASK_SCRATCH(x)						\
			NODEMASK_ALLOC(struct nodemask_scratch, x,	\
					GFP_KERNEL | __GFP_NORETRY)
#define NODEMASK_SCRATCH_FREE(x)	NODEMASK_FREE(x)
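
/*
 * NODEMASK_SCRATCH usage follows the same pattern (sketch modeled on its
 * use in mempolicy; error handling is illustrative):
 *
 *	NODEMASK_SCRATCH(scratch);
 *
 *	if (!scratch)
 *		return -ENOMEM;
 *	... use scratch->mask1 and scratch->mask2 as temporaries ...
 *	NODEMASK_SCRATCH_FREE(scratch);
 */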

#endif /* __LINUX_NODEMASK_H */