// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);
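
/*
 * Usage sketch (illustrative only, not part of this file): cpumask_next()
 * is the primitive behind the for_each_cpu() iterator. Starting from -1
 * yields the first set cpu; the walk is over once the return value
 * reaches nr_cpu_ids. The function name below is hypothetical.
 */
#if 0	/* example only */
static void example_walk_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for (cpu = cpumask_next(-1, mask);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, mask))
		pr_info("cpu %u is set\n", cpu);
}
#endif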

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);
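
/*
 * Usage sketch (illustrative only, not part of this file): walking the
 * intersection of two masks with cpumask_next_and(), equivalent to the
 * for_each_cpu_and() iterator but without materialising a temporary
 * mask. The function name below is hypothetical.
 */
#if 0	/* example only */
static void example_walk_intersection(const struct cpumask *a,
				      const struct cpumask *b)
{
	int cpu;

	for (cpu = cpumask_next_and(-1, a, b);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, a, b))
		pr_info("cpu %d is set in both masks\n", cpu);
}
#endif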

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);
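
/*
 * Usage sketch (illustrative only, not part of this file): picking any
 * online cpu other than the one we are running on, e.g. to hand work to
 * a peer. smp_processor_id() requires preemption to be disabled here.
 * The function name below is hypothetical.
 */
#if 0	/* example only */
static unsigned int example_pick_other_cpu(void)
{
	return cpumask_any_but(cpu_online_mask, smp_processor_id());
}
#endif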

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;
	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
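
/*
 * Usage sketch (illustrative only, not part of this file): a hand-rolled
 * for_each_cpu_wrap(cpu, mask, start). The scan runs from @start to the
 * end of the mask, wraps to cpu 0, and stops just before reaching
 * @start again. The function name below is hypothetical.
 */
#if 0	/* example only */
static void example_wrap_scan(const struct cpumask *mask, int start)
{
	int cpu;

	for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next_wrap(cpu, mask, start, true))
		pr_info("visiting cpu %d\n", cpu);
}
#endif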

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate, or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
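
/*
 * Usage sketch (illustrative only, not part of this file): the
 * canonical allocate/use/free pattern for a cpumask_var_t. The
 * allocation must be checked, since it can fail when
 * CONFIG_CPUMASK_OFFSTACK=y. The function name below is hypothetical.
 */
#if 0	/* example only */
static int example_offstack_mask(void)
{
	cpumask_var_t tmp;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, cpu_online_mask, cpu_possible_mask);
	pr_info("%u cpus in intersection\n", cpumask_weight(tmp));

	free_cpumask_var(tmp);
	return 0;
}
#endif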

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif
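
/*
 * Usage sketch (illustrative only, not part of this file): boot-time
 * allocation with alloc_bootmem_cpumask_var(). It panics on failure, so
 * there is no return value to check. The boot_mask variable and
 * function name below are hypothetical.
 */
#if 0	/* example only */
static cpumask_var_t boot_mask;

static void __init example_boot_mask_setup(void)
{
	alloc_bootmem_cpumask_var(&boot_mask);
	cpumask_copy(boot_mask, cpu_possible_mask);
}
#endif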

/**
 * cpumask_local_spread - select the i'th cpu, with local NUMA cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a NUMA-aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu, hk_flags;
	const struct cpumask *mask;

	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
	mask = housekeeping_cpumask(hk_flags);
	/* Wrap: we always want a cpu. */
	i %= cpumask_weight(mask);

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, mask) {
			if (i-- == 0)
				return cpu;
		}
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
			if (i-- == 0)
				return cpu;
		}

		for_each_cpu(cpu, mask) {
			/* Skip the local node's cpus, handled above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
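
/*
 * Usage sketch (illustrative only, not part of this file): spreading a
 * device's queues across cpus, preferring those local to the device's
 * NUMA node. Queue q simply gets the q'th cpu under the policy. The
 * function name below is hypothetical.
 */
#if 0	/* example only */
static void example_spread_queues(unsigned int nr_queues, int node)
{
	unsigned int q;

	for (q = 0; q < nr_queues; q++)
		pr_info("queue %u -> cpu %u\n", q,
			cpumask_local_spread(q, node));
}
#endif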

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - pick an arbitrary cpu from *src1p & *src2p
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
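
/*
 * Usage sketch (illustrative only, not part of this file): successive
 * calls rotate through the intersection via the per-cpu bookmark, so
 * repeatedly picking a target cpu spreads work instead of always
 * hitting the first cpu in the mask. The function name below is
 * hypothetical.
 */
#if 0	/* example only */
static int example_pick_target(const struct cpumask *allowed)
{
	return cpumask_any_and_distribute(allowed, cpu_online_mask);
}
#endif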