xref: /openbmc/linux/lib/cpumask.c (revision 3712bba1a260ad851f3aa8ddea9cb7326f6aa0b3)
15a0e3ad6STejun Heo #include <linux/slab.h>
2ccb46000SAndrew Morton #include <linux/kernel.h>
3ccb46000SAndrew Morton #include <linux/bitops.h>
4ccb46000SAndrew Morton #include <linux/cpumask.h>
58bc3bcc9SPaul Gortmaker #include <linux/export.h>
62d3854a3SRusty Russell #include <linux/bootmem.h>
7ccb46000SAndrew Morton 
82d3854a3SRusty Russell /**
92d3854a3SRusty Russell  * cpumask_next_and - get the next cpu in *src1p & *src2p
102d3854a3SRusty Russell  * @n: the cpu prior to the place to search (ie. return will be > @n)
112d3854a3SRusty Russell  * @src1p: the first cpumask pointer
122d3854a3SRusty Russell  * @src2p: the second cpumask pointer
132d3854a3SRusty Russell  *
142d3854a3SRusty Russell  * Returns >= nr_cpu_ids if no further cpus set in both.
152d3854a3SRusty Russell  */
162d3854a3SRusty Russell int cpumask_next_and(int n, const struct cpumask *src1p,
172d3854a3SRusty Russell 		     const struct cpumask *src2p)
182d3854a3SRusty Russell {
195ca62d65SAndrew Morton 	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
205ca62d65SAndrew Morton 		if (cpumask_test_cpu(n, src2p))
215ca62d65SAndrew Morton 			break;
225ca62d65SAndrew Morton 	return n;
232d3854a3SRusty Russell }
242d3854a3SRusty Russell EXPORT_SYMBOL(cpumask_next_and);
252d3854a3SRusty Russell 
262d3854a3SRusty Russell /**
272d3854a3SRusty Russell  * cpumask_any_but - return a "random" in a cpumask, but not this one.
282d3854a3SRusty Russell  * @mask: the cpumask to search
292d3854a3SRusty Russell  * @cpu: the cpu to ignore.
302d3854a3SRusty Russell  *
312d3854a3SRusty Russell  * Often used to find any cpu but smp_processor_id() in a mask.
322d3854a3SRusty Russell  * Returns >= nr_cpu_ids if no cpus set.
332d3854a3SRusty Russell  */
342d3854a3SRusty Russell int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
352d3854a3SRusty Russell {
362d3854a3SRusty Russell 	unsigned int i;
372d3854a3SRusty Russell 
38984f2f37SRusty Russell 	cpumask_check(cpu);
392d3854a3SRusty Russell 	for_each_cpu(i, mask)
402d3854a3SRusty Russell 		if (i != cpu)
412d3854a3SRusty Russell 			break;
422d3854a3SRusty Russell 	return i;
432d3854a3SRusty Russell }
44*3712bba1SThomas Gleixner EXPORT_SYMBOL(cpumask_any_but);
452d3854a3SRusty Russell 
462d3854a3SRusty Russell /* These are not inline because of header tangles. */
472d3854a3SRusty Russell #ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node to allocate from (NUMA_NO_NODE for no preference)
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	/* cpumask_size() yields the byte size needed for the cpu bits. */
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	/* With per-cpu map debugging on, make allocation failures loud. */
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
767b4967c5SMike Travis 
/**
 * zalloc_cpumask_var_node - allocate a zero-filled struct cpumask on a node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node to allocate from
 *
 * Same as alloc_cpumask_var_node(), but ORs __GFP_ZERO into @flags so
 * the returned mask comes back with all bits cleared.
 */
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);
820281b5dcSYinghai Lu 
/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	/* No node preference: delegate with NUMA_NO_NODE. */
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);
982d3854a3SRusty Russell 
/**
 * zalloc_cpumask_var - allocate a zero-filled struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Same as alloc_cpumask_var(), but ORs __GFP_ZERO into @flags so the
 * returned mask comes back with all bits cleared.
 */
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
1040281b5dcSYinghai Lu 
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	/* Boot-time only: memblock allocation, no failure return path. */
	*mask = memblock_virt_alloc(cpumask_size(), 0);
}
1182d3854a3SRusty Russell 
/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	/* kfree(NULL) is a no-op, hence the NULL-safety above. */
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);
130cd83e42cSRusty Russell 
/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 *
 * Must only be used on masks obtained from alloc_bootmem_cpumask_var(),
 * and only during early boot (__init).
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	/* memblock works on physical addresses, hence the __pa(). */
	memblock_free_early(__pa(mask), cpumask_size());
}
1392d3854a3SRusty Russell #endif
140da91309eSAmir Vadai 
141da91309eSAmir Vadai /**
142f36963c9SRusty Russell  * cpumask_local_spread - select the i'th cpu with local numa cpu's first
143da91309eSAmir Vadai  * @i: index number
144f36963c9SRusty Russell  * @node: local numa_node
145da91309eSAmir Vadai  *
146f36963c9SRusty Russell  * This function selects an online CPU according to a numa aware policy;
147f36963c9SRusty Russell  * local cpus are returned first, followed by non-local ones, then it
148f36963c9SRusty Russell  * wraps around.
149da91309eSAmir Vadai  *
150f36963c9SRusty Russell  * It's not very efficient, but useful for setup.
151da91309eSAmir Vadai  */
152f36963c9SRusty Russell unsigned int cpumask_local_spread(unsigned int i, int node)
153da91309eSAmir Vadai {
154da91309eSAmir Vadai 	int cpu;
155da91309eSAmir Vadai 
156f36963c9SRusty Russell 	/* Wrap: we always want a cpu. */
157da91309eSAmir Vadai 	i %= num_online_cpus();
158da91309eSAmir Vadai 
159f36963c9SRusty Russell 	if (node == -1) {
160f36963c9SRusty Russell 		for_each_cpu(cpu, cpu_online_mask)
161f36963c9SRusty Russell 			if (i-- == 0)
162f36963c9SRusty Russell 				return cpu;
163da91309eSAmir Vadai 	} else {
164f36963c9SRusty Russell 		/* NUMA first. */
165f36963c9SRusty Russell 		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
166f36963c9SRusty Russell 			if (i-- == 0)
167f36963c9SRusty Russell 				return cpu;
168da91309eSAmir Vadai 
169f36963c9SRusty Russell 		for_each_cpu(cpu, cpu_online_mask) {
170f36963c9SRusty Russell 			/* Skip NUMA nodes, done above. */
171f36963c9SRusty Russell 			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
172f36963c9SRusty Russell 				continue;
173da91309eSAmir Vadai 
174f36963c9SRusty Russell 			if (i-- == 0)
175f36963c9SRusty Russell 				return cpu;
176da91309eSAmir Vadai 		}
177da91309eSAmir Vadai 	}
178f36963c9SRusty Russell 	BUG();
179da91309eSAmir Vadai }
180f36963c9SRusty Russell EXPORT_SYMBOL(cpumask_local_spread);
181