// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);

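/*
 * Example (illustrative sketch only, not part of this file): walking the
 * online CPUs starting from a caller-chosen CPU and wrapping around exactly
 * once, which is what for_each_cpu_wrap() builds on top of this helper.
 * The function name example_visit_from() is made up for the example.
 *
 *	static void example_visit_from(unsigned int start)
 *	{
 *		unsigned int cpu;
 *
 *		for_each_cpu_wrap(cpu, cpu_online_mask, start)
 *			pr_info("visiting CPU %u\n", cpu);
 *	}
 */
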
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate, or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

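/*
 * Example (illustrative sketch only, not part of this file): the usual
 * allocate/use/free pattern for an off-stack cpumask.  The function name
 * example_count_online_in() is made up for the example.
 *
 *	static int example_count_online_in(const struct cpumask *restrict_mask)
 *	{
 *		cpumask_var_t tmp;
 *		int n;
 *
 *		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		cpumask_and(tmp, restrict_mask, cpu_online_mask);
 *		n = cpumask_weight(tmp);
 *
 *		free_cpumask_var(tmp);
 *		return n;
 *	}
 */
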
/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu based on NUMA distances
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		cpu = cpumask_nth(i, cpu_online_mask);
		if (cpu < nr_cpu_ids)
			return cpu;
	} else {
		cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);
		if (cpu < nr_cpu_ids)
			return cpu;
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);

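/*
 * Example (illustrative sketch only, not part of this file): a driver
 * spreading its queue interrupts over CPUs near the device's NUMA node,
 * a common use of cpumask_local_spread().  The names example_spread_irqs(),
 * nr_queues and irqs[] are made up for the example.
 *
 *	static void example_spread_irqs(int *irqs, unsigned int nr_queues, int node)
 *	{
 *		unsigned int q;
 *
 *		for (q = 0; q < nr_queues; q++) {
 *			unsigned int cpu = cpumask_local_spread(q, node);
 *
 *			irq_set_affinity_hint(irqs[q], cpumask_of(cpu));
 *		}
 *	}
 */
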
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
 * @src1p: first cpumask pointer
 * @src2p: second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
					nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);

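/*
 * Example (illustrative sketch only, not part of this file): picking a CPU
 * that is both allowed for a task and currently online, while rotating the
 * choice across repeated calls.  The function name example_pick_target()
 * is made up for the example.
 *
 *	static int example_pick_target(struct task_struct *p)
 *	{
 *		unsigned int cpu;
 *
 *		cpu = cpumask_any_and_distribute(p->cpus_ptr, cpu_online_mask);
 *		if (cpu >= nr_cpu_ids)
 *			return -EINVAL;	// intersection was empty
 *
 *		return cpu;
 *	}
 */
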
/**
 * cpumask_any_distribute - Return an arbitrary cpu from srcp
 * @srcp: the cpumask pointer
 *
 * Iterated calls using the same srcp will be distributed within srcp.
 *
 * Returns >= nr_cpu_ids if srcp is empty.
 */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);
	next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
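
/*
 * Example (illustrative sketch only, not part of this file): rotating a
 * periodic job across the CPUs of a caller-provided mask.  The names
 * example_queue_next() and example_work are made up for the example.
 *
 *	static void example_queue_next(const struct cpumask *allowed)
 *	{
 *		unsigned int cpu = cpumask_any_distribute(allowed);
 *
 *		if (cpu < nr_cpu_ids)
 *			queue_work_on(cpu, system_wq, &example_work);
 *	}
 */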