// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;
	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails. Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, and eliminates the NULL checks in that
 * case too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif
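/*
 * Illustrative sketch only (guarded out, not built): the intended
 * alloc/free pairing for cpumask_var_t. The function name and the
 * node-local computation are hypothetical, and cpumask_of_node() would
 * need <linux/topology.h>; the cpumask calls themselves are real.
 * With CONFIG_CPUMASK_OFFSTACK=n the allocation compiles away and the
 * mask lives on the stack.
 */
#if 0
static int example_count_node_online_cpus(int node)
{
	cpumask_var_t mask;
	int n;

	/* May sleep with GFP_KERNEL; returns false on allocation failure. */
	if (!alloc_cpumask_var_node(&mask, GFP_KERNEL, node))
		return -ENOMEM;

	/* Intersect the node's CPUs with the online mask and count them. */
	cpumask_and(mask, cpumask_of_node(node), cpu_online_mask);
	n = cpumask_weight(mask);

	free_cpumask_var(mask);		/* safe on a NULL mask too */
	return n;
}
#endif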
/**
 * cpumask_local_spread - select the i'th cpu based on NUMA distances
 * @i: index number
 * @node: local numa_node
 *
 * Returns online CPU according to a numa aware policy; local cpus are returned
 * first, followed by non-local ones, then it wraps around.
 *
 * For those who want to enumerate all CPUs based on their NUMA distances,
 * i.e. call this function in a loop, like:
 *
 *	for (i = 0; i < num_online_cpus(); i++) {
 *		cpu = cpumask_local_spread(i, node);
 *		do_something(cpu);
 *	}
 *
 * There's a better alternative based on for_each()-like iterators:
 *
 *	for_each_numa_hop_mask(mask, node) {
 *		for_each_cpu_andnot(cpu, mask, prev)
 *			do_something(cpu);
 *		prev = mask;
 *	}
 *
 * It's slightly more verbose than the loop above, but simpler and cheaper:
 * the complexity of iterator-based enumeration is
 * O(sched_domains_numa_levels * nr_cpu_ids), while cpumask_local_spread()
 * called for each cpu is
 * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);

	WARN_ON(cpu >= nr_cpu_ids);
	return cpu;
}
EXPORT_SYMBOL(cpumask_local_spread);

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
 * @src1p: first &cpumask for intersection
 * @src2p: second &cpumask for intersection
 *
 * Iterated calls using the same @src1p and @src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
				      nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);

/**
 * cpumask_any_distribute - Return an arbitrary cpu from @srcp.
 * @srcp: &cpumask for selection
 *
 * Iterated calls using the same @srcp will be distributed within @srcp.
 *
 * Returns >= nr_cpu_ids if @srcp is empty.
 */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);
	next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
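/*
 * Illustrative sketch only (guarded out, not built): picking a target CPU
 * with cpumask_any_and_distribute(). The function name and the @affinity
 * argument are hypothetical; only the cpumask call is real. Repeated calls
 * with the same masks walk the intersection round-robin, because the last
 * pick is remembered in the per-cpu distribute_cpu_mask_prev above.
 */
#if 0
static int example_pick_target_cpu(const struct cpumask *affinity)
{
	unsigned int cpu;

	/* Only consider CPUs that are both allowed and online. */
	cpu = cpumask_any_and_distribute(affinity, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		return -ENODEV;	/* empty intersection */

	return cpu;
}
#endif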