// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 Thomas Gleixner.
 * Copyright (C) 2016-2017 Christoph Hellwig.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>

static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				int cpus_per_vec)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	while (cpus_per_vec > 0) {
		cpu = cpumask_first(nmsk);

		/* Should not happen, but bail out if the mask is empty */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_vec--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_vec > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_vec--;
		}
	}
}
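/*
 * Worked example for the helper above (illustrative, not from the
 * original source): assume CPUs 0 and 1 are SMT siblings of one core
 * and CPUs 2 and 3 of another.  With nmsk = {0,1,2,3} and
 * cpus_per_vec = 2, the outer loop first moves CPU 0 into irqmsk and
 * the sibling loop then prefers CPU 1 over CPU 2, so one vector ends
 * up covering exactly one physical core:
 *
 *	irq_spread_init_one(irqmsk, nmsk, 2);
 *	// irqmsk = {0,1}, nmsk = {2,3}
 */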
static cpumask_var_t *alloc_node_to_cpumask(void)
{
	cpumask_var_t *masks;
	int node;

	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (node = 0; node < nr_node_ids; node++) {
		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
			goto out_unwind;
	}

	return masks;

out_unwind:
	while (--node >= 0)
		free_cpumask_var(masks[node]);
	kfree(masks);
	return NULL;
}

static void free_node_to_cpumask(cpumask_var_t *masks)
{
	int node;

	for (node = 0; node < nr_node_ids; node++)
		free_cpumask_var(masks[node]);
	kfree(masks);
}

static void build_node_to_cpumask(cpumask_var_t *masks)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}

static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
				const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes = 0;

	/* Calculate the number of nodes in the supplied affinity mask */
	for_each_node(n) {
		if (cpumask_intersects(mask, node_to_cpumask[n])) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}

static int irq_build_affinity_masks(const struct irq_affinity *affd,
				    int startvec, int numvecs,
				    cpumask_var_t *node_to_cpumask,
				    const struct cpumask *cpu_mask,
				    struct cpumask *nmsk,
				    struct cpumask *masks)
{
	int n, nodes, cpus_per_vec, extra_vecs, done = 0;
	int last_affv = affd->pre_vectors + numvecs;
	int curvec = startvec;
	nodemask_t nodemsk = NODE_MASK_NONE;

	if (!cpumask_weight(cpu_mask))
		return 0;

	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);

	/*
	 * If the number of nodes in the mask is greater than or equal to the
	 * number of vectors, just spread the vectors across the nodes.
	 */
	if (numvecs <= nodes) {
		for_each_node_mask(n, nodemsk) {
			cpumask_copy(masks + curvec, node_to_cpumask[n]);
			if (++done == numvecs)
				break;
			if (++curvec == last_affv)
				curvec = affd->pre_vectors;
		}
		goto out;
	}

	for_each_node_mask(n, nodemsk) {
		int ncpus, v, vecs_to_assign, vecs_per_node;

		/* Spread the vectors per node */
		vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);

		/* Calculate the number of cpus per vector */
		ncpus = cpumask_weight(nmsk);
		vecs_to_assign = min(vecs_per_node, ncpus);

		/* Account for rounding errors */
		extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);

		for (v = 0; curvec < last_affv && v < vecs_to_assign;
		     curvec++, v++) {
			cpus_per_vec = ncpus / vecs_to_assign;

			/* Account for extra vectors to compensate rounding errors */
			if (extra_vecs) {
				cpus_per_vec++;
				--extra_vecs;
			}
			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
		}

		done += v;
		if (done >= numvecs)
			break;
		if (curvec >= last_affv)
			curvec = affd->pre_vectors;
		--nodes;
	}

out:
	return done;
}
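/*
 * Worked example for the spread above (illustrative figures, not from
 * the original source): numvecs = 5 over two nodes whose shares of
 * cpu_mask hold 16 and 8 CPUs.  The first node gets
 * vecs_per_node = 5 / 2 = 2 vectors of 16 / 2 = 8 CPUs each.  The
 * second node gets the remaining (5 - 2) / 1 = 3 vectors; with
 * ncpus = 8, cpus_per_vec = 8 / 3 = 2 and extra_vecs = 8 - 3 * 2 = 2,
 * so the first two of those vectors take 3 CPUs and the last takes 2.
 */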
/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs: The total number of vectors
 * @affd: Description of the affinity requirements
 *
 * Returns the masks pointer or NULL if allocation failed.
 */
struct cpumask *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
	int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
	int curvec, usedvecs;
	cpumask_var_t nmsk, npresmsk, *node_to_cpumask;
	struct cpumask *masks = NULL;

	/*
	 * If there aren't any vectors left after applying the pre/post
	 * vectors, don't bother with assigning affinity.
	 */
	if (nvecs == affd->pre_vectors + affd->post_vectors)
		return NULL;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return NULL;

	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
		goto outcpumsk;

	node_to_cpumask = alloc_node_to_cpumask();
	if (!node_to_cpumask)
		goto outnpresmsk;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		goto outnodemsk;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(masks + curvec, irq_default_affinity);

	/* Stabilize the cpumasks */
	get_online_cpus();
	build_node_to_cpumask(node_to_cpumask);

	/* Spread on present CPUs starting from affd->pre_vectors */
	usedvecs = irq_build_affinity_masks(affd, curvec, affvecs,
					    node_to_cpumask, cpu_present_mask,
					    nmsk, masks);

	/*
	 * Spread on non-present CPUs starting from the next vector to be
	 * handled. If the spreading of present CPUs already exhausted the
	 * vector space, assign the non-present CPUs to the already spread
	 * out vectors.
	 */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors;
	else
		curvec = affd->pre_vectors + usedvecs;
	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
	usedvecs += irq_build_affinity_masks(affd, curvec, affvecs,
					     node_to_cpumask, npresmsk,
					     nmsk, masks);
	put_online_cpus();

	/* Fill out vectors at the end that don't need affinity */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors + affvecs;
	else
		curvec = affd->pre_vectors + usedvecs;
	for (; curvec < nvecs; curvec++)
		cpumask_copy(masks + curvec, irq_default_affinity);

outnodemsk:
	free_node_to_cpumask(node_to_cpumask);
outnpresmsk:
	free_cpumask_var(npresmsk);
outcpumsk:
	free_cpumask_var(nmsk);
	return masks;
}

/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @minvec: The minimum number of vectors available
 * @maxvec: The maximum number of vectors available
 * @affd: Description of the affinity requirements
 *
 * Returns the number of vectors to request, or 0 if the reserved pre/post
 * vectors do not fit within @minvec.
 */
int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
{
	int resv = affd->pre_vectors + affd->post_vectors;
	int vecs = maxvec - resv;
	int ret;

	if (resv > minvec)
		return 0;

	get_online_cpus();
	ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
	put_online_cpus();
	return ret;
}
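/*
 * Usage sketch (illustrative only; most drivers reach this code via
 * pci_alloc_irq_vectors_affinity() rather than calling it directly,
 * and the figures below are hypothetical): a driver reserving one
 * pre-vector for an admin queue might size and spread its vectors as
 *
 *	struct irq_affinity affd = { .pre_vectors = 1, .post_vectors = 0 };
 *	struct cpumask *masks;
 *	int nvecs;
 *
 *	nvecs = irq_calc_affinity_vectors(2, 32, &affd);
 *	masks = irq_create_affinity_masks(nvecs, &affd);
 *	// masks[0] keeps irq_default_affinity for the admin queue,
 *	// masks[1..nvecs-1] are spread over the present CPUs.
 *	kfree(masks);
 */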