xref: /openbmc/linux/kernel/sched/topology.c (revision 7f434dff)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Scheduler topology setup/handling methods
4  */
5 #include "sched.h"
6 
7 DEFINE_MUTEX(sched_domains_mutex);
8 
9 /* Protected by sched_domains_mutex: */
10 static cpumask_var_t sched_domains_tmpmask;
11 static cpumask_var_t sched_domains_tmpmask2;
12 
13 #ifdef CONFIG_SCHED_DEBUG
14 
15 static int __init sched_debug_setup(char *str)
16 {
17 	sched_debug_verbose = true;
18 
19 	return 0;
20 }
21 early_param("sched_verbose", sched_debug_setup);
22 
23 static inline bool sched_debug(void)
24 {
25 	return sched_debug_verbose;
26 }
27 
28 #define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
29 const struct sd_flag_debug sd_flag_debug[] = {
30 #include <linux/sched/sd_flags.h>
31 };
32 #undef SD_FLAG
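/*
 * Illustrative expansion: an <linux/sched/sd_flags.h> entry such as
 * SD_FLAG(SD_NUMA, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) becomes
 *
 *	[__SD_NUMA] = { .meta_flags = SDF_SHARED_PARENT | SDF_NEEDS_GROUPS,
 *			.name = "SD_NUMA" },
 *
 * i.e. a table mapping each flag's bit index to its name and metadata,
 * consumed by sched_domain_debug_one() below.
 */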
33 
34 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
35 				  struct cpumask *groupmask)
36 {
37 	struct sched_group *group = sd->groups;
38 	unsigned long flags = sd->flags;
39 	unsigned int idx;
40 
41 	cpumask_clear(groupmask);
42 
43 	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
44 	printk(KERN_CONT "span=%*pbl level=%s\n",
45 	       cpumask_pr_args(sched_domain_span(sd)), sd->name);
46 
47 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
48 		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
49 	}
50 	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
51 		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
52 	}
53 
54 	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
55 		unsigned int flag = BIT(idx);
56 		unsigned int meta_flags = sd_flag_debug[idx].meta_flags;
57 
58 		if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
59 		    !(sd->child->flags & flag))
60 			printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
61 			       sd_flag_debug[idx].name);
62 
63 		if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
64 		    !(sd->parent->flags & flag))
65 			printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
66 			       sd_flag_debug[idx].name);
67 	}
68 
69 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
70 	do {
71 		if (!group) {
72 			printk("\n");
73 			printk(KERN_ERR "ERROR: group is NULL\n");
74 			break;
75 		}
76 
77 		if (cpumask_empty(sched_group_span(group))) {
78 			printk(KERN_CONT "\n");
79 			printk(KERN_ERR "ERROR: empty group\n");
80 			break;
81 		}
82 
83 		if (!(sd->flags & SD_OVERLAP) &&
84 		    cpumask_intersects(groupmask, sched_group_span(group))) {
85 			printk(KERN_CONT "\n");
86 			printk(KERN_ERR "ERROR: repeated CPUs\n");
87 			break;
88 		}
89 
90 		cpumask_or(groupmask, groupmask, sched_group_span(group));
91 
92 		printk(KERN_CONT " %d:{ span=%*pbl",
93 				group->sgc->id,
94 				cpumask_pr_args(sched_group_span(group)));
95 
96 		if ((sd->flags & SD_OVERLAP) &&
97 		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
98 			printk(KERN_CONT " mask=%*pbl",
99 				cpumask_pr_args(group_balance_mask(group)));
100 		}
101 
102 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
103 			printk(KERN_CONT " cap=%lu", group->sgc->capacity);
104 
105 		if (group == sd->groups && sd->child &&
106 		    !cpumask_equal(sched_domain_span(sd->child),
107 				   sched_group_span(group))) {
108 			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
109 		}
110 
111 		printk(KERN_CONT " }");
112 
113 		group = group->next;
114 
115 		if (group != sd->groups)
116 			printk(KERN_CONT ",");
117 
118 	} while (group != sd->groups);
119 	printk(KERN_CONT "\n");
120 
121 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
122 		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
123 
124 	if (sd->parent &&
125 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
126 		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
127 	return 0;
128 }
129 
130 static void sched_domain_debug(struct sched_domain *sd, int cpu)
131 {
132 	int level = 0;
133 
134 	if (!sched_debug_verbose)
135 		return;
136 
137 	if (!sd) {
138 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
139 		return;
140 	}
141 
142 	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
143 
144 	for (;;) {
145 		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
146 			break;
147 		level++;
148 		sd = sd->parent;
149 		if (!sd)
150 			break;
151 	}
152 }
153 #else /* !CONFIG_SCHED_DEBUG */
154 
155 # define sched_debug_verbose 0
156 # define sched_domain_debug(sd, cpu) do { } while (0)
157 static inline bool sched_debug(void)
158 {
159 	return false;
160 }
161 #endif /* CONFIG_SCHED_DEBUG */
162 
163 /* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
164 #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
165 static const unsigned int SD_DEGENERATE_GROUPS_MASK =
166 #include <linux/sched/sd_flags.h>
167 0;
168 #undef SD_FLAG
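/*
 * Each SD_FLAG(name, mflags) entry above expands to either "name |" (when
 * SDF_NEEDS_GROUPS is in its metadata) or "0 |", so the resulting mask is
 * the OR of exactly those flags that are meaningless without at least two
 * groups.
 */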
169 
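/*
 * A sched domain is "degenerate" when it can no longer influence any
 * scheduling decision: it spans a single CPU, or none of its remaining
 * flags make use of the group structure it provides. Such domains are
 * pruned in cpu_attach_domain() to shorten the domain walks on hot paths.
 */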
170 static int sd_degenerate(struct sched_domain *sd)
171 {
172 	if (cpumask_weight(sched_domain_span(sd)) == 1)
173 		return 1;
174 
175 	/* Following flags need at least 2 groups */
176 	if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
177 	    (sd->groups != sd->groups->next))
178 		return 0;
179 
180 	/* Following flags don't use groups */
181 	if (sd->flags & (SD_WAKE_AFFINE))
182 		return 0;
183 
184 	return 1;
185 }
186 
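/*
 * A parent domain can be collapsed into its child when the parent is
 * itself degenerate, or when it spans the same CPUs as the child while
 * adding no flags (and hence no behaviour) beyond those of the child.
 */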
187 static int
188 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
189 {
190 	unsigned long cflags = sd->flags, pflags = parent->flags;
191 
192 	if (sd_degenerate(parent))
193 		return 1;
194 
195 	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
196 		return 0;
197 
198 	/* Flags needing groups don't count if only 1 group in parent */
199 	if (parent->groups == parent->groups->next)
200 		pflags &= ~SD_DEGENERATE_GROUPS_MASK;
201 
202 	if (~cflags & pflags)
203 		return 0;
204 
205 	return 1;
206 }
207 
208 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
209 DEFINE_STATIC_KEY_FALSE(sched_energy_present);
210 unsigned int sysctl_sched_energy_aware = 1;
211 DEFINE_MUTEX(sched_energy_mutex);
212 bool sched_energy_update;
213 
214 void rebuild_sched_domains_energy(void)
215 {
216 	mutex_lock(&sched_energy_mutex);
217 	sched_energy_update = true;
218 	rebuild_sched_domains();
219 	sched_energy_update = false;
220 	mutex_unlock(&sched_energy_mutex);
221 }
222 
223 #ifdef CONFIG_PROC_SYSCTL
224 int sched_energy_aware_handler(struct ctl_table *table, int write,
225 		void *buffer, size_t *lenp, loff_t *ppos)
226 {
227 	int ret, state;
228 
229 	if (write && !capable(CAP_SYS_ADMIN))
230 		return -EPERM;
231 
232 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
233 	if (!ret && write) {
234 		state = static_branch_unlikely(&sched_energy_present);
235 		if (state != sysctl_sched_energy_aware)
236 			rebuild_sched_domains_energy();
237 	}
238 
239 	return ret;
240 }
241 #endif
242 
243 static void free_pd(struct perf_domain *pd)
244 {
245 	struct perf_domain *tmp;
246 
247 	while (pd) {
248 		tmp = pd->next;
249 		kfree(pd);
250 		pd = tmp;
251 	}
252 }
253 
254 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
255 {
256 	while (pd) {
257 		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
258 			return pd;
259 		pd = pd->next;
260 	}
261 
262 	return NULL;
263 }
264 
265 static struct perf_domain *pd_init(int cpu)
266 {
267 	struct em_perf_domain *obj = em_cpu_get(cpu);
268 	struct perf_domain *pd;
269 
270 	if (!obj) {
271 		if (sched_debug())
272 			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
273 		return NULL;
274 	}
275 
276 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
277 	if (!pd)
278 		return NULL;
279 	pd->em_pd = obj;
280 
281 	return pd;
282 }
283 
284 static void perf_domain_debug(const struct cpumask *cpu_map,
285 						struct perf_domain *pd)
286 {
287 	if (!sched_debug() || !pd)
288 		return;
289 
290 	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
291 
292 	while (pd) {
293 		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
294 				cpumask_first(perf_domain_span(pd)),
295 				cpumask_pr_args(perf_domain_span(pd)),
296 				em_pd_nr_perf_states(pd->em_pd));
297 		pd = pd->next;
298 	}
299 
300 	printk(KERN_CONT "\n");
301 }
302 
303 static void destroy_perf_domain_rcu(struct rcu_head *rp)
304 {
305 	struct perf_domain *pd;
306 
307 	pd = container_of(rp, struct perf_domain, rcu);
308 	free_pd(pd);
309 }
310 
311 static void sched_energy_set(bool has_eas)
312 {
313 	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
314 		if (sched_debug())
315 			pr_info("%s: stopping EAS\n", __func__);
316 		static_branch_disable_cpuslocked(&sched_energy_present);
317 	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
318 		if (sched_debug())
319 			pr_info("%s: starting EAS\n", __func__);
320 		static_branch_enable_cpuslocked(&sched_energy_present);
321 	}
322 }
323 
324 /*
325  * EAS can be used on a root domain if it meets all the following conditions:
326  *    1. an Energy Model (EM) is available;
327  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
328  *    3. no SMT is detected;
329  *    4. the EM complexity is low enough to keep scheduling overheads low;
330  *    5. schedutil is driving the frequency of all CPUs of the rd;
331  *    6. frequency invariance support is present.
332  *
333  * The complexity of the Energy Model is defined as:
334  *
335  *              C = nr_pd * (nr_cpus + nr_ps)
336  *
337  * with parameters defined as:
338  *  - nr_pd:    the number of performance domains
339  *  - nr_cpus:  the number of CPUs
340  *  - nr_ps:    the sum of the number of performance states of all performance
341  *              domains (for example, on a system with 2 performance domains,
342  *              with 10 performance states each, nr_ps = 2 * 10 = 20).
343  *
344  * It is generally not a good idea to use such a model in the wake-up path on
345  * very complex platforms because of the associated scheduling overheads. The
346  * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
347  * with per-CPU DVFS and less than 8 performance states each, for example.
348  */
349 #define EM_MAX_COMPLEXITY 2048
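/*
 * Worked example (illustrative): 16 CPUs with per-CPU DVFS gives
 * nr_pd = 16 and nr_cpus = 16; with 7 performance states per domain,
 * nr_ps = 16 * 7 = 112, so C = 16 * (16 + 112) = 2048, just within the
 * limit. At 8 states each, C = 16 * (16 + 128) = 2304 and EAS would be
 * rejected.
 */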
350 
351 extern struct cpufreq_governor schedutil_gov;
352 static bool build_perf_domains(const struct cpumask *cpu_map)
353 {
354 	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
355 	struct perf_domain *pd = NULL, *tmp;
356 	int cpu = cpumask_first(cpu_map);
357 	struct root_domain *rd = cpu_rq(cpu)->rd;
358 	struct cpufreq_policy *policy;
359 	struct cpufreq_governor *gov;
360 
361 	if (!sysctl_sched_energy_aware)
362 		goto free;
363 
364 	/* EAS is enabled for asymmetric CPU capacity topologies. */
365 	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
366 		if (sched_debug()) {
367 			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
368 					cpumask_pr_args(cpu_map));
369 		}
370 		goto free;
371 	}
372 
373 	/* EAS definitely does *not* handle SMT */
374 	if (sched_smt_active()) {
375 		pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
376 			cpumask_pr_args(cpu_map));
377 		goto free;
378 	}
379 
380 	if (!arch_scale_freq_invariant()) {
381 		if (sched_debug()) {
382 			pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported",
383 				cpumask_pr_args(cpu_map));
384 		}
385 		goto free;
386 	}
387 
388 	for_each_cpu(i, cpu_map) {
389 		/* Skip already covered CPUs. */
390 		if (find_pd(pd, i))
391 			continue;
392 
393 		/* Do not attempt EAS if schedutil is not being used. */
394 		policy = cpufreq_cpu_get(i);
395 		if (!policy)
396 			goto free;
397 		gov = policy->governor;
398 		cpufreq_cpu_put(policy);
399 		if (gov != &schedutil_gov) {
400 			if (rd->pd)
401 				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
402 						cpumask_pr_args(cpu_map));
403 			goto free;
404 		}
405 
406 		/* Create the new pd and add it to the local list. */
407 		tmp = pd_init(i);
408 		if (!tmp)
409 			goto free;
410 		tmp->next = pd;
411 		pd = tmp;
412 
413 		/*
414 		 * Count performance domains and performance states for the
415 		 * complexity check.
416 		 */
417 		nr_pd++;
418 		nr_ps += em_pd_nr_perf_states(pd->em_pd);
419 	}
420 
421 	/* Bail out if the Energy Model complexity is too high. */
422 	if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
423 		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
424 						cpumask_pr_args(cpu_map));
425 		goto free;
426 	}
427 
428 	perf_domain_debug(cpu_map, pd);
429 
430 	/* Attach the new list of performance domains to the root domain. */
431 	tmp = rd->pd;
432 	rcu_assign_pointer(rd->pd, pd);
433 	if (tmp)
434 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
435 
436 	return !!pd;
437 
438 free:
439 	free_pd(pd);
440 	tmp = rd->pd;
441 	rcu_assign_pointer(rd->pd, NULL);
442 	if (tmp)
443 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
444 
445 	return false;
446 }
447 #else
448 static void free_pd(struct perf_domain *pd) { }
449 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL*/
450 
451 static void free_rootdomain(struct rcu_head *rcu)
452 {
453 	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
454 
455 	cpupri_cleanup(&rd->cpupri);
456 	cpudl_cleanup(&rd->cpudl);
457 	free_cpumask_var(rd->dlo_mask);
458 	free_cpumask_var(rd->rto_mask);
459 	free_cpumask_var(rd->online);
460 	free_cpumask_var(rd->span);
461 	free_pd(rd->pd);
462 	kfree(rd);
463 }
464 
465 void rq_attach_root(struct rq *rq, struct root_domain *rd)
466 {
467 	struct root_domain *old_rd = NULL;
468 	unsigned long flags;
469 
470 	raw_spin_rq_lock_irqsave(rq, flags);
471 
472 	if (rq->rd) {
473 		old_rd = rq->rd;
474 
475 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
476 			set_rq_offline(rq);
477 
478 		cpumask_clear_cpu(rq->cpu, old_rd->span);
479 
480 		/*
481 		 * If we don't want to free the old_rd yet then
482 		 * set old_rd to NULL to skip the freeing later
483 		 * in this function:
484 		 */
485 		if (!atomic_dec_and_test(&old_rd->refcount))
486 			old_rd = NULL;
487 	}
488 
489 	atomic_inc(&rd->refcount);
490 	rq->rd = rd;
491 
492 	cpumask_set_cpu(rq->cpu, rd->span);
493 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
494 		set_rq_online(rq);
495 
496 	raw_spin_rq_unlock_irqrestore(rq, flags);
497 
498 	if (old_rd)
499 		call_rcu(&old_rd->rcu, free_rootdomain);
500 }
501 
502 void sched_get_rd(struct root_domain *rd)
503 {
504 	atomic_inc(&rd->refcount);
505 }
506 
507 void sched_put_rd(struct root_domain *rd)
508 {
509 	if (!atomic_dec_and_test(&rd->refcount))
510 		return;
511 
512 	call_rcu(&rd->rcu, free_rootdomain);
513 }
514 
515 static int init_rootdomain(struct root_domain *rd)
516 {
517 	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
518 		goto out;
519 	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
520 		goto free_span;
521 	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
522 		goto free_online;
523 	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
524 		goto free_dlo_mask;
525 
526 #ifdef HAVE_RT_PUSH_IPI
527 	rd->rto_cpu = -1;
528 	raw_spin_lock_init(&rd->rto_lock);
529 	rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
530 #endif
531 
532 	rd->visit_gen = 0;
533 	init_dl_bw(&rd->dl_bw);
534 	if (cpudl_init(&rd->cpudl) != 0)
535 		goto free_rto_mask;
536 
537 	if (cpupri_init(&rd->cpupri) != 0)
538 		goto free_cpudl;
539 	return 0;
540 
541 free_cpudl:
542 	cpudl_cleanup(&rd->cpudl);
543 free_rto_mask:
544 	free_cpumask_var(rd->rto_mask);
545 free_dlo_mask:
546 	free_cpumask_var(rd->dlo_mask);
547 free_online:
548 	free_cpumask_var(rd->online);
549 free_span:
550 	free_cpumask_var(rd->span);
551 out:
552 	return -ENOMEM;
553 }
554 
555 /*
556  * By default the system creates a single root-domain with all CPUs as
557  * members (mimicking the global state we have today).
558  */
559 struct root_domain def_root_domain;
560 
561 void init_defrootdomain(void)
562 {
563 	init_rootdomain(&def_root_domain);
564 
565 	atomic_set(&def_root_domain.refcount, 1);
566 }
567 
568 static struct root_domain *alloc_rootdomain(void)
569 {
570 	struct root_domain *rd;
571 
572 	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
573 	if (!rd)
574 		return NULL;
575 
576 	if (init_rootdomain(rd) != 0) {
577 		kfree(rd);
578 		return NULL;
579 	}
580 
581 	return rd;
582 }
583 
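/*
 * Walk the circular group list once, dropping one reference on each group
 * (and, if @free_sgc, on its sched_group_capacity) and freeing whatever
 * reaches a zero refcount.
 */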
584 static void free_sched_groups(struct sched_group *sg, int free_sgc)
585 {
586 	struct sched_group *tmp, *first;
587 
588 	if (!sg)
589 		return;
590 
591 	first = sg;
592 	do {
593 		tmp = sg->next;
594 
595 		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
596 			kfree(sg->sgc);
597 
598 		if (atomic_dec_and_test(&sg->ref))
599 			kfree(sg);
600 		sg = tmp;
601 	} while (sg != first);
602 }
603 
604 static void destroy_sched_domain(struct sched_domain *sd)
605 {
606 	/*
607 	 * A normal sched domain may have multiple group references; an
608 	 * overlapping domain, having private groups, has only one. Iterate,
609 	 * dropping group/capacity references, freeing where none remain.
610 	 */
611 	free_sched_groups(sd->groups, 1);
612 
613 	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
614 		kfree(sd->shared);
615 	kfree(sd);
616 }
617 
618 static void destroy_sched_domains_rcu(struct rcu_head *rcu)
619 {
620 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
621 
622 	while (sd) {
623 		struct sched_domain *parent = sd->parent;
624 		destroy_sched_domain(sd);
625 		sd = parent;
626 	}
627 }
628 
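/*
 * rq->sd and the domain hierarchy are dereferenced under rcu_read_lock()
 * on wakeup and load-balance paths, so tear-down must wait for a grace
 * period before freeing the whole chain.
 */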
629 static void destroy_sched_domains(struct sched_domain *sd)
630 {
631 	if (sd)
632 		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
633 }
634 
635 /*
636  * Keep a special pointer to the highest sched_domain that has
637  * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain), since this
638  * allows us to avoid some pointer chasing in select_idle_sibling().
639  *
640  * Also keep a unique ID per domain (we use the first CPU number in
641  * the cpumask of the domain), this allows us to quickly tell if
642  * two CPUs are in the same cache domain, see cpus_share_cache().
643  */
644 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
645 DEFINE_PER_CPU(int, sd_llc_size);
646 DEFINE_PER_CPU(int, sd_llc_id);
647 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
648 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
649 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
650 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
651 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
652 
653 static void update_top_cache_domain(int cpu)
654 {
655 	struct sched_domain_shared *sds = NULL;
656 	struct sched_domain *sd;
657 	int id = cpu;
658 	int size = 1;
659 
660 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
661 	if (sd) {
662 		id = cpumask_first(sched_domain_span(sd));
663 		size = cpumask_weight(sched_domain_span(sd));
664 		sds = sd->shared;
665 	}
666 
667 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
668 	per_cpu(sd_llc_size, cpu) = size;
669 	per_cpu(sd_llc_id, cpu) = id;
670 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
671 
672 	sd = lowest_flag_domain(cpu, SD_NUMA);
673 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
674 
675 	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
676 	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
677 
678 	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
679 	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
680 }
681 
682 /*
683  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
684  * hold the hotplug lock.
685  */
686 static void
687 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
688 {
689 	struct rq *rq = cpu_rq(cpu);
690 	struct sched_domain *tmp;
691 
692 	/* Remove the sched domains which do not contribute to scheduling. */
693 	for (tmp = sd; tmp; ) {
694 		struct sched_domain *parent = tmp->parent;
695 		if (!parent)
696 			break;
697 
698 		if (sd_parent_degenerate(tmp, parent)) {
699 			tmp->parent = parent->parent;
700 			if (parent->parent)
701 				parent->parent->child = tmp;
702 			/*
703 			 * Transfer SD_PREFER_SIBLING down in case of a
704 			 * degenerate parent; the spans match for this
705 			 * so the property transfers.
706 			 */
707 			if (parent->flags & SD_PREFER_SIBLING)
708 				tmp->flags |= SD_PREFER_SIBLING;
709 			destroy_sched_domain(parent);
710 		} else
711 			tmp = tmp->parent;
712 	}
713 
714 	if (sd && sd_degenerate(sd)) {
715 		tmp = sd;
716 		sd = sd->parent;
717 		destroy_sched_domain(tmp);
718 		if (sd) {
719 			struct sched_group *sg = sd->groups;
720 
721 			/*
722 			 * sched groups hold the flags of the child sched
723 			 * domain for convenience. Clear such flags since
724 			 * the child is being destroyed.
725 			 */
726 			do {
727 				sg->flags = 0;
				sg = sg->next;
728 			} while (sg != sd->groups);
729 
730 			sd->child = NULL;
731 		}
732 	}
733 
734 	sched_domain_debug(sd, cpu);
735 
736 	rq_attach_root(rq, rd);
737 	tmp = rq->sd;
738 	rcu_assign_pointer(rq->sd, sd);
739 	dirty_sched_domain_sysctl(cpu);
740 	destroy_sched_domains(tmp);
741 
742 	update_top_cache_domain(cpu);
743 }
744 
745 struct s_data {
746 	struct sched_domain * __percpu *sd;
747 	struct root_domain	*rd;
748 };
749 
750 enum s_alloc {
751 	sa_rootdomain,
752 	sa_sd,
753 	sa_sd_storage,
754 	sa_none,
755 };
756 
757 /*
758  * Return the canonical balance CPU for this group: the first CPU
759  * of this group that's also in the balance mask.
760  *
761  * The balance mask contains all those CPUs that could actually end up at this
762  * group. See build_balance_mask().
763  *
764  * Also see should_we_balance().
765  */
766 int group_balance_cpu(struct sched_group *sg)
767 {
768 	return cpumask_first(group_balance_mask(sg));
769 }
770 
771 
772 /*
773  * NUMA topology (first read the regular topology blurb below)
774  *
775  * Given a node-distance table, for example:
776  *
777  *   node   0   1   2   3
778  *     0:  10  20  30  20
779  *     1:  20  10  20  30
780  *     2:  30  20  10  20
781  *     3:  20  30  20  10
782  *
783  * which represents a 4 node ring topology like:
784  *
785  *   0 ----- 1
786  *   |       |
787  *   |       |
788  *   |       |
789  *   3 ----- 2
790  *
791  * We want to construct domains and groups to represent this. The way we go
792  * about doing this is to build the domains on 'hops'. For each NUMA level we
793  * construct the mask of all nodes reachable in @level hops.
794  *
795  * For the above NUMA topology that gives 3 levels:
796  *
797  * NUMA-2	0-3		0-3		0-3		0-3
798  *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
799  *
800  * NUMA-1	0-1,3		0-2		1-3		0,2-3
801  *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
802  *
803  * NUMA-0	0		1		2		3
804  *
805  *
806  * As can be seen, things don't nicely line up as with the regular topology.
807  * When we iterate a domain in child domain chunks, some nodes can be
808  * represented multiple times -- hence the "overlap" naming for this part of
809  * the topology.
810  *
811  * In order to minimize this overlap, we only build enough groups to cover the
812  * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
813  *
814  * Because:
815  *
816  *  - the first group of each domain is its child domain; this
817  *    gets us the first 0-1,3
818  *  - the only uncovered node is 2, whose child domain is 1-3.
819  *
820  * However, because of the overlap, computing a unique CPU for each group is
821  * more complicated. Consider for instance the groups of NODE-1 NUMA-2: both
822  * groups include the CPUs of Node-0, while those CPUs would not in fact ever
823  * end up at those groups (they would end up in group: 0-1,3).
824  *
825  * To correct this we have to introduce the group balance mask. This mask
826  * will contain those CPUs in the group that can reach this group given the
827  * (child) domain tree.
828  *
829  * With this we can once again compute balance_cpu and sched_group_capacity
830  * relations.
831  *
832  * XXX include words on how balance_cpu is unique and therefore can be
833  * used for sched_group_capacity links.
834  *
835  *
836  * Another 'interesting' topology is:
837  *
838  *   node   0   1   2   3
839  *     0:  10  20  20  30
840  *     1:  20  10  20  20
841  *     2:  20  20  10  20
842  *     3:  30  20  20  10
843  *
844  * Which looks a little like:
845  *
846  *   0 ----- 1
847  *   |     / |
848  *   |   /   |
849  *   | /     |
850  *   2 ----- 3
851  *
852  * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
853  * are not.
854  *
855  * This leads to a few particularly weird cases where the sched_domain's are
856  * not of the same number for each CPU. Consider:
857  *
858  * NUMA-2	0-3						0-3
859  *  groups:	{0-2},{1-3}					{1-3},{0-2}
860  *
861  * NUMA-1	0-2		0-3		0-3		1-3
862  *
863  * NUMA-0	0		1		2		3
864  *
865  */
866 
867 
868 /*
869  * Build the balance mask; it contains only those CPUs that can arrive at this
870  * group and should be considered to continue balancing.
871  *
872  * We do this during the group creation pass, therefore the group information
873  * isn't complete yet, however since each group represents a (child) domain we
874  * can fully construct this using the sched_domain bits (which are already
875  * complete).
876  */
877 static void
878 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
879 {
880 	const struct cpumask *sg_span = sched_group_span(sg);
881 	struct sd_data *sdd = sd->private;
882 	struct sched_domain *sibling;
883 	int i;
884 
885 	cpumask_clear(mask);
886 
887 	for_each_cpu(i, sg_span) {
888 		sibling = *per_cpu_ptr(sdd->sd, i);
889 
890 		/*
891 		 * Can happen in the asymmetric case, where these siblings are
892 		 * unused. The mask will not be empty because those CPUs that
893 		 * do have the top domain _should_ span the domain.
894 		 */
895 		if (!sibling->child)
896 			continue;
897 
898 		/* If we would not end up here, we can't continue from here */
899 		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
900 			continue;
901 
902 		cpumask_set_cpu(i, mask);
903 	}
904 
905 	/* We must not have empty masks here */
906 	WARN_ON_ONCE(cpumask_empty(mask));
907 }
908 
909 /*
910  * XXX: This creates per-node group entries; since the load-balancer will
911  * immediately access remote memory to construct this group's load-balance
912  * statistics having the groups node local is of dubious benefit.
913  */
914 static struct sched_group *
915 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
916 {
917 	struct sched_group *sg;
918 	struct cpumask *sg_span;
919 
920 	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
921 			GFP_KERNEL, cpu_to_node(cpu));
922 
923 	if (!sg)
924 		return NULL;
925 
926 	sg_span = sched_group_span(sg);
927 	if (sd->child) {
928 		cpumask_copy(sg_span, sched_domain_span(sd->child));
929 		sg->flags = sd->child->flags;
930 	} else {
931 		cpumask_copy(sg_span, sched_domain_span(sd));
932 	}
933 
934 	atomic_inc(&sg->ref);
935 	return sg;
936 }
937 
938 static void init_overlap_sched_group(struct sched_domain *sd,
939 				     struct sched_group *sg)
940 {
941 	struct cpumask *mask = sched_domains_tmpmask2;
942 	struct sd_data *sdd = sd->private;
943 	struct cpumask *sg_span;
944 	int cpu;
945 
946 	build_balance_mask(sd, sg, mask);
947 	cpu = cpumask_first(mask);
948 
949 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
950 	if (atomic_inc_return(&sg->sgc->ref) == 1)
951 		cpumask_copy(group_balance_mask(sg), mask);
952 	else
953 		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
954 
955 	/*
956 	 * Initialize sgc->capacity such that even if we mess up the
957 	 * domains and no possible iteration will get us here, we won't
958 	 * die on a /0 trap.
959 	 */
960 	sg_span = sched_group_span(sg);
961 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
962 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
963 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
964 }
965 
966 static struct sched_domain *
967 find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
968 {
969 	/*
970 	 * The proper descendant would be the one whose child won't span out
971 	 * of sd
972 	 */
973 	while (sibling->child &&
974 	       !cpumask_subset(sched_domain_span(sibling->child),
975 			       sched_domain_span(sd)))
976 		sibling = sibling->child;
977 
978 	/*
979 	 * As we are referencing sgc across different topology levels, we need
980 	 * to go down to skip those sched_domains which don't contribute to
981 	 * scheduling because they will be degenerated in cpu_attach_domain().
982 	 */
983 	while (sibling->child &&
984 	       cpumask_equal(sched_domain_span(sibling->child),
985 			     sched_domain_span(sibling)))
986 		sibling = sibling->child;
987 
988 	return sibling;
989 }
990 
991 static int
992 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
993 {
994 	struct sched_group *first = NULL, *last = NULL, *sg;
995 	const struct cpumask *span = sched_domain_span(sd);
996 	struct cpumask *covered = sched_domains_tmpmask;
997 	struct sd_data *sdd = sd->private;
998 	struct sched_domain *sibling;
999 	int i;
1000 
1001 	cpumask_clear(covered);
1002 
1003 	for_each_cpu_wrap(i, span, cpu) {
1004 		struct cpumask *sg_span;
1005 
1006 		if (cpumask_test_cpu(i, covered))
1007 			continue;
1008 
1009 		sibling = *per_cpu_ptr(sdd->sd, i);
1010 
1011 		/*
1012 		 * Asymmetric node setups can result in situations where the
1013 		 * domain tree is of unequal depth, make sure to skip domains
1014 		 * that already cover the entire range.
1015 		 *
1016 		 * In that case build_sched_domains() will have terminated the
1017 		 * iteration early and our sibling sd spans will be empty.
1018 		 * Domains should always include the CPU they're built on, so
1019 		 * check that.
1020 		 */
1021 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
1022 			continue;
1023 
1024 		/*
1025 		 * Usually we build sched_group by sibling's child sched_domain
1026 		 * But for machines whose NUMA diameter is 3 or above, we move
1027 		 * to build sched_group by sibling's proper descendant's child
1028 		 * domain because sibling's child sched_domain will span out of
1029 		 * the sched_domain being built as below.
1030 		 *
1031 		 * Smallest diameter=3 topology is:
1032 		 *
1033 		 *   node   0   1   2   3
1034 		 *     0:  10  20  30  40
1035 		 *     1:  20  10  20  30
1036 		 *     2:  30  20  10  20
1037 		 *     3:  40  30  20  10
1038 		 *
1039 		 *   0 --- 1 --- 2 --- 3
1040 		 *
1041 		 * NUMA-3       0-3             N/A             N/A             0-3
1042 		 *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
1043 		 *
1044 		 * NUMA-2       0-2             0-3             0-3             1-3
1045 		 *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
1046 		 *
1047 		 * NUMA-1       0-1             0-2             1-3             2-3
1048 		 *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
1049 		 *
1050 		 * NUMA-0       0               1               2               3
1051 		 *
1052 		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
1053 		 * group span isn't a subset of the domain span.
1054 		 */
1055 		if (sibling->child &&
1056 		    !cpumask_subset(sched_domain_span(sibling->child), span))
1057 			sibling = find_descended_sibling(sd, sibling);
1058 
1059 		sg = build_group_from_child_sched_domain(sibling, cpu);
1060 		if (!sg)
1061 			goto fail;
1062 
1063 		sg_span = sched_group_span(sg);
1064 		cpumask_or(covered, covered, sg_span);
1065 
1066 		init_overlap_sched_group(sibling, sg);
1067 
1068 		if (!first)
1069 			first = sg;
1070 		if (last)
1071 			last->next = sg;
1072 		last = sg;
1073 		last->next = first;
1074 	}
1075 	sd->groups = first;
1076 
1077 	return 0;
1078 
1079 fail:
1080 	free_sched_groups(first, 0);
1081 
1082 	return -ENOMEM;
1083 }
1084 
1085 
1086 /*
1087  * Package topology (also see the load-balance blurb in fair.c)
1088  *
1089  * The scheduler builds a tree structure to represent a number of important
1090  * topology features. By default (default_topology[]) these include:
1091  *
1092  *  - Simultaneous multithreading (SMT)
1093  *  - Multi-Core Cache (MC)
1094  *  - Package (DIE)
1095  *
1096  * Where the last one more or less denotes everything up to a NUMA node.
1097  *
1098  * The tree consists of 3 primary data structures:
1099  *
1100  *	sched_domain -> sched_group -> sched_group_capacity
1101  *	    ^ ^             ^ ^
1102  *          `-'             `-'
1103  *
1104  * The sched_domains are per-CPU and have a two way link (parent & child) and
1105  * denote the ever growing mask of CPUs belonging to that level of topology.
1106  *
1107  * Each sched_domain has a circular (double) linked list of sched_group's, each
1108  * denoting the domains of the level below (or individual CPUs in case of the
1109  * first domain level). The sched_group linked by a sched_domain includes the
1110  * CPU of that sched_domain [*].
1111  *
1112  * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
1113  *
1114  * CPU   0   1   2   3   4   5   6   7
1115  *
1116  * DIE  [                             ]
1117  * MC   [             ] [             ]
1118  * SMT  [     ] [     ] [     ] [     ]
1119  *
1120  *  - or -
1121  *
1122  * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
1123  * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1124  * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1125  *
1126  * CPU   0   1   2   3   4   5   6   7
1127  *
1128  * One way to think about it is: sched_domain moves you up and down among these
1129  * topology levels, while sched_group moves you sideways through it, at child
1130  * domain granularity.
1131  *
1132  * sched_group_capacity ensures each unique sched_group has shared storage.
1133  *
1134  * There are two related construction problems, both of which require a CPU
1135  * that uniquely identifies each group (for a given domain):
1136  *
1137  *  - The first is the balance_cpu (see should_we_balance() and the
1138  *    load-balance blurb in fair.c); for each group we only want 1 CPU to
1139  *    continue balancing at a higher domain.
1140  *
1141  *  - The second is the sched_group_capacity; we want all identical groups
1142  *    to share a single sched_group_capacity.
1143  *
1144  * These topologies are exclusive by construction: that is, it's
1145  * impossible for an SMT thread to belong to multiple cores, and cores to
1146  * be part of multiple caches. There is a very clear and unique location
1147  * for each CPU in the hierarchy.
1148  *
1149  * Therefore computing a unique CPU for each group is trivial (the iteration
1150  * mask is redundant and set to all 1s; all CPUs in a group will end up at _that_
1151  * group), so we can simply pick the first CPU in each group.
1152  *
1153  *
1154  * [*] in other words, the first group of each domain is its child domain.
1155  */
1156 
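/*
 * Return the sched_group for @cpu at @sdd's topology level. When the
 * domain has a child, the group is shared by every CPU in the child's
 * span, so the storage belonging to the first CPU of that span serves as
 * the canonical copy; the refcounts below track how many CPUs claimed it.
 */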
1157 static struct sched_group *get_group(int cpu, struct sd_data *sdd)
1158 {
1159 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1160 	struct sched_domain *child = sd->child;
1161 	struct sched_group *sg;
1162 	bool already_visited;
1163 
1164 	if (child)
1165 		cpu = cpumask_first(sched_domain_span(child));
1166 
1167 	sg = *per_cpu_ptr(sdd->sg, cpu);
1168 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1169 
1170 	/* Increase refcounts for claim_allocations: */
1171 	already_visited = atomic_inc_return(&sg->ref) > 1;
1172 	/* sgc visits should follow a similar trend as sg */
1173 	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
1174 
1175 	/* If we have already visited that group, it's already initialized. */
1176 	if (already_visited)
1177 		return sg;
1178 
1179 	if (child) {
1180 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
1181 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
1182 		sg->flags = child->flags;
1183 	} else {
1184 		cpumask_set_cpu(cpu, sched_group_span(sg));
1185 		cpumask_set_cpu(cpu, group_balance_mask(sg));
1186 	}
1187 
1188 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
1189 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1190 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
1191 
1192 	return sg;
1193 }
1194 
1195 /*
1196  * build_sched_groups will build a circular linked list of the groups
1197  * covered by the given span, will set each group's ->cpumask correctly,
1198  * and will initialize their ->sgc.
1199  *
1200  * Assumes the sched_domain tree is fully constructed
1201  */
1202 static int
1203 build_sched_groups(struct sched_domain *sd, int cpu)
1204 {
1205 	struct sched_group *first = NULL, *last = NULL;
1206 	struct sd_data *sdd = sd->private;
1207 	const struct cpumask *span = sched_domain_span(sd);
1208 	struct cpumask *covered;
1209 	int i;
1210 
1211 	lockdep_assert_held(&sched_domains_mutex);
1212 	covered = sched_domains_tmpmask;
1213 
1214 	cpumask_clear(covered);
1215 
1216 	for_each_cpu_wrap(i, span, cpu) {
1217 		struct sched_group *sg;
1218 
1219 		if (cpumask_test_cpu(i, covered))
1220 			continue;
1221 
1222 		sg = get_group(i, sdd);
1223 
1224 		cpumask_or(covered, covered, sched_group_span(sg));
1225 
1226 		if (!first)
1227 			first = sg;
1228 		if (last)
1229 			last->next = sg;
1230 		last = sg;
1231 	}
1232 	last->next = first;
1233 	sd->groups = first;
1234 
1235 	return 0;
1236 }
1237 
1238 /*
1239  * Initialize sched groups cpu_capacity.
1240  *
1241  * cpu_capacity indicates the capacity of sched group, which is used while
1242  * distributing the load between different sched groups in a sched domain.
1243  * Typically cpu_capacity for all the groups in a sched domain will be the same
1244  * unless there are asymmetries in the topology. If there are asymmetries, the
1245  * group having more cpu_capacity will pick up more load compared to the
1246  * group having less cpu_capacity.
1247  */
1248 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
1249 {
1250 	struct sched_group *sg = sd->groups;
1251 
1252 	WARN_ON(!sg);
1253 
1254 	do {
1255 		int cpu, max_cpu = -1;
1256 
1257 		sg->group_weight = cpumask_weight(sched_group_span(sg));
1258 
1259 		if (!(sd->flags & SD_ASYM_PACKING))
1260 			goto next;
1261 
1262 		for_each_cpu(cpu, sched_group_span(sg)) {
1263 			if (max_cpu < 0)
1264 				max_cpu = cpu;
1265 			else if (sched_asym_prefer(cpu, max_cpu))
1266 				max_cpu = cpu;
1267 		}
1268 		sg->asym_prefer_cpu = max_cpu;
1269 
1270 next:
1271 		sg = sg->next;
1272 	} while (sg != sd->groups);
1273 
1274 	if (cpu != group_balance_cpu(sg))
1275 		return;
1276 
1277 	update_group_capacity(sd, cpu);
1278 }
1279 
1280 /*
1281  * Asymmetric CPU capacity bits
1282  */
1283 struct asym_cap_data {
1284 	struct list_head link;
1285 	unsigned long capacity;
1286 	unsigned long cpus[];
1287 };
1288 
1289 /*
1290  * Set of available CPUs grouped by their corresponding capacities
1291  * Each list entry contains a CPU mask reflecting CPUs that share the same
1292  * capacity.
1293  * The lifespan of data is unlimited.
1294  */
1295 static LIST_HEAD(asym_cap_list);
1296 
1297 #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
1298 
1299 /*
1300  * Verify whether there is any CPU capacity asymmetry in a given sched domain.
1301  * Provides sd_flags reflecting the asymmetry scope.
1302  */
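/*
 * For example (illustrative): with two capacity values ("big" and
 * "LITTLE"), a domain spanning CPUs of both kinds is classified as
 * SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL; a domain spanning only
 * one kind reports no asymmetry (count < 2); and on a three-capacity
 * system a domain covering two of the three values gets
 * SD_ASYM_CPUCAPACITY alone, since a capacity present in cpu_map is
 * missing from the span.
 */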
1303 static inline int
1304 asym_cpu_capacity_classify(const struct cpumask *sd_span,
1305 			   const struct cpumask *cpu_map)
1306 {
1307 	struct asym_cap_data *entry;
1308 	int count = 0, miss = 0;
1309 
1310 	/*
1311 	 * Count how many unique CPU capacities this domain spans across
1312 	 * (compare the sched_domain CPUs mask with the masks representing
1313 	 * available CPU capacities). Take into account CPUs that might be offline:
1314 	 * skip those.
1315 	 */
1316 	list_for_each_entry(entry, &asym_cap_list, link) {
1317 		if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))
1318 			++count;
1319 		else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))
1320 			++miss;
1321 	}
1322 
1323 	WARN_ON_ONCE(!count && !list_empty(&asym_cap_list));
1324 
1325 	/* No asymmetry detected */
1326 	if (count < 2)
1327 		return 0;
1328 	/* Some of the available CPU capacity values have not been detected */
1329 	if (miss)
1330 		return SD_ASYM_CPUCAPACITY;
1331 
1332 	/* Full asymmetry */
1333 	return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
1334 
1335 }
1336 
1337 static inline void asym_cpu_capacity_update_data(int cpu)
1338 {
1339 	unsigned long capacity = arch_scale_cpu_capacity(cpu);
1340 	struct asym_cap_data *entry = NULL;
1341 
1342 	list_for_each_entry(entry, &asym_cap_list, link) {
1343 		if (capacity == entry->capacity)
1344 			goto done;
1345 	}
1346 
1347 	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
1348 	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
1349 		return;
1350 	entry->capacity = capacity;
1351 	list_add(&entry->link, &asym_cap_list);
1352 done:
1353 	__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
1354 }
1355 
1356 /*
1357  * Build-up/update list of CPUs grouped by their capacities
1358  * An update requires explicit request to rebuild sched domains
1359  * with state indicating CPU topology changes.
1360  */
1361 static void asym_cpu_capacity_scan(void)
1362 {
1363 	struct asym_cap_data *entry, *next;
1364 	int cpu;
1365 
1366 	list_for_each_entry(entry, &asym_cap_list, link)
1367 		cpumask_clear(cpu_capacity_span(entry));
1368 
1369 	for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN))
1370 		asym_cpu_capacity_update_data(cpu);
1371 
1372 	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
1373 		if (cpumask_empty(cpu_capacity_span(entry))) {
1374 			list_del(&entry->link);
1375 			kfree(entry);
1376 		}
1377 	}
1378 
1379 	/*
1380 	 * Only one capacity value has been detected, i.e. this system is symmetric.
1381 	 * No need to keep this data around.
1382 	 */
1383 	if (list_is_singular(&asym_cap_list)) {
1384 		entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
1385 		list_del(&entry->link);
1386 		kfree(entry);
1387 	}
1388 }
1389 
1390 /*
1391  * Initializers for schedule domains
1392  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1393  */
1394 
1395 static int default_relax_domain_level = -1;
1396 int sched_domain_level_max;
1397 
1398 static int __init setup_relax_domain_level(char *str)
1399 {
1400 	if (kstrtoint(str, 0, &default_relax_domain_level))
1401 		pr_warn("Unable to set relax_domain_level\n");
1402 
1403 	return 1;
1404 }
1405 __setup("relax_domain_level=", setup_relax_domain_level);
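/*
 * Example (illustrative): booting with "relax_domain_level=1" keeps
 * SD_BALANCE_WAKE/SD_BALANCE_NEWIDLE on domains at levels 0 and 1 (the
 * lowest levels, e.g. SMT/MC) and clears them on every higher level, so
 * wakeups and newly-idle balancing stay within a small CPU neighbourhood.
 */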
1406 
1407 static void set_domain_attribute(struct sched_domain *sd,
1408 				 struct sched_domain_attr *attr)
1409 {
1410 	int request;
1411 
1412 	if (!attr || attr->relax_domain_level < 0) {
1413 		if (default_relax_domain_level < 0)
1414 			return;
1415 		request = default_relax_domain_level;
1416 	} else
1417 		request = attr->relax_domain_level;
1418 
1419 	if (sd->level > request) {
1420 		/* Turn off idle balance on this domain: */
1421 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1422 	}
1423 }
1424 
1425 static void __sdt_free(const struct cpumask *cpu_map);
1426 static int __sdt_alloc(const struct cpumask *cpu_map);
1427 
1428 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
1429 				 const struct cpumask *cpu_map)
1430 {
1431 	switch (what) {
1432 	case sa_rootdomain:
1433 		if (!atomic_read(&d->rd->refcount))
1434 			free_rootdomain(&d->rd->rcu);
1435 		fallthrough;
1436 	case sa_sd:
1437 		free_percpu(d->sd);
1438 		fallthrough;
1439 	case sa_sd_storage:
1440 		__sdt_free(cpu_map);
1441 		fallthrough;
1442 	case sa_none:
1443 		break;
1444 	}
1445 }
1446 
1447 static enum s_alloc
1448 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
1449 {
1450 	memset(d, 0, sizeof(*d));
1451 
1452 	if (__sdt_alloc(cpu_map))
1453 		return sa_sd_storage;
1454 	d->sd = alloc_percpu(struct sched_domain *);
1455 	if (!d->sd)
1456 		return sa_sd_storage;
1457 	d->rd = alloc_rootdomain();
1458 	if (!d->rd)
1459 		return sa_sd;
1460 
1461 	return sa_rootdomain;
1462 }
1463 
1464 /*
1465  * NULL the sd_data elements we've used to build the sched_domain and
1466  * sched_group structure so that the subsequent __free_domain_allocs()
1467  * will not free the data we're using.
1468  */
1469 static void claim_allocations(int cpu, struct sched_domain *sd)
1470 {
1471 	struct sd_data *sdd = sd->private;
1472 
1473 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1474 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
1475 
1476 	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
1477 		*per_cpu_ptr(sdd->sds, cpu) = NULL;
1478 
1479 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
1480 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
1481 
1482 	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
1483 		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
1484 }
1485 
1486 #ifdef CONFIG_NUMA
1487 enum numa_topology_type sched_numa_topology_type;
1488 
1489 static int			sched_domains_numa_levels;
1490 static int			sched_domains_curr_level;
1491 
1492 int				sched_max_numa_distance;
1493 static int			*sched_domains_numa_distance;
1494 static struct cpumask		***sched_domains_numa_masks;
1495 #endif
1496 
1497 /*
1498  * SD_flags allowed in topology descriptions.
1499  *
1500  * These flags are purely descriptive of the topology and do not prescribe
1501  * behaviour. Behaviour is artificial and mapped in the below sd_init()
1502  * function:
1503  *
1504  *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
1505  *   SD_SHARE_PKG_RESOURCES - describes shared caches
1506  *   SD_NUMA                - describes NUMA topologies
1507  *
1508  * The odd one out, which besides describing the topology also
1509  * prescribes the desired behaviour that goes along with it:
1510  *
1511  *   SD_ASYM_PACKING        - describes SMT quirks
1512  */
1513 #define TOPOLOGY_SD_FLAGS		\
1514 	(SD_SHARE_CPUCAPACITY	|	\
1515 	 SD_SHARE_PKG_RESOURCES |	\
1516 	 SD_NUMA		|	\
1517 	 SD_ASYM_PACKING)
1518 
1519 static struct sched_domain *
1520 sd_init(struct sched_domain_topology_level *tl,
1521 	const struct cpumask *cpu_map,
1522 	struct sched_domain *child, int cpu)
1523 {
1524 	struct sd_data *sdd = &tl->data;
1525 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1526 	int sd_id, sd_weight, sd_flags = 0;
1527 	struct cpumask *sd_span;
1528 
1529 #ifdef CONFIG_NUMA
1530 	/*
1531 	 * Ugly hack to pass state to sd_numa_mask()...
1532 	 */
1533 	sched_domains_curr_level = tl->numa_level;
1534 #endif
1535 
1536 	sd_weight = cpumask_weight(tl->mask(cpu));
1537 
1538 	if (tl->sd_flags)
1539 		sd_flags = (*tl->sd_flags)();
1540 	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
1541 			"wrong sd_flags in topology description\n"))
1542 		sd_flags &= TOPOLOGY_SD_FLAGS;
1543 
1544 	*sd = (struct sched_domain){
1545 		.min_interval		= sd_weight,
1546 		.max_interval		= 2*sd_weight,
1547 		.busy_factor		= 16,
1548 		.imbalance_pct		= 117,
1549 
1550 		.cache_nice_tries	= 0,
1551 
1552 		.flags			= 1*SD_BALANCE_NEWIDLE
1553 					| 1*SD_BALANCE_EXEC
1554 					| 1*SD_BALANCE_FORK
1555 					| 0*SD_BALANCE_WAKE
1556 					| 1*SD_WAKE_AFFINE
1557 					| 0*SD_SHARE_CPUCAPACITY
1558 					| 0*SD_SHARE_PKG_RESOURCES
1559 					| 0*SD_SERIALIZE
1560 					| 1*SD_PREFER_SIBLING
1561 					| 0*SD_NUMA
1562 					| sd_flags
1563 					,
1564 
1565 		.last_balance		= jiffies,
1566 		.balance_interval	= sd_weight,
1567 		.max_newidle_lb_cost	= 0,
1568 		.last_decay_max_lb_cost	= jiffies,
1569 		.child			= child,
1570 #ifdef CONFIG_SCHED_DEBUG
1571 		.name			= tl->name,
1572 #endif
1573 	};
1574 
1575 	sd_span = sched_domain_span(sd);
1576 	cpumask_and(sd_span, cpu_map, tl->mask(cpu));
1577 	sd_id = cpumask_first(sd_span);
1578 
1579 	sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
1580 
1581 	WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
1582 		  (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
1583 		  "CPU capacity asymmetry not supported on SMT\n");
1584 
1585 	/*
1586 	 * Convert topological properties into behaviour.
1587 	 */
1588 	/* Don't attempt to spread across CPUs of different capacities. */
1589 	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
1590 		sd->child->flags &= ~SD_PREFER_SIBLING;
1591 
1592 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
1593 		sd->imbalance_pct = 110;
1594 
1595 	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1596 		sd->imbalance_pct = 117;
1597 		sd->cache_nice_tries = 1;
1598 
1599 #ifdef CONFIG_NUMA
1600 	} else if (sd->flags & SD_NUMA) {
1601 		sd->cache_nice_tries = 2;
1602 
1603 		sd->flags &= ~SD_PREFER_SIBLING;
1604 		sd->flags |= SD_SERIALIZE;
1605 		if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
1606 			sd->flags &= ~(SD_BALANCE_EXEC |
1607 				       SD_BALANCE_FORK |
1608 				       SD_WAKE_AFFINE);
1609 		}
1610 
1611 #endif
1612 	} else {
1613 		sd->cache_nice_tries = 1;
1614 	}
1615 
1616 	/*
1617 	 * For all levels sharing cache; connect a sched_domain_shared
1618 	 * instance.
1619 	 */
1620 	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1621 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1622 		atomic_inc(&sd->shared->ref);
1623 		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1624 	}
1625 
1626 	sd->private = sdd;
1627 
1628 	return sd;
1629 }
1630 
1631 /*
1632  * Topology list, bottom-up.
1633  */
1634 static struct sched_domain_topology_level default_topology[] = {
1635 #ifdef CONFIG_SCHED_SMT
1636 	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
1637 #endif
1638 
1639 #ifdef CONFIG_SCHED_CLUSTER
1640 	{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
1641 #endif
1642 
1643 #ifdef CONFIG_SCHED_MC
1644 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
1645 #endif
1646 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1647 	{ NULL, },
1648 };
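/*
 * Architectures with non-default topologies replace this table; a minimal
 * sketch (illustrative, not from this file) for a machine with only core
 * and package levels:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 * installed via set_sched_topology(my_topology) before SMP bringup (see
 * set_sched_topology() below).
 */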
1649 
1650 static struct sched_domain_topology_level *sched_domain_topology =
1651 	default_topology;
1652 static struct sched_domain_topology_level *sched_domain_topology_saved;
1653 
1654 #define for_each_sd_topology(tl)			\
1655 	for (tl = sched_domain_topology; tl->mask; tl++)
1656 
1657 void set_sched_topology(struct sched_domain_topology_level *tl)
1658 {
1659 	if (WARN_ON_ONCE(sched_smp_initialized))
1660 		return;
1661 
1662 	sched_domain_topology = tl;
1663 	sched_domain_topology_saved = NULL;
1664 }
1665 
1666 #ifdef CONFIG_NUMA
1667 
1668 static const struct cpumask *sd_numa_mask(int cpu)
1669 {
1670 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1671 }
1672 
1673 static void sched_numa_warn(const char *str)
1674 {
1675 	static bool done = false;
1676 	int i, j;
1677 
1678 	if (done)
1679 		return;
1680 
1681 	done = true;
1682 
1683 	printk(KERN_WARNING "ERROR: %s\n\n", str);
1684 
1685 	for (i = 0; i < nr_node_ids; i++) {
1686 		printk(KERN_WARNING "  ");
1687 		for (j = 0; j < nr_node_ids; j++) {
1688 			if (!node_state(i, N_CPU) || !node_state(j, N_CPU))
1689 				printk(KERN_CONT "(%02d) ", node_distance(i,j));
1690 			else
1691 				printk(KERN_CONT " %02d  ", node_distance(i,j));
1692 		}
1693 		printk(KERN_CONT "\n");
1694 	}
1695 	printk(KERN_WARNING "\n");
1696 }
1697 
1698 bool find_numa_distance(int distance)
1699 {
1700 	bool found = false;
1701 	int i, *distances;
1702 
1703 	if (distance == node_distance(0, 0))
1704 		return true;
1705 
1706 	rcu_read_lock();
1707 	distances = rcu_dereference(sched_domains_numa_distance);
1708 	if (!distances)
1709 		goto unlock;
1710 	for (i = 0; i < sched_domains_numa_levels; i++) {
1711 		if (distances[i] == distance) {
1712 			found = true;
1713 			break;
1714 		}
1715 	}
1716 unlock:
1717 	rcu_read_unlock();
1718 
1719 	return found;
1720 }
1721 
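/* Iterate over all nodes that have CPUs, skipping node @nbut. */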
1722 #define for_each_cpu_node_but(n, nbut)		\
1723 	for_each_node_state(n, N_CPU)		\
1724 		if (n == nbut)			\
1725 			continue;		\
1726 		else
1727 
1728 /*
1729  * A system can have three types of NUMA topology:
1730  * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
1731  * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
1732  * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
1733  *
1734  * The difference between a glueless mesh topology and a backplane
1735  * topology lies in whether communication between not directly
1736  * connected nodes goes through intermediary nodes (where programs
1737  * could run), or through backplane controllers. This affects
1738  * placement of programs.
1739  *
1740  * The type of topology can be discerned with the following tests:
1741  * - If the maximum distance between any nodes is 1 hop, the system
1742  *   is directly connected.
1743  * - If for two nodes A and B, located N > 1 hops away from each other,
1744  *   there is an intermediary node C, which is < N hops away from both
1745  *   nodes A and B, the system is a glueless mesh.
1746  */
1747 static void init_numa_topology_type(int offline_node)
1748 {
1749 	int a, b, c, n;
1750 
1751 	n = sched_max_numa_distance;
1752 
1753 	if (sched_domains_numa_levels <= 2) {
1754 		sched_numa_topology_type = NUMA_DIRECT;
1755 		return;
1756 	}
1757 
1758 	for_each_cpu_node_but(a, offline_node) {
1759 		for_each_cpu_node_but(b, offline_node) {
1760 			/* Find two nodes furthest removed from each other. */
1761 			if (node_distance(a, b) < n)
1762 				continue;
1763 
1764 			/* Is there an intermediary node between a and b? */
1765 			for_each_cpu_node_but(c, offline_node) {
1766 				if (node_distance(a, c) < n &&
1767 				    node_distance(b, c) < n) {
1768 					sched_numa_topology_type =
1769 							NUMA_GLUELESS_MESH;
1770 					return;
1771 				}
1772 			}
1773 
1774 			sched_numa_topology_type = NUMA_BACKPLANE;
1775 			return;
1776 		}
1777 	}
1778 
1779 	pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n");
1780 	sched_numa_topology_type = NUMA_DIRECT;
1781 }
1782 
1783 
1784 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
1785 
1786 void sched_init_numa(int offline_node)
1787 {
1788 	struct sched_domain_topology_level *tl;
1789 	unsigned long *distance_map;
1790 	int nr_levels = 0;
1791 	int i, j;
1792 	int *distances;
1793 	struct cpumask ***masks;
1794 
1795 	/*
1796 	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
1797 	 * unique distances in the node_distance() table.
1798 	 */
1799 	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
1800 	if (!distance_map)
1801 		return;
1802 
1803 	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
1804 	for_each_cpu_node_but(i, offline_node) {
1805 		for_each_cpu_node_but(j, offline_node) {
1806 			int distance = node_distance(i, j);
1807 
1808 			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
1809 				sched_numa_warn("Invalid distance value range");
1810 				bitmap_free(distance_map);
1811 				return;
1812 			}
1813 
1814 			bitmap_set(distance_map, distance, 1);
1815 		}
1816 	}
1817 	/*
1818 	 * We can now figure out how many unique distance values there are and
1819 	 * allocate memory accordingly.
1820 	 */
1821 	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
1822 
1823 	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
1824 	if (!distances) {
1825 		bitmap_free(distance_map);
1826 		return;
1827 	}
1828 
1829 	for (i = 0, j = 0; i < nr_levels; i++, j++) {
1830 		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
1831 		distances[i] = j;
1832 	}
1833 	rcu_assign_pointer(sched_domains_numa_distance, distances);
1834 
1835 	bitmap_free(distance_map);
1836 
1837 	/*
1838 	 * 'nr_levels' contains the number of unique distances
1839 	 *
1840 	 * The sched_domains_numa_distance[] array includes the actual distance
1841 	 * numbers.
1842 	 */
1843 
1844 	/*
1845 	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
1846 	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
1847 	 * the array will contain fewer than 'nr_levels' members. This could be
1848 	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
1849 	 * in other functions.
1850 	 *
1851 	 * We reset it to 'nr_levels' at the end of this function.
1852 	 */
1853 	sched_domains_numa_levels = 0;
1854 
1855 	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
1856 	if (!masks)
1857 		return;
1858 
1859 	/*
1860 	 * Now for each level, construct a mask per node which contains all
1861 	 * CPUs of nodes that are that many hops away from us.
1862 	 */
1863 	for (i = 0; i < nr_levels; i++) {
1864 		masks[i] = kcalloc(nr_node_ids, sizeof(void *), GFP_KERNEL);
1865 		if (!masks[i])
1866 			return;
1867 
1868 		for_each_cpu_node_but(j, offline_node) {
1869 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
1870 			int k;
1871 
1872 			if (!mask)
1873 				return;
1874 
1875 			masks[i][j] = mask;
1876 
1877 			for_each_cpu_node_but(k, offline_node) {
1878 				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
1879 					sched_numa_warn("Node-distance not symmetric");
1880 
1881 				if (node_distance(j, k) > sched_domains_numa_distance[i])
1882 					continue;
1883 
1884 				cpumask_or(mask, mask, cpumask_of_node(k));
1885 			}
1886 		}
1887 	}
1888 	rcu_assign_pointer(sched_domains_numa_masks, masks);
1889 
1890 	/* Compute default topology size */
1891 	for (i = 0; sched_domain_topology[i].mask; i++);
1892 
1893 	tl = kcalloc(i + nr_levels + 1,
1894 		     sizeof(struct sched_domain_topology_level), GFP_KERNEL);
1895 	if (!tl)
1896 		return;
1897 
1898 	/*
1899 	 * Copy the default topology bits..
1900 	 */
1901 	for (i = 0; sched_domain_topology[i].mask; i++)
1902 		tl[i] = sched_domain_topology[i];
1903 
1904 	/*
1905 	 * Add the NUMA identity distance, aka single NODE.
1906 	 */
1907 	tl[i++] = (struct sched_domain_topology_level){
1908 		.mask = sd_numa_mask,
1909 		.numa_level = 0,
1910 		SD_INIT_NAME(NODE)
1911 	};
1912 
1913 	/*
1914 	 * .. and append 'j' levels of NUMA goodness.
1915 	 */
1916 	for (j = 1; j < nr_levels; i++, j++) {
1917 		tl[i] = (struct sched_domain_topology_level){
1918 			.mask = sd_numa_mask,
1919 			.sd_flags = cpu_numa_flags,
1920 			.flags = SDTL_OVERLAP,
1921 			.numa_level = j,
1922 			SD_INIT_NAME(NUMA)
1923 		};
1924 	}
1925 
1926 	sched_domain_topology_saved = sched_domain_topology;
1927 	sched_domain_topology = tl;
1928 
1929 	sched_domains_numa_levels = nr_levels;
1930 	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
1931 
1932 	init_numa_topology_type(offline_node);
1933 }
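
/*
 * Continuing the hypothetical 4-node example above: the distance bitmap
 * ends up with bits 10, 20, 30 and 40 set, so nr_levels == 4 and
 * sched_domains_numa_distance[] == { 10, 20, 30, 40 }. On top of the
 * default topology levels, the rebuilt sched_domain_topology then gains
 * one NODE level (numa_level 0) plus three NUMA levels (numa_level 1..3),
 * where the level-i mask of node j spans the CPUs of every node within
 * sched_domains_numa_distance[i] of node j.
 */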
1934 
1936 static void sched_reset_numa(void)
1937 {
1938 	int nr_levels, *distances;
1939 	struct cpumask ***masks;
1940 
1941 	nr_levels = sched_domains_numa_levels;
1942 	sched_domains_numa_levels = 0;
1943 	sched_max_numa_distance = 0;
1944 	sched_numa_topology_type = NUMA_DIRECT;
1945 	distances = sched_domains_numa_distance;
1946 	rcu_assign_pointer(sched_domains_numa_distance, NULL);
1947 	masks = sched_domains_numa_masks;
1948 	rcu_assign_pointer(sched_domains_numa_masks, NULL);
1949 	if (distances || masks) {
1950 		int i, j;
1951 
1952 		synchronize_rcu();
1953 		kfree(distances);
1954 		for (i = 0; i < nr_levels && masks; i++) {
1955 			if (!masks[i])
1956 				continue;
1957 			for_each_node(j)
1958 				kfree(masks[i][j]);
1959 			kfree(masks[i]);
1960 		}
1961 		kfree(masks);
1962 	}
1963 	if (sched_domain_topology_saved) {
1964 		kfree(sched_domain_topology);
1965 		sched_domain_topology = sched_domain_topology_saved;
1966 		sched_domain_topology_saved = NULL;
1967 	}
1968 }
1969 
1970 /*
1971  * Call with hotplug lock held
1972  */
1973 void sched_update_numa(int cpu, bool online)
1974 {
1975 	int node;
1976 
1977 	node = cpu_to_node(cpu);
1978 	/*
1979 	 * Scheduler NUMA topology is updated when the first CPU of a
1980 	 * node is onlined or the last CPU of a node is offlined.
1981 	 */
1982 	if (cpumask_weight(cpumask_of_node(node)) != 1)
1983 		return;
1984 
1985 	sched_reset_numa();
1986 	sched_init_numa(online ? NUMA_NO_NODE : node);
1987 }
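
/*
 * Note that the weight == 1 test above covers both directions: on the
 * online path the CPU has (presumably) already been added to its node's
 * cpumask by the architecture hotplug code, so a weight of 1 means it is
 * the node's first CPU; on the offline path it has not yet been removed,
 * so a weight of 1 means it is the node's last CPU.
 */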
1988 
1989 void sched_domains_numa_masks_set(unsigned int cpu)
1990 {
1991 	int node = cpu_to_node(cpu);
1992 	int i, j;
1993 
1994 	for (i = 0; i < sched_domains_numa_levels; i++) {
1995 		for (j = 0; j < nr_node_ids; j++) {
1996 			if (!node_state(j, N_CPU))
1997 				continue;
1998 
1999 			/* Set ourselves in the remote node's masks */
2000 			if (node_distance(j, node) <= sched_domains_numa_distance[i])
2001 				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
2002 		}
2003 	}
2004 }
2005 
2006 void sched_domains_numa_masks_clear(unsigned int cpu)
2007 {
2008 	int i, j;
2009 
2010 	for (i = 0; i < sched_domains_numa_levels; i++) {
2011 		for (j = 0; j < nr_node_ids; j++) {
2012 			if (sched_domains_numa_masks[i][j])
2013 				cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
2014 		}
2015 	}
2016 }
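
/*
 * These two helpers keep the per-level masks coherent across CPU hotplug.
 * A minimal sketch of the expected call sites (assuming the usual hotplug
 * flow in kernel/sched/core.c):
 *
 *	int sched_cpu_activate(unsigned int cpu)
 *	{
 *		...
 *		sched_domains_numa_masks_set(cpu);
 *		...
 *	}
 *
 *	int sched_cpu_deactivate(unsigned int cpu)
 *	{
 *		...
 *		sched_domains_numa_masks_clear(cpu);
 *		...
 *	}
 */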
2017 
2018 /*
2019  * sched_numa_find_closest() - given the NUMA topology, find the cpu
2020  *                             closest to @cpu from @cpus.
2021  * cpus: cpumask to find a cpu from
2022  * cpu: cpu to be close to
2023  *
2024  * returns: cpu, or nr_cpu_ids when nothing found.
2025  */
2026 int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
2027 {
2028 	int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
2029 	struct cpumask ***masks;
2030 
2031 	rcu_read_lock();
2032 	masks = rcu_dereference(sched_domains_numa_masks);
2033 	if (!masks)
2034 		goto unlock;
2035 	for (i = 0; i < sched_domains_numa_levels; i++) {
2036 		if (!masks[i][j])
2037 			break;
2038 		cpu = cpumask_any_and(cpus, masks[i][j]);
2039 		if (cpu < nr_cpu_ids) {
2040 			found = cpu;
2041 			break;
2042 		}
2043 	}
2044 unlock:
2045 	rcu_read_unlock();
2046 
2047 	return found;
2048 }
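
/*
 * A minimal usage sketch (the caller below is hypothetical): pick the CPU
 * from 'allowed' with the smallest NUMA distance to 'hint_cpu', falling
 * back to any allowed CPU when the NUMA masks are not (yet) set up:
 *
 *	int pick_cpu_near(const struct cpumask *allowed, int hint_cpu)
 *	{
 *		int cpu = sched_numa_find_closest(allowed, hint_cpu);
 *
 *		if (cpu >= nr_cpu_ids)
 *			cpu = cpumask_any(allowed);
 *		return cpu;
 *	}
 */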
2049 
2050 #endif /* CONFIG_NUMA */
2051 
2052 static int __sdt_alloc(const struct cpumask *cpu_map)
2053 {
2054 	struct sched_domain_topology_level *tl;
2055 	int j;
2056 
2057 	for_each_sd_topology(tl) {
2058 		struct sd_data *sdd = &tl->data;
2059 
2060 		sdd->sd = alloc_percpu(struct sched_domain *);
2061 		if (!sdd->sd)
2062 			return -ENOMEM;
2063 
2064 		sdd->sds = alloc_percpu(struct sched_domain_shared *);
2065 		if (!sdd->sds)
2066 			return -ENOMEM;
2067 
2068 		sdd->sg = alloc_percpu(struct sched_group *);
2069 		if (!sdd->sg)
2070 			return -ENOMEM;
2071 
2072 		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
2073 		if (!sdd->sgc)
2074 			return -ENOMEM;
2075 
2076 		for_each_cpu(j, cpu_map) {
2077 			struct sched_domain *sd;
2078 			struct sched_domain_shared *sds;
2079 			struct sched_group *sg;
2080 			struct sched_group_capacity *sgc;
2081 
2082 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
2083 					GFP_KERNEL, cpu_to_node(j));
2084 			if (!sd)
2085 				return -ENOMEM;
2086 
2087 			*per_cpu_ptr(sdd->sd, j) = sd;
2088 
2089 			sds = kzalloc_node(sizeof(struct sched_domain_shared),
2090 					GFP_KERNEL, cpu_to_node(j));
2091 			if (!sds)
2092 				return -ENOMEM;
2093 
2094 			*per_cpu_ptr(sdd->sds, j) = sds;
2095 
2096 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
2097 					GFP_KERNEL, cpu_to_node(j));
2098 			if (!sg)
2099 				return -ENOMEM;
2100 
2101 			sg->next = sg;
2102 
2103 			*per_cpu_ptr(sdd->sg, j) = sg;
2104 
2105 			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
2106 					GFP_KERNEL, cpu_to_node(j));
2107 			if (!sgc)
2108 				return -ENOMEM;
2109 
2110 #ifdef CONFIG_SCHED_DEBUG
2111 			sgc->id = j;
2112 #endif
2113 
2114 			*per_cpu_ptr(sdd->sgc, j) = sgc;
2115 		}
2116 	}
2117 
2118 	return 0;
2119 }
2120 
2121 static void __sdt_free(const struct cpumask *cpu_map)
2122 {
2123 	struct sched_domain_topology_level *tl;
2124 	int j;
2125 
2126 	for_each_sd_topology(tl) {
2127 		struct sd_data *sdd = &tl->data;
2128 
2129 		for_each_cpu(j, cpu_map) {
2130 			struct sched_domain *sd;
2131 
2132 			if (sdd->sd) {
2133 				sd = *per_cpu_ptr(sdd->sd, j);
2134 				if (sd && (sd->flags & SD_OVERLAP))
2135 					free_sched_groups(sd->groups, 0);
2136 				kfree(*per_cpu_ptr(sdd->sd, j));
2137 			}
2138 
2139 			if (sdd->sds)
2140 				kfree(*per_cpu_ptr(sdd->sds, j));
2141 			if (sdd->sg)
2142 				kfree(*per_cpu_ptr(sdd->sg, j));
2143 			if (sdd->sgc)
2144 				kfree(*per_cpu_ptr(sdd->sgc, j));
2145 		}
2146 		free_percpu(sdd->sd);
2147 		sdd->sd = NULL;
2148 		free_percpu(sdd->sds);
2149 		sdd->sds = NULL;
2150 		free_percpu(sdd->sg);
2151 		sdd->sg = NULL;
2152 		free_percpu(sdd->sgc);
2153 		sdd->sgc = NULL;
2154 	}
2155 }
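
/*
 * Note the interplay with claim_allocations(): __sdt_alloc() pre-allocates
 * one sd/sds/sg/sgc tuple per CPU per topology level, and the allocations
 * that build_sched_domains() actually links into a domain tree have their
 * per-CPU pointers NULLed by claim_allocations(). __sdt_free() therefore
 * only frees the unclaimed leftovers and cannot double-free what the
 * domain tree still references.
 */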
2156 
2157 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
2158 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
2159 		struct sched_domain *child, int cpu)
2160 {
2161 	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
2162 
2163 	if (child) {
2164 		sd->level = child->level + 1;
2165 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
2166 		child->parent = sd;
2167 
2168 		if (!cpumask_subset(sched_domain_span(child),
2169 				    sched_domain_span(sd))) {
2170 			pr_err("BUG: arch topology broken\n");
2171 #ifdef CONFIG_SCHED_DEBUG
2172 			pr_err("     the %s domain is not a subset of the %s domain\n",
2173 					child->name, sd->name);
2174 #endif
2175 			/* Fixup, ensure @sd has at least @child CPUs. */
2176 			cpumask_or(sched_domain_span(sd),
2177 				   sched_domain_span(sd),
2178 				   sched_domain_span(child));
2179 		}
2180 
2181 	}
2182 	set_domain_attribute(sd, attr);
2183 
2184 	return sd;
2185 }
2186 
2187 /*
2188  * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
2189  * any two given CPUs at this (non-NUMA) topology level.
2190  */
2191 static bool topology_span_sane(struct sched_domain_topology_level *tl,
2192 			      const struct cpumask *cpu_map, int cpu)
2193 {
2194 	int i;
2195 
2196 	/* NUMA levels are allowed to overlap */
2197 	if (tl->flags & SDTL_OVERLAP)
2198 		return true;
2199 
2200 	/*
2201 	 * Non-NUMA levels cannot partially overlap - they must be either
2202 	 * completely equal or completely disjoint. Otherwise we can end up
2203 	 * breaking the sched_group lists - i.e. a later get_group() pass
2204 	 * breaks the linking done for an earlier span.
2205 	 */
2206 	for_each_cpu(i, cpu_map) {
2207 		if (i == cpu)
2208 			continue;
2209 		/*
2210 		 * We should 'and' all those masks with 'cpu_map' to exactly
2211 		 * match the topology we're about to build, but that can only
2212 		 * remove CPUs, which only lessens our ability to detect
2213 		 * overlaps
2214 		 * overlaps.
2215 		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
2216 		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
2217 			return false;
2218 	}
2219 
2220 	return true;
2221 }
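
/*
 * A concrete (made-up) failure case: suppose broken firmware tables yield,
 * at the same non-NUMA level,
 *
 *	tl->mask(0) == tl->mask(1) == { 0, 1, 2 }
 *	tl->mask(2) == tl->mask(3) == { 2, 3 }
 *
 * The masks of CPUs 0 and 2 intersect (both contain CPU 2) without being
 * equal, so topology_span_sane() returns false and the domain build is
 * aborted instead of corrupting the sched_group lists.
 */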
2222 
2223 /*
2224  * Build sched domains for a given set of CPUs and attach the sched domains
2225  * to the individual CPUs
2226  */
2227 static int
2228 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
2229 {
2230 	enum s_alloc alloc_state = sa_none;
2231 	struct sched_domain *sd;
2232 	struct s_data d;
2233 	struct rq *rq = NULL;
2234 	int i, ret = -ENOMEM;
2235 	bool has_asym = false;
2236 
2237 	if (WARN_ON(cpumask_empty(cpu_map)))
2238 		goto error;
2239 
2240 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
2241 	if (alloc_state != sa_rootdomain)
2242 		goto error;
2243 
2244 	/* Set up domains for CPUs specified by the cpu_map: */
2245 	for_each_cpu(i, cpu_map) {
2246 		struct sched_domain_topology_level *tl;
2247 
2248 		sd = NULL;
2249 		for_each_sd_topology(tl) {
2250 
2251 			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
2252 				goto error;
2253 
2254 			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
2255 
2256 			has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
2257 
2258 			if (tl == sched_domain_topology)
2259 				*per_cpu_ptr(d.sd, i) = sd;
2260 			if (tl->flags & SDTL_OVERLAP)
2261 				sd->flags |= SD_OVERLAP;
2262 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
2263 				break;
2264 		}
2265 	}
2266 
2267 	/* Build the groups for the domains */
2268 	for_each_cpu(i, cpu_map) {
2269 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2270 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
2271 			if (sd->flags & SD_OVERLAP) {
2272 				if (build_overlap_sched_groups(sd, i))
2273 					goto error;
2274 			} else {
2275 				if (build_sched_groups(sd, i))
2276 					goto error;
2277 			}
2278 		}
2279 	}
2280 
2281 	/*
2282 	 * Calculate an allowed NUMA imbalance such that LLCs do not get
2283 	 * imbalanced.
2284 	 */
2285 	for_each_cpu(i, cpu_map) {
2286 		unsigned int imb = 0;
2287 		unsigned int imb_span = 1;
2288 
2289 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2290 			struct sched_domain *child = sd->child;
2291 
2292 			if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child &&
2293 			    (child->flags & SD_SHARE_PKG_RESOURCES)) {
2294 				struct sched_domain __rcu *top_p;
2295 				unsigned int nr_llcs;
2296 
2297 				/*
2298 				 * For a single LLC per node, allow an
2299 				 * imbalance up to 25% of the node. This is an
2300 				 * arbitrary cutoff based on SMT-2 to balance
2301 				 * between memory bandwidth and avoiding
2302 				 * premature sharing of HT resources; SMT-4
2303 				 * or SMT-8 *may* benefit from a different
2304 				 * cutoff.
2305 				 *
2306 				 * For multiple LLCs, allow an imbalance
2307 				 * until multiple tasks would share an LLC
2308 				 * on one node while LLCs on another node
2309 				 * remain idle.
2310 				 */
2311 				nr_llcs = sd->span_weight / child->span_weight;
2312 				if (nr_llcs == 1)
2313 					imb = sd->span_weight >> 2;
2314 				else
2315 					imb = nr_llcs;
2316 				sd->imb_numa_nr = imb;
2317 
2318 				/* Set span based on the first NUMA domain. */
2319 				top_p = sd->parent;
2320 				while (top_p && !(top_p->flags & SD_NUMA))
2321 					top_p = top_p->parent;
2323 				imb_span = top_p ? top_p->span_weight : sd->span_weight;
2324 			} else {
2325 				int factor = max(1U, (sd->span_weight / imb_span));
2326 
2327 				sd->imb_numa_nr = imb * factor;
2328 			}
2329 		}
2330 	}
2331 
2332 	/* Calculate CPU capacity for physical packages and nodes */
2333 	for (i = nr_cpumask_bits - 1; i >= 0; i--) {
2334 		if (!cpumask_test_cpu(i, cpu_map))
2335 			continue;
2336 
2337 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2338 			claim_allocations(i, sd);
2339 			init_sched_groups_capacity(i, sd);
2340 		}
2341 	}
2342 
2343 	/* Attach the domains */
2344 	rcu_read_lock();
2345 	for_each_cpu(i, cpu_map) {
2346 		rq = cpu_rq(i);
2347 		sd = *per_cpu_ptr(d.sd, i);
2348 
2349 		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
2350 		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
2351 			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
2352 
2353 		cpu_attach_domain(sd, d.rd, i);
2354 	}
2355 	rcu_read_unlock();
2356 
2357 	if (has_asym)
2358 		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
2359 
2360 	if (rq && sched_debug_verbose) {
2361 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
2362 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
2363 	}
2364 
2365 	ret = 0;
2366 error:
2367 	__free_domain_allocs(&d, alloc_state, cpu_map);
2368 
2369 	return ret;
2370 }
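
/*
 * A worked imb_numa_nr example (illustrative numbers): take a 2-node
 * machine with 32 CPUs per node and one LLC spanning each node. At the
 * lowest domain without SD_SHARE_PKG_RESOURCES whose child has it,
 * nr_llcs == 32 / 32 == 1, so imb == 32 >> 2 == 8, and the first SD_NUMA
 * parent (spanning all 64 CPUs) gives imb_span == 64. At the NUMA level
 * itself, factor == max(1, 64 / 64) == 1, hence imb_numa_nr == 8: up to
 * 8 tasks may sit on one node before balancing pulls them apart. With
 * two LLCs per node instead, nr_llcs == 2 and imb == 2, capping the
 * imbalance before one node's LLCs are shared while the other node's
 * LLCs sit idle.
 */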
2371 
2372 /* Current sched domains: */
2373 static cpumask_var_t			*doms_cur;
2374 
2375 /* Number of sched domains in 'doms_cur': */
2376 static int				ndoms_cur;
2377 
2378 /* Attributes of custom domains in 'doms_cur' */
2379 static struct sched_domain_attr		*dattr_cur;
2380 
2381 /*
2382  * Special case: If a kmalloc() of a doms_cur partition (array of
2383  * cpumask) fails, then fall back to a single sched domain,
2384  * as determined by the single cpumask fallback_doms.
2385  */
2386 static cpumask_var_t			fallback_doms;
2387 
2388 /*
2389  * arch_update_cpu_topology lets virtualized architectures update the
2390  * CPU core maps. It is supposed to return 1 if the topology changed
2391  * or 0 if it stayed the same.
2392  */
2393 int __weak arch_update_cpu_topology(void)
2394 {
2395 	return 0;
2396 }
2397 
2398 cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
2399 {
2400 	int i;
2401 	cpumask_var_t *doms;
2402 
2403 	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
2404 	if (!doms)
2405 		return NULL;
2406 	for (i = 0; i < ndoms; i++) {
2407 		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
2408 			free_sched_domains(doms, i);
2409 			return NULL;
2410 		}
2411 	}
2412 	return doms;
2413 }
2414 
2415 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2416 {
2417 	unsigned int i;

2418 	for (i = 0; i < ndoms; i++)
2419 		free_cpumask_var(doms[i]);
2420 	kfree(doms);
2421 }
2422 
2423 /*
2424  * Set up scheduler domains and groups.  For now this just excludes isolated
2425  * CPUs, but could be used to exclude other special cases in the future.
2426  */
2427 int sched_init_domains(const struct cpumask *cpu_map)
2428 {
2429 	int err;
2430 
2431 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
2432 	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
2433 	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
2434 
2435 	arch_update_cpu_topology();
2436 	asym_cpu_capacity_scan();
2437 	ndoms_cur = 1;
2438 	doms_cur = alloc_sched_domains(ndoms_cur);
2439 	if (!doms_cur)
2440 		doms_cur = &fallback_doms;
2441 	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
2442 	err = build_sched_domains(doms_cur[0], NULL);
2443 
2444 	return err;
2445 }
2446 
2447 /*
2448  * Detach sched domains from a group of CPUs specified in cpu_map.
2449  * These CPUs will now be attached to the NULL domain.
2450  */
2451 static void detach_destroy_domains(const struct cpumask *cpu_map)
2452 {
2453 	unsigned int cpu = cpumask_any(cpu_map);
2454 	int i;
2455 
2456 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
2457 		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
2458 
2459 	rcu_read_lock();
2460 	for_each_cpu(i, cpu_map)
2461 		cpu_attach_domain(NULL, &def_root_domain, i);
2462 	rcu_read_unlock();
2463 }
2464 
2465 /* Handle NULL as "default" */
2466 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
2467 			struct sched_domain_attr *new, int idx_new)
2468 {
2469 	struct sched_domain_attr tmp;
2470 
2471 	/* Fast path: */
2472 	if (!new && !cur)
2473 		return 1;
2474 
2475 	tmp = SD_ATTR_INIT;
2476 
2477 	return !memcmp(cur ? (cur + idx_cur) : &tmp,
2478 			new ? (new + idx_new) : &tmp,
2479 			sizeof(struct sched_domain_attr));
2480 }
2481 
2482 /*
2483  * Partition sched domains as specified by the 'ndoms_new'
2484  * cpumasks in the array doms_new[] of cpumasks. This compares
2485  * doms_new[] to the current sched domain partitioning, doms_cur[].
2486  * It destroys each deleted domain and builds each new domain.
2487  *
2488  * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
2489  * The masks don't intersect (don't overlap). We should set up one
2490  * sched domain for each mask. CPUs not in any of the cpumasks will
2491  * not be load balanced. If the same cpumask appears both in the
2492  * current 'doms_cur' domains and in the new 'doms_new', we can leave
2493  * it as it is.
2494  *
2495  * The passed-in 'doms_new' should be allocated using
2496  * alloc_sched_domains.  This routine takes ownership of it and will
2497  * free_sched_domains it when done with it. If the caller failed the
2498  * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2499  * and partition_sched_domains() will fall back to the single partition
2500  * 'fallback_doms'; this also forces the domains to be rebuilt.
2501  *
2502  * If doms_new == NULL it will be replaced with cpu_online_mask.
2503  * ndoms_new == 0 is a special case for destroying existing domains,
2504  * and it will not create the default domain.
2505  *
2506  * Call with hotplug lock and sched_domains_mutex held
2507  */
2508 void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
2509 				    struct sched_domain_attr *dattr_new)
2510 {
2511 	bool __maybe_unused has_eas = false;
2512 	int i, j, n;
2513 	int new_topology;
2514 
2515 	lockdep_assert_held(&sched_domains_mutex);
2516 
2517 	/* Let the architecture update CPU core mappings: */
2518 	new_topology = arch_update_cpu_topology();
2519 	/* Trigger rebuilding CPU capacity asymmetry data */
2520 	if (new_topology)
2521 		asym_cpu_capacity_scan();
2522 
2523 	if (!doms_new) {
2524 		WARN_ON_ONCE(dattr_new);
2525 		n = 0;
2526 		doms_new = alloc_sched_domains(1);
2527 		if (doms_new) {
2528 			n = 1;
2529 			cpumask_and(doms_new[0], cpu_active_mask,
2530 				    housekeeping_cpumask(HK_TYPE_DOMAIN));
2531 		}
2532 	} else {
2533 		n = ndoms_new;
2534 	}
2535 
2536 	/* Destroy deleted domains: */
2537 	for (i = 0; i < ndoms_cur; i++) {
2538 		for (j = 0; j < n && !new_topology; j++) {
2539 			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
2540 			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
2541 				struct root_domain *rd;
2542 
2543 				/*
2544 				 * This domain won't be destroyed and as such
2545 				 * its dl_bw->total_bw needs to be cleared.  It
2546 				 * will be recomputed by
2547 				 * update_tasks_root_domain().
2548 				 */
2549 				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2550 				dl_clear_root_domain(rd);
2551 				goto match1;
2552 			}
2553 		}
2554 		/* No match - a current sched domain not in new doms_new[] */
2555 		detach_destroy_domains(doms_cur[i]);
2556 match1:
2557 		;
2558 	}
2559 
2560 	n = ndoms_cur;
2561 	if (!doms_new) {
2562 		n = 0;
2563 		doms_new = &fallback_doms;
2564 		cpumask_and(doms_new[0], cpu_active_mask,
2565 			    housekeeping_cpumask(HK_TYPE_DOMAIN));
2566 	}
2567 
2568 	/* Build new domains: */
2569 	for (i = 0; i < ndoms_new; i++) {
2570 		for (j = 0; j < n && !new_topology; j++) {
2571 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2572 			    dattrs_equal(dattr_new, i, dattr_cur, j))
2573 				goto match2;
2574 		}
2575 		/* No match - add a new doms_new */
2576 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2577 match2:
2578 		;
2579 	}
2580 
2581 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2582 	/* Build perf. domains: */
2583 	for (i = 0; i < ndoms_new; i++) {
2584 		for (j = 0; j < n && !sched_energy_update; j++) {
2585 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2586 			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
2587 				has_eas = true;
2588 				goto match3;
2589 			}
2590 		}
2591 		/* No match - add perf. domains for a new rd */
2592 		has_eas |= build_perf_domains(doms_new[i]);
2593 match3:
2594 		;
2595 	}
2596 	sched_energy_set(has_eas);
2597 #endif
2598 
2599 	/* Remember the new sched domains: */
2600 	if (doms_cur != &fallback_doms)
2601 		free_sched_domains(doms_cur, ndoms_cur);
2602 
2603 	kfree(dattr_cur);
2604 	doms_cur = doms_new;
2605 	dattr_cur = dattr_new;
2606 	ndoms_cur = ndoms_new;
2607 
2608 	update_sched_domain_debugfs();
2609 }
2610 
2611 /*
2612  * Call with hotplug lock held
2613  */
2614 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2615 			     struct sched_domain_attr *dattr_new)
2616 {
2617 	mutex_lock(&sched_domains_mutex);
2618 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
2619 	mutex_unlock(&sched_domains_mutex);
2620 }
2621
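/*
 * A minimal caller sketch (cpuset is the main real user; the partition
 * cpumasks below are hypothetical). This splits the machine into two
 * load-balancing partitions:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (!doms) {
 *		// Fall back to the single 'fallback_doms' partition.
 *		partition_sched_domains(1, NULL, NULL);
 *		return;
 *	}
 *	cpumask_copy(doms[0], partition_a_cpus);
 *	cpumask_copy(doms[1], partition_b_cpus);
 *	// Ownership of 'doms' passes to the scheduler.
 *	partition_sched_domains(2, doms, NULL);
 */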