xref: /openbmc/linux/kernel/sched/topology.c (revision 060f35a317ef09101b128f399dce7ed13d019461)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Scheduler topology setup/handling methods
4   */
5  
6  #include <linux/bsearch.h>
7  
8  DEFINE_MUTEX(sched_domains_mutex);
9  
10  /* Protected by sched_domains_mutex: */
11  static cpumask_var_t sched_domains_tmpmask;
12  static cpumask_var_t sched_domains_tmpmask2;
13  
14  #ifdef CONFIG_SCHED_DEBUG
15  
16  static int __init sched_debug_setup(char *str)
17  {
18  	sched_debug_verbose = true;
19  
20  	return 0;
21  }
22  early_param("sched_verbose", sched_debug_setup);
23  
24  static inline bool sched_debug(void)
25  {
26  	return sched_debug_verbose;
27  }
28  
29  #define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
30  const struct sd_flag_debug sd_flag_debug[] = {
31  #include <linux/sched/sd_flags.h>
32  };
33  #undef SD_FLAG
34  
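/*
 * A sketch of what the include above expands to (illustrative entry only;
 * the real list lives in <linux/sched/sd_flags.h>): assuming that header
 * declares SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS),
 * the table gains an entry along the lines of
 *
 *   [__SD_BALANCE_NEWIDLE] = {
 *           .meta_flags = SDF_SHARED_CHILD | SDF_NEEDS_GROUPS,
 *           .name       = "SD_BALANCE_NEWIDLE",
 *   },
 *
 * which sched_domain_debug_one() below indexes by flag bit position.
 */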
35  static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
36  				  struct cpumask *groupmask)
37  {
38  	struct sched_group *group = sd->groups;
39  	unsigned long flags = sd->flags;
40  	unsigned int idx;
41  
42  	cpumask_clear(groupmask);
43  
44  	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
45  	printk(KERN_CONT "span=%*pbl level=%s\n",
46  	       cpumask_pr_args(sched_domain_span(sd)), sd->name);
47  
48  	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
49  		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
50  	}
51  	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
52  		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
53  	}
54  
55  	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
56  		unsigned int flag = BIT(idx);
57  		unsigned int meta_flags = sd_flag_debug[idx].meta_flags;
58  
59  		if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
60  		    !(sd->child->flags & flag))
61  			printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
62  			       sd_flag_debug[idx].name);
63  
64  		if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
65  		    !(sd->parent->flags & flag))
66  			printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
67  			       sd_flag_debug[idx].name);
68  	}
69  
70  	printk(KERN_DEBUG "%*s groups:", level + 1, "");
71  	do {
72  		if (!group) {
73  			printk("\n");
74  			printk(KERN_ERR "ERROR: group is NULL\n");
75  			break;
76  		}
77  
78  		if (cpumask_empty(sched_group_span(group))) {
79  			printk(KERN_CONT "\n");
80  			printk(KERN_ERR "ERROR: empty group\n");
81  			break;
82  		}
83  
84  		if (!(sd->flags & SD_OVERLAP) &&
85  		    cpumask_intersects(groupmask, sched_group_span(group))) {
86  			printk(KERN_CONT "\n");
87  			printk(KERN_ERR "ERROR: repeated CPUs\n");
88  			break;
89  		}
90  
91  		cpumask_or(groupmask, groupmask, sched_group_span(group));
92  
93  		printk(KERN_CONT " %d:{ span=%*pbl",
94  				group->sgc->id,
95  				cpumask_pr_args(sched_group_span(group)));
96  
97  		if ((sd->flags & SD_OVERLAP) &&
98  		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
99  			printk(KERN_CONT " mask=%*pbl",
100  				cpumask_pr_args(group_balance_mask(group)));
101  		}
102  
103  		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
104  			printk(KERN_CONT " cap=%lu", group->sgc->capacity);
105  
106  		if (group == sd->groups && sd->child &&
107  		    !cpumask_equal(sched_domain_span(sd->child),
108  				   sched_group_span(group))) {
109  			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
110  		}
111  
112  		printk(KERN_CONT " }");
113  
114  		group = group->next;
115  
116  		if (group != sd->groups)
117  			printk(KERN_CONT ",");
118  
119  	} while (group != sd->groups);
120  	printk(KERN_CONT "\n");
121  
122  	if (!cpumask_equal(sched_domain_span(sd), groupmask))
123  		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
124  
125  	if (sd->parent &&
126  	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
127  		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
128  	return 0;
129  }
130  
131  static void sched_domain_debug(struct sched_domain *sd, int cpu)
132  {
133  	int level = 0;
134  
135  	if (!sched_debug_verbose)
136  		return;
137  
138  	if (!sd) {
139  		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
140  		return;
141  	}
142  
143  	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
144  
145  	for (;;) {
146  		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
147  			break;
148  		level++;
149  		sd = sd->parent;
150  		if (!sd)
151  			break;
152  	}
153  }
154  #else /* !CONFIG_SCHED_DEBUG */
155  
156  # define sched_debug_verbose 0
157  # define sched_domain_debug(sd, cpu) do { } while (0)
158  static inline bool sched_debug(void)
159  {
160  	return false;
161  }
162  #endif /* CONFIG_SCHED_DEBUG */
163  
164  /* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
165  #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
166  static const unsigned int SD_DEGENERATE_GROUPS_MASK =
167  #include <linux/sched/sd_flags.h>
168  0;
169  #undef SD_FLAG
170  
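/*
 * A sketch of the generated constant (the mflags shown are assumptions for
 * illustration): each SD_FLAG(name, mflags) line in <linux/sched/sd_flags.h>
 * expands via the macro above into "(name * !!((mflags) & SDF_NEEDS_GROUPS)) |",
 * so the initializer reads roughly
 *
 *   static const unsigned int SD_DEGENERATE_GROUPS_MASK =
 *           (SD_BALANCE_NEWIDLE * !!((SDF_SHARED_CHILD | SDF_NEEDS_GROUPS) & SDF_NEEDS_GROUPS)) |
 *           (SD_WAKE_AFFINE     * !!((SDF_SHARED_CHILD                   ) & SDF_NEEDS_GROUPS)) |
 *           ...
 *           0;
 *
 * Every flag tagged SDF_NEEDS_GROUPS contributes its bit (multiplied by 1),
 * every other flag contributes 0: the mask collects exactly those flags that
 * are meaningless unless the domain has at least two groups.
 */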
171  static int sd_degenerate(struct sched_domain *sd)
172  {
173  	if (cpumask_weight(sched_domain_span(sd)) == 1)
174  		return 1;
175  
176  	/* Following flags need at least 2 groups */
177  	if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
178  	    (sd->groups != sd->groups->next))
179  		return 0;
180  
181  	/* Following flags don't use groups */
182  	if (sd->flags & (SD_WAKE_AFFINE))
183  		return 0;
184  
185  	return 1;
186  }
187  
188  static int
189  sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
190  {
191  	unsigned long cflags = sd->flags, pflags = parent->flags;
192  
193  	if (sd_degenerate(parent))
194  		return 1;
195  
196  	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
197  		return 0;
198  
199  	/* Flags needing groups don't count if only 1 group in parent */
200  	if (parent->groups == parent->groups->next)
201  		pflags &= ~SD_DEGENERATE_GROUPS_MASK;
202  
203  	if (~cflags & pflags)
204  		return 0;
205  
206  	return 1;
207  }
208  
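/*
 * A small worked example of the "~cflags & pflags" test above (hypothetical
 * bit patterns, illustration only): with cflags = 0b0111 and pflags = 0b0101
 * the parent sets no flag the child lacks, ~cflags & pflags == 0, and the
 * parent can be collapsed. With pflags = 0b1001 the parent carries bit 3,
 * which the child does not, ~cflags & pflags != 0, and the parent is kept.
 */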
209  #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
210  DEFINE_STATIC_KEY_FALSE(sched_energy_present);
211  static unsigned int sysctl_sched_energy_aware = 1;
212  static DEFINE_MUTEX(sched_energy_mutex);
213  static bool sched_energy_update;
214  
215  void rebuild_sched_domains_energy(void)
216  {
217  	mutex_lock(&sched_energy_mutex);
218  	sched_energy_update = true;
219  	rebuild_sched_domains();
220  	sched_energy_update = false;
221  	mutex_unlock(&sched_energy_mutex);
222  }
223  
224  #ifdef CONFIG_PROC_SYSCTL
225  static int sched_energy_aware_handler(struct ctl_table *table, int write,
226  		void *buffer, size_t *lenp, loff_t *ppos)
227  {
228  	int ret, state;
229  
230  	if (write && !capable(CAP_SYS_ADMIN))
231  		return -EPERM;
232  
233  	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
234  	if (!ret && write) {
235  		state = static_branch_unlikely(&sched_energy_present);
236  		if (state != sysctl_sched_energy_aware)
237  			rebuild_sched_domains_energy();
238  	}
239  
240  	return ret;
241  }
242  
243  static struct ctl_table sched_energy_aware_sysctls[] = {
244  	{
245  		.procname       = "sched_energy_aware",
246  		.data           = &sysctl_sched_energy_aware,
247  		.maxlen         = sizeof(unsigned int),
248  		.mode           = 0644,
249  		.proc_handler   = sched_energy_aware_handler,
250  		.extra1         = SYSCTL_ZERO,
251  		.extra2         = SYSCTL_ONE,
252  	},
253  	{}
254  };
255  
256  static int __init sched_energy_aware_sysctl_init(void)
257  {
258  	register_sysctl_init("kernel", sched_energy_aware_sysctls);
259  	return 0;
260  }
261  
262  late_initcall(sched_energy_aware_sysctl_init);
263  #endif
264  
265  static void free_pd(struct perf_domain *pd)
266  {
267  	struct perf_domain *tmp;
268  
269  	while (pd) {
270  		tmp = pd->next;
271  		kfree(pd);
272  		pd = tmp;
273  	}
274  }
275  
276  static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
277  {
278  	while (pd) {
279  		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
280  			return pd;
281  		pd = pd->next;
282  	}
283  
284  	return NULL;
285  }
286  
287  static struct perf_domain *pd_init(int cpu)
288  {
289  	struct em_perf_domain *obj = em_cpu_get(cpu);
290  	struct perf_domain *pd;
291  
292  	if (!obj) {
293  		if (sched_debug())
294  			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
295  		return NULL;
296  	}
297  
298  	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
299  	if (!pd)
300  		return NULL;
301  	pd->em_pd = obj;
302  
303  	return pd;
304  }
305  
306  static void perf_domain_debug(const struct cpumask *cpu_map,
307  						struct perf_domain *pd)
308  {
309  	if (!sched_debug() || !pd)
310  		return;
311  
312  	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
313  
314  	while (pd) {
315  		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
316  				cpumask_first(perf_domain_span(pd)),
317  				cpumask_pr_args(perf_domain_span(pd)),
318  				em_pd_nr_perf_states(pd->em_pd));
319  		pd = pd->next;
320  	}
321  
322  	printk(KERN_CONT "\n");
323  }
324  
325  static void destroy_perf_domain_rcu(struct rcu_head *rp)
326  {
327  	struct perf_domain *pd;
328  
329  	pd = container_of(rp, struct perf_domain, rcu);
330  	free_pd(pd);
331  }
332  
333  static void sched_energy_set(bool has_eas)
334  {
335  	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
336  		if (sched_debug())
337  			pr_info("%s: stopping EAS\n", __func__);
338  		static_branch_disable_cpuslocked(&sched_energy_present);
339  	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
340  		if (sched_debug())
341  			pr_info("%s: starting EAS\n", __func__);
342  		static_branch_enable_cpuslocked(&sched_energy_present);
343  	}
344  }
345  
346  /*
347   * EAS can be used on a root domain if it meets all the following conditions:
348   *    1. an Energy Model (EM) is available;
349   *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
350   *    3. no SMT is detected;
351   *    4. the EM complexity is low enough to keep scheduling overheads low;
352   *    5. schedutil is driving the frequency of all CPUs of the rd;
353   *    6. frequency invariance support is present.
354   *
355   * The complexity of the Energy Model is defined as:
356   *
357   *              C = nr_pd * (nr_cpus + nr_ps)
358   *
359   * with parameters defined as:
360   *  - nr_pd:    the number of performance domains
361   *  - nr_cpus:  the number of CPUs
362   *  - nr_ps:    the sum of the number of performance states of all performance
363   *              domains (for example, on a system with 2 performance domains,
364   *              with 10 performance states each, nr_ps = 2 * 10 = 20).
365   *
366   * It is generally not a good idea to use such a model in the wake-up path on
367   * very complex platforms because of the associated scheduling overheads. The
368   * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
369   * with per-CPU DVFS and less than 8 performance states each, for example.
370   */
371  #define EM_MAX_COMPLEXITY 2048
372  
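/*
 * A worked example of the bound (illustrative numbers only): a platform with
 * 4 performance domains of 4 CPUs each and 8 performance states per domain
 * gives nr_pd = 4, nr_cpus = 16, nr_ps = 4 * 8 = 32, hence
 * C = 4 * (16 + 32) = 192, well below EM_MAX_COMPLEXITY. A 64-CPU system with
 * per-CPU performance domains of 8 states each gives
 * C = 64 * (64 + 512) = 36864, and build_perf_domains() below refuses to
 * enable EAS.
 */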
373  extern struct cpufreq_governor schedutil_gov;
374  static bool build_perf_domains(const struct cpumask *cpu_map)
375  {
376  	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
377  	struct perf_domain *pd = NULL, *tmp;
378  	int cpu = cpumask_first(cpu_map);
379  	struct root_domain *rd = cpu_rq(cpu)->rd;
380  	struct cpufreq_policy *policy;
381  	struct cpufreq_governor *gov;
382  
383  	if (!sysctl_sched_energy_aware)
384  		goto free;
385  
386  	/* EAS is enabled for asymmetric CPU capacity topologies. */
387  	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
388  		if (sched_debug()) {
389  			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
390  					cpumask_pr_args(cpu_map));
391  		}
392  		goto free;
393  	}
394  
395  	/* EAS definitely does *not* handle SMT */
396  	if (sched_smt_active()) {
397  		pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
398  			cpumask_pr_args(cpu_map));
399  		goto free;
400  	}
401  
402  	if (!arch_scale_freq_invariant()) {
403  		if (sched_debug()) {
404  			pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported",
405  				cpumask_pr_args(cpu_map));
406  		}
407  		goto free;
408  	}
409  
410  	for_each_cpu(i, cpu_map) {
411  		/* Skip already covered CPUs. */
412  		if (find_pd(pd, i))
413  			continue;
414  
415  		/* Do not attempt EAS if schedutil is not being used. */
416  		policy = cpufreq_cpu_get(i);
417  		if (!policy)
418  			goto free;
419  		gov = policy->governor;
420  		cpufreq_cpu_put(policy);
421  		if (gov != &schedutil_gov) {
422  			if (rd->pd)
423  				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
424  						cpumask_pr_args(cpu_map));
425  			goto free;
426  		}
427  
428  		/* Create the new pd and add it to the local list. */
429  		tmp = pd_init(i);
430  		if (!tmp)
431  			goto free;
432  		tmp->next = pd;
433  		pd = tmp;
434  
435  		/*
436  		 * Count performance domains and performance states for the
437  		 * complexity check.
438  		 */
439  		nr_pd++;
440  		nr_ps += em_pd_nr_perf_states(pd->em_pd);
441  	}
442  
443  	/* Bail out if the Energy Model complexity is too high. */
444  	if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
445  		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
446  						cpumask_pr_args(cpu_map));
447  		goto free;
448  	}
449  
450  	perf_domain_debug(cpu_map, pd);
451  
452  	/* Attach the new list of performance domains to the root domain. */
453  	tmp = rd->pd;
454  	rcu_assign_pointer(rd->pd, pd);
455  	if (tmp)
456  		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
457  
458  	return !!pd;
459  
460  free:
461  	free_pd(pd);
462  	tmp = rd->pd;
463  	rcu_assign_pointer(rd->pd, NULL);
464  	if (tmp)
465  		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
466  
467  	return false;
468  }
469  #else
470  static void free_pd(struct perf_domain *pd) { }
471  #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
472  
473  static void free_rootdomain(struct rcu_head *rcu)
474  {
475  	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
476  
477  	cpupri_cleanup(&rd->cpupri);
478  	cpudl_cleanup(&rd->cpudl);
479  	free_cpumask_var(rd->dlo_mask);
480  	free_cpumask_var(rd->rto_mask);
481  	free_cpumask_var(rd->online);
482  	free_cpumask_var(rd->span);
483  	free_pd(rd->pd);
484  	kfree(rd);
485  }
486  
487  void rq_attach_root(struct rq *rq, struct root_domain *rd)
488  {
489  	struct root_domain *old_rd = NULL;
490  	struct rq_flags rf;
491  
492  	rq_lock_irqsave(rq, &rf);
493  
494  	if (rq->rd) {
495  		old_rd = rq->rd;
496  
497  		if (cpumask_test_cpu(rq->cpu, old_rd->online))
498  			set_rq_offline(rq);
499  
500  		cpumask_clear_cpu(rq->cpu, old_rd->span);
501  
502  		/*
503  		 * If we don't want to free the old_rd yet then
504  		 * set old_rd to NULL to skip the freeing later
505  		 * in this function:
506  		 */
507  		if (!atomic_dec_and_test(&old_rd->refcount))
508  			old_rd = NULL;
509  	}
510  
511  	atomic_inc(&rd->refcount);
512  	rq->rd = rd;
513  
514  	cpumask_set_cpu(rq->cpu, rd->span);
515  	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
516  		set_rq_online(rq);
517  
518  	rq_unlock_irqrestore(rq, &rf);
519  
520  	if (old_rd)
521  		call_rcu(&old_rd->rcu, free_rootdomain);
522  }
523  
524  void sched_get_rd(struct root_domain *rd)
525  {
526  	atomic_inc(&rd->refcount);
527  }
528  
529  void sched_put_rd(struct root_domain *rd)
530  {
531  	if (!atomic_dec_and_test(&rd->refcount))
532  		return;
533  
534  	call_rcu(&rd->rcu, free_rootdomain);
535  }
536  
537  static int init_rootdomain(struct root_domain *rd)
538  {
539  	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
540  		goto out;
541  	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
542  		goto free_span;
543  	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
544  		goto free_online;
545  	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
546  		goto free_dlo_mask;
547  
548  #ifdef HAVE_RT_PUSH_IPI
549  	rd->rto_cpu = -1;
550  	raw_spin_lock_init(&rd->rto_lock);
551  	rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
552  #endif
553  
554  	rd->visit_gen = 0;
555  	init_dl_bw(&rd->dl_bw);
556  	if (cpudl_init(&rd->cpudl) != 0)
557  		goto free_rto_mask;
558  
559  	if (cpupri_init(&rd->cpupri) != 0)
560  		goto free_cpudl;
561  	return 0;
562  
563  free_cpudl:
564  	cpudl_cleanup(&rd->cpudl);
565  free_rto_mask:
566  	free_cpumask_var(rd->rto_mask);
567  free_dlo_mask:
568  	free_cpumask_var(rd->dlo_mask);
569  free_online:
570  	free_cpumask_var(rd->online);
571  free_span:
572  	free_cpumask_var(rd->span);
573  out:
574  	return -ENOMEM;
575  }
576  
577  /*
578   * By default the system creates a single root-domain with all CPUs as
579   * members (mimicking the global state we have today).
580   */
581  struct root_domain def_root_domain;
582  
583  void __init init_defrootdomain(void)
584  {
585  	init_rootdomain(&def_root_domain);
586  
587  	atomic_set(&def_root_domain.refcount, 1);
588  }
589  
590  static struct root_domain *alloc_rootdomain(void)
591  {
592  	struct root_domain *rd;
593  
594  	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
595  	if (!rd)
596  		return NULL;
597  
598  	if (init_rootdomain(rd) != 0) {
599  		kfree(rd);
600  		return NULL;
601  	}
602  
603  	return rd;
604  }
605  
606  static void free_sched_groups(struct sched_group *sg, int free_sgc)
607  {
608  	struct sched_group *tmp, *first;
609  
610  	if (!sg)
611  		return;
612  
613  	first = sg;
614  	do {
615  		tmp = sg->next;
616  
617  		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
618  			kfree(sg->sgc);
619  
620  		if (atomic_dec_and_test(&sg->ref))
621  			kfree(sg);
622  		sg = tmp;
623  	} while (sg != first);
624  }
625  
626  static void destroy_sched_domain(struct sched_domain *sd)
627  {
628  	/*
629  	 * A normal sched domain may have multiple group references; an
630  	 * overlapping domain, having private groups, has only one. Iterate,
631  	 * dropping group/capacity references and freeing where none remain.
632  	 */
633  	free_sched_groups(sd->groups, 1);
634  
635  	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
636  		kfree(sd->shared);
637  	kfree(sd);
638  }
639  
640  static void destroy_sched_domains_rcu(struct rcu_head *rcu)
641  {
642  	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
643  
644  	while (sd) {
645  		struct sched_domain *parent = sd->parent;
646  		destroy_sched_domain(sd);
647  		sd = parent;
648  	}
649  }
650  
651  static void destroy_sched_domains(struct sched_domain *sd)
652  {
653  	if (sd)
654  		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
655  }
656  
657  /*
658   * Keep a special pointer to the highest sched_domain that has
659   * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
660   * allows us to avoid some pointer chasing in select_idle_sibling().
661   *
662   * Also keep a unique ID per domain (we use the first CPU number in
663   * the cpumask of the domain), this allows us to quickly tell if
664   * two CPUs are in the same cache domain, see cpus_share_cache().
665   */
666  DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
667  DEFINE_PER_CPU(int, sd_llc_size);
668  DEFINE_PER_CPU(int, sd_llc_id);
669  DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
670  DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
671  DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
672  DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
673  DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
674  
675  static void update_top_cache_domain(int cpu)
676  {
677  	struct sched_domain_shared *sds = NULL;
678  	struct sched_domain *sd;
679  	int id = cpu;
680  	int size = 1;
681  
682  	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
683  	if (sd) {
684  		id = cpumask_first(sched_domain_span(sd));
685  		size = cpumask_weight(sched_domain_span(sd));
686  		sds = sd->shared;
687  	}
688  
689  	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
690  	per_cpu(sd_llc_size, cpu) = size;
691  	per_cpu(sd_llc_id, cpu) = id;
692  	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
693  
694  	sd = lowest_flag_domain(cpu, SD_NUMA);
695  	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
696  
697  	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
698  	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
699  
700  	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
701  	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
702  }
703  
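/*
 * Consumer-side sketch (simplified; the real helper lives in core.c): the
 * per-CPU sd_llc_id written above is what lets cpus_share_cache() answer
 * "same LLC?" with two loads and no domain-tree walk, roughly:
 *
 *   bool cpus_share_cache(int this_cpu, int that_cpu)
 *   {
 *           if (this_cpu == that_cpu)
 *                   return true;
 *           return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 *   }
 */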
704  /*
705   * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
706   * hold the hotplug lock.
707   */
708  static void
709  cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
710  {
711  	struct rq *rq = cpu_rq(cpu);
712  	struct sched_domain *tmp;
713  
714  	/* Remove the sched domains which do not contribute to scheduling. */
715  	for (tmp = sd; tmp; ) {
716  		struct sched_domain *parent = tmp->parent;
717  		if (!parent)
718  			break;
719  
720  		if (sd_parent_degenerate(tmp, parent)) {
721  			tmp->parent = parent->parent;
722  
723  			if (parent->parent) {
724  				parent->parent->child = tmp;
725  				parent->parent->groups->flags = tmp->flags;
726  			}
727  
728  			/*
729  			 * Transfer SD_PREFER_SIBLING down in case of a
730  			 * degenerate parent; the spans match for this
731  			 * so the property transfers.
732  			 */
733  			if (parent->flags & SD_PREFER_SIBLING)
734  				tmp->flags |= SD_PREFER_SIBLING;
735  			destroy_sched_domain(parent);
736  		} else
737  			tmp = tmp->parent;
738  	}
739  
740  	if (sd && sd_degenerate(sd)) {
741  		tmp = sd;
742  		sd = sd->parent;
743  		destroy_sched_domain(tmp);
744  		if (sd) {
745  			struct sched_group *sg = sd->groups;
746  
747  			/*
748  			 * sched groups hold the flags of the child sched
749  			 * domain for convenience. Clear such flags since
750  			 * the child is being destroyed.
751  			 */
752  			do {
753  				sg->flags = 0;
754  			} while (sg != sd->groups);
755  
756  			sd->child = NULL;
757  		}
758  	}
759  
760  	sched_domain_debug(sd, cpu);
761  
762  	rq_attach_root(rq, rd);
763  	tmp = rq->sd;
764  	rcu_assign_pointer(rq->sd, sd);
765  	dirty_sched_domain_sysctl(cpu);
766  	destroy_sched_domains(tmp);
767  
768  	update_top_cache_domain(cpu);
769  }
770  
771  struct s_data {
772  	struct sched_domain * __percpu *sd;
773  	struct root_domain	*rd;
774  };
775  
776  enum s_alloc {
777  	sa_rootdomain,
778  	sa_sd,
779  	sa_sd_storage,
780  	sa_none,
781  };
782  
783  /*
784   * Return the canonical balance CPU for this group; this is the first CPU
785   * of this group that's also in the balance mask.
786   *
787   * The balance mask contains all those CPUs that could actually end up at this
788   * group. See build_balance_mask().
789   *
790   * Also see should_we_balance().
791   */
792  int group_balance_cpu(struct sched_group *sg)
793  {
794  	return cpumask_first(group_balance_mask(sg));
795  }
796  
797  
798  /*
799   * NUMA topology (first read the regular topology blurb below)
800   *
801   * Given a node-distance table, for example:
802   *
803   *   node   0   1   2   3
804   *     0:  10  20  30  20
805   *     1:  20  10  20  30
806   *     2:  30  20  10  20
807   *     3:  20  30  20  10
808   *
809   * which represents a 4 node ring topology like:
810   *
811   *   0 ----- 1
812   *   |       |
813   *   |       |
814   *   |       |
815   *   3 ----- 2
816   *
817   * We want to construct domains and groups to represent this. The way we go
818   * about doing this is to build the domains on 'hops'. For each NUMA level we
819   * construct the mask of all nodes reachable in @level hops.
820   *
821   * For the above NUMA topology that gives 3 levels:
822   *
823   * NUMA-2	0-3		0-3		0-3		0-3
824   *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
825   *
826   * NUMA-1	0-1,3		0-2		1-3		0,2-3
827   *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
828   *
829   * NUMA-0	0		1		2		3
830   *
831   *
832   * As can be seen, things don't nicely line up as with the regular topology.
833   * When we iterate a domain in child domain chunks, some nodes can be
834   * represented multiple times -- hence the "overlap" naming for this part of
835   * the topology.
836   *
837   * In order to minimize this overlap, we only build enough groups to cover the
838   * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
839   *
840   * Because:
841   *
842   *  - the first group of each domain is its child domain; this
843   *    gets us the first 0-1,3
844   *  - the only uncovered node is 2, whose child domain is 1-3.
845   *
846   * However, because of the overlap, computing a unique CPU for each group is
847   * more complicated. Consider for instance the groups of NODE-1 NUMA-2: both
848   * groups include the CPUs of Node-0, while those CPUs would not in fact ever
849   * end up at those groups (they would end up in group: 0-1,3).
850   *
851   * To correct this we have to introduce the group balance mask. This mask
852   * will contain those CPUs in the group that can reach this group given the
853   * (child) domain tree.
854   *
855   * With this we can once again compute balance_cpu and sched_group_capacity
856   * relations.
857   *
858   * XXX include words on how balance_cpu is unique and therefore can be
859   * used for sched_group_capacity links.
860   *
861   *
862   * Another 'interesting' topology is:
863   *
864   *   node   0   1   2   3
865   *     0:  10  20  20  30
866   *     1:  20  10  20  20
867   *     2:  20  20  10  20
868   *     3:  30  20  20  10
869   *
870   * Which looks a little like:
871   *
872   *   0 ----- 1
873   *   |     / |
874   *   |   /   |
875   *   | /     |
876   *   2 ----- 3
877   *
878   * This topology is asymmetric: nodes 1,2 are fully connected, but nodes 0,3
879   * are not.
880   *
881   * This leads to a few particularly weird cases where the sched_domains are
882   * not of the same number for each CPU. Consider:
883   *
884   * NUMA-2	0-3						0-3
885   *  groups:	{0-2},{1-3}					{1-3},{0-2}
886   *
887   * NUMA-1	0-2		0-3		0-3		1-3
888   *
889   * NUMA-0	0		1		2		3
890   *
891   */
892  
893  
894  /*
895   * Build the balance mask; it contains only those CPUs that can arrive at this
896   * group and should be considered to continue balancing.
897   *
898   * We do this during the group creation pass; therefore the group information
899   * isn't complete yet. However, since each group represents a (child) domain we
900   * can fully construct this using the sched_domain bits (which are already
901   * complete).
902   */
903  static void
904  build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
905  {
906  	const struct cpumask *sg_span = sched_group_span(sg);
907  	struct sd_data *sdd = sd->private;
908  	struct sched_domain *sibling;
909  	int i;
910  
911  	cpumask_clear(mask);
912  
913  	for_each_cpu(i, sg_span) {
914  		sibling = *per_cpu_ptr(sdd->sd, i);
915  
916  		/*
917  		 * Can happen in the asymmetric case, where these siblings are
918  		 * unused. The mask will not be empty because those CPUs that
919  		 * do have the top domain _should_ span the domain.
920  		 */
921  		if (!sibling->child)
922  			continue;
923  
924  		/* If we would not end up here, we can't continue from here */
925  		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
926  			continue;
927  
928  		cpumask_set_cpu(i, mask);
929  	}
930  
931  	/* We must not have empty masks here */
932  	WARN_ON_ONCE(cpumask_empty(mask));
933  }
934  
935  /*
936   * XXX: This creates per-node group entries; since the load-balancer will
937   * immediately access remote memory to construct this group's load-balance
938   * statistics, having the groups node-local is of dubious benefit.
939   */
940  static struct sched_group *
941  build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
942  {
943  	struct sched_group *sg;
944  	struct cpumask *sg_span;
945  
946  	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
947  			GFP_KERNEL, cpu_to_node(cpu));
948  
949  	if (!sg)
950  		return NULL;
951  
952  	sg_span = sched_group_span(sg);
953  	if (sd->child) {
954  		cpumask_copy(sg_span, sched_domain_span(sd->child));
955  		sg->flags = sd->child->flags;
956  	} else {
957  		cpumask_copy(sg_span, sched_domain_span(sd));
958  	}
959  
960  	atomic_inc(&sg->ref);
961  	return sg;
962  }
963  
964  static void init_overlap_sched_group(struct sched_domain *sd,
965  				     struct sched_group *sg)
966  {
967  	struct cpumask *mask = sched_domains_tmpmask2;
968  	struct sd_data *sdd = sd->private;
969  	struct cpumask *sg_span;
970  	int cpu;
971  
972  	build_balance_mask(sd, sg, mask);
973  	cpu = cpumask_first(mask);
974  
975  	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
976  	if (atomic_inc_return(&sg->sgc->ref) == 1)
977  		cpumask_copy(group_balance_mask(sg), mask);
978  	else
979  		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
980  
981  	/*
982  	 * Initialize sgc->capacity such that even if we mess up the
983  	 * domains and no possible iteration will get us here, we won't
984  	 * die on a /0 trap.
985  	 */
986  	sg_span = sched_group_span(sg);
987  	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
988  	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
989  	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
990  }
991  
992  static struct sched_domain *
993  find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
994  {
995  	/*
996  	 * The proper descendant would be the one whose child won't span out
997  	 * of sd
998  	 */
999  	while (sibling->child &&
1000  	       !cpumask_subset(sched_domain_span(sibling->child),
1001  			       sched_domain_span(sd)))
1002  		sibling = sibling->child;
1003  
1004  	/*
1005  	 * As we are referencing sgc across different topology levels, we need
1006  	 * to go down to skip those sched_domains which don't contribute to
1007  	 * scheduling because they will be degenerated in cpu_attach_domain().
1008  	 */
1009  	while (sibling->child &&
1010  	       cpumask_equal(sched_domain_span(sibling->child),
1011  			     sched_domain_span(sibling)))
1012  		sibling = sibling->child;
1013  
1014  	return sibling;
1015  }
1016  
1017  static int
1018  build_overlap_sched_groups(struct sched_domain *sd, int cpu)
1019  {
1020  	struct sched_group *first = NULL, *last = NULL, *sg;
1021  	const struct cpumask *span = sched_domain_span(sd);
1022  	struct cpumask *covered = sched_domains_tmpmask;
1023  	struct sd_data *sdd = sd->private;
1024  	struct sched_domain *sibling;
1025  	int i;
1026  
1027  	cpumask_clear(covered);
1028  
1029  	for_each_cpu_wrap(i, span, cpu) {
1030  		struct cpumask *sg_span;
1031  
1032  		if (cpumask_test_cpu(i, covered))
1033  			continue;
1034  
1035  		sibling = *per_cpu_ptr(sdd->sd, i);
1036  
1037  		/*
1038  		 * Asymmetric node setups can result in situations where the
1039  		 * domain tree is of unequal depth, make sure to skip domains
1040  		 * that already cover the entire range.
1041  		 *
1042  		 * In that case build_sched_domains() will have terminated the
1043  		 * iteration early and our sibling sd spans will be empty.
1044  		 * Domains should always include the CPU they're built on, so
1045  		 * check that.
1046  		 */
1047  		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
1048  			continue;
1049  
1050  		/*
1051  		 * Usually we build sched_group by sibling's child sched_domain.
1052  		 * But for machines whose NUMA diameter is 3 or above, we move
1053  		 * to build sched_group by sibling's proper descendant's child
1054  		 * domain because sibling's child sched_domain will span out of
1055  		 * the sched_domain being built as below.
1056  		 *
1057  		 * Smallest diameter=3 topology is:
1058  		 *
1059  		 *   node   0   1   2   3
1060  		 *     0:  10  20  30  40
1061  		 *     1:  20  10  20  30
1062  		 *     2:  30  20  10  20
1063  		 *     3:  40  30  20  10
1064  		 *
1065  		 *   0 --- 1 --- 2 --- 3
1066  		 *
1067  		 * NUMA-3       0-3             N/A             N/A             0-3
1068  		 *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
1069  		 *
1070  		 * NUMA-2       0-2             0-3             0-3             1-3
1071  		 *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
1072  		 *
1073  		 * NUMA-1       0-1             0-2             1-3             2-3
1074  		 *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
1075  		 *
1076  		 * NUMA-0       0               1               2               3
1077  		 *
1078  		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
1079  		 * group span isn't a subset of the domain span.
1080  		 */
1081  		if (sibling->child &&
1082  		    !cpumask_subset(sched_domain_span(sibling->child), span))
1083  			sibling = find_descended_sibling(sd, sibling);
1084  
1085  		sg = build_group_from_child_sched_domain(sibling, cpu);
1086  		if (!sg)
1087  			goto fail;
1088  
1089  		sg_span = sched_group_span(sg);
1090  		cpumask_or(covered, covered, sg_span);
1091  
1092  		init_overlap_sched_group(sibling, sg);
1093  
1094  		if (!first)
1095  			first = sg;
1096  		if (last)
1097  			last->next = sg;
1098  		last = sg;
1099  		last->next = first;
1100  	}
1101  	sd->groups = first;
1102  
1103  	return 0;
1104  
1105  fail:
1106  	free_sched_groups(first, 0);
1107  
1108  	return -ENOMEM;
1109  }
1110  
1111  
1112  /*
1113   * Package topology (also see the load-balance blurb in fair.c)
1114   *
1115   * The scheduler builds a tree structure to represent a number of important
1116   * topology features. By default (default_topology[]) these include:
1117   *
1118   *  - Simultaneous multithreading (SMT)
1119   *  - Multi-Core Cache (MC)
1120   *  - Package (PKG)
1121   *
1122   * Where the last one more or less denotes everything up to a NUMA node.
1123   *
1124   * The tree consists of 3 primary data structures:
1125   *
1126   *	sched_domain -> sched_group -> sched_group_capacity
1127   *	    ^ ^             ^ ^
1128   *          `-'             `-'
1129   *
1130   * The sched_domains are per-CPU and have a two way link (parent & child) and
1131   * denote the ever growing mask of CPUs belonging to that level of topology.
1132   *
1133   * Each sched_domain has a circular (double) linked list of sched_group's, each
1134   * denoting the domains of the level below (or individual CPUs in case of the
1135   * first domain level). The sched_group linked by a sched_domain includes the
1136   * CPU of that sched_domain [*].
1137   *
1138   * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
1139   *
1140   * CPU   0   1   2   3   4   5   6   7
1141   *
1142   * PKG  [                             ]
1143   * MC   [             ] [             ]
1144   * SMT  [     ] [     ] [     ] [     ]
1145   *
1146   *  - or -
1147   *
1148   * PKG  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
1149   * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1150   * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1151   *
1152   * CPU   0   1   2   3   4   5   6   7
1153   *
1154   * One way to think about it is: sched_domain moves you up and down among these
1155   * topology levels, while sched_group moves you sideways through it, at child
1156   * domain granularity.
1157   *
1158   * sched_group_capacity ensures each unique sched_group has shared storage.
1159   *
1160   * There are two related construction problems, both of which require a CPU
1161   * that uniquely identifies each group (for a given domain):
1162   *
1163   *  - The first is the balance_cpu (see should_we_balance() and the
1164   *    load-balance blurb in fair.c); for each group we only want 1 CPU to
1165   *    continue balancing at a higher domain.
1166   *
1167   *  - The second is the sched_group_capacity; we want all identical groups
1168   *    to share a single sched_group_capacity.
1169   *
1170   * These topologies are exclusive by construction: it's impossible for an
1171   * SMT thread to belong to multiple cores, and for cores to be part of
1172   * multiple caches. There is a very clear and unique location
1173   * for each CPU in the hierarchy.
1174   *
1175   * Therefore computing a unique CPU for each group is trivial (the iteration
1176   * mask is redundant and set to all 1s; all CPUs in a group will end up at
1177   * _that_ group), so we can simply pick the first CPU in each group.
1178   *
1179   *
1180   * [*] in other words, the first group of each domain is its child domain.
1181   */
1182  
1183  static struct sched_group *get_group(int cpu, struct sd_data *sdd)
1184  {
1185  	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1186  	struct sched_domain *child = sd->child;
1187  	struct sched_group *sg;
1188  	bool already_visited;
1189  
1190  	if (child)
1191  		cpu = cpumask_first(sched_domain_span(child));
1192  
1193  	sg = *per_cpu_ptr(sdd->sg, cpu);
1194  	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1195  
1196  	/* Increase refcounts for claim_allocations: */
1197  	already_visited = atomic_inc_return(&sg->ref) > 1;
1198  	/* sgc visits should follow a similar trend as sg */
1199  	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
1200  
1201  	/* If we have already visited that group, it's already initialized. */
1202  	if (already_visited)
1203  		return sg;
1204  
1205  	if (child) {
1206  		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
1207  		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
1208  		sg->flags = child->flags;
1209  	} else {
1210  		cpumask_set_cpu(cpu, sched_group_span(sg));
1211  		cpumask_set_cpu(cpu, group_balance_mask(sg));
1212  	}
1213  
1214  	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
1215  	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1216  	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
1217  
1218  	return sg;
1219  }
1220  
1221  /*
1222   * build_sched_groups will build a circular linked list of the groups
1223   * covered by the given span, will set each group's ->cpumask correctly,
1224   * and will initialize their ->sgc.
1225   *
1226   * Assumes the sched_domain tree is fully constructed
1227   */
1228  static int
1229  build_sched_groups(struct sched_domain *sd, int cpu)
1230  {
1231  	struct sched_group *first = NULL, *last = NULL;
1232  	struct sd_data *sdd = sd->private;
1233  	const struct cpumask *span = sched_domain_span(sd);
1234  	struct cpumask *covered;
1235  	int i;
1236  
1237  	lockdep_assert_held(&sched_domains_mutex);
1238  	covered = sched_domains_tmpmask;
1239  
1240  	cpumask_clear(covered);
1241  
1242  	for_each_cpu_wrap(i, span, cpu) {
1243  		struct sched_group *sg;
1244  
1245  		if (cpumask_test_cpu(i, covered))
1246  			continue;
1247  
1248  		sg = get_group(i, sdd);
1249  
1250  		cpumask_or(covered, covered, sched_group_span(sg));
1251  
1252  		if (!first)
1253  			first = sg;
1254  		if (last)
1255  			last->next = sg;
1256  		last = sg;
1257  	}
1258  	last->next = first;
1259  	sd->groups = first;
1260  
1261  	return 0;
1262  }
1263  
1264  /*
1265   * Initialize sched groups cpu_capacity.
1266   *
1267   * cpu_capacity indicates the capacity of sched group, which is used while
1268   * distributing the load between different sched groups in a sched domain.
1269   * Typically cpu_capacity for all the groups in a sched domain will be the same
1270   * unless there are asymmetries in the topology. If there are asymmetries, the
1271   * group having more cpu_capacity will pick up more load compared to the
1272   * group having less cpu_capacity.
1273   */
1274  static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
1275  {
1276  	struct sched_group *sg = sd->groups;
1277  	struct cpumask *mask = sched_domains_tmpmask2;
1278  
1279  	WARN_ON(!sg);
1280  
1281  	do {
1282  		int cpu, cores = 0, max_cpu = -1;
1283  
1284  		sg->group_weight = cpumask_weight(sched_group_span(sg));
1285  
1286  		cpumask_copy(mask, sched_group_span(sg));
1287  		for_each_cpu(cpu, mask) {
1288  			cores++;
1289  #ifdef CONFIG_SCHED_SMT
1290  			cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
1291  #endif
1292  		}
1293  		sg->cores = cores;
1294  
1295  		if (!(sd->flags & SD_ASYM_PACKING))
1296  			goto next;
1297  
1298  		for_each_cpu(cpu, sched_group_span(sg)) {
1299  			if (max_cpu < 0)
1300  				max_cpu = cpu;
1301  			else if (sched_asym_prefer(cpu, max_cpu))
1302  				max_cpu = cpu;
1303  		}
1304  		sg->asym_prefer_cpu = max_cpu;
1305  
1306  next:
1307  		sg = sg->next;
1308  	} while (sg != sd->groups);
1309  
1310  	if (cpu != group_balance_cpu(sg))
1311  		return;
1312  
1313  	update_group_capacity(sd, cpu);
1314  }
1315  
1316  /*
1317   * Asymmetric CPU capacity bits
1318   */
1319  struct asym_cap_data {
1320  	struct list_head link;
1321  	unsigned long capacity;
1322  	unsigned long cpus[];
1323  };
1324  
1325  /*
1326   * Set of available CPUs grouped by their corresponding capacities
1327   * Set of available CPUs grouped by their corresponding capacities.
1328   * capacity.
1329   * The lifespan of data is unlimited.
1330   */
1331  static LIST_HEAD(asym_cap_list);
1332  
1333  #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
1334  
1335  /*
1336   * Verify whether there is any CPU capacity asymmetry in a given sched domain.
1337   * Provides sd_flags reflecting the asymmetry scope.
1338   */
1339  static inline int
1340  asym_cpu_capacity_classify(const struct cpumask *sd_span,
1341  			   const struct cpumask *cpu_map)
1342  {
1343  	struct asym_cap_data *entry;
1344  	int count = 0, miss = 0;
1345  
1346  	/*
1347  	 * Count how many unique CPU capacities this domain spans across
1348  	 * (compare sched_domain CPUs mask with ones representing available
1349  	 * CPUs capacities). Take into account CPUs that might be offline:
1350  	 * skip those.
1351  	 */
1352  	list_for_each_entry(entry, &asym_cap_list, link) {
1353  		if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))
1354  			++count;
1355  		else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))
1356  			++miss;
1357  	}
1358  
1359  	WARN_ON_ONCE(!count && !list_empty(&asym_cap_list));
1360  
1361  	/* No asymmetry detected */
1362  	if (count < 2)
1363  		return 0;
1364  	/* Some of the available CPU capacity values have not been detected */
1365  	if (miss)
1366  		return SD_ASYM_CPUCAPACITY;
1367  
1368  	/* Full asymmetry */
1369  	return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
1370  
1371  }
1372  
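/*
 * Example of the classification above (hypothetical capacities, illustration
 * only): with asym_cap_list holding CPUs 0-1 at capacity 256, 2-3 at 512 and
 * 4-7 at 1024, a domain spanning 0-7 intersects all three entries
 * (count == 3, miss == 0) and is tagged
 * SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL. A domain spanning 0-3 sees
 * count == 2, miss == 1 (the 1024 entry is only reachable via cpu_map) and
 * gets SD_ASYM_CPUCAPACITY alone, while a domain spanning just 0-1 sees
 * count == 1 and is reported as symmetric.
 */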
1373  static inline void asym_cpu_capacity_update_data(int cpu)
1374  {
1375  	unsigned long capacity = arch_scale_cpu_capacity(cpu);
1376  	struct asym_cap_data *entry = NULL;
1377  
1378  	list_for_each_entry(entry, &asym_cap_list, link) {
1379  		if (capacity == entry->capacity)
1380  			goto done;
1381  	}
1382  
1383  	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
1384  	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
1385  		return;
1386  	entry->capacity = capacity;
1387  	list_add(&entry->link, &asym_cap_list);
1388  done:
1389  	__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
1390  }
1391  
1392  /*
1393   * Build-up/update list of CPUs grouped by their capacities
1394   * Build-up/update the list of CPUs grouped by their capacities.
1395   * An update requires an explicit request to rebuild sched domains,
1396   */
1397  static void asym_cpu_capacity_scan(void)
1398  {
1399  	struct asym_cap_data *entry, *next;
1400  	int cpu;
1401  
1402  	list_for_each_entry(entry, &asym_cap_list, link)
1403  		cpumask_clear(cpu_capacity_span(entry));
1404  
1405  	for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN))
1406  		asym_cpu_capacity_update_data(cpu);
1407  
1408  	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
1409  		if (cpumask_empty(cpu_capacity_span(entry))) {
1410  			list_del(&entry->link);
1411  			kfree(entry);
1412  		}
1413  	}
1414  
1415  	/*
1416  	 * Only one capacity value has been detected i.e. this system is symmetric.
1417  	 * Only one capacity value has been detected, i.e. this system is symmetric.
1418  	 */
1419  	if (list_is_singular(&asym_cap_list)) {
1420  		entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
1421  		list_del(&entry->link);
1422  		kfree(entry);
1423  	}
1424  }
1425  
1426  /*
1427   * Initializers for schedule domains
1428   * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1429   */
1430  
1431  static int default_relax_domain_level = -1;
1432  int sched_domain_level_max;
1433  
1434  static int __init setup_relax_domain_level(char *str)
1435  {
1436  	if (kstrtoint(str, 0, &default_relax_domain_level))
1437  		pr_warn("Unable to set relax_domain_level\n");
1438  
1439  	return 1;
1440  }
1441  __setup("relax_domain_level=", setup_relax_domain_level);
1442  
1443  static void set_domain_attribute(struct sched_domain *sd,
1444  				 struct sched_domain_attr *attr)
1445  {
1446  	int request;
1447  
1448  	if (!attr || attr->relax_domain_level < 0) {
1449  		if (default_relax_domain_level < 0)
1450  			return;
1451  		request = default_relax_domain_level;
1452  	} else
1453  		request = attr->relax_domain_level;
1454  
1455  	if (sd->level >= request) {
1456  		/* Turn off idle balance on this domain: */
1457  		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1458  	}
1459  }
1460  
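/*
 * Usage sketch (illustrative): booting with "relax_domain_level=2" stores 2
 * in default_relax_domain_level, and set_domain_attribute() above then clears
 * SD_BALANCE_WAKE and SD_BALANCE_NEWIDLE on every sched_domain whose ->level
 * is >= 2, i.e. newidle/wake balancing is only attempted in the smaller,
 * lower domains. Cpusets can request the same per domain via their
 * sched_relax_domain_level attribute.
 */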
1461  static void __sdt_free(const struct cpumask *cpu_map);
1462  static int __sdt_alloc(const struct cpumask *cpu_map);
1463  
1464  static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
1465  				 const struct cpumask *cpu_map)
1466  {
1467  	switch (what) {
1468  	case sa_rootdomain:
1469  		if (!atomic_read(&d->rd->refcount))
1470  			free_rootdomain(&d->rd->rcu);
1471  		fallthrough;
1472  	case sa_sd:
1473  		free_percpu(d->sd);
1474  		fallthrough;
1475  	case sa_sd_storage:
1476  		__sdt_free(cpu_map);
1477  		fallthrough;
1478  	case sa_none:
1479  		break;
1480  	}
1481  }
1482  
1483  static enum s_alloc
1484  __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
1485  {
1486  	memset(d, 0, sizeof(*d));
1487  
1488  	if (__sdt_alloc(cpu_map))
1489  		return sa_sd_storage;
1490  	d->sd = alloc_percpu(struct sched_domain *);
1491  	if (!d->sd)
1492  		return sa_sd_storage;
1493  	d->rd = alloc_rootdomain();
1494  	if (!d->rd)
1495  		return sa_sd;
1496  
1497  	return sa_rootdomain;
1498  }
1499  
1500  /*
1501   * NULL the sd_data elements we've used to build the sched_domain and
1502   * sched_group structure so that the subsequent __free_domain_allocs()
1503   * will not free the data we're using.
1504   */
1505  static void claim_allocations(int cpu, struct sched_domain *sd)
1506  {
1507  	struct sd_data *sdd = sd->private;
1508  
1509  	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1510  	*per_cpu_ptr(sdd->sd, cpu) = NULL;
1511  
1512  	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
1513  		*per_cpu_ptr(sdd->sds, cpu) = NULL;
1514  
1515  	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
1516  		*per_cpu_ptr(sdd->sg, cpu) = NULL;
1517  
1518  	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
1519  		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
1520  }
1521  
1522  #ifdef CONFIG_NUMA
1523  enum numa_topology_type sched_numa_topology_type;
1524  
1525  static int			sched_domains_numa_levels;
1526  static int			sched_domains_curr_level;
1527  
1528  int				sched_max_numa_distance;
1529  static int			*sched_domains_numa_distance;
1530  static struct cpumask		***sched_domains_numa_masks;
1531  #endif
1532  
1533  /*
1534   * SD_flags allowed in topology descriptions.
1535   *
1536   * These flags are purely descriptive of the topology and do not prescribe
1537   * behaviour. Behaviour is artificial and mapped in the below sd_init()
1538   * function:
1539   *
1540   *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
1541   *   SD_SHARE_PKG_RESOURCES - describes shared caches
1542   *   SD_NUMA                - describes NUMA topologies
1543   *
1544   * Odd one out, which besides describing the topology has a quirk also
1545   * prescribes the desired behaviour that goes along with it:
1546   *
1547   *   SD_ASYM_PACKING        - describes SMT quirks
1548   */
1549  #define TOPOLOGY_SD_FLAGS		\
1550  	(SD_SHARE_CPUCAPACITY	|	\
1551  	 SD_SHARE_PKG_RESOURCES |	\
1552  	 SD_NUMA		|	\
1553  	 SD_ASYM_PACKING)
1554  
1555  static struct sched_domain *
1556  sd_init(struct sched_domain_topology_level *tl,
1557  	const struct cpumask *cpu_map,
1558  	struct sched_domain *child, int cpu)
1559  {
1560  	struct sd_data *sdd = &tl->data;
1561  	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1562  	int sd_id, sd_weight, sd_flags = 0;
1563  	struct cpumask *sd_span;
1564  
1565  #ifdef CONFIG_NUMA
1566  	/*
1567  	 * Ugly hack to pass state to sd_numa_mask()...
1568  	 */
1569  	sched_domains_curr_level = tl->numa_level;
1570  #endif
1571  
1572  	sd_weight = cpumask_weight(tl->mask(cpu));
1573  
1574  	if (tl->sd_flags)
1575  		sd_flags = (*tl->sd_flags)();
1576  	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
1577  			"wrong sd_flags in topology description\n"))
1578  		sd_flags &= TOPOLOGY_SD_FLAGS;
1579  
1580  	*sd = (struct sched_domain){
1581  		.min_interval		= sd_weight,
1582  		.max_interval		= 2*sd_weight,
1583  		.busy_factor		= 16,
1584  		.imbalance_pct		= 117,
1585  
1586  		.cache_nice_tries	= 0,
1587  
1588  		.flags			= 1*SD_BALANCE_NEWIDLE
1589  					| 1*SD_BALANCE_EXEC
1590  					| 1*SD_BALANCE_FORK
1591  					| 0*SD_BALANCE_WAKE
1592  					| 1*SD_WAKE_AFFINE
1593  					| 0*SD_SHARE_CPUCAPACITY
1594  					| 0*SD_SHARE_PKG_RESOURCES
1595  					| 0*SD_SERIALIZE
1596  					| 1*SD_PREFER_SIBLING
1597  					| 0*SD_NUMA
1598  					| sd_flags
1599  					,
1600  
1601  		.last_balance		= jiffies,
1602  		.balance_interval	= sd_weight,
1603  		.max_newidle_lb_cost	= 0,
1604  		.last_decay_max_lb_cost	= jiffies,
1605  		.child			= child,
1606  #ifdef CONFIG_SCHED_DEBUG
1607  		.name			= tl->name,
1608  #endif
1609  	};
1610  
1611  	sd_span = sched_domain_span(sd);
1612  	cpumask_and(sd_span, cpu_map, tl->mask(cpu));
1613  	sd_id = cpumask_first(sd_span);
1614  
1615  	sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
1616  
1617  	WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
1618  		  (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
1619  		  "CPU capacity asymmetry not supported on SMT\n");
1620  
1621  	/*
1622  	 * Convert topological properties into behaviour.
1623  	 */
1624  	/* Don't attempt to spread across CPUs of different capacities. */
1625  	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
1626  		sd->child->flags &= ~SD_PREFER_SIBLING;
1627  
1628  	if (sd->flags & SD_SHARE_CPUCAPACITY) {
1629  		sd->imbalance_pct = 110;
1630  
1631  	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1632  		sd->imbalance_pct = 117;
1633  		sd->cache_nice_tries = 1;
1634  
1635  #ifdef CONFIG_NUMA
1636  	} else if (sd->flags & SD_NUMA) {
1637  		sd->cache_nice_tries = 2;
1638  
1639  		sd->flags &= ~SD_PREFER_SIBLING;
1640  		sd->flags |= SD_SERIALIZE;
1641  		if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
1642  			sd->flags &= ~(SD_BALANCE_EXEC |
1643  				       SD_BALANCE_FORK |
1644  				       SD_WAKE_AFFINE);
1645  		}
1646  
1647  #endif
1648  	} else {
1649  		sd->cache_nice_tries = 1;
1650  	}
1651  
1652  	/*
1653  	 * For all levels sharing cache; connect a sched_domain_shared
1654  	 * instance.
1655  	 */
1656  	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1657  		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1658  		atomic_inc(&sd->shared->ref);
1659  		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1660  	}
1661  
1662  	sd->private = sdd;
1663  
1664  	return sd;
1665  }
1666  
1667  /*
1668   * Topology list, bottom-up.
1669   */
1670  static struct sched_domain_topology_level default_topology[] = {
1671  #ifdef CONFIG_SCHED_SMT
1672  	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
1673  #endif
1674  
1675  #ifdef CONFIG_SCHED_CLUSTER
1676  	{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
1677  #endif
1678  
1679  #ifdef CONFIG_SCHED_MC
1680  	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
1681  #endif
1682  	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
1683  	{ NULL, },
1684  };
1685  
1686  static struct sched_domain_topology_level *sched_domain_topology =
1687  	default_topology;
1688  static struct sched_domain_topology_level *sched_domain_topology_saved;
1689  
1690  #define for_each_sd_topology(tl)			\
1691  	for (tl = sched_domain_topology; tl->mask; tl++)
1692  
1693  void __init set_sched_topology(struct sched_domain_topology_level *tl)
1694  {
1695  	if (WARN_ON_ONCE(sched_smp_initialized))
1696  		return;
1697  
1698  	sched_domain_topology = tl;
1699  	sched_domain_topology_saved = NULL;
1700  }
1701  
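/*
 * Usage sketch (hypothetical arch-side table, illustrative names only): an
 * architecture wanting a non-default hierarchy builds its own bottom-up
 * table and registers it before SMP bring-up, e.g.:
 *
 *   static struct sched_domain_topology_level my_topology[] = {
 *   #ifdef CONFIG_SCHED_SMT
 *           { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *   #endif
 *           { cpu_cpu_mask, SD_INIT_NAME(PKG) },
 *           { NULL, },
 *   };
 *
 *   set_sched_topology(my_topology);
 *
 * Calling it after sched_smp_initialized is set is a no-op (see the WARN
 * above).
 */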
1702  #ifdef CONFIG_NUMA
1703  
1704  static const struct cpumask *sd_numa_mask(int cpu)
1705  {
1706  	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1707  }
1708  
1709  static void sched_numa_warn(const char *str)
1710  {
1711  	static int done = false;
1712  	int i, j;
1713  
1714  	if (done)
1715  		return;
1716  
1717  	done = true;
1718  
1719  	printk(KERN_WARNING "ERROR: %s\n\n", str);
1720  
1721  	for (i = 0; i < nr_node_ids; i++) {
1722  		printk(KERN_WARNING "  ");
1723  		for (j = 0; j < nr_node_ids; j++) {
1724  			if (!node_state(i, N_CPU) || !node_state(j, N_CPU))
1725  				printk(KERN_CONT "(%02d) ", node_distance(i,j));
1726  			else
1727  				printk(KERN_CONT " %02d  ", node_distance(i,j));
1728  		}
1729  		printk(KERN_CONT "\n");
1730  	}
1731  	printk(KERN_WARNING "\n");
1732  }
1733  
1734  bool find_numa_distance(int distance)
1735  {
1736  	bool found = false;
1737  	int i, *distances;
1738  
1739  	if (distance == node_distance(0, 0))
1740  		return true;
1741  
1742  	rcu_read_lock();
1743  	distances = rcu_dereference(sched_domains_numa_distance);
1744  	if (!distances)
1745  		goto unlock;
1746  	for (i = 0; i < sched_domains_numa_levels; i++) {
1747  		if (distances[i] == distance) {
1748  			found = true;
1749  			break;
1750  		}
1751  	}
1752  unlock:
1753  	rcu_read_unlock();
1754  
1755  	return found;
1756  }
1757  
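      /* Iterate over all nodes that have CPUs, skipping node @nbut. */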
1758  #define for_each_cpu_node_but(n, nbut)		\
1759  	for_each_node_state(n, N_CPU)		\
1760  		if (n == nbut)			\
1761  			continue;		\
1762  		else
1763  
1764  /*
1765   * A system can have three types of NUMA topology:
1766   * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
1767   * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
1768   * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
1769   *
1770   * The difference between a glueless mesh topology and a backplane
1771   * topology lies in whether communication between not directly
1772   * connected nodes goes through intermediary nodes (where programs
1773   * could run), or through backplane controllers. This affects
1774   * placement of programs.
1775   *
1776   * The type of topology can be discerned with the following tests:
1777   * - If the maximum distance between any nodes is 1 hop, the system
1778   *   is directly connected.
1779   * - If for two nodes A and B, located N > 1 hops away from each other,
1780   *   there is an intermediary node C, which is < N hops away from both
1781   *   nodes A and B, the system is a glueless mesh.
1782   */
1783  static void init_numa_topology_type(int offline_node)
1784  {
1785  	int a, b, c, n;
1786  
1787  	n = sched_max_numa_distance;
1788  
1789  	if (sched_domains_numa_levels <= 2) {
1790  		sched_numa_topology_type = NUMA_DIRECT;
1791  		return;
1792  	}
1793  
1794  	for_each_cpu_node_but(a, offline_node) {
1795  		for_each_cpu_node_but(b, offline_node) {
1796  			/* Find two nodes furthest removed from each other. */
1797  			if (node_distance(a, b) < n)
1798  				continue;
1799  
1800  			/* Is there an intermediary node between a and b? */
1801  			for_each_cpu_node_but(c, offline_node) {
1802  				if (node_distance(a, c) < n &&
1803  				    node_distance(b, c) < n) {
1804  					sched_numa_topology_type =
1805  							NUMA_GLUELESS_MESH;
1806  					return;
1807  				}
1808  			}
1809  
1810  			sched_numa_topology_type = NUMA_BACKPLANE;
1811  			return;
1812  		}
1813  	}
1814  
1815  	pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n");
1816  	sched_numa_topology_type = NUMA_DIRECT;
1817  }
1818  
1819  
1820  #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
1821  
1822  void sched_init_numa(int offline_node)
1823  {
1824  	struct sched_domain_topology_level *tl;
1825  	unsigned long *distance_map;
1826  	int nr_levels = 0;
1827  	int i, j;
1828  	int *distances;
1829  	struct cpumask ***masks;
1830  
1831  	/*
1832  	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
1833  	 * unique distances in the node_distance() table.
1834  	 */
1835  	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
1836  	if (!distance_map)
1837  		return;
1838  
1839  	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
1840  	for_each_cpu_node_but(i, offline_node) {
1841  		for_each_cpu_node_but(j, offline_node) {
1842  			int distance = node_distance(i, j);
1843  
1844  			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
1845  				sched_numa_warn("Invalid distance value range");
1846  				bitmap_free(distance_map);
1847  				return;
1848  			}
1849  
1850  			bitmap_set(distance_map, distance, 1);
1851  		}
1852  	}
1853  	/*
1854  	 * We can now figure out how many unique distance values there are and
1855  	 * allocate memory accordingly.
1856  	 */
1857  	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
1858  
1859  	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
1860  	if (!distances) {
1861  		bitmap_free(distance_map);
1862  		return;
1863  	}
1864  
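      	/*
      	 * Walk the set bits of distance_map in increasing order, so that
      	 * distances[] ends up sorted in ascending order.
      	 */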
1865  	for (i = 0, j = 0; i < nr_levels; i++, j++) {
1866  		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
1867  		distances[i] = j;
1868  	}
1869  	rcu_assign_pointer(sched_domains_numa_distance, distances);
1870  
1871  	bitmap_free(distance_map);
1872  
1873  	/*
1874  	 * 'nr_levels' contains the number of unique distances
1875  	 *
1876  	 * The sched_domains_numa_distance[] array includes the actual distance
1877  	 * numbers.
1878  	 */
1879  
1880  	/*
1881  	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
1882  	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
1883  	 * If allocating memory for the sched_domains_numa_masks[][] array fails,
1884  	 * the array will contain fewer than 'nr_levels' members. This could be
1885  	 * dangerous when we use it to iterate the sched_domains_numa_masks[][]
1886  	 * array in other functions.
1887  	 * We reset it to 'nr_levels' at the end of this function.
1888  	 */
1889  	sched_domains_numa_levels = 0;
1890  
1891  	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
1892  	if (!masks)
1893  		return;
1894  
1895  	/*
1896  	 * Now for each level, construct a mask per node which contains all
1897  	 * CPUs of nodes that are that many hops away from us.
1898  	 */
1899  	for (i = 0; i < nr_levels; i++) {
1900  		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
1901  		if (!masks[i])
1902  			return;
1903  
1904  		for_each_cpu_node_but(j, offline_node) {
1905  			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
1906  			int k;
1907  
1908  			if (!mask)
1909  				return;
1910  
1911  			masks[i][j] = mask;
1912  
1913  			for_each_cpu_node_but(k, offline_node) {
1914  				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
1915  					sched_numa_warn("Node-distance not symmetric");
1916  
1917  				if (node_distance(j, k) > sched_domains_numa_distance[i])
1918  					continue;
1919  
1920  				cpumask_or(mask, mask, cpumask_of_node(k));
1921  			}
1922  		}
1923  	}
1924  	rcu_assign_pointer(sched_domains_numa_masks, masks);
1925  
1926  	/* Compute default topology size */
1927  	for (i = 0; sched_domain_topology[i].mask; i++);
1928  
1929  	tl = kzalloc((i + nr_levels + 1) *
1930  			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
1931  	if (!tl)
1932  		return;
1933  
1934  	/*
1935  	 * Copy the default topology bits..
1936  	 */
1937  	for (i = 0; sched_domain_topology[i].mask; i++)
1938  		tl[i] = sched_domain_topology[i];
1939  
1940  	/*
1941  	 * Add the NUMA identity distance, aka single NODE.
1942  	 */
1943  	tl[i++] = (struct sched_domain_topology_level){
1944  		.mask = sd_numa_mask,
1945  		.numa_level = 0,
1946  		SD_INIT_NAME(NODE)
1947  	};
1948  
1949  	/*
1950  	 * .. and append 'j' levels of NUMA goodness.
1951  	 */
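      	/*
      	 * These levels span multiple nodes, so they are marked SDTL_OVERLAP:
      	 * their per-CPU spans may overlap, which selects the SD_OVERLAP
      	 * group construction in build_sched_domains().
      	 */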
1952  	for (j = 1; j < nr_levels; i++, j++) {
1953  		tl[i] = (struct sched_domain_topology_level){
1954  			.mask = sd_numa_mask,
1955  			.sd_flags = cpu_numa_flags,
1956  			.flags = SDTL_OVERLAP,
1957  			.numa_level = j,
1958  			SD_INIT_NAME(NUMA)
1959  		};
1960  	}
1961  
1962  	sched_domain_topology_saved = sched_domain_topology;
1963  	sched_domain_topology = tl;
1964  
1965  	sched_domains_numa_levels = nr_levels;
1966  	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
1967  
1968  	init_numa_topology_type(offline_node);
1969  }
1970  
1971  
1972  static void sched_reset_numa(void)
1973  {
1974  	int nr_levels, *distances;
1975  	struct cpumask ***masks;
1976  
1977  	nr_levels = sched_domains_numa_levels;
1978  	sched_domains_numa_levels = 0;
1979  	sched_max_numa_distance = 0;
1980  	sched_numa_topology_type = NUMA_DIRECT;
1981  	distances = sched_domains_numa_distance;
1982  	rcu_assign_pointer(sched_domains_numa_distance, NULL);
1983  	masks = sched_domains_numa_masks;
1984  	rcu_assign_pointer(sched_domains_numa_masks, NULL);
1985  	if (distances || masks) {
1986  		int i, j;
1987  
1988  		synchronize_rcu();
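      		/* Wait for concurrent RCU readers of the old pointers before freeing. */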
1989  		kfree(distances);
1990  		for (i = 0; i < nr_levels && masks; i++) {
1991  			if (!masks[i])
1992  				continue;
1993  			for_each_node(j)
1994  				kfree(masks[i][j]);
1995  			kfree(masks[i]);
1996  		}
1997  		kfree(masks);
1998  	}
1999  	if (sched_domain_topology_saved) {
2000  		kfree(sched_domain_topology);
2001  		sched_domain_topology = sched_domain_topology_saved;
2002  		sched_domain_topology_saved = NULL;
2003  	}
2004  }
2005  
2006  /*
2007   * Call with hotplug lock held
2008   */
2009  void sched_update_numa(int cpu, bool online)
2010  {
2011  	int node;
2012  
2013  	node = cpu_to_node(cpu);
2014  	/*
2015  	 * Scheduler NUMA topology is updated when the first CPU of a
2016  	 * node is onlined or the last CPU of a node is offlined.
2017  	 */
2018  	if (cpumask_weight(cpumask_of_node(node)) != 1)
2019  		return;
2020  
2021  	sched_reset_numa();
2022  	sched_init_numa(online ? NUMA_NO_NODE : node);
2023  }
2024  
2025  void sched_domains_numa_masks_set(unsigned int cpu)
2026  {
2027  	int node = cpu_to_node(cpu);
2028  	int i, j;
2029  
2030  	for (i = 0; i < sched_domains_numa_levels; i++) {
2031  		for (j = 0; j < nr_node_ids; j++) {
2032  			if (!node_state(j, N_CPU))
2033  				continue;
2034  
2035  			/* Set ourselves in the remote node's masks */
2036  			if (node_distance(j, node) <= sched_domains_numa_distance[i])
2037  				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
2038  		}
2039  	}
2040  }
2041  
2042  void sched_domains_numa_masks_clear(unsigned int cpu)
2043  {
2044  	int i, j;
2045  
2046  	for (i = 0; i < sched_domains_numa_levels; i++) {
2047  		for (j = 0; j < nr_node_ids; j++) {
2048  			if (sched_domains_numa_masks[i][j])
2049  				cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
2050  		}
2051  	}
2052  }
2053  
2054  /*
2055   * sched_numa_find_closest() - given the NUMA topology, find the cpu
2056   *                             closest to @cpu from @cpus.
2057   * @cpus: cpumask to find a cpu from
2058   * @cpu: cpu to be close to
2059   *
2060   * returns: cpu, or nr_cpu_ids when nothing found.
2061   */
2062  int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
2063  {
2064  	int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
2065  	struct cpumask ***masks;
2066  
2067  	rcu_read_lock();
2068  	masks = rcu_dereference(sched_domains_numa_masks);
2069  	if (!masks)
2070  		goto unlock;
2071  	for (i = 0; i < sched_domains_numa_levels; i++) {
2072  		if (!masks[i][j])
2073  			break;
2074  		cpu = cpumask_any_and(cpus, masks[i][j]);
2075  		if (cpu < nr_cpu_ids) {
2076  			found = cpu;
2077  			break;
2078  		}
2079  	}
2080  unlock:
2081  	rcu_read_unlock();
2082  
2083  	return found;
2084  }
2085  
2086  struct __cmp_key {
2087  	const struct cpumask *cpus;
2088  	struct cpumask ***masks;
2089  	int node;
2090  	int cpu;
2091  	int w;
2092  };
2093  
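      /*
       * bsearch() comparator used by sched_numa_find_nth_cpu(). @b points at an
       * entry of sched_domains_numa_masks (one per hop level, ordered by
       * increasing distance). Return 1 to keep searching farther hops while the
       * CPUs of @cpus covered at this hop are still <= the requested index,
       * 0 once this hop covers enough CPUs (k->w records how many CPUs the
       * previous hop covered), and -1 to search closer hops.
       */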
2094  static int hop_cmp(const void *a, const void *b)
2095  {
2096  	struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b;
2097  	struct __cmp_key *k = (struct __cmp_key *)a;
2098  
2099  	if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
2100  		return 1;
2101  
2102  	if (b == k->masks) {
2103  		k->w = 0;
2104  		return 0;
2105  	}
2106  
2107  	prev_hop = *((struct cpumask ***)b - 1);
2108  	k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
2109  	if (k->w <= k->cpu)
2110  		return 0;
2111  
2112  	return -1;
2113  }
2114  
2115  /*
2116   * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth CPU in
2117   *                             @cpus, ordered by increasing NUMA distance from @node.
2118   * @cpus: cpumask to find a cpu from
2119   * @cpu: the Nth cpu to find (0-based)
2120   *
2121   * returns: cpu, or nr_cpu_ids when nothing found.
2122   */
2123  int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
2124  {
2125  	struct __cmp_key k = { .cpus = cpus, .cpu = cpu };
2126  	struct cpumask ***hop_masks;
2127  	int hop, ret = nr_cpu_ids;
2128  
2129  	if (node == NUMA_NO_NODE)
2130  		return cpumask_nth_and(cpu, cpus, cpu_online_mask);
2131  
2132  	rcu_read_lock();
2133  
2134  	/* CPU-less node entries are uninitialized in sched_domains_numa_masks */
2135  	node = numa_nearest_node(node, N_CPU);
2136  	k.node = node;
2137  
2138  	k.masks = rcu_dereference(sched_domains_numa_masks);
2139  	if (!k.masks)
2140  		goto unlock;
2141  
2142  	hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
2143  	hop = hop_masks - k.masks;
2144  
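      	/*
      	 * Within the hop level found above, skip the k.w CPUs already covered
      	 * by closer hops and take the (cpu - k.w)'th remaining CPU; at hop 0
      	 * there is nothing to exclude.
      	 */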
2145  	ret = hop ?
2146  		cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
2147  		cpumask_nth_and(cpu, cpus, k.masks[0][node]);
2148  unlock:
2149  	rcu_read_unlock();
2150  	return ret;
2151  }
2152  EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
2153  
2154  /**
2155   * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
2156   *                         @node
2157   * @node: The node to count hops from.
2158   * @hops: Include CPUs up to that many hops away. 0 means local node.
2159   *
2160   * Return: On success, a pointer to a cpumask of CPUs at most @hops away from
2161   * @node, an error value otherwise.
2162   *
2163   * Requires rcu_read_lock() to be held. Returned cpumask is only valid within that
2164   * read-side section, copy it if required beyond that.
2165   *
2166   * Note that not all hops are equal in distance; see sched_init_numa() for how
2167   * distances and masks are handled.
2168   * Also note that this is a reflection of sched_domains_numa_masks, which may change
2169   * during the lifetime of the system (offline nodes are taken out of the masks).
2170   */
2171  const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
2172  {
2173  	struct cpumask ***masks;
2174  
2175  	if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
2176  		return ERR_PTR(-EINVAL);
2177  
2178  	masks = rcu_dereference(sched_domains_numa_masks);
2179  	if (!masks)
2180  		return ERR_PTR(-EBUSY);
2181  
2182  	return masks[hops][node];
2183  }
2184  EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
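
      /*
       * Illustrative usage sketch (hypothetical caller code, not part of this
       * file): walk CPUs outward from @node one hop level at a time, under RCU.
       *
       *	const struct cpumask *mask;
       *	unsigned int hop;
       *
       *	rcu_read_lock();
       *	for (hop = 0; ; hop++) {
       *		mask = sched_numa_hop_mask(node, hop);
       *		if (IS_ERR(mask))
       *			break;
       *		pr_info("<= %u hops from node %u: %*pbl\n", hop, node,
       *			cpumask_pr_args(mask));
       *	}
       *	rcu_read_unlock();
       */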
2185  
2186  #endif /* CONFIG_NUMA */
2187  
2188  static int __sdt_alloc(const struct cpumask *cpu_map)
2189  {
2190  	struct sched_domain_topology_level *tl;
2191  	int j;
2192  
2193  	for_each_sd_topology(tl) {
2194  		struct sd_data *sdd = &tl->data;
2195  
2196  		sdd->sd = alloc_percpu(struct sched_domain *);
2197  		if (!sdd->sd)
2198  			return -ENOMEM;
2199  
2200  		sdd->sds = alloc_percpu(struct sched_domain_shared *);
2201  		if (!sdd->sds)
2202  			return -ENOMEM;
2203  
2204  		sdd->sg = alloc_percpu(struct sched_group *);
2205  		if (!sdd->sg)
2206  			return -ENOMEM;
2207  
2208  		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
2209  		if (!sdd->sgc)
2210  			return -ENOMEM;
2211  
2212  		for_each_cpu(j, cpu_map) {
2213  			struct sched_domain *sd;
2214  			struct sched_domain_shared *sds;
2215  			struct sched_group *sg;
2216  			struct sched_group_capacity *sgc;
2217  
2218  			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
2219  					GFP_KERNEL, cpu_to_node(j));
2220  			if (!sd)
2221  				return -ENOMEM;
2222  
2223  			*per_cpu_ptr(sdd->sd, j) = sd;
2224  
2225  			sds = kzalloc_node(sizeof(struct sched_domain_shared),
2226  					GFP_KERNEL, cpu_to_node(j));
2227  			if (!sds)
2228  				return -ENOMEM;
2229  
2230  			*per_cpu_ptr(sdd->sds, j) = sds;
2231  
2232  			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
2233  					GFP_KERNEL, cpu_to_node(j));
2234  			if (!sg)
2235  				return -ENOMEM;
2236  
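      			/* Groups form a circular singly-linked list; start with just this group. */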
2237  			sg->next = sg;
2238  
2239  			*per_cpu_ptr(sdd->sg, j) = sg;
2240  
2241  			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
2242  					GFP_KERNEL, cpu_to_node(j));
2243  			if (!sgc)
2244  				return -ENOMEM;
2245  
2246  #ifdef CONFIG_SCHED_DEBUG
2247  			sgc->id = j;
2248  #endif
2249  
2250  			*per_cpu_ptr(sdd->sgc, j) = sgc;
2251  		}
2252  	}
2253  
2254  	return 0;
2255  }
2256  
2257  static void __sdt_free(const struct cpumask *cpu_map)
2258  {
2259  	struct sched_domain_topology_level *tl;
2260  	int j;
2261  
2262  	for_each_sd_topology(tl) {
2263  		struct sd_data *sdd = &tl->data;
2264  
2265  		for_each_cpu(j, cpu_map) {
2266  			struct sched_domain *sd;
2267  
2268  			if (sdd->sd) {
2269  				sd = *per_cpu_ptr(sdd->sd, j);
2270  				if (sd && (sd->flags & SD_OVERLAP))
2271  					free_sched_groups(sd->groups, 0);
2272  				kfree(*per_cpu_ptr(sdd->sd, j));
2273  			}
2274  
2275  			if (sdd->sds)
2276  				kfree(*per_cpu_ptr(sdd->sds, j));
2277  			if (sdd->sg)
2278  				kfree(*per_cpu_ptr(sdd->sg, j));
2279  			if (sdd->sgc)
2280  				kfree(*per_cpu_ptr(sdd->sgc, j));
2281  		}
2282  		free_percpu(sdd->sd);
2283  		sdd->sd = NULL;
2284  		free_percpu(sdd->sds);
2285  		sdd->sds = NULL;
2286  		free_percpu(sdd->sg);
2287  		sdd->sg = NULL;
2288  		free_percpu(sdd->sgc);
2289  		sdd->sgc = NULL;
2290  	}
2291  }
2292  
2293  static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
2294  		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
2295  		struct sched_domain *child, int cpu)
2296  {
2297  	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
2298  
2299  	if (child) {
2300  		sd->level = child->level + 1;
2301  		sched_domain_level_max = max(sched_domain_level_max, sd->level);
2302  		child->parent = sd;
2303  
2304  		if (!cpumask_subset(sched_domain_span(child),
2305  				    sched_domain_span(sd))) {
2306  			pr_err("BUG: arch topology borken\n");
2307  #ifdef CONFIG_SCHED_DEBUG
2308  			pr_err("     the %s domain not a subset of the %s domain\n",
2309  					child->name, sd->name);
2310  #endif
2311  			/* Fixup, ensure @sd has at least @child CPUs. */
2312  			cpumask_or(sched_domain_span(sd),
2313  				   sched_domain_span(sd),
2314  				   sched_domain_span(child));
2315  		}
2316  
2317  	}
2318  	set_domain_attribute(sd, attr);
2319  
2320  	return sd;
2321  }
2322  
2323  /*
2324   * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
2325   * any two given CPUs at this (non-NUMA) topology level.
2326   */
2327  static bool topology_span_sane(struct sched_domain_topology_level *tl,
2328  			      const struct cpumask *cpu_map, int cpu)
2329  {
2330  	int i;
2331  
2332  	/* NUMA levels are allowed to overlap */
2333  	if (tl->flags & SDTL_OVERLAP)
2334  		return true;
2335  
2336  	/*
2337  	 * Non-NUMA levels cannot partially overlap - they must be either
2338  	 * completely equal or completely disjoint. Otherwise we can end up
2339  	 * breaking the sched_group lists - i.e. a later get_group() pass
2340  	 * breaks the linking done for an earlier span.
2341  	 */
2342  	for_each_cpu(i, cpu_map) {
2343  		if (i == cpu)
2344  			continue;
2345  		/*
2346  		 * We should 'and' all those masks with 'cpu_map' to exactly
2347  		 * match the topology we're about to build, but that can only
2348  		 * remove CPUs, which only lessens our ability to detect
2349  		 * overlaps
2350  		 */
2351  		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
2352  		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
2353  			return false;
2354  	}
2355  
2356  	return true;
2357  }
2358  
2359  /*
2360   * Build sched domains for a given set of CPUs and attach the sched domains
2361   * to the individual CPUs
2362   */
2363  static int
2364  build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
2365  {
2366  	enum s_alloc alloc_state = sa_none;
2367  	struct sched_domain *sd;
2368  	struct s_data d;
2369  	struct rq *rq = NULL;
2370  	int i, ret = -ENOMEM;
2371  	bool has_asym = false;
2372  
2373  	if (WARN_ON(cpumask_empty(cpu_map)))
2374  		goto error;
2375  
2376  	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
2377  	if (alloc_state != sa_rootdomain)
2378  		goto error;
2379  
2380  	/* Set up domains for CPUs specified by the cpu_map: */
2381  	for_each_cpu(i, cpu_map) {
2382  		struct sched_domain_topology_level *tl;
2383  
2384  		sd = NULL;
2385  		for_each_sd_topology(tl) {
2386  
2387  			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
2388  				goto error;
2389  
2390  			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
2391  
2392  			has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
2393  
2394  			if (tl == sched_domain_topology)
2395  				*per_cpu_ptr(d.sd, i) = sd;
2396  			if (tl->flags & SDTL_OVERLAP)
2397  				sd->flags |= SD_OVERLAP;
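      			/*
      			 * Once a domain spans all of cpu_map, any higher level
      			 * would be degenerate; stop stacking domains here.
      			 */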
2398  			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
2399  				break;
2400  		}
2401  	}
2402  
2403  	/* Build the groups for the domains */
2404  	for_each_cpu(i, cpu_map) {
2405  		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2406  			sd->span_weight = cpumask_weight(sched_domain_span(sd));
2407  			if (sd->flags & SD_OVERLAP) {
2408  				if (build_overlap_sched_groups(sd, i))
2409  					goto error;
2410  			} else {
2411  				if (build_sched_groups(sd, i))
2412  					goto error;
2413  			}
2414  		}
2415  	}
2416  
2417  	/*
2418  	 * Calculate an allowed NUMA imbalance such that LLCs do not get
2419  	 * imbalanced.
2420  	 */
2421  	for_each_cpu(i, cpu_map) {
2422  		unsigned int imb = 0;
2423  		unsigned int imb_span = 1;
2424  
2425  		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2426  			struct sched_domain *child = sd->child;
2427  
2428  			if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child &&
2429  			    (child->flags & SD_SHARE_PKG_RESOURCES)) {
2430  				struct sched_domain __rcu *top_p;
2431  				unsigned int nr_llcs;
2432  
2433  				/*
2434  				 * For a single LLC per node, allow an
2435  				 * imbalance up to 12.5% of the node. This is an
2436  				 * arbitrary cutoff based on two factors -- SMT and
2437  				 * memory channels. For SMT-2, the intent is to
2438  				 * avoid premature sharing of HT resources but
2439  				 * SMT-4 or SMT-8 *may* benefit from a different
2440  				 * cutoff. For memory channels, this is a very
2441  				 * rough estimate of how many channels may be
2442  				 * active and is based on recent CPUs with
2443  				 * many cores.
2444  				 *
2445  				 * For multiple LLCs, allow an imbalance
2446  				 * until multiple tasks would share an LLC
2447  				 * on one node while LLCs on another node
2448  				 * remain idle. This assumes that there are
2449  				 * enough logical CPUs per LLC to avoid SMT
2450  				 * factors and that there is a correlation
2451  				 * between LLCs and memory channels.
2452  				 */
2453  				nr_llcs = sd->span_weight / child->span_weight;
2454  				if (nr_llcs == 1)
2455  					imb = sd->span_weight >> 3;
2456  				else
2457  					imb = nr_llcs;
2458  				imb = max(1U, imb);
2459  				sd->imb_numa_nr = imb;
2460  
2461  				/* Set span based on the first NUMA domain. */
2462  				top_p = sd->parent;
2463  				while (top_p && !(top_p->flags & SD_NUMA)) {
2464  					top_p = top_p->parent;
2465  				}
2466  				imb_span = top_p ? top_p->span_weight : sd->span_weight;
2467  			} else {
2468  				int factor = max(1U, (sd->span_weight / imb_span));
2469  
2470  				sd->imb_numa_nr = imb * factor;
2471  			}
2472  		}
2473  	}
2474  
2475  	/* Calculate CPU capacity for physical packages and nodes */
2476  	for (i = nr_cpumask_bits-1; i >= 0; i--) {
2477  		if (!cpumask_test_cpu(i, cpu_map))
2478  			continue;
2479  
2480  		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2481  			claim_allocations(i, sd);
2482  			init_sched_groups_capacity(i, sd);
2483  		}
2484  	}
2485  
2486  	/* Attach the domains */
2487  	rcu_read_lock();
2488  	for_each_cpu(i, cpu_map) {
2489  		rq = cpu_rq(i);
2490  		sd = *per_cpu_ptr(d.sd, i);
2491  
2492  		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
2493  		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
2494  			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
2495  
2496  		cpu_attach_domain(sd, d.rd, i);
2497  	}
2498  	rcu_read_unlock();
2499  
2500  	if (has_asym)
2501  		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
2502  
2503  	if (rq && sched_debug_verbose) {
2504  		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
2505  			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
2506  	}
2507  
2508  	ret = 0;
2509  error:
2510  	__free_domain_allocs(&d, alloc_state, cpu_map);
2511  
2512  	return ret;
2513  }
2514  
2515  /* Current sched domains: */
2516  static cpumask_var_t			*doms_cur;
2517  
2518  /* Number of sched domains in 'doms_cur': */
2519  static int				ndoms_cur;
2520  
2521  /* Attributes of custom domains in 'doms_cur' */
2522  static struct sched_domain_attr		*dattr_cur;
2523  
2524  /*
2525   * Special case: If a kmalloc() of a doms_cur partition (array of
2526   * cpumask) fails, then fallback to a single sched domain,
2527   * as determined by the single cpumask fallback_doms.
2528   */
2529  static cpumask_var_t			fallback_doms;
2530  
2531  /*
2532   * arch_update_cpu_topology lets virtualized architectures update the
2533   * CPU core maps. It is supposed to return 1 if the topology changed
2534   * or 0 if it stayed the same.
2535   */
2536  int __weak arch_update_cpu_topology(void)
2537  {
2538  	return 0;
2539  }
2540  
2541  cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
2542  {
2543  	int i;
2544  	cpumask_var_t *doms;
2545  
2546  	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
2547  	if (!doms)
2548  		return NULL;
2549  	for (i = 0; i < ndoms; i++) {
2550  		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
2551  			free_sched_domains(doms, i);
2552  			return NULL;
2553  		}
2554  	}
2555  	return doms;
2556  }
2557  
2558  void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2559  {
2560  	unsigned int i;
2561  	for (i = 0; i < ndoms; i++)
2562  		free_cpumask_var(doms[i]);
2563  	kfree(doms);
2564  }
2565  
2566  /*
2567   * Set up scheduler domains and groups.  For now this just excludes isolated
2568   * CPUs, but could be used to exclude other special cases in the future.
2569   */
2570  int __init sched_init_domains(const struct cpumask *cpu_map)
2571  {
2572  	int err;
2573  
2574  	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
2575  	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
2576  	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
2577  
2578  	arch_update_cpu_topology();
2579  	asym_cpu_capacity_scan();
2580  	ndoms_cur = 1;
2581  	doms_cur = alloc_sched_domains(ndoms_cur);
2582  	if (!doms_cur)
2583  		doms_cur = &fallback_doms;
2584  	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
2585  	err = build_sched_domains(doms_cur[0], NULL);
2586  
2587  	return err;
2588  }
2589  
2590  /*
2591   * Detach sched domains from a group of CPUs specified in cpu_map
2592   * These CPUs will now be attached to the NULL domain
2593   */
2594  static void detach_destroy_domains(const struct cpumask *cpu_map)
2595  {
2596  	unsigned int cpu = cpumask_any(cpu_map);
2597  	int i;
2598  
2599  	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
2600  		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
2601  
2602  	rcu_read_lock();
2603  	for_each_cpu(i, cpu_map)
2604  		cpu_attach_domain(NULL, &def_root_domain, i);
2605  	rcu_read_unlock();
2606  }
2607  
2608  /* handle null as "default" */
2609  static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
2610  			struct sched_domain_attr *new, int idx_new)
2611  {
2612  	struct sched_domain_attr tmp;
2613  
2614  	/* Fast path: */
2615  	if (!new && !cur)
2616  		return 1;
2617  
2618  	tmp = SD_ATTR_INIT;
2619  
2620  	return !memcmp(cur ? (cur + idx_cur) : &tmp,
2621  			new ? (new + idx_new) : &tmp,
2622  			sizeof(struct sched_domain_attr));
2623  }
2624  
2625  /*
2626   * Partition sched domains as specified by the 'ndoms_new'
2627   * cpumasks in the array doms_new[] of cpumasks. This compares
2628   * doms_new[] to the current sched domain partitioning, doms_cur[].
2629   * It destroys each deleted domain and builds each new domain.
2630   *
2631   * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
2632   * The masks don't intersect (don't overlap). We should set up one
2633   * sched domain for each mask. CPUs not in any of the cpumasks will
2634   * not be load balanced. If the same cpumask appears both in the
2635   * current 'doms_cur' domains and in the new 'doms_new', we can leave
2636   * it as it is.
2637   *
2638   * The passed in 'doms_new' should be allocated using
2639   * alloc_sched_domains.  This routine takes ownership of it and will
2640   * free_sched_domains it when done with it. If the caller failed the
2641   * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2642   * and partition_sched_domains() will fall back to the single partition
2643   * 'fallback_doms'; this also forces the domains to be rebuilt.
2644   *
2645   * If doms_new == NULL it will be replaced with cpu_online_mask.
2646   * ndoms_new == 0 is a special case for destroying existing domains,
2647   * and it will not create the default domain.
2648   *
2649   * Call with hotplug lock and sched_domains_mutex held
2650   */
2651  void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
2652  				    struct sched_domain_attr *dattr_new)
2653  {
2654  	bool __maybe_unused has_eas = false;
2655  	int i, j, n;
2656  	int new_topology;
2657  
2658  	lockdep_assert_held(&sched_domains_mutex);
2659  
2660  	/* Let the architecture update CPU core mappings: */
2661  	new_topology = arch_update_cpu_topology();
2662  	/* Trigger rebuilding CPU capacity asymmetry data */
2663  	if (new_topology)
2664  		asym_cpu_capacity_scan();
2665  
2666  	if (!doms_new) {
2667  		WARN_ON_ONCE(dattr_new);
2668  		n = 0;
2669  		doms_new = alloc_sched_domains(1);
2670  		if (doms_new) {
2671  			n = 1;
2672  			cpumask_and(doms_new[0], cpu_active_mask,
2673  				    housekeeping_cpumask(HK_TYPE_DOMAIN));
2674  		}
2675  	} else {
2676  		n = ndoms_new;
2677  	}
2678  
2679  	/* Destroy deleted domains: */
2680  	for (i = 0; i < ndoms_cur; i++) {
2681  		for (j = 0; j < n && !new_topology; j++) {
2682  			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
2683  			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
2684  				struct root_domain *rd;
2685  
2686  				/*
2687  				 * This domain won't be destroyed and as such
2688  				 * its dl_bw->total_bw needs to be cleared.  It
2689  				 * will be recomputed in function
2690  				 * update_tasks_root_domain().
2691  				 */
2692  				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2693  				dl_clear_root_domain(rd);
2694  				goto match1;
2695  			}
2696  		}
2697  		/* No match - a current sched domain not in new doms_new[] */
2698  		detach_destroy_domains(doms_cur[i]);
2699  match1:
2700  		;
2701  	}
2702  
2703  	n = ndoms_cur;
2704  	if (!doms_new) {
2705  		n = 0;
2706  		doms_new = &fallback_doms;
2707  		cpumask_and(doms_new[0], cpu_active_mask,
2708  			    housekeeping_cpumask(HK_TYPE_DOMAIN));
2709  	}
2710  
2711  	/* Build new domains: */
2712  	for (i = 0; i < ndoms_new; i++) {
2713  		for (j = 0; j < n && !new_topology; j++) {
2714  			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2715  			    dattrs_equal(dattr_new, i, dattr_cur, j))
2716  				goto match2;
2717  		}
2718  		/* No match - add a new doms_new */
2719  		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2720  match2:
2721  		;
2722  	}
2723  
2724  #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2725  	/* Build perf. domains: */
2726  	for (i = 0; i < ndoms_new; i++) {
2727  		for (j = 0; j < n && !sched_energy_update; j++) {
2728  			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2729  			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
2730  				has_eas = true;
2731  				goto match3;
2732  			}
2733  		}
2734  		/* No match - add perf. domains for a new rd */
2735  		has_eas |= build_perf_domains(doms_new[i]);
2736  match3:
2737  		;
2738  	}
2739  	sched_energy_set(has_eas);
2740  #endif
2741  
2742  	/* Remember the new sched domains: */
2743  	if (doms_cur != &fallback_doms)
2744  		free_sched_domains(doms_cur, ndoms_cur);
2745  
2746  	kfree(dattr_cur);
2747  	doms_cur = doms_new;
2748  	dattr_cur = dattr_new;
2749  	ndoms_cur = ndoms_new;
2750  
2751  	update_sched_domain_debugfs();
2752  }
2753  
2754  /*
2755   * Call with hotplug lock held
2756   */
2757  void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2758  			     struct sched_domain_attr *dattr_new)
2759  {
2760  	mutex_lock(&sched_domains_mutex);
2761  	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
2762  	mutex_unlock(&sched_domains_mutex);
2763  }
2764