xref: /openbmc/linux/kernel/sched/topology.c (revision 04d4e665a60902cf36e7ad39af1179cb5df542ad)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2f2cb1360SIngo Molnar /*
3f2cb1360SIngo Molnar  * Scheduler topology setup/handling methods
4f2cb1360SIngo Molnar  */
5f2cb1360SIngo Molnar #include "sched.h"
6f2cb1360SIngo Molnar 
7f2cb1360SIngo Molnar DEFINE_MUTEX(sched_domains_mutex);
8f2cb1360SIngo Molnar 
9f2cb1360SIngo Molnar /* Protected by sched_domains_mutex: */
10ace80310Szhong jiang static cpumask_var_t sched_domains_tmpmask;
11ace80310Szhong jiang static cpumask_var_t sched_domains_tmpmask2;
12f2cb1360SIngo Molnar 
13f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
14f2cb1360SIngo Molnar 
15f2cb1360SIngo Molnar static int __init sched_debug_setup(char *str)
16f2cb1360SIngo Molnar {
179406415fSPeter Zijlstra 	sched_debug_verbose = true;
18f2cb1360SIngo Molnar 
19f2cb1360SIngo Molnar 	return 0;
20f2cb1360SIngo Molnar }
219406415fSPeter Zijlstra early_param("sched_verbose", sched_debug_setup);
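/*
 * Usage note: given the early_param() above, booting with "sched_verbose"
 * on the kernel command line sets sched_debug_verbose, so that
 * sched_domain_debug() below dumps the sched_domain/sched_group tree
 * whenever domains are (re)built.
 */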
22f2cb1360SIngo Molnar 
23f2cb1360SIngo Molnar static inline bool sched_debug(void)
24f2cb1360SIngo Molnar {
259406415fSPeter Zijlstra 	return sched_debug_verbose;
26f2cb1360SIngo Molnar }
27f2cb1360SIngo Molnar 
28848785dfSValentin Schneider #define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
29848785dfSValentin Schneider const struct sd_flag_debug sd_flag_debug[] = {
30848785dfSValentin Schneider #include <linux/sched/sd_flags.h>
31848785dfSValentin Schneider };
32848785dfSValentin Schneider #undef SD_FLAG
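/*
 * For illustration: with the SD_FLAG() definition above, each flag declared
 * in <linux/sched/sd_flags.h> expands into one table entry, roughly (the
 * exact meta_flags come from sd_flags.h):
 *
 *   [__SD_BALANCE_NEWIDLE] = { .meta_flags = SDF_SHARED_CHILD | SDF_NEEDS_GROUPS,
 *                              .name       = "SD_BALANCE_NEWIDLE" },
 *
 * yielding a name/metadata table indexed by flag bit number, used by the
 * debug checks below.
 */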
33848785dfSValentin Schneider 
34f2cb1360SIngo Molnar static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
35f2cb1360SIngo Molnar 				  struct cpumask *groupmask)
36f2cb1360SIngo Molnar {
37f2cb1360SIngo Molnar 	struct sched_group *group = sd->groups;
3865c5e253SValentin Schneider 	unsigned long flags = sd->flags;
3965c5e253SValentin Schneider 	unsigned int idx;
40f2cb1360SIngo Molnar 
41f2cb1360SIngo Molnar 	cpumask_clear(groupmask);
42f2cb1360SIngo Molnar 
43005f874dSPeter Zijlstra 	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
44005f874dSPeter Zijlstra 	printk(KERN_CONT "span=%*pbl level=%s\n",
45f2cb1360SIngo Molnar 	       cpumask_pr_args(sched_domain_span(sd)), sd->name);
46f2cb1360SIngo Molnar 
47f2cb1360SIngo Molnar 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
4897fb7a0aSIngo Molnar 		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
49f2cb1360SIngo Molnar 	}
506cd0c583SYi Wang 	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
5197fb7a0aSIngo Molnar 		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
52f2cb1360SIngo Molnar 	}
53f2cb1360SIngo Molnar 
5465c5e253SValentin Schneider 	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
5565c5e253SValentin Schneider 		unsigned int flag = BIT(idx);
5665c5e253SValentin Schneider 		unsigned int meta_flags = sd_flag_debug[idx].meta_flags;
5765c5e253SValentin Schneider 
5865c5e253SValentin Schneider 		if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
5965c5e253SValentin Schneider 		    !(sd->child->flags & flag))
6065c5e253SValentin Schneider 			printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
6165c5e253SValentin Schneider 			       sd_flag_debug[idx].name);
6265c5e253SValentin Schneider 
6365c5e253SValentin Schneider 		if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
6465c5e253SValentin Schneider 		    !(sd->parent->flags & flag))
6565c5e253SValentin Schneider 			printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
6665c5e253SValentin Schneider 			       sd_flag_debug[idx].name);
6765c5e253SValentin Schneider 	}
6865c5e253SValentin Schneider 
69f2cb1360SIngo Molnar 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
70f2cb1360SIngo Molnar 	do {
71f2cb1360SIngo Molnar 		if (!group) {
72f2cb1360SIngo Molnar 			printk("\n");
73f2cb1360SIngo Molnar 			printk(KERN_ERR "ERROR: group is NULL\n");
74f2cb1360SIngo Molnar 			break;
75f2cb1360SIngo Molnar 		}
76f2cb1360SIngo Molnar 
771087ad4eSYury Norov 		if (cpumask_empty(sched_group_span(group))) {
78f2cb1360SIngo Molnar 			printk(KERN_CONT "\n");
79f2cb1360SIngo Molnar 			printk(KERN_ERR "ERROR: empty group\n");
80f2cb1360SIngo Molnar 			break;
81f2cb1360SIngo Molnar 		}
82f2cb1360SIngo Molnar 
83f2cb1360SIngo Molnar 		if (!(sd->flags & SD_OVERLAP) &&
84ae4df9d6SPeter Zijlstra 		    cpumask_intersects(groupmask, sched_group_span(group))) {
85f2cb1360SIngo Molnar 			printk(KERN_CONT "\n");
86f2cb1360SIngo Molnar 			printk(KERN_ERR "ERROR: repeated CPUs\n");
87f2cb1360SIngo Molnar 			break;
88f2cb1360SIngo Molnar 		}
89f2cb1360SIngo Molnar 
90ae4df9d6SPeter Zijlstra 		cpumask_or(groupmask, groupmask, sched_group_span(group));
91f2cb1360SIngo Molnar 
92005f874dSPeter Zijlstra 		printk(KERN_CONT " %d:{ span=%*pbl",
93005f874dSPeter Zijlstra 				group->sgc->id,
94ae4df9d6SPeter Zijlstra 				cpumask_pr_args(sched_group_span(group)));
95b0151c25SPeter Zijlstra 
96af218122SPeter Zijlstra 		if ((sd->flags & SD_OVERLAP) &&
97ae4df9d6SPeter Zijlstra 		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
98005f874dSPeter Zijlstra 			printk(KERN_CONT " mask=%*pbl",
99e5c14b1fSPeter Zijlstra 				cpumask_pr_args(group_balance_mask(group)));
100b0151c25SPeter Zijlstra 		}
101b0151c25SPeter Zijlstra 
102005f874dSPeter Zijlstra 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
103005f874dSPeter Zijlstra 			printk(KERN_CONT " cap=%lu", group->sgc->capacity);
104f2cb1360SIngo Molnar 
105a420b063SPeter Zijlstra 		if (group == sd->groups && sd->child &&
106a420b063SPeter Zijlstra 		    !cpumask_equal(sched_domain_span(sd->child),
107ae4df9d6SPeter Zijlstra 				   sched_group_span(group))) {
108a420b063SPeter Zijlstra 			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
109a420b063SPeter Zijlstra 		}
110a420b063SPeter Zijlstra 
111005f874dSPeter Zijlstra 		printk(KERN_CONT " }");
112005f874dSPeter Zijlstra 
113f2cb1360SIngo Molnar 		group = group->next;
114b0151c25SPeter Zijlstra 
115b0151c25SPeter Zijlstra 		if (group != sd->groups)
116b0151c25SPeter Zijlstra 			printk(KERN_CONT ",");
117b0151c25SPeter Zijlstra 
118f2cb1360SIngo Molnar 	} while (group != sd->groups);
119f2cb1360SIngo Molnar 	printk(KERN_CONT "\n");
120f2cb1360SIngo Molnar 
121f2cb1360SIngo Molnar 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
122f2cb1360SIngo Molnar 		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
123f2cb1360SIngo Molnar 
124f2cb1360SIngo Molnar 	if (sd->parent &&
125f2cb1360SIngo Molnar 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
12697fb7a0aSIngo Molnar 		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
127f2cb1360SIngo Molnar 	return 0;
128f2cb1360SIngo Molnar }
129f2cb1360SIngo Molnar 
130f2cb1360SIngo Molnar static void sched_domain_debug(struct sched_domain *sd, int cpu)
131f2cb1360SIngo Molnar {
132f2cb1360SIngo Molnar 	int level = 0;
133f2cb1360SIngo Molnar 
1349406415fSPeter Zijlstra 	if (!sched_debug_verbose)
135f2cb1360SIngo Molnar 		return;
136f2cb1360SIngo Molnar 
137f2cb1360SIngo Molnar 	if (!sd) {
138f2cb1360SIngo Molnar 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
139f2cb1360SIngo Molnar 		return;
140f2cb1360SIngo Molnar 	}
141f2cb1360SIngo Molnar 
142005f874dSPeter Zijlstra 	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
143f2cb1360SIngo Molnar 
144f2cb1360SIngo Molnar 	for (;;) {
145f2cb1360SIngo Molnar 		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
146f2cb1360SIngo Molnar 			break;
147f2cb1360SIngo Molnar 		level++;
148f2cb1360SIngo Molnar 		sd = sd->parent;
149f2cb1360SIngo Molnar 		if (!sd)
150f2cb1360SIngo Molnar 			break;
151f2cb1360SIngo Molnar 	}
152f2cb1360SIngo Molnar }
153f2cb1360SIngo Molnar #else /* !CONFIG_SCHED_DEBUG */
154f2cb1360SIngo Molnar 
1559406415fSPeter Zijlstra # define sched_debug_verbose 0
156f2cb1360SIngo Molnar # define sched_domain_debug(sd, cpu) do { } while (0)
157f2cb1360SIngo Molnar static inline bool sched_debug(void)
158f2cb1360SIngo Molnar {
159f2cb1360SIngo Molnar 	return false;
160f2cb1360SIngo Molnar }
161f2cb1360SIngo Molnar #endif /* CONFIG_SCHED_DEBUG */
162f2cb1360SIngo Molnar 
1634fc472f1SValentin Schneider /* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
1644fc472f1SValentin Schneider #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
1654fc472f1SValentin Schneider static const unsigned int SD_DEGENERATE_GROUPS_MASK =
1664fc472f1SValentin Schneider #include <linux/sched/sd_flags.h>
1674fc472f1SValentin Schneider 0;
1684fc472f1SValentin Schneider #undef SD_FLAG
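/*
 * For illustration: each SD_FLAG(name, mflags) line in sd_flags.h expands to
 * "(name * !!((mflags) & SDF_NEEDS_GROUPS)) |", i.e. a flag contributes its
 * bit only when it carries SDF_NEEDS_GROUPS, and the trailing 0 terminates
 * the OR chain.  A hypothetical two-flag subset would expand to:
 *
 *   (SD_BALANCE_NEWIDLE * 1) | (SD_WAKE_AFFINE * 0) | 0
 */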
1694fc472f1SValentin Schneider 
170f2cb1360SIngo Molnar static int sd_degenerate(struct sched_domain *sd)
171f2cb1360SIngo Molnar {
172f2cb1360SIngo Molnar 	if (cpumask_weight(sched_domain_span(sd)) == 1)
173f2cb1360SIngo Molnar 		return 1;
174f2cb1360SIngo Molnar 
175f2cb1360SIngo Molnar 	/* Following flags need at least 2 groups */
1766f349818SValentin Schneider 	if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
1776f349818SValentin Schneider 	    (sd->groups != sd->groups->next))
178f2cb1360SIngo Molnar 		return 0;
179f2cb1360SIngo Molnar 
180f2cb1360SIngo Molnar 	/* Following flags don't use groups */
181f2cb1360SIngo Molnar 	if (sd->flags & (SD_WAKE_AFFINE))
182f2cb1360SIngo Molnar 		return 0;
183f2cb1360SIngo Molnar 
184f2cb1360SIngo Molnar 	return 1;
185f2cb1360SIngo Molnar }
186f2cb1360SIngo Molnar 
187f2cb1360SIngo Molnar static int
188f2cb1360SIngo Molnar sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
189f2cb1360SIngo Molnar {
190f2cb1360SIngo Molnar 	unsigned long cflags = sd->flags, pflags = parent->flags;
191f2cb1360SIngo Molnar 
192f2cb1360SIngo Molnar 	if (sd_degenerate(parent))
193f2cb1360SIngo Molnar 		return 1;
194f2cb1360SIngo Molnar 
195f2cb1360SIngo Molnar 	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
196f2cb1360SIngo Molnar 		return 0;
197f2cb1360SIngo Molnar 
198f2cb1360SIngo Molnar 	/* Flags needing groups don't count if only 1 group in parent */
199ab65afb0SValentin Schneider 	if (parent->groups == parent->groups->next)
2003a6712c7SValentin Schneider 		pflags &= ~SD_DEGENERATE_GROUPS_MASK;
201ab65afb0SValentin Schneider 
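	/*
	 * ~cflags & pflags is non-zero iff the parent has a flag set that the
	 * child does not; in that case the parent still adds something and is
	 * therefore not redundant.
	 */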
202f2cb1360SIngo Molnar 	if (~cflags & pflags)
203f2cb1360SIngo Molnar 		return 0;
204f2cb1360SIngo Molnar 
205f2cb1360SIngo Molnar 	return 1;
206f2cb1360SIngo Molnar }
207f2cb1360SIngo Molnar 
208531b5c9fSQuentin Perret #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
209f8a696f2SPeter Zijlstra DEFINE_STATIC_KEY_FALSE(sched_energy_present);
2108d5d0cfbSQuentin Perret unsigned int sysctl_sched_energy_aware = 1;
211531b5c9fSQuentin Perret DEFINE_MUTEX(sched_energy_mutex);
212531b5c9fSQuentin Perret bool sched_energy_update;
213531b5c9fSQuentin Perret 
21431f6a8c0SIonela Voinescu void rebuild_sched_domains_energy(void)
21531f6a8c0SIonela Voinescu {
21631f6a8c0SIonela Voinescu 	mutex_lock(&sched_energy_mutex);
21731f6a8c0SIonela Voinescu 	sched_energy_update = true;
21831f6a8c0SIonela Voinescu 	rebuild_sched_domains();
21931f6a8c0SIonela Voinescu 	sched_energy_update = false;
22031f6a8c0SIonela Voinescu 	mutex_unlock(&sched_energy_mutex);
22131f6a8c0SIonela Voinescu }
22231f6a8c0SIonela Voinescu 
2238d5d0cfbSQuentin Perret #ifdef CONFIG_PROC_SYSCTL
2248d5d0cfbSQuentin Perret int sched_energy_aware_handler(struct ctl_table *table, int write,
22532927393SChristoph Hellwig 		void *buffer, size_t *lenp, loff_t *ppos)
2268d5d0cfbSQuentin Perret {
2278d5d0cfbSQuentin Perret 	int ret, state;
2288d5d0cfbSQuentin Perret 
2298d5d0cfbSQuentin Perret 	if (write && !capable(CAP_SYS_ADMIN))
2308d5d0cfbSQuentin Perret 		return -EPERM;
2318d5d0cfbSQuentin Perret 
2328d5d0cfbSQuentin Perret 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2338d5d0cfbSQuentin Perret 	if (!ret && write) {
2348d5d0cfbSQuentin Perret 		state = static_branch_unlikely(&sched_energy_present);
23531f6a8c0SIonela Voinescu 		if (state != sysctl_sched_energy_aware)
23631f6a8c0SIonela Voinescu 			rebuild_sched_domains_energy();
2378d5d0cfbSQuentin Perret 	}
2388d5d0cfbSQuentin Perret 
2398d5d0cfbSQuentin Perret 	return ret;
2408d5d0cfbSQuentin Perret }
2418d5d0cfbSQuentin Perret #endif
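/*
 * Usage note: the handler above backs the kernel.sched_energy_aware sysctl
 * (/proc/sys/kernel/sched_energy_aware).  Toggling it between 0 and 1
 * rebuilds the sched domains with EAS disabled or re-enabled, e.g.:
 *
 *   echo 0 > /proc/sys/kernel/sched_energy_aware
 */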
2428d5d0cfbSQuentin Perret 
2436aa140faSQuentin Perret static void free_pd(struct perf_domain *pd)
2446aa140faSQuentin Perret {
2456aa140faSQuentin Perret 	struct perf_domain *tmp;
2466aa140faSQuentin Perret 
2476aa140faSQuentin Perret 	while (pd) {
2486aa140faSQuentin Perret 		tmp = pd->next;
2496aa140faSQuentin Perret 		kfree(pd);
2506aa140faSQuentin Perret 		pd = tmp;
2516aa140faSQuentin Perret 	}
2526aa140faSQuentin Perret }
2536aa140faSQuentin Perret 
2546aa140faSQuentin Perret static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
2556aa140faSQuentin Perret {
2566aa140faSQuentin Perret 	while (pd) {
2576aa140faSQuentin Perret 		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
2586aa140faSQuentin Perret 			return pd;
2596aa140faSQuentin Perret 		pd = pd->next;
2606aa140faSQuentin Perret 	}
2616aa140faSQuentin Perret 
2626aa140faSQuentin Perret 	return NULL;
2636aa140faSQuentin Perret }
2646aa140faSQuentin Perret 
2656aa140faSQuentin Perret static struct perf_domain *pd_init(int cpu)
2666aa140faSQuentin Perret {
2676aa140faSQuentin Perret 	struct em_perf_domain *obj = em_cpu_get(cpu);
2686aa140faSQuentin Perret 	struct perf_domain *pd;
2696aa140faSQuentin Perret 
2706aa140faSQuentin Perret 	if (!obj) {
2716aa140faSQuentin Perret 		if (sched_debug())
2726aa140faSQuentin Perret 			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
2736aa140faSQuentin Perret 		return NULL;
2746aa140faSQuentin Perret 	}
2756aa140faSQuentin Perret 
2766aa140faSQuentin Perret 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
2776aa140faSQuentin Perret 	if (!pd)
2786aa140faSQuentin Perret 		return NULL;
2796aa140faSQuentin Perret 	pd->em_pd = obj;
2806aa140faSQuentin Perret 
2816aa140faSQuentin Perret 	return pd;
2826aa140faSQuentin Perret }
2836aa140faSQuentin Perret 
2846aa140faSQuentin Perret static void perf_domain_debug(const struct cpumask *cpu_map,
2856aa140faSQuentin Perret 						struct perf_domain *pd)
2866aa140faSQuentin Perret {
2876aa140faSQuentin Perret 	if (!sched_debug() || !pd)
2886aa140faSQuentin Perret 		return;
2896aa140faSQuentin Perret 
2906aa140faSQuentin Perret 	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
2916aa140faSQuentin Perret 
2926aa140faSQuentin Perret 	while (pd) {
293521b512bSLukasz Luba 		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
2946aa140faSQuentin Perret 				cpumask_first(perf_domain_span(pd)),
2956aa140faSQuentin Perret 				cpumask_pr_args(perf_domain_span(pd)),
296521b512bSLukasz Luba 				em_pd_nr_perf_states(pd->em_pd));
2976aa140faSQuentin Perret 		pd = pd->next;
2986aa140faSQuentin Perret 	}
2996aa140faSQuentin Perret 
3006aa140faSQuentin Perret 	printk(KERN_CONT "\n");
3016aa140faSQuentin Perret }
3026aa140faSQuentin Perret 
3036aa140faSQuentin Perret static void destroy_perf_domain_rcu(struct rcu_head *rp)
3046aa140faSQuentin Perret {
3056aa140faSQuentin Perret 	struct perf_domain *pd;
3066aa140faSQuentin Perret 
3076aa140faSQuentin Perret 	pd = container_of(rp, struct perf_domain, rcu);
3086aa140faSQuentin Perret 	free_pd(pd);
3096aa140faSQuentin Perret }
3106aa140faSQuentin Perret 
3111f74de87SQuentin Perret static void sched_energy_set(bool has_eas)
3121f74de87SQuentin Perret {
3131f74de87SQuentin Perret 	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
3141f74de87SQuentin Perret 		if (sched_debug())
3151f74de87SQuentin Perret 			pr_info("%s: stopping EAS\n", __func__);
3161f74de87SQuentin Perret 		static_branch_disable_cpuslocked(&sched_energy_present);
3171f74de87SQuentin Perret 	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
3181f74de87SQuentin Perret 		if (sched_debug())
3191f74de87SQuentin Perret 			pr_info("%s: starting EAS\n", __func__);
3201f74de87SQuentin Perret 		static_branch_enable_cpuslocked(&sched_energy_present);
3211f74de87SQuentin Perret 	}
3221f74de87SQuentin Perret }
3231f74de87SQuentin Perret 
324b68a4c0dSQuentin Perret /*
325b68a4c0dSQuentin Perret  * EAS can be used on a root domain if it meets all the following conditions:
326b68a4c0dSQuentin Perret  *    1. an Energy Model (EM) is available;
327b68a4c0dSQuentin Perret  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
32838502ab4SValentin Schneider  *    3. no SMT is detected;
32938502ab4SValentin Schneider  *    4. the EM complexity is low enough to keep scheduling overheads low;
33038502ab4SValentin Schneider  *    5. schedutil is driving the frequency of all CPUs of the rd;
331fa50e2b4SIonela Voinescu  *    6. frequency invariance support is present.
332b68a4c0dSQuentin Perret  *
333b68a4c0dSQuentin Perret  * The complexity of the Energy Model is defined as:
334b68a4c0dSQuentin Perret  *
335521b512bSLukasz Luba  *              C = nr_pd * (nr_cpus + nr_ps)
336b68a4c0dSQuentin Perret  *
337b68a4c0dSQuentin Perret  * with parameters defined as:
338b68a4c0dSQuentin Perret  *  - nr_pd:    the number of performance domains
339b68a4c0dSQuentin Perret  *  - nr_cpus:  the number of CPUs
340521b512bSLukasz Luba  *  - nr_ps:    the sum of the number of performance states of all performance
341b68a4c0dSQuentin Perret  *              domains (for example, on a system with 2 performance domains,
342521b512bSLukasz Luba  *              with 10 performance states each, nr_ps = 2 * 10 = 20).
343b68a4c0dSQuentin Perret  *
344b68a4c0dSQuentin Perret  * It is generally not a good idea to use such a model in the wake-up path on
345b68a4c0dSQuentin Perret  * very complex platforms because of the associated scheduling overheads. The
346b68a4c0dSQuentin Perret  * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
347521b512bSLukasz Luba  * with per-CPU DVFS and less than 8 performance states each, for example.
348b68a4c0dSQuentin Perret  */
349b68a4c0dSQuentin Perret #define EM_MAX_COMPLEXITY 2048
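/*
 * Worked example for the bound above (numbers purely illustrative): 16 CPUs
 * with per-CPU DVFS (nr_pd = 16, nr_cpus = 16) and 8 performance states per
 * domain (nr_ps = 16 * 8 = 128) gives
 *
 *   C = 16 * (16 + 128) = 2304 > 2048   -> EAS is rejected,
 *
 * while 7 performance states per domain (nr_ps = 112) gives
 *
 *   C = 16 * (16 + 112) = 2048          -> within the limit, EAS may be used.
 */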
350b68a4c0dSQuentin Perret 
351531b5c9fSQuentin Perret extern struct cpufreq_governor schedutil_gov;
3521f74de87SQuentin Perret static bool build_perf_domains(const struct cpumask *cpu_map)
3536aa140faSQuentin Perret {
354521b512bSLukasz Luba 	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
3556aa140faSQuentin Perret 	struct perf_domain *pd = NULL, *tmp;
3566aa140faSQuentin Perret 	int cpu = cpumask_first(cpu_map);
3576aa140faSQuentin Perret 	struct root_domain *rd = cpu_rq(cpu)->rd;
358531b5c9fSQuentin Perret 	struct cpufreq_policy *policy;
359531b5c9fSQuentin Perret 	struct cpufreq_governor *gov;
360b68a4c0dSQuentin Perret 
3618d5d0cfbSQuentin Perret 	if (!sysctl_sched_energy_aware)
3628d5d0cfbSQuentin Perret 		goto free;
3638d5d0cfbSQuentin Perret 
364b68a4c0dSQuentin Perret 	/* EAS is enabled for asymmetric CPU capacity topologies. */
365b68a4c0dSQuentin Perret 	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
366b68a4c0dSQuentin Perret 		if (sched_debug()) {
367b68a4c0dSQuentin Perret 			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
368b68a4c0dSQuentin Perret 					cpumask_pr_args(cpu_map));
369b68a4c0dSQuentin Perret 		}
370b68a4c0dSQuentin Perret 		goto free;
371b68a4c0dSQuentin Perret 	}
3726aa140faSQuentin Perret 
37338502ab4SValentin Schneider 	/* EAS definitely does *not* handle SMT */
37438502ab4SValentin Schneider 	if (sched_smt_active()) {
37538502ab4SValentin Schneider 		pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
37638502ab4SValentin Schneider 			cpumask_pr_args(cpu_map));
37738502ab4SValentin Schneider 		goto free;
37838502ab4SValentin Schneider 	}
37938502ab4SValentin Schneider 
380fa50e2b4SIonela Voinescu 	if (!arch_scale_freq_invariant()) {
381fa50e2b4SIonela Voinescu 		if (sched_debug()) {
382fa50e2b4SIonela Voinescu 			pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported",
383fa50e2b4SIonela Voinescu 				cpumask_pr_args(cpu_map));
384fa50e2b4SIonela Voinescu 		}
385fa50e2b4SIonela Voinescu 		goto free;
386fa50e2b4SIonela Voinescu 	}
387fa50e2b4SIonela Voinescu 
3886aa140faSQuentin Perret 	for_each_cpu(i, cpu_map) {
3896aa140faSQuentin Perret 		/* Skip already covered CPUs. */
3906aa140faSQuentin Perret 		if (find_pd(pd, i))
3916aa140faSQuentin Perret 			continue;
3926aa140faSQuentin Perret 
393531b5c9fSQuentin Perret 		/* Do not attempt EAS if schedutil is not being used. */
394531b5c9fSQuentin Perret 		policy = cpufreq_cpu_get(i);
395531b5c9fSQuentin Perret 		if (!policy)
396531b5c9fSQuentin Perret 			goto free;
397531b5c9fSQuentin Perret 		gov = policy->governor;
398531b5c9fSQuentin Perret 		cpufreq_cpu_put(policy);
399531b5c9fSQuentin Perret 		if (gov != &schedutil_gov) {
400531b5c9fSQuentin Perret 			if (rd->pd)
401531b5c9fSQuentin Perret 				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
402531b5c9fSQuentin Perret 						cpumask_pr_args(cpu_map));
403531b5c9fSQuentin Perret 			goto free;
404531b5c9fSQuentin Perret 		}
405531b5c9fSQuentin Perret 
4066aa140faSQuentin Perret 		/* Create the new pd and add it to the local list. */
4076aa140faSQuentin Perret 		tmp = pd_init(i);
4086aa140faSQuentin Perret 		if (!tmp)
4096aa140faSQuentin Perret 			goto free;
4106aa140faSQuentin Perret 		tmp->next = pd;
4116aa140faSQuentin Perret 		pd = tmp;
412b68a4c0dSQuentin Perret 
413b68a4c0dSQuentin Perret 		/*
414521b512bSLukasz Luba 		 * Count performance domains and performance states for the
415b68a4c0dSQuentin Perret 		 * complexity check.
416b68a4c0dSQuentin Perret 		 */
417b68a4c0dSQuentin Perret 		nr_pd++;
418521b512bSLukasz Luba 		nr_ps += em_pd_nr_perf_states(pd->em_pd);
419b68a4c0dSQuentin Perret 	}
420b68a4c0dSQuentin Perret 
421b68a4c0dSQuentin Perret 	/* Bail out if the Energy Model complexity is too high. */
422521b512bSLukasz Luba 	if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
423b68a4c0dSQuentin Perret 		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
424b68a4c0dSQuentin Perret 						cpumask_pr_args(cpu_map));
425b68a4c0dSQuentin Perret 		goto free;
4266aa140faSQuentin Perret 	}
4276aa140faSQuentin Perret 
4286aa140faSQuentin Perret 	perf_domain_debug(cpu_map, pd);
4296aa140faSQuentin Perret 
4306aa140faSQuentin Perret 	/* Attach the new list of performance domains to the root domain. */
4316aa140faSQuentin Perret 	tmp = rd->pd;
4326aa140faSQuentin Perret 	rcu_assign_pointer(rd->pd, pd);
4336aa140faSQuentin Perret 	if (tmp)
4346aa140faSQuentin Perret 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
4356aa140faSQuentin Perret 
4361f74de87SQuentin Perret 	return !!pd;
4376aa140faSQuentin Perret 
4386aa140faSQuentin Perret free:
4396aa140faSQuentin Perret 	free_pd(pd);
4406aa140faSQuentin Perret 	tmp = rd->pd;
4416aa140faSQuentin Perret 	rcu_assign_pointer(rd->pd, NULL);
4426aa140faSQuentin Perret 	if (tmp)
4436aa140faSQuentin Perret 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
4441f74de87SQuentin Perret 
4451f74de87SQuentin Perret 	return false;
4466aa140faSQuentin Perret }
4476aa140faSQuentin Perret #else
4486aa140faSQuentin Perret static void free_pd(struct perf_domain *pd) { }
449531b5c9fSQuentin Perret #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
4506aa140faSQuentin Perret 
451f2cb1360SIngo Molnar static void free_rootdomain(struct rcu_head *rcu)
452f2cb1360SIngo Molnar {
453f2cb1360SIngo Molnar 	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
454f2cb1360SIngo Molnar 
455f2cb1360SIngo Molnar 	cpupri_cleanup(&rd->cpupri);
456f2cb1360SIngo Molnar 	cpudl_cleanup(&rd->cpudl);
457f2cb1360SIngo Molnar 	free_cpumask_var(rd->dlo_mask);
458f2cb1360SIngo Molnar 	free_cpumask_var(rd->rto_mask);
459f2cb1360SIngo Molnar 	free_cpumask_var(rd->online);
460f2cb1360SIngo Molnar 	free_cpumask_var(rd->span);
4616aa140faSQuentin Perret 	free_pd(rd->pd);
462f2cb1360SIngo Molnar 	kfree(rd);
463f2cb1360SIngo Molnar }
464f2cb1360SIngo Molnar 
465f2cb1360SIngo Molnar void rq_attach_root(struct rq *rq, struct root_domain *rd)
466f2cb1360SIngo Molnar {
467f2cb1360SIngo Molnar 	struct root_domain *old_rd = NULL;
468f2cb1360SIngo Molnar 	unsigned long flags;
469f2cb1360SIngo Molnar 
4705cb9eaa3SPeter Zijlstra 	raw_spin_rq_lock_irqsave(rq, flags);
471f2cb1360SIngo Molnar 
472f2cb1360SIngo Molnar 	if (rq->rd) {
473f2cb1360SIngo Molnar 		old_rd = rq->rd;
474f2cb1360SIngo Molnar 
475f2cb1360SIngo Molnar 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
476f2cb1360SIngo Molnar 			set_rq_offline(rq);
477f2cb1360SIngo Molnar 
478f2cb1360SIngo Molnar 		cpumask_clear_cpu(rq->cpu, old_rd->span);
479f2cb1360SIngo Molnar 
480f2cb1360SIngo Molnar 		/*
481f2cb1360SIngo Molnar 		 * If we don't want to free the old_rd yet then
482f2cb1360SIngo Molnar 		 * set old_rd to NULL to skip the freeing later
483f2cb1360SIngo Molnar 		 * in this function:
484f2cb1360SIngo Molnar 		 */
485f2cb1360SIngo Molnar 		if (!atomic_dec_and_test(&old_rd->refcount))
486f2cb1360SIngo Molnar 			old_rd = NULL;
487f2cb1360SIngo Molnar 	}
488f2cb1360SIngo Molnar 
489f2cb1360SIngo Molnar 	atomic_inc(&rd->refcount);
490f2cb1360SIngo Molnar 	rq->rd = rd;
491f2cb1360SIngo Molnar 
492f2cb1360SIngo Molnar 	cpumask_set_cpu(rq->cpu, rd->span);
493f2cb1360SIngo Molnar 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
494f2cb1360SIngo Molnar 		set_rq_online(rq);
495f2cb1360SIngo Molnar 
4965cb9eaa3SPeter Zijlstra 	raw_spin_rq_unlock_irqrestore(rq, flags);
497f2cb1360SIngo Molnar 
498f2cb1360SIngo Molnar 	if (old_rd)
499337e9b07SPaul E. McKenney 		call_rcu(&old_rd->rcu, free_rootdomain);
500f2cb1360SIngo Molnar }
501f2cb1360SIngo Molnar 
502364f5665SSteven Rostedt (VMware) void sched_get_rd(struct root_domain *rd)
503364f5665SSteven Rostedt (VMware) {
504364f5665SSteven Rostedt (VMware) 	atomic_inc(&rd->refcount);
505364f5665SSteven Rostedt (VMware) }
506364f5665SSteven Rostedt (VMware) 
507364f5665SSteven Rostedt (VMware) void sched_put_rd(struct root_domain *rd)
508364f5665SSteven Rostedt (VMware) {
509364f5665SSteven Rostedt (VMware) 	if (!atomic_dec_and_test(&rd->refcount))
510364f5665SSteven Rostedt (VMware) 		return;
511364f5665SSteven Rostedt (VMware) 
512337e9b07SPaul E. McKenney 	call_rcu(&rd->rcu, free_rootdomain);
513364f5665SSteven Rostedt (VMware) }
514364f5665SSteven Rostedt (VMware) 
515f2cb1360SIngo Molnar static int init_rootdomain(struct root_domain *rd)
516f2cb1360SIngo Molnar {
517f2cb1360SIngo Molnar 	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
518f2cb1360SIngo Molnar 		goto out;
519f2cb1360SIngo Molnar 	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
520f2cb1360SIngo Molnar 		goto free_span;
521f2cb1360SIngo Molnar 	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
522f2cb1360SIngo Molnar 		goto free_online;
523f2cb1360SIngo Molnar 	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
524f2cb1360SIngo Molnar 		goto free_dlo_mask;
525f2cb1360SIngo Molnar 
5264bdced5cSSteven Rostedt (Red Hat) #ifdef HAVE_RT_PUSH_IPI
5274bdced5cSSteven Rostedt (Red Hat) 	rd->rto_cpu = -1;
5284bdced5cSSteven Rostedt (Red Hat) 	raw_spin_lock_init(&rd->rto_lock);
529da6ff099SSebastian Andrzej Siewior 	rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
5304bdced5cSSteven Rostedt (Red Hat) #endif
5314bdced5cSSteven Rostedt (Red Hat) 
53226762423SPeng Liu 	rd->visit_gen = 0;
533f2cb1360SIngo Molnar 	init_dl_bw(&rd->dl_bw);
534f2cb1360SIngo Molnar 	if (cpudl_init(&rd->cpudl) != 0)
535f2cb1360SIngo Molnar 		goto free_rto_mask;
536f2cb1360SIngo Molnar 
537f2cb1360SIngo Molnar 	if (cpupri_init(&rd->cpupri) != 0)
538f2cb1360SIngo Molnar 		goto free_cpudl;
539f2cb1360SIngo Molnar 	return 0;
540f2cb1360SIngo Molnar 
541f2cb1360SIngo Molnar free_cpudl:
542f2cb1360SIngo Molnar 	cpudl_cleanup(&rd->cpudl);
543f2cb1360SIngo Molnar free_rto_mask:
544f2cb1360SIngo Molnar 	free_cpumask_var(rd->rto_mask);
545f2cb1360SIngo Molnar free_dlo_mask:
546f2cb1360SIngo Molnar 	free_cpumask_var(rd->dlo_mask);
547f2cb1360SIngo Molnar free_online:
548f2cb1360SIngo Molnar 	free_cpumask_var(rd->online);
549f2cb1360SIngo Molnar free_span:
550f2cb1360SIngo Molnar 	free_cpumask_var(rd->span);
551f2cb1360SIngo Molnar out:
552f2cb1360SIngo Molnar 	return -ENOMEM;
553f2cb1360SIngo Molnar }
554f2cb1360SIngo Molnar 
555f2cb1360SIngo Molnar /*
556f2cb1360SIngo Molnar  * By default the system creates a single root-domain with all CPUs as
557f2cb1360SIngo Molnar  * members (mimicking the global state we have today).
558f2cb1360SIngo Molnar  */
559f2cb1360SIngo Molnar struct root_domain def_root_domain;
560f2cb1360SIngo Molnar 
561f2cb1360SIngo Molnar void init_defrootdomain(void)
562f2cb1360SIngo Molnar {
563f2cb1360SIngo Molnar 	init_rootdomain(&def_root_domain);
564f2cb1360SIngo Molnar 
565f2cb1360SIngo Molnar 	atomic_set(&def_root_domain.refcount, 1);
566f2cb1360SIngo Molnar }
567f2cb1360SIngo Molnar 
568f2cb1360SIngo Molnar static struct root_domain *alloc_rootdomain(void)
569f2cb1360SIngo Molnar {
570f2cb1360SIngo Molnar 	struct root_domain *rd;
571f2cb1360SIngo Molnar 
5724d13a06dSViresh Kumar 	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
573f2cb1360SIngo Molnar 	if (!rd)
574f2cb1360SIngo Molnar 		return NULL;
575f2cb1360SIngo Molnar 
576f2cb1360SIngo Molnar 	if (init_rootdomain(rd) != 0) {
577f2cb1360SIngo Molnar 		kfree(rd);
578f2cb1360SIngo Molnar 		return NULL;
579f2cb1360SIngo Molnar 	}
580f2cb1360SIngo Molnar 
581f2cb1360SIngo Molnar 	return rd;
582f2cb1360SIngo Molnar }
583f2cb1360SIngo Molnar 
584f2cb1360SIngo Molnar static void free_sched_groups(struct sched_group *sg, int free_sgc)
585f2cb1360SIngo Molnar {
586f2cb1360SIngo Molnar 	struct sched_group *tmp, *first;
587f2cb1360SIngo Molnar 
588f2cb1360SIngo Molnar 	if (!sg)
589f2cb1360SIngo Molnar 		return;
590f2cb1360SIngo Molnar 
591f2cb1360SIngo Molnar 	first = sg;
592f2cb1360SIngo Molnar 	do {
593f2cb1360SIngo Molnar 		tmp = sg->next;
594f2cb1360SIngo Molnar 
595f2cb1360SIngo Molnar 		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
596f2cb1360SIngo Molnar 			kfree(sg->sgc);
597f2cb1360SIngo Molnar 
598213c5a45SShu Wang 		if (atomic_dec_and_test(&sg->ref))
599f2cb1360SIngo Molnar 			kfree(sg);
600f2cb1360SIngo Molnar 		sg = tmp;
601f2cb1360SIngo Molnar 	} while (sg != first);
602f2cb1360SIngo Molnar }
603f2cb1360SIngo Molnar 
604f2cb1360SIngo Molnar static void destroy_sched_domain(struct sched_domain *sd)
605f2cb1360SIngo Molnar {
606f2cb1360SIngo Molnar 	/*
607a090c4f2SPeter Zijlstra 	 * A normal sched domain may have multiple group references; an
608a090c4f2SPeter Zijlstra 	 * overlapping domain, having private groups, has only one. Iterate,
609a090c4f2SPeter Zijlstra 	 * dropping group/capacity references and freeing where none remain.
610f2cb1360SIngo Molnar 	 */
611f2cb1360SIngo Molnar 	free_sched_groups(sd->groups, 1);
612213c5a45SShu Wang 
613f2cb1360SIngo Molnar 	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
614f2cb1360SIngo Molnar 		kfree(sd->shared);
615f2cb1360SIngo Molnar 	kfree(sd);
616f2cb1360SIngo Molnar }
617f2cb1360SIngo Molnar 
618f2cb1360SIngo Molnar static void destroy_sched_domains_rcu(struct rcu_head *rcu)
619f2cb1360SIngo Molnar {
620f2cb1360SIngo Molnar 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
621f2cb1360SIngo Molnar 
622f2cb1360SIngo Molnar 	while (sd) {
623f2cb1360SIngo Molnar 		struct sched_domain *parent = sd->parent;
624f2cb1360SIngo Molnar 		destroy_sched_domain(sd);
625f2cb1360SIngo Molnar 		sd = parent;
626f2cb1360SIngo Molnar 	}
627f2cb1360SIngo Molnar }
628f2cb1360SIngo Molnar 
629f2cb1360SIngo Molnar static void destroy_sched_domains(struct sched_domain *sd)
630f2cb1360SIngo Molnar {
631f2cb1360SIngo Molnar 	if (sd)
632f2cb1360SIngo Molnar 		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
633f2cb1360SIngo Molnar }
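/*
 * Note: sched domains are walked locklessly under rcu_read_lock() in the
 * wakeup and load-balance fast paths, which is why the teardown above is
 * deferred through call_rcu() instead of freeing the hierarchy immediately.
 */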
634f2cb1360SIngo Molnar 
635f2cb1360SIngo Molnar /*
636f2cb1360SIngo Molnar  * Keep a special pointer to the highest sched_domain that has
637f2cb1360SIngo Molnar  * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain) for this CPU; this
638f2cb1360SIngo Molnar  * allows us to avoid some pointer chasing in select_idle_sibling().
639f2cb1360SIngo Molnar  *
640f2cb1360SIngo Molnar  * Also keep a unique ID per domain (we use the first CPU number in
641f2cb1360SIngo Molnar  * the cpumask of the domain); this allows us to quickly tell if
642f2cb1360SIngo Molnar  * two CPUs are in the same cache domain, see cpus_share_cache().
643f2cb1360SIngo Molnar  */
644994aeb7aSJoel Fernandes (Google) DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
645f2cb1360SIngo Molnar DEFINE_PER_CPU(int, sd_llc_size);
646f2cb1360SIngo Molnar DEFINE_PER_CPU(int, sd_llc_id);
647994aeb7aSJoel Fernandes (Google) DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
648994aeb7aSJoel Fernandes (Google) DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
649994aeb7aSJoel Fernandes (Google) DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
650994aeb7aSJoel Fernandes (Google) DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
651df054e84SMorten Rasmussen DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
652f2cb1360SIngo Molnar 
653f2cb1360SIngo Molnar static void update_top_cache_domain(int cpu)
654f2cb1360SIngo Molnar {
655f2cb1360SIngo Molnar 	struct sched_domain_shared *sds = NULL;
656f2cb1360SIngo Molnar 	struct sched_domain *sd;
657f2cb1360SIngo Molnar 	int id = cpu;
658f2cb1360SIngo Molnar 	int size = 1;
659f2cb1360SIngo Molnar 
660f2cb1360SIngo Molnar 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
661f2cb1360SIngo Molnar 	if (sd) {
662f2cb1360SIngo Molnar 		id = cpumask_first(sched_domain_span(sd));
663f2cb1360SIngo Molnar 		size = cpumask_weight(sched_domain_span(sd));
664f2cb1360SIngo Molnar 		sds = sd->shared;
665f2cb1360SIngo Molnar 	}
666f2cb1360SIngo Molnar 
667f2cb1360SIngo Molnar 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
668f2cb1360SIngo Molnar 	per_cpu(sd_llc_size, cpu) = size;
669f2cb1360SIngo Molnar 	per_cpu(sd_llc_id, cpu) = id;
670f2cb1360SIngo Molnar 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
671f2cb1360SIngo Molnar 
672f2cb1360SIngo Molnar 	sd = lowest_flag_domain(cpu, SD_NUMA);
673f2cb1360SIngo Molnar 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
674f2cb1360SIngo Molnar 
675f2cb1360SIngo Molnar 	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
676011b27bbSQuentin Perret 	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
677011b27bbSQuentin Perret 
678c744dc4aSBeata Michalska 	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
679011b27bbSQuentin Perret 	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
680f2cb1360SIngo Molnar }
681f2cb1360SIngo Molnar 
682f2cb1360SIngo Molnar /*
683f2cb1360SIngo Molnar  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
684f2cb1360SIngo Molnar  * hold the hotplug lock.
685f2cb1360SIngo Molnar  */
686f2cb1360SIngo Molnar static void
687f2cb1360SIngo Molnar cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
688f2cb1360SIngo Molnar {
689f2cb1360SIngo Molnar 	struct rq *rq = cpu_rq(cpu);
690f2cb1360SIngo Molnar 	struct sched_domain *tmp;
691f2cb1360SIngo Molnar 
692f2cb1360SIngo Molnar 	/* Remove the sched domains which do not contribute to scheduling. */
693f2cb1360SIngo Molnar 	for (tmp = sd; tmp; ) {
694f2cb1360SIngo Molnar 		struct sched_domain *parent = tmp->parent;
695f2cb1360SIngo Molnar 		if (!parent)
696f2cb1360SIngo Molnar 			break;
697f2cb1360SIngo Molnar 
698f2cb1360SIngo Molnar 		if (sd_parent_degenerate(tmp, parent)) {
699f2cb1360SIngo Molnar 			tmp->parent = parent->parent;
700f2cb1360SIngo Molnar 			if (parent->parent)
701f2cb1360SIngo Molnar 				parent->parent->child = tmp;
702f2cb1360SIngo Molnar 			/*
703f2cb1360SIngo Molnar 			 * Transfer SD_PREFER_SIBLING down in case of a
704f2cb1360SIngo Molnar 			 * degenerate parent; the spans match for this
705f2cb1360SIngo Molnar 			 * so the property transfers.
706f2cb1360SIngo Molnar 			 */
707f2cb1360SIngo Molnar 			if (parent->flags & SD_PREFER_SIBLING)
708f2cb1360SIngo Molnar 				tmp->flags |= SD_PREFER_SIBLING;
709f2cb1360SIngo Molnar 			destroy_sched_domain(parent);
710f2cb1360SIngo Molnar 		} else
711f2cb1360SIngo Molnar 			tmp = tmp->parent;
712f2cb1360SIngo Molnar 	}
713f2cb1360SIngo Molnar 
714f2cb1360SIngo Molnar 	if (sd && sd_degenerate(sd)) {
715f2cb1360SIngo Molnar 		tmp = sd;
716f2cb1360SIngo Molnar 		sd = sd->parent;
717f2cb1360SIngo Molnar 		destroy_sched_domain(tmp);
71816d364baSRicardo Neri 		if (sd) {
71916d364baSRicardo Neri 			struct sched_group *sg = sd->groups;
72016d364baSRicardo Neri 
72116d364baSRicardo Neri 			/*
72216d364baSRicardo Neri 			 * sched groups hold the flags of the child sched
72316d364baSRicardo Neri 			 * domain for convenience. Clear such flags since
72416d364baSRicardo Neri 			 * the child is being destroyed.
72516d364baSRicardo Neri 			 */
72616d364baSRicardo Neri 			do {
72716d364baSRicardo Neri 				sg->flags = 0;
72816d364baSRicardo Neri 			} while (sg != sd->groups);
72916d364baSRicardo Neri 
730f2cb1360SIngo Molnar 			sd->child = NULL;
731f2cb1360SIngo Molnar 		}
73216d364baSRicardo Neri 	}
733f2cb1360SIngo Molnar 
734f2cb1360SIngo Molnar 	sched_domain_debug(sd, cpu);
735f2cb1360SIngo Molnar 
736f2cb1360SIngo Molnar 	rq_attach_root(rq, rd);
737f2cb1360SIngo Molnar 	tmp = rq->sd;
738f2cb1360SIngo Molnar 	rcu_assign_pointer(rq->sd, sd);
739bbdacdfeSPeter Zijlstra 	dirty_sched_domain_sysctl(cpu);
740f2cb1360SIngo Molnar 	destroy_sched_domains(tmp);
741f2cb1360SIngo Molnar 
742f2cb1360SIngo Molnar 	update_top_cache_domain(cpu);
743f2cb1360SIngo Molnar }
744f2cb1360SIngo Molnar 
745f2cb1360SIngo Molnar struct s_data {
74699687cdbSLuc Van Oostenryck 	struct sched_domain * __percpu *sd;
747f2cb1360SIngo Molnar 	struct root_domain	*rd;
748f2cb1360SIngo Molnar };
749f2cb1360SIngo Molnar 
750f2cb1360SIngo Molnar enum s_alloc {
751f2cb1360SIngo Molnar 	sa_rootdomain,
752f2cb1360SIngo Molnar 	sa_sd,
753f2cb1360SIngo Molnar 	sa_sd_storage,
754f2cb1360SIngo Molnar 	sa_none,
755f2cb1360SIngo Molnar };
756f2cb1360SIngo Molnar 
757f2cb1360SIngo Molnar /*
75835a566e6SPeter Zijlstra  * Return the canonical balance CPU for this group; this is the first CPU
759e5c14b1fSPeter Zijlstra  * of this group that's also in the balance mask.
76035a566e6SPeter Zijlstra  *
761e5c14b1fSPeter Zijlstra  * The balance mask contains all those CPUs that could actually end up at this
762e5c14b1fSPeter Zijlstra  * group. See build_balance_mask().
76335a566e6SPeter Zijlstra  *
76435a566e6SPeter Zijlstra  * Also see should_we_balance().
76535a566e6SPeter Zijlstra  */
76635a566e6SPeter Zijlstra int group_balance_cpu(struct sched_group *sg)
76735a566e6SPeter Zijlstra {
768e5c14b1fSPeter Zijlstra 	return cpumask_first(group_balance_mask(sg));
76935a566e6SPeter Zijlstra }
77035a566e6SPeter Zijlstra 
77135a566e6SPeter Zijlstra 
77235a566e6SPeter Zijlstra /*
77335a566e6SPeter Zijlstra  * NUMA topology (first read the regular topology blurb below)
77435a566e6SPeter Zijlstra  *
77535a566e6SPeter Zijlstra  * Given a node-distance table, for example:
77635a566e6SPeter Zijlstra  *
77735a566e6SPeter Zijlstra  *   node   0   1   2   3
77835a566e6SPeter Zijlstra  *     0:  10  20  30  20
77935a566e6SPeter Zijlstra  *     1:  20  10  20  30
78035a566e6SPeter Zijlstra  *     2:  30  20  10  20
78135a566e6SPeter Zijlstra  *     3:  20  30  20  10
78235a566e6SPeter Zijlstra  *
78335a566e6SPeter Zijlstra  * which represents a 4 node ring topology like:
78435a566e6SPeter Zijlstra  *
78535a566e6SPeter Zijlstra  *   0 ----- 1
78635a566e6SPeter Zijlstra  *   |       |
78735a566e6SPeter Zijlstra  *   |       |
78835a566e6SPeter Zijlstra  *   |       |
78935a566e6SPeter Zijlstra  *   3 ----- 2
79035a566e6SPeter Zijlstra  *
79135a566e6SPeter Zijlstra  * We want to construct domains and groups to represent this. The way we go
79235a566e6SPeter Zijlstra  * about doing this is to build the domains on 'hops'. For each NUMA level we
79335a566e6SPeter Zijlstra  * construct the mask of all nodes reachable in @level hops.
79435a566e6SPeter Zijlstra  *
79535a566e6SPeter Zijlstra  * For the above NUMA topology that gives 3 levels:
79635a566e6SPeter Zijlstra  *
79735a566e6SPeter Zijlstra  * NUMA-2	0-3		0-3		0-3		0-3
79835a566e6SPeter Zijlstra  *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
79935a566e6SPeter Zijlstra  *
80035a566e6SPeter Zijlstra  * NUMA-1	0-1,3		0-2		1-3		0,2-3
80135a566e6SPeter Zijlstra  *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
80235a566e6SPeter Zijlstra  *
80335a566e6SPeter Zijlstra  * NUMA-0	0		1		2		3
80435a566e6SPeter Zijlstra  *
80535a566e6SPeter Zijlstra  *
80635a566e6SPeter Zijlstra  * As can be seen; things don't nicely line up as with the regular topology.
80735a566e6SPeter Zijlstra  * When we iterate a domain in child domain chunks, some nodes can be
80835a566e6SPeter Zijlstra  * represented multiple times -- hence the "overlap" naming for this part of
80935a566e6SPeter Zijlstra  * the topology.
81035a566e6SPeter Zijlstra  *
81135a566e6SPeter Zijlstra  * In order to minimize this overlap, we only build enough groups to cover the
81235a566e6SPeter Zijlstra  * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
81335a566e6SPeter Zijlstra  *
81435a566e6SPeter Zijlstra  * Because:
81535a566e6SPeter Zijlstra  *
81635a566e6SPeter Zijlstra  *  - the first group of each domain is its child domain; this
81735a566e6SPeter Zijlstra  *    gets us the first 0-1,3
81835a566e6SPeter Zijlstra  *  - the only uncovered node is 2, whose child domain is 1-3.
81935a566e6SPeter Zijlstra  *
82035a566e6SPeter Zijlstra  * However, because of the overlap, computing a unique CPU for each group is
82135a566e6SPeter Zijlstra  * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
82235a566e6SPeter Zijlstra  * groups include the CPUs of Node-0, while those CPUs would not in fact ever
82335a566e6SPeter Zijlstra  * end up at those groups (they would end up in group: 0-1,3).
82435a566e6SPeter Zijlstra  *
825e5c14b1fSPeter Zijlstra  * To correct this we have to introduce the group balance mask. This mask
82635a566e6SPeter Zijlstra  * will contain those CPUs in the group that can reach this group given the
82735a566e6SPeter Zijlstra  * (child) domain tree.
82835a566e6SPeter Zijlstra  *
82935a566e6SPeter Zijlstra  * With this we can once again compute balance_cpu and sched_group_capacity
83035a566e6SPeter Zijlstra  * relations.
83135a566e6SPeter Zijlstra  *
83235a566e6SPeter Zijlstra  * XXX include words on how balance_cpu is unique and therefore can be
83335a566e6SPeter Zijlstra  * used for sched_group_capacity links.
83435a566e6SPeter Zijlstra  *
83535a566e6SPeter Zijlstra  *
83635a566e6SPeter Zijlstra  * Another 'interesting' topology is:
83735a566e6SPeter Zijlstra  *
83835a566e6SPeter Zijlstra  *   node   0   1   2   3
83935a566e6SPeter Zijlstra  *     0:  10  20  20  30
84035a566e6SPeter Zijlstra  *     1:  20  10  20  20
84135a566e6SPeter Zijlstra  *     2:  20  20  10  20
84235a566e6SPeter Zijlstra  *     3:  30  20  20  10
84335a566e6SPeter Zijlstra  *
84435a566e6SPeter Zijlstra  * Which looks a little like:
84535a566e6SPeter Zijlstra  *
84635a566e6SPeter Zijlstra  *   0 ----- 1
84735a566e6SPeter Zijlstra  *   |     / |
84835a566e6SPeter Zijlstra  *   |   /   |
84935a566e6SPeter Zijlstra  *   | /     |
85035a566e6SPeter Zijlstra  *   2 ----- 3
85135a566e6SPeter Zijlstra  *
85235a566e6SPeter Zijlstra  * This topology is asymmetric: nodes 1,2 are fully connected, but nodes 0,3
85335a566e6SPeter Zijlstra  * are not.
85435a566e6SPeter Zijlstra  *
85535a566e6SPeter Zijlstra  * This leads to a few particularly weird cases where the number of
85697fb7a0aSIngo Molnar  * sched_domains is not the same for each CPU. Consider:
85735a566e6SPeter Zijlstra  *
85835a566e6SPeter Zijlstra  * NUMA-2	0-3						0-3
85935a566e6SPeter Zijlstra  *  groups:	{0-2},{1-3}					{1-3},{0-2}
86035a566e6SPeter Zijlstra  *
86135a566e6SPeter Zijlstra  * NUMA-1	0-2		0-3		0-3		1-3
86235a566e6SPeter Zijlstra  *
86335a566e6SPeter Zijlstra  * NUMA-0	0		1		2		3
86435a566e6SPeter Zijlstra  *
86535a566e6SPeter Zijlstra  */
86635a566e6SPeter Zijlstra 
86735a566e6SPeter Zijlstra 
86835a566e6SPeter Zijlstra /*
869e5c14b1fSPeter Zijlstra  * Build the balance mask; it contains only those CPUs that can arrive at this
870e5c14b1fSPeter Zijlstra  * group and should be considered to continue balancing.
87135a566e6SPeter Zijlstra  *
87235a566e6SPeter Zijlstra  * We do this during the group creation pass; therefore the group information
87335a566e6SPeter Zijlstra  * isn't complete yet. However, since each group represents a (child) domain we
87435a566e6SPeter Zijlstra  * can fully construct this using the sched_domain bits (which are already
87535a566e6SPeter Zijlstra  * complete).
876f2cb1360SIngo Molnar  */
8771676330eSPeter Zijlstra static void
878e5c14b1fSPeter Zijlstra build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
879f2cb1360SIngo Molnar {
880ae4df9d6SPeter Zijlstra 	const struct cpumask *sg_span = sched_group_span(sg);
881f2cb1360SIngo Molnar 	struct sd_data *sdd = sd->private;
882f2cb1360SIngo Molnar 	struct sched_domain *sibling;
883f2cb1360SIngo Molnar 	int i;
884f2cb1360SIngo Molnar 
8851676330eSPeter Zijlstra 	cpumask_clear(mask);
8861676330eSPeter Zijlstra 
887f32d782eSLauro Ramos Venancio 	for_each_cpu(i, sg_span) {
888f2cb1360SIngo Molnar 		sibling = *per_cpu_ptr(sdd->sd, i);
88973bb059fSPeter Zijlstra 
89073bb059fSPeter Zijlstra 		/*
89173bb059fSPeter Zijlstra 		 * Can happen in the asymmetric case, where these siblings are
89273bb059fSPeter Zijlstra 		 * unused. The mask will not be empty because those CPUs that
89373bb059fSPeter Zijlstra 		 * do have the top domain _should_ span the domain.
89473bb059fSPeter Zijlstra 		 */
89573bb059fSPeter Zijlstra 		if (!sibling->child)
89673bb059fSPeter Zijlstra 			continue;
89773bb059fSPeter Zijlstra 
89873bb059fSPeter Zijlstra 		/* If we would not end up here, we can't continue from here */
89973bb059fSPeter Zijlstra 		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
900f2cb1360SIngo Molnar 			continue;
901f2cb1360SIngo Molnar 
9021676330eSPeter Zijlstra 		cpumask_set_cpu(i, mask);
903f2cb1360SIngo Molnar 	}
90473bb059fSPeter Zijlstra 
90573bb059fSPeter Zijlstra 	/* We must not have empty masks here */
9061676330eSPeter Zijlstra 	WARN_ON_ONCE(cpumask_empty(mask));
907f2cb1360SIngo Molnar }
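/*
 * Example (illustrative, using the 4-node ring topology from the NUMA blurb
 * above): for node 0's NUMA-2 domain, the group spanning {1-3} is built from
 * node 2's child domain.  Of CPUs 1-3, only CPU 2 has a child domain whose
 * span is exactly {1-3}, so the balance mask for that group is {2}: CPU 2 is
 * the only CPU that can actually arrive at this group from its own tree.
 */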
908f2cb1360SIngo Molnar 
909f2cb1360SIngo Molnar /*
91035a566e6SPeter Zijlstra  * XXX: This creates per-node group entries; since the load-balancer will
91135a566e6SPeter Zijlstra  * immediately access remote memory to construct this group's load-balance
91235a566e6SPeter Zijlstra  * statistics, having the groups node-local is of dubious benefit.
913f2cb1360SIngo Molnar  */
9148c033469SLauro Ramos Venancio static struct sched_group *
9158c033469SLauro Ramos Venancio build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
9168c033469SLauro Ramos Venancio {
9178c033469SLauro Ramos Venancio 	struct sched_group *sg;
9188c033469SLauro Ramos Venancio 	struct cpumask *sg_span;
9198c033469SLauro Ramos Venancio 
9208c033469SLauro Ramos Venancio 	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
9218c033469SLauro Ramos Venancio 			GFP_KERNEL, cpu_to_node(cpu));
9228c033469SLauro Ramos Venancio 
9238c033469SLauro Ramos Venancio 	if (!sg)
9248c033469SLauro Ramos Venancio 		return NULL;
9258c033469SLauro Ramos Venancio 
926ae4df9d6SPeter Zijlstra 	sg_span = sched_group_span(sg);
92716d364baSRicardo Neri 	if (sd->child) {
9288c033469SLauro Ramos Venancio 		cpumask_copy(sg_span, sched_domain_span(sd->child));
92916d364baSRicardo Neri 		sg->flags = sd->child->flags;
93016d364baSRicardo Neri 	} else {
9318c033469SLauro Ramos Venancio 		cpumask_copy(sg_span, sched_domain_span(sd));
93216d364baSRicardo Neri 	}
9338c033469SLauro Ramos Venancio 
934213c5a45SShu Wang 	atomic_inc(&sg->ref);
9358c033469SLauro Ramos Venancio 	return sg;
9368c033469SLauro Ramos Venancio }
9378c033469SLauro Ramos Venancio 
9388c033469SLauro Ramos Venancio static void init_overlap_sched_group(struct sched_domain *sd,
9391676330eSPeter Zijlstra 				     struct sched_group *sg)
9408c033469SLauro Ramos Venancio {
9411676330eSPeter Zijlstra 	struct cpumask *mask = sched_domains_tmpmask2;
9428c033469SLauro Ramos Venancio 	struct sd_data *sdd = sd->private;
9438c033469SLauro Ramos Venancio 	struct cpumask *sg_span;
9441676330eSPeter Zijlstra 	int cpu;
9451676330eSPeter Zijlstra 
946e5c14b1fSPeter Zijlstra 	build_balance_mask(sd, sg, mask);
9470a2b65c0SBarry Song 	cpu = cpumask_first(mask);
9488c033469SLauro Ramos Venancio 
9498c033469SLauro Ramos Venancio 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
9508c033469SLauro Ramos Venancio 	if (atomic_inc_return(&sg->sgc->ref) == 1)
951e5c14b1fSPeter Zijlstra 		cpumask_copy(group_balance_mask(sg), mask);
95235a566e6SPeter Zijlstra 	else
953e5c14b1fSPeter Zijlstra 		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
9548c033469SLauro Ramos Venancio 
9558c033469SLauro Ramos Venancio 	/*
9568c033469SLauro Ramos Venancio 	 * Initialize sgc->capacity such that even if we mess up the
9578c033469SLauro Ramos Venancio 	 * domains and no possible iteration will get us here, we won't
9588c033469SLauro Ramos Venancio 	 * die on a /0 trap.
9598c033469SLauro Ramos Venancio 	 */
960ae4df9d6SPeter Zijlstra 	sg_span = sched_group_span(sg);
9618c033469SLauro Ramos Venancio 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
9628c033469SLauro Ramos Venancio 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
963e3d6d0cbSMorten Rasmussen 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
9648c033469SLauro Ramos Venancio }
9658c033469SLauro Ramos Venancio 
966585b6d27SBarry Song static struct sched_domain *
967585b6d27SBarry Song find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
968585b6d27SBarry Song {
969585b6d27SBarry Song 	/*
970585b6d27SBarry Song 	 * The proper descendant would be the one whose child won't span out
971585b6d27SBarry Song 	 * of sd
972585b6d27SBarry Song 	 */
973585b6d27SBarry Song 	while (sibling->child &&
974585b6d27SBarry Song 	       !cpumask_subset(sched_domain_span(sibling->child),
975585b6d27SBarry Song 			       sched_domain_span(sd)))
976585b6d27SBarry Song 		sibling = sibling->child;
977585b6d27SBarry Song 
978585b6d27SBarry Song 	/*
979585b6d27SBarry Song 	 * As we are referencing sgc across different topology levels, we need
980585b6d27SBarry Song 	 * to go down to skip those sched_domains which don't contribute to
981585b6d27SBarry Song 	 * scheduling because they will be degenerated in cpu_attach_domain().
982585b6d27SBarry Song 	 */
983585b6d27SBarry Song 	while (sibling->child &&
984585b6d27SBarry Song 	       cpumask_equal(sched_domain_span(sibling->child),
985585b6d27SBarry Song 			     sched_domain_span(sibling)))
986585b6d27SBarry Song 		sibling = sibling->child;
987585b6d27SBarry Song 
988585b6d27SBarry Song 	return sibling;
989585b6d27SBarry Song }
990585b6d27SBarry Song 
991f2cb1360SIngo Molnar static int
992f2cb1360SIngo Molnar build_overlap_sched_groups(struct sched_domain *sd, int cpu)
993f2cb1360SIngo Molnar {
99491eaed0dSPeter Zijlstra 	struct sched_group *first = NULL, *last = NULL, *sg;
995f2cb1360SIngo Molnar 	const struct cpumask *span = sched_domain_span(sd);
996f2cb1360SIngo Molnar 	struct cpumask *covered = sched_domains_tmpmask;
997f2cb1360SIngo Molnar 	struct sd_data *sdd = sd->private;
998f2cb1360SIngo Molnar 	struct sched_domain *sibling;
999f2cb1360SIngo Molnar 	int i;
1000f2cb1360SIngo Molnar 
1001f2cb1360SIngo Molnar 	cpumask_clear(covered);
1002f2cb1360SIngo Molnar 
10030372dd27SPeter Zijlstra 	for_each_cpu_wrap(i, span, cpu) {
1004f2cb1360SIngo Molnar 		struct cpumask *sg_span;
1005f2cb1360SIngo Molnar 
1006f2cb1360SIngo Molnar 		if (cpumask_test_cpu(i, covered))
1007f2cb1360SIngo Molnar 			continue;
1008f2cb1360SIngo Molnar 
1009f2cb1360SIngo Molnar 		sibling = *per_cpu_ptr(sdd->sd, i);
1010f2cb1360SIngo Molnar 
1011c20e1ea4SLauro Ramos Venancio 		/*
1012c20e1ea4SLauro Ramos Venancio 		 * Asymmetric node setups can result in situations where the
1013c20e1ea4SLauro Ramos Venancio 		 * domain tree is of unequal depth, make sure to skip domains
1014c20e1ea4SLauro Ramos Venancio 		 * that already cover the entire range.
1015c20e1ea4SLauro Ramos Venancio 		 *
1016c20e1ea4SLauro Ramos Venancio 		 * In that case build_sched_domains() will have terminated the
1017c20e1ea4SLauro Ramos Venancio 		 * iteration early and our sibling sd spans will be empty.
1018c20e1ea4SLauro Ramos Venancio 		 * Domains should always include the CPU they're built on, so
1019c20e1ea4SLauro Ramos Venancio 		 * check that.
1020c20e1ea4SLauro Ramos Venancio 		 */
1021f2cb1360SIngo Molnar 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
1022f2cb1360SIngo Molnar 			continue;
1023f2cb1360SIngo Molnar 
1024585b6d27SBarry Song 		/*
1025585b6d27SBarry Song 		 * Usually we build sched_group by sibling's child sched_domain.
1026585b6d27SBarry Song 		 * But for machines whose NUMA diameter is 3 or above, we move
1027585b6d27SBarry Song 		 * to build sched_group by sibling's proper descendant's child
1028585b6d27SBarry Song 		 * domain because sibling's child sched_domain will span out of
1029585b6d27SBarry Song 		 * the sched_domain being built as below.
1030585b6d27SBarry Song 		 *
1031585b6d27SBarry Song 		 * Smallest diameter=3 topology is:
1032585b6d27SBarry Song 		 *
1033585b6d27SBarry Song 		 *   node   0   1   2   3
1034585b6d27SBarry Song 		 *     0:  10  20  30  40
1035585b6d27SBarry Song 		 *     1:  20  10  20  30
1036585b6d27SBarry Song 		 *     2:  30  20  10  20
1037585b6d27SBarry Song 		 *     3:  40  30  20  10
1038585b6d27SBarry Song 		 *
1039585b6d27SBarry Song 		 *   0 --- 1 --- 2 --- 3
1040585b6d27SBarry Song 		 *
1041585b6d27SBarry Song 		 * NUMA-3       0-3             N/A             N/A             0-3
1042585b6d27SBarry Song 		 *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
1043585b6d27SBarry Song 		 *
1044585b6d27SBarry Song 		 * NUMA-2       0-2             0-3             0-3             1-3
1045585b6d27SBarry Song 		 *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
1046585b6d27SBarry Song 		 *
1047585b6d27SBarry Song 		 * NUMA-1       0-1             0-2             1-3             2-3
1048585b6d27SBarry Song 		 *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
1049585b6d27SBarry Song 		 *
1050585b6d27SBarry Song 		 * NUMA-0       0               1               2               3
1051585b6d27SBarry Song 		 *
1052585b6d27SBarry Song 		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
1053585b6d27SBarry Song 		 * group span isn't a subset of the domain span.
1054585b6d27SBarry Song 		 */
1055585b6d27SBarry Song 		if (sibling->child &&
1056585b6d27SBarry Song 		    !cpumask_subset(sched_domain_span(sibling->child), span))
1057585b6d27SBarry Song 			sibling = find_descended_sibling(sd, sibling);
1058585b6d27SBarry Song 
10598c033469SLauro Ramos Venancio 		sg = build_group_from_child_sched_domain(sibling, cpu);
1060f2cb1360SIngo Molnar 		if (!sg)
1061f2cb1360SIngo Molnar 			goto fail;
1062f2cb1360SIngo Molnar 
1063ae4df9d6SPeter Zijlstra 		sg_span = sched_group_span(sg);
1064f2cb1360SIngo Molnar 		cpumask_or(covered, covered, sg_span);
1065f2cb1360SIngo Molnar 
1066585b6d27SBarry Song 		init_overlap_sched_group(sibling, sg);
1067f2cb1360SIngo Molnar 
1068f2cb1360SIngo Molnar 		if (!first)
1069f2cb1360SIngo Molnar 			first = sg;
1070f2cb1360SIngo Molnar 		if (last)
1071f2cb1360SIngo Molnar 			last->next = sg;
1072f2cb1360SIngo Molnar 		last = sg;
1073f2cb1360SIngo Molnar 		last->next = first;
1074f2cb1360SIngo Molnar 	}
107591eaed0dSPeter Zijlstra 	sd->groups = first;
1076f2cb1360SIngo Molnar 
1077f2cb1360SIngo Molnar 	return 0;
1078f2cb1360SIngo Molnar 
1079f2cb1360SIngo Molnar fail:
1080f2cb1360SIngo Molnar 	free_sched_groups(first, 0);
1081f2cb1360SIngo Molnar 
1082f2cb1360SIngo Molnar 	return -ENOMEM;
1083f2cb1360SIngo Molnar }
1084f2cb1360SIngo Molnar 
108535a566e6SPeter Zijlstra 
108635a566e6SPeter Zijlstra /*
108735a566e6SPeter Zijlstra  * Package topology (also see the load-balance blurb in fair.c)
108835a566e6SPeter Zijlstra  *
108935a566e6SPeter Zijlstra  * The scheduler builds a tree structure to represent a number of important
109035a566e6SPeter Zijlstra  * topology features. By default (default_topology[]) these include:
109135a566e6SPeter Zijlstra  *
109235a566e6SPeter Zijlstra  *  - Simultaneous multithreading (SMT)
109335a566e6SPeter Zijlstra  *  - Multi-Core Cache (MC)
109435a566e6SPeter Zijlstra  *  - Package (DIE)
109535a566e6SPeter Zijlstra  *
109635a566e6SPeter Zijlstra  * Where the last one more or less denotes everything up to a NUMA node.
109735a566e6SPeter Zijlstra  *
109835a566e6SPeter Zijlstra  * The tree consists of 3 primary data structures:
109935a566e6SPeter Zijlstra  *
110035a566e6SPeter Zijlstra  *	sched_domain -> sched_group -> sched_group_capacity
110135a566e6SPeter Zijlstra  *	    ^ ^             ^ ^
110235a566e6SPeter Zijlstra  *          `-'             `-'
110335a566e6SPeter Zijlstra  *
110497fb7a0aSIngo Molnar  * The sched_domains are per-CPU and have a two-way link (parent & child) and
110535a566e6SPeter Zijlstra  * denote the ever-growing mask of CPUs belonging to that level of topology.
110635a566e6SPeter Zijlstra  *
110735a566e6SPeter Zijlstra  * Each sched_domain has a circular (double) linked list of sched_group's, each
110835a566e6SPeter Zijlstra  * denoting the domains of the level below (or individual CPUs in case of the
110935a566e6SPeter Zijlstra  * first domain level). The sched_group linked by a sched_domain includes the
111035a566e6SPeter Zijlstra  * CPU of that sched_domain [*].
111135a566e6SPeter Zijlstra  *
111235a566e6SPeter Zijlstra  * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
111335a566e6SPeter Zijlstra  *
111435a566e6SPeter Zijlstra  * CPU   0   1   2   3   4   5   6   7
111535a566e6SPeter Zijlstra  *
111635a566e6SPeter Zijlstra  * DIE  [                             ]
111735a566e6SPeter Zijlstra  * MC   [             ] [             ]
111835a566e6SPeter Zijlstra  * SMT  [     ] [     ] [     ] [     ]
111935a566e6SPeter Zijlstra  *
112035a566e6SPeter Zijlstra  *  - or -
112135a566e6SPeter Zijlstra  *
112235a566e6SPeter Zijlstra  * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
112335a566e6SPeter Zijlstra  * MC   0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
112435a566e6SPeter Zijlstra  * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
112535a566e6SPeter Zijlstra  *
112635a566e6SPeter Zijlstra  * CPU   0   1   2   3   4   5   6   7
112735a566e6SPeter Zijlstra  *
112835a566e6SPeter Zijlstra  * One way to think about it is: sched_domain moves you up and down among these
112935a566e6SPeter Zijlstra  * topology levels, while sched_group moves you sideways through it, at child
113035a566e6SPeter Zijlstra  * domain granularity.
113135a566e6SPeter Zijlstra  *
113235a566e6SPeter Zijlstra  * sched_group_capacity ensures each unique sched_group has shared storage.
113335a566e6SPeter Zijlstra  *
113435a566e6SPeter Zijlstra  * There are two related construction problems, both of which require a CPU
113535a566e6SPeter Zijlstra  * that uniquely identifies each group (for a given domain):
113635a566e6SPeter Zijlstra  *
113735a566e6SPeter Zijlstra  *  - The first is the balance_cpu (see should_we_balance() and the
113835a566e6SPeter Zijlstra  *    load-balance blurb in fair.c); for each group we only want 1 CPU to
113935a566e6SPeter Zijlstra  *    continue balancing at a higher domain.
114035a566e6SPeter Zijlstra  *
114135a566e6SPeter Zijlstra  *  - The second is the sched_group_capacity; we want all identical groups
114235a566e6SPeter Zijlstra  *    to share a single sched_group_capacity.
114335a566e6SPeter Zijlstra  *
114435a566e6SPeter Zijlstra  * Since these topologies are exclusive by construction (it is impossible
114535a566e6SPeter Zijlstra  * for an SMT thread to belong to multiple cores, or for a core to be part
114635a566e6SPeter Zijlstra  * of multiple caches), there is a very clear and unique location for each
114735a566e6SPeter Zijlstra  * CPU in the hierarchy.
114835a566e6SPeter Zijlstra  *
114935a566e6SPeter Zijlstra  * Therefore computing a unique CPU for each group is trivial (the iteration
115035a566e6SPeter Zijlstra  * mask is redundant and set to all 1s; all CPUs in a group will end up at
115135a566e6SPeter Zijlstra  * _that_ group): we can simply pick the first CPU in each group.
115235a566e6SPeter Zijlstra  *
115335a566e6SPeter Zijlstra  *
115435a566e6SPeter Zijlstra  * [*] in other words, the first group of each domain is its child domain.
115535a566e6SPeter Zijlstra  */
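
/*
 * Illustrative sketch (editor's addition, not part of the kernel build):
 * walking the circular sched_group list of a domain the way the
 * load-balance code in fair.c does. The function name is hypothetical;
 * every other identifier it uses is defined in this file or sched.h.
 */
static void __maybe_unused sched_domain_dump_groups(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	do {
		printk(KERN_DEBUG "group span=%*pbl weight=%u\n",
		       cpumask_pr_args(sched_group_span(sg)),
		       sg->group_weight);
		sg = sg->next;
	} while (sg != sd->groups);
}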
115635a566e6SPeter Zijlstra 
11570c0e776aSPeter Zijlstra static struct sched_group *get_group(int cpu, struct sd_data *sdd)
1158f2cb1360SIngo Molnar {
1159f2cb1360SIngo Molnar 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1160f2cb1360SIngo Molnar 	struct sched_domain *child = sd->child;
11610c0e776aSPeter Zijlstra 	struct sched_group *sg;
116267d4f6ffSValentin Schneider 	bool already_visited;
1163f2cb1360SIngo Molnar 
1164f2cb1360SIngo Molnar 	if (child)
1165f2cb1360SIngo Molnar 		cpu = cpumask_first(sched_domain_span(child));
1166f2cb1360SIngo Molnar 
11670c0e776aSPeter Zijlstra 	sg = *per_cpu_ptr(sdd->sg, cpu);
11680c0e776aSPeter Zijlstra 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1169f2cb1360SIngo Molnar 
117067d4f6ffSValentin Schneider 	/* Increase refcounts for claim_allocations: */
117167d4f6ffSValentin Schneider 	already_visited = atomic_inc_return(&sg->ref) > 1;
117267d4f6ffSValentin Schneider 	/* sgc visits should follow a similar trend as sg */
117367d4f6ffSValentin Schneider 	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
117467d4f6ffSValentin Schneider 
117567d4f6ffSValentin Schneider 	/* If we have already visited that group, it's already initialized. */
117667d4f6ffSValentin Schneider 	if (already_visited)
117767d4f6ffSValentin Schneider 		return sg;
11780c0e776aSPeter Zijlstra 
11790c0e776aSPeter Zijlstra 	if (child) {
1180ae4df9d6SPeter Zijlstra 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
1181ae4df9d6SPeter Zijlstra 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
118216d364baSRicardo Neri 		sg->flags = child->flags;
11830c0e776aSPeter Zijlstra 	} else {
1184ae4df9d6SPeter Zijlstra 		cpumask_set_cpu(cpu, sched_group_span(sg));
1185e5c14b1fSPeter Zijlstra 		cpumask_set_cpu(cpu, group_balance_mask(sg));
1186f2cb1360SIngo Molnar 	}
1187f2cb1360SIngo Molnar 
1188ae4df9d6SPeter Zijlstra 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
11890c0e776aSPeter Zijlstra 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1190e3d6d0cbSMorten Rasmussen 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
11910c0e776aSPeter Zijlstra 
11920c0e776aSPeter Zijlstra 	return sg;
1193f2cb1360SIngo Molnar }
1194f2cb1360SIngo Molnar 
1195f2cb1360SIngo Molnar /*
1196f2cb1360SIngo Molnar  * build_sched_groups will build a circular linked list of the groups
1197d8743230SValentin Schneider  * covered by the given span, will set each group's ->cpumask correctly,
1198d8743230SValentin Schneider  * and will initialize their ->sgc.
1199f2cb1360SIngo Molnar  *
1200f2cb1360SIngo Molnar  * Assumes the sched_domain tree is fully constructed
1201f2cb1360SIngo Molnar  */
1202f2cb1360SIngo Molnar static int
1203f2cb1360SIngo Molnar build_sched_groups(struct sched_domain *sd, int cpu)
1204f2cb1360SIngo Molnar {
1205f2cb1360SIngo Molnar 	struct sched_group *first = NULL, *last = NULL;
1206f2cb1360SIngo Molnar 	struct sd_data *sdd = sd->private;
1207f2cb1360SIngo Molnar 	const struct cpumask *span = sched_domain_span(sd);
1208f2cb1360SIngo Molnar 	struct cpumask *covered;
1209f2cb1360SIngo Molnar 	int i;
1210f2cb1360SIngo Molnar 
1211f2cb1360SIngo Molnar 	lockdep_assert_held(&sched_domains_mutex);
1212f2cb1360SIngo Molnar 	covered = sched_domains_tmpmask;
1213f2cb1360SIngo Molnar 
1214f2cb1360SIngo Molnar 	cpumask_clear(covered);
1215f2cb1360SIngo Molnar 
12160c0e776aSPeter Zijlstra 	for_each_cpu_wrap(i, span, cpu) {
1217f2cb1360SIngo Molnar 		struct sched_group *sg;
1218f2cb1360SIngo Molnar 
1219f2cb1360SIngo Molnar 		if (cpumask_test_cpu(i, covered))
1220f2cb1360SIngo Molnar 			continue;
1221f2cb1360SIngo Molnar 
12220c0e776aSPeter Zijlstra 		sg = get_group(i, sdd);
1223f2cb1360SIngo Molnar 
1224ae4df9d6SPeter Zijlstra 		cpumask_or(covered, covered, sched_group_span(sg));
1225f2cb1360SIngo Molnar 
1226f2cb1360SIngo Molnar 		if (!first)
1227f2cb1360SIngo Molnar 			first = sg;
1228f2cb1360SIngo Molnar 		if (last)
1229f2cb1360SIngo Molnar 			last->next = sg;
1230f2cb1360SIngo Molnar 		last = sg;
1231f2cb1360SIngo Molnar 	}
1232f2cb1360SIngo Molnar 	last->next = first;
12330c0e776aSPeter Zijlstra 	sd->groups = first;
1234f2cb1360SIngo Molnar 
1235f2cb1360SIngo Molnar 	return 0;
1236f2cb1360SIngo Molnar }
1237f2cb1360SIngo Molnar 
1238f2cb1360SIngo Molnar /*
1239f2cb1360SIngo Molnar  * Initialize sched groups cpu_capacity.
1240f2cb1360SIngo Molnar  *
1241f2cb1360SIngo Molnar  * cpu_capacity indicates the capacity of a sched group, which is used while
1242f2cb1360SIngo Molnar  * distributing the load between different sched groups in a sched domain.
1243f2cb1360SIngo Molnar  * Typically, cpu_capacity for all the groups in a sched domain will be the
1244f2cb1360SIngo Molnar  * same unless there are asymmetries in the topology. If there are
1245f2cb1360SIngo Molnar  * asymmetries, the group having more cpu_capacity will pick up more load
1246f2cb1360SIngo Molnar  * than the group having less cpu_capacity.
1247f2cb1360SIngo Molnar  */
1248f2cb1360SIngo Molnar static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
1249f2cb1360SIngo Molnar {
1250f2cb1360SIngo Molnar 	struct sched_group *sg = sd->groups;
1251f2cb1360SIngo Molnar 
1252f2cb1360SIngo Molnar 	WARN_ON(!sg);
1253f2cb1360SIngo Molnar 
1254f2cb1360SIngo Molnar 	do {
1255f2cb1360SIngo Molnar 		int cpu, max_cpu = -1;
1256f2cb1360SIngo Molnar 
1257ae4df9d6SPeter Zijlstra 		sg->group_weight = cpumask_weight(sched_group_span(sg));
1258f2cb1360SIngo Molnar 
1259f2cb1360SIngo Molnar 		if (!(sd->flags & SD_ASYM_PACKING))
1260f2cb1360SIngo Molnar 			goto next;
1261f2cb1360SIngo Molnar 
1262ae4df9d6SPeter Zijlstra 		for_each_cpu(cpu, sched_group_span(sg)) {
1263f2cb1360SIngo Molnar 			if (max_cpu < 0)
1264f2cb1360SIngo Molnar 				max_cpu = cpu;
1265f2cb1360SIngo Molnar 			else if (sched_asym_prefer(cpu, max_cpu))
1266f2cb1360SIngo Molnar 				max_cpu = cpu;
1267f2cb1360SIngo Molnar 		}
1268f2cb1360SIngo Molnar 		sg->asym_prefer_cpu = max_cpu;
1269f2cb1360SIngo Molnar 
1270f2cb1360SIngo Molnar next:
1271f2cb1360SIngo Molnar 		sg = sg->next;
1272f2cb1360SIngo Molnar 	} while (sg != sd->groups);
1273f2cb1360SIngo Molnar 
1274f2cb1360SIngo Molnar 	if (cpu != group_balance_cpu(sg))
1275f2cb1360SIngo Molnar 		return;
1276f2cb1360SIngo Molnar 
1277f2cb1360SIngo Molnar 	update_group_capacity(sd, cpu);
1278f2cb1360SIngo Molnar }
1279f2cb1360SIngo Molnar 
1280f2cb1360SIngo Molnar /*
1281c744dc4aSBeata Michalska  * Asymmetric CPU capacity bits
1282c744dc4aSBeata Michalska  */
1283c744dc4aSBeata Michalska struct asym_cap_data {
1284c744dc4aSBeata Michalska 	struct list_head link;
1285c744dc4aSBeata Michalska 	unsigned long capacity;
1286c744dc4aSBeata Michalska 	unsigned long cpus[];
1287c744dc4aSBeata Michalska };
1288c744dc4aSBeata Michalska 
1289c744dc4aSBeata Michalska /*
1290c744dc4aSBeata Michalska  * Set of available CPUs grouped by their corresponding capacities.
1291c744dc4aSBeata Michalska  * Each list entry contains a CPU mask reflecting CPUs that share the same
1292c744dc4aSBeata Michalska  * capacity.
1293c744dc4aSBeata Michalska  * The lifespan of the data is unlimited.
1294c744dc4aSBeata Michalska  */
1295c744dc4aSBeata Michalska static LIST_HEAD(asym_cap_list);
1296c744dc4aSBeata Michalska 
1297c744dc4aSBeata Michalska #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
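
/*
 * Illustrative sketch (editor's addition, not compiled in): dumping the
 * capacity groups collected in asym_cap_list. The function name is
 * hypothetical; the list and the cpu_capacity_span() accessor are the
 * ones defined above.
 */
static void __maybe_unused asym_cap_list_dump(void)
{
	struct asym_cap_data *entry;

	list_for_each_entry(entry, &asym_cap_list, link)
		printk(KERN_DEBUG "capacity=%lu cpus=%*pbl\n",
		       entry->capacity,
		       cpumask_pr_args(cpu_capacity_span(entry)));
}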
1298c744dc4aSBeata Michalska 
1299c744dc4aSBeata Michalska /*
1300c744dc4aSBeata Michalska  * Verify whether there is any CPU capacity asymmetry in a given sched domain.
1301c744dc4aSBeata Michalska  * Provides sd_flags reflecting the asymmetry scope.
1302c744dc4aSBeata Michalska  */
1303c744dc4aSBeata Michalska static inline int
1304c744dc4aSBeata Michalska asym_cpu_capacity_classify(const struct cpumask *sd_span,
1305c744dc4aSBeata Michalska 			   const struct cpumask *cpu_map)
1306c744dc4aSBeata Michalska {
1307c744dc4aSBeata Michalska 	struct asym_cap_data *entry;
1308c744dc4aSBeata Michalska 	int count = 0, miss = 0;
1309c744dc4aSBeata Michalska 
1310c744dc4aSBeata Michalska 	/*
1311c744dc4aSBeata Michalska 	 * Count how many unique CPU capacities this domain spans (compare the
1312c744dc4aSBeata Michalska 	 * sched_domain CPU mask with the masks representing the available
1313c744dc4aSBeata Michalska 	 * CPU capacities). Take into account CPUs that might be offline:
1314c744dc4aSBeata Michalska 	 * skip those.
1315c744dc4aSBeata Michalska 	 */
1316c744dc4aSBeata Michalska 	list_for_each_entry(entry, &asym_cap_list, link) {
1317c744dc4aSBeata Michalska 		if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))
1318c744dc4aSBeata Michalska 			++count;
1319c744dc4aSBeata Michalska 		else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))
1320c744dc4aSBeata Michalska 			++miss;
1321c744dc4aSBeata Michalska 	}
1322c744dc4aSBeata Michalska 
1323c744dc4aSBeata Michalska 	WARN_ON_ONCE(!count && !list_empty(&asym_cap_list));
1324c744dc4aSBeata Michalska 
1325c744dc4aSBeata Michalska 	/* No asymmetry detected */
1326c744dc4aSBeata Michalska 	if (count < 2)
1327c744dc4aSBeata Michalska 		return 0;
1328c744dc4aSBeata Michalska 	/* Some of the available CPU capacity values have not been detected */
1329c744dc4aSBeata Michalska 	if (miss)
1330c744dc4aSBeata Michalska 		return SD_ASYM_CPUCAPACITY;
1331c744dc4aSBeata Michalska 
1332c744dc4aSBeata Michalska 	/* Full asymmetry */
1333c744dc4aSBeata Michalska 	return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
1334c744dc4aSBeata Michalska 
1335c744dc4aSBeata Michalska }
1336c744dc4aSBeata Michalska 
1337c744dc4aSBeata Michalska static inline void asym_cpu_capacity_update_data(int cpu)
1338c744dc4aSBeata Michalska {
1339c744dc4aSBeata Michalska 	unsigned long capacity = arch_scale_cpu_capacity(cpu);
1340c744dc4aSBeata Michalska 	struct asym_cap_data *entry = NULL;
1341c744dc4aSBeata Michalska 
1342c744dc4aSBeata Michalska 	list_for_each_entry(entry, &asym_cap_list, link) {
1343c744dc4aSBeata Michalska 		if (capacity == entry->capacity)
1344c744dc4aSBeata Michalska 			goto done;
1345c744dc4aSBeata Michalska 	}
1346c744dc4aSBeata Michalska 
1347c744dc4aSBeata Michalska 	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
1348c744dc4aSBeata Michalska 	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
1349c744dc4aSBeata Michalska 		return;
1350c744dc4aSBeata Michalska 	entry->capacity = capacity;
1351c744dc4aSBeata Michalska 	list_add(&entry->link, &asym_cap_list);
1352c744dc4aSBeata Michalska done:
1353c744dc4aSBeata Michalska 	__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
1354c744dc4aSBeata Michalska }
1355c744dc4aSBeata Michalska 
1356c744dc4aSBeata Michalska /*
1357c744dc4aSBeata Michalska  * Build up / update the list of CPUs grouped by their capacities.
1358c744dc4aSBeata Michalska  * An update requires an explicit request to rebuild the sched domains,
1359c744dc4aSBeata Michalska  * with state indicating CPU topology changes.
1360c744dc4aSBeata Michalska  */
1361c744dc4aSBeata Michalska static void asym_cpu_capacity_scan(void)
1362c744dc4aSBeata Michalska {
1363c744dc4aSBeata Michalska 	struct asym_cap_data *entry, *next;
1364c744dc4aSBeata Michalska 	int cpu;
1365c744dc4aSBeata Michalska 
1366c744dc4aSBeata Michalska 	list_for_each_entry(entry, &asym_cap_list, link)
1367c744dc4aSBeata Michalska 		cpumask_clear(cpu_capacity_span(entry));
1368c744dc4aSBeata Michalska 
1369*04d4e665SFrederic Weisbecker 	for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN))
1370c744dc4aSBeata Michalska 		asym_cpu_capacity_update_data(cpu);
1371c744dc4aSBeata Michalska 
1372c744dc4aSBeata Michalska 	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
1373c744dc4aSBeata Michalska 		if (cpumask_empty(cpu_capacity_span(entry))) {
1374c744dc4aSBeata Michalska 			list_del(&entry->link);
1375c744dc4aSBeata Michalska 			kfree(entry);
1376c744dc4aSBeata Michalska 		}
1377c744dc4aSBeata Michalska 	}
1378c744dc4aSBeata Michalska 
1379c744dc4aSBeata Michalska 	/*
1380c744dc4aSBeata Michalska 	 * Only one capacity value has been detected, i.e. this system is symmetric.
1381c744dc4aSBeata Michalska 	 * No need to keep this data around.
1382c744dc4aSBeata Michalska 	 */
1383c744dc4aSBeata Michalska 	if (list_is_singular(&asym_cap_list)) {
1384c744dc4aSBeata Michalska 		entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
1385c744dc4aSBeata Michalska 		list_del(&entry->link);
1386c744dc4aSBeata Michalska 		kfree(entry);
1387c744dc4aSBeata Michalska 	}
1388c744dc4aSBeata Michalska }
1389c744dc4aSBeata Michalska 
1390c744dc4aSBeata Michalska /*
1391f2cb1360SIngo Molnar  * Initializers for sched domains
1392f2cb1360SIngo Molnar  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1393f2cb1360SIngo Molnar  */
1394f2cb1360SIngo Molnar 
1395f2cb1360SIngo Molnar static int default_relax_domain_level = -1;
1396f2cb1360SIngo Molnar int sched_domain_level_max;
1397f2cb1360SIngo Molnar 
1398f2cb1360SIngo Molnar static int __init setup_relax_domain_level(char *str)
1399f2cb1360SIngo Molnar {
1400f2cb1360SIngo Molnar 	if (kstrtoint(str, 0, &default_relax_domain_level))
1401f2cb1360SIngo Molnar 		pr_warn("Unable to set relax_domain_level\n");
1402f2cb1360SIngo Molnar 
1403f2cb1360SIngo Molnar 	return 1;
1404f2cb1360SIngo Molnar }
1405f2cb1360SIngo Molnar __setup("relax_domain_level=", setup_relax_domain_level);
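
/*
 * Worked example (editor's note): booting with "relax_domain_level=1" makes
 * set_domain_attribute() below clear SD_BALANCE_WAKE and SD_BALANCE_NEWIDLE
 * on every domain whose level is greater than 1, so idle/wake balancing is
 * only retained at the lowest levels.
 */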
1406f2cb1360SIngo Molnar 
1407f2cb1360SIngo Molnar static void set_domain_attribute(struct sched_domain *sd,
1408f2cb1360SIngo Molnar 				 struct sched_domain_attr *attr)
1409f2cb1360SIngo Molnar {
1410f2cb1360SIngo Molnar 	int request;
1411f2cb1360SIngo Molnar 
1412f2cb1360SIngo Molnar 	if (!attr || attr->relax_domain_level < 0) {
1413f2cb1360SIngo Molnar 		if (default_relax_domain_level < 0)
1414f2cb1360SIngo Molnar 			return;
1415f2cb1360SIngo Molnar 		request = default_relax_domain_level;
1416f2cb1360SIngo Molnar 	} else
1417f2cb1360SIngo Molnar 		request = attr->relax_domain_level;
14189ae7ab20SValentin Schneider 
14199ae7ab20SValentin Schneider 	if (sd->level > request) {
1420f2cb1360SIngo Molnar 		/* Turn off idle balance on this domain: */
1421f2cb1360SIngo Molnar 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1422f2cb1360SIngo Molnar 	}
1423f2cb1360SIngo Molnar }
1424f2cb1360SIngo Molnar 
1425f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map);
1426f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map);
1427f2cb1360SIngo Molnar 
1428f2cb1360SIngo Molnar static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
1429f2cb1360SIngo Molnar 				 const struct cpumask *cpu_map)
1430f2cb1360SIngo Molnar {
1431f2cb1360SIngo Molnar 	switch (what) {
1432f2cb1360SIngo Molnar 	case sa_rootdomain:
1433f2cb1360SIngo Molnar 		if (!atomic_read(&d->rd->refcount))
1434f2cb1360SIngo Molnar 			free_rootdomain(&d->rd->rcu);
1435df561f66SGustavo A. R. Silva 		fallthrough;
1436f2cb1360SIngo Molnar 	case sa_sd:
1437f2cb1360SIngo Molnar 		free_percpu(d->sd);
1438df561f66SGustavo A. R. Silva 		fallthrough;
1439f2cb1360SIngo Molnar 	case sa_sd_storage:
1440f2cb1360SIngo Molnar 		__sdt_free(cpu_map);
1441df561f66SGustavo A. R. Silva 		fallthrough;
1442f2cb1360SIngo Molnar 	case sa_none:
1443f2cb1360SIngo Molnar 		break;
1444f2cb1360SIngo Molnar 	}
1445f2cb1360SIngo Molnar }
1446f2cb1360SIngo Molnar 
1447f2cb1360SIngo Molnar static enum s_alloc
1448f2cb1360SIngo Molnar __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
1449f2cb1360SIngo Molnar {
1450f2cb1360SIngo Molnar 	memset(d, 0, sizeof(*d));
1451f2cb1360SIngo Molnar 
1452f2cb1360SIngo Molnar 	if (__sdt_alloc(cpu_map))
1453f2cb1360SIngo Molnar 		return sa_sd_storage;
1454f2cb1360SIngo Molnar 	d->sd = alloc_percpu(struct sched_domain *);
1455f2cb1360SIngo Molnar 	if (!d->sd)
1456f2cb1360SIngo Molnar 		return sa_sd_storage;
1457f2cb1360SIngo Molnar 	d->rd = alloc_rootdomain();
1458f2cb1360SIngo Molnar 	if (!d->rd)
1459f2cb1360SIngo Molnar 		return sa_sd;
146097fb7a0aSIngo Molnar 
1461f2cb1360SIngo Molnar 	return sa_rootdomain;
1462f2cb1360SIngo Molnar }
1463f2cb1360SIngo Molnar 
1464f2cb1360SIngo Molnar /*
1465f2cb1360SIngo Molnar  * NULL the sd_data elements we've used to build the sched_domain and
1466f2cb1360SIngo Molnar  * sched_group structure so that the subsequent __free_domain_allocs()
1467f2cb1360SIngo Molnar  * will not free the data we're using.
1468f2cb1360SIngo Molnar  */
1469f2cb1360SIngo Molnar static void claim_allocations(int cpu, struct sched_domain *sd)
1470f2cb1360SIngo Molnar {
1471f2cb1360SIngo Molnar 	struct sd_data *sdd = sd->private;
1472f2cb1360SIngo Molnar 
1473f2cb1360SIngo Molnar 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1474f2cb1360SIngo Molnar 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
1475f2cb1360SIngo Molnar 
1476f2cb1360SIngo Molnar 	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
1477f2cb1360SIngo Molnar 		*per_cpu_ptr(sdd->sds, cpu) = NULL;
1478f2cb1360SIngo Molnar 
1479f2cb1360SIngo Molnar 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
1480f2cb1360SIngo Molnar 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
1481f2cb1360SIngo Molnar 
1482f2cb1360SIngo Molnar 	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
1483f2cb1360SIngo Molnar 		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
1484f2cb1360SIngo Molnar }
1485f2cb1360SIngo Molnar 
1486f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1487f2cb1360SIngo Molnar enum numa_topology_type sched_numa_topology_type;
148897fb7a0aSIngo Molnar 
148997fb7a0aSIngo Molnar static int			sched_domains_numa_levels;
1490f2cb1360SIngo Molnar static int			sched_domains_curr_level;
149197fb7a0aSIngo Molnar 
149297fb7a0aSIngo Molnar int				sched_max_numa_distance;
149397fb7a0aSIngo Molnar static int			*sched_domains_numa_distance;
149497fb7a0aSIngo Molnar static struct cpumask		***sched_domains_numa_masks;
1495f2cb1360SIngo Molnar #endif
1496f2cb1360SIngo Molnar 
1497f2cb1360SIngo Molnar /*
1498f2cb1360SIngo Molnar  * SD_flags allowed in topology descriptions.
1499f2cb1360SIngo Molnar  *
1500f2cb1360SIngo Molnar  * These flags are purely descriptive of the topology and do not prescribe
1501f2cb1360SIngo Molnar  * behaviour. Behaviour is artificial and mapped in the below sd_init()
1502f2cb1360SIngo Molnar  * function:
1503f2cb1360SIngo Molnar  *
1504f2cb1360SIngo Molnar  *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
1505f2cb1360SIngo Molnar  *   SD_SHARE_PKG_RESOURCES - describes shared caches
1506f2cb1360SIngo Molnar  *   SD_NUMA                - describes NUMA topologies
1507f2cb1360SIngo Molnar  *
1508f2cb1360SIngo Molnar  * Odd one out: besides describing the topology, it also prescribes the
1509f2cb1360SIngo Molnar  * desired behaviour that goes along with it:
1510f2cb1360SIngo Molnar  *
1511f2cb1360SIngo Molnar  *   SD_ASYM_PACKING        - describes SMT quirks
1512f2cb1360SIngo Molnar  */
1513f2cb1360SIngo Molnar #define TOPOLOGY_SD_FLAGS		\
1514f2cb1360SIngo Molnar 	(SD_SHARE_CPUCAPACITY	|	\
1515f2cb1360SIngo Molnar 	 SD_SHARE_PKG_RESOURCES |	\
1516f2cb1360SIngo Molnar 	 SD_NUMA		|	\
1517cfe7ddcbSValentin Schneider 	 SD_ASYM_PACKING)
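
/*
 * Illustrative sketch (editor's addition): a topology level advertises its
 * descriptive flags through an sd_flags callback; sd_init() below folds the
 * result into sd->flags. A hypothetical SMT-like level sharing both CPU
 * capacity and package resources could provide:
 */
static inline int example_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}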
1518f2cb1360SIngo Molnar 
1519f2cb1360SIngo Molnar static struct sched_domain *
1520f2cb1360SIngo Molnar sd_init(struct sched_domain_topology_level *tl,
1521f2cb1360SIngo Molnar 	const struct cpumask *cpu_map,
1522c744dc4aSBeata Michalska 	struct sched_domain *child, int cpu)
1523f2cb1360SIngo Molnar {
1524f2cb1360SIngo Molnar 	struct sd_data *sdd = &tl->data;
1525f2cb1360SIngo Molnar 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1526f2cb1360SIngo Molnar 	int sd_id, sd_weight, sd_flags = 0;
1527c744dc4aSBeata Michalska 	struct cpumask *sd_span;
1528f2cb1360SIngo Molnar 
1529f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1530f2cb1360SIngo Molnar 	/*
1531f2cb1360SIngo Molnar 	 * Ugly hack to pass state to sd_numa_mask()...
1532f2cb1360SIngo Molnar 	 */
1533f2cb1360SIngo Molnar 	sched_domains_curr_level = tl->numa_level;
1534f2cb1360SIngo Molnar #endif
1535f2cb1360SIngo Molnar 
1536f2cb1360SIngo Molnar 	sd_weight = cpumask_weight(tl->mask(cpu));
1537f2cb1360SIngo Molnar 
1538f2cb1360SIngo Molnar 	if (tl->sd_flags)
1539f2cb1360SIngo Molnar 		sd_flags = (*tl->sd_flags)();
1540f2cb1360SIngo Molnar 	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
1541f2cb1360SIngo Molnar 			"wrong sd_flags in topology description\n"))
15429b1b234bSPeng Liu 		sd_flags &= TOPOLOGY_SD_FLAGS;
1543f2cb1360SIngo Molnar 
1544f2cb1360SIngo Molnar 	*sd = (struct sched_domain){
1545f2cb1360SIngo Molnar 		.min_interval		= sd_weight,
1546f2cb1360SIngo Molnar 		.max_interval		= 2*sd_weight,
15476e749913SVincent Guittot 		.busy_factor		= 16,
15482208cdaaSVincent Guittot 		.imbalance_pct		= 117,
1549f2cb1360SIngo Molnar 
1550f2cb1360SIngo Molnar 		.cache_nice_tries	= 0,
1551f2cb1360SIngo Molnar 
155236c5bdc4SValentin Schneider 		.flags			= 1*SD_BALANCE_NEWIDLE
1553f2cb1360SIngo Molnar 					| 1*SD_BALANCE_EXEC
1554f2cb1360SIngo Molnar 					| 1*SD_BALANCE_FORK
1555f2cb1360SIngo Molnar 					| 0*SD_BALANCE_WAKE
1556f2cb1360SIngo Molnar 					| 1*SD_WAKE_AFFINE
1557f2cb1360SIngo Molnar 					| 0*SD_SHARE_CPUCAPACITY
1558f2cb1360SIngo Molnar 					| 0*SD_SHARE_PKG_RESOURCES
1559f2cb1360SIngo Molnar 					| 0*SD_SERIALIZE
15609c63e84dSMorten Rasmussen 					| 1*SD_PREFER_SIBLING
1561f2cb1360SIngo Molnar 					| 0*SD_NUMA
1562f2cb1360SIngo Molnar 					| sd_flags
1563f2cb1360SIngo Molnar 					,
1564f2cb1360SIngo Molnar 
1565f2cb1360SIngo Molnar 		.last_balance		= jiffies,
1566f2cb1360SIngo Molnar 		.balance_interval	= sd_weight,
1567f2cb1360SIngo Molnar 		.max_newidle_lb_cost	= 0,
1568e60b56e4SVincent Guittot 		.last_decay_max_lb_cost	= jiffies,
1569f2cb1360SIngo Molnar 		.child			= child,
1570f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
1571f2cb1360SIngo Molnar 		.name			= tl->name,
1572f2cb1360SIngo Molnar #endif
1573f2cb1360SIngo Molnar 	};
1574f2cb1360SIngo Molnar 
1575c744dc4aSBeata Michalska 	sd_span = sched_domain_span(sd);
1576c744dc4aSBeata Michalska 	cpumask_and(sd_span, cpu_map, tl->mask(cpu));
1577c744dc4aSBeata Michalska 	sd_id = cpumask_first(sd_span);
1578c744dc4aSBeata Michalska 
1579c744dc4aSBeata Michalska 	sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
1580c744dc4aSBeata Michalska 
1581c744dc4aSBeata Michalska 	WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
1582c744dc4aSBeata Michalska 		  (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
1583c744dc4aSBeata Michalska 		  "CPU capacity asymmetry not supported on SMT\n");
1584f2cb1360SIngo Molnar 
1585f2cb1360SIngo Molnar 	/*
1586f2cb1360SIngo Molnar 	 * Convert topological properties into behaviour.
1587f2cb1360SIngo Molnar 	 */
1588a526d466SMorten Rasmussen 	/* Don't attempt to spread across CPUs of different capacities. */
1589a526d466SMorten Rasmussen 	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
15909c63e84dSMorten Rasmussen 		sd->child->flags &= ~SD_PREFER_SIBLING;
15919c63e84dSMorten Rasmussen 
1592f2cb1360SIngo Molnar 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
1593f2cb1360SIngo Molnar 		sd->imbalance_pct = 110;
1594f2cb1360SIngo Molnar 
1595f2cb1360SIngo Molnar 	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1596f2cb1360SIngo Molnar 		sd->imbalance_pct = 117;
1597f2cb1360SIngo Molnar 		sd->cache_nice_tries = 1;
1598f2cb1360SIngo Molnar 
1599f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1600f2cb1360SIngo Molnar 	} else if (sd->flags & SD_NUMA) {
1601f2cb1360SIngo Molnar 		sd->cache_nice_tries = 2;
1602f2cb1360SIngo Molnar 
16039c63e84dSMorten Rasmussen 		sd->flags &= ~SD_PREFER_SIBLING;
1604f2cb1360SIngo Molnar 		sd->flags |= SD_SERIALIZE;
1605a55c7454SMatt Fleming 		if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
1606f2cb1360SIngo Molnar 			sd->flags &= ~(SD_BALANCE_EXEC |
1607f2cb1360SIngo Molnar 				       SD_BALANCE_FORK |
1608f2cb1360SIngo Molnar 				       SD_WAKE_AFFINE);
1609f2cb1360SIngo Molnar 		}
1610f2cb1360SIngo Molnar 
1611f2cb1360SIngo Molnar #endif
1612f2cb1360SIngo Molnar 	} else {
1613f2cb1360SIngo Molnar 		sd->cache_nice_tries = 1;
1614f2cb1360SIngo Molnar 	}
1615f2cb1360SIngo Molnar 
1616f2cb1360SIngo Molnar 	/*
1617f2cb1360SIngo Molnar 	 * For all levels sharing cache, connect a sched_domain_shared
1618f2cb1360SIngo Molnar 	 * instance.
1619f2cb1360SIngo Molnar 	 */
1620f2cb1360SIngo Molnar 	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1621f2cb1360SIngo Molnar 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1622f2cb1360SIngo Molnar 		atomic_inc(&sd->shared->ref);
1623f2cb1360SIngo Molnar 		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1624f2cb1360SIngo Molnar 	}
1625f2cb1360SIngo Molnar 
1626f2cb1360SIngo Molnar 	sd->private = sdd;
1627f2cb1360SIngo Molnar 
1628f2cb1360SIngo Molnar 	return sd;
1629f2cb1360SIngo Molnar }
1630f2cb1360SIngo Molnar 
1631f2cb1360SIngo Molnar /*
1632f2cb1360SIngo Molnar  * Topology list, bottom-up.
1633f2cb1360SIngo Molnar  */
1634f2cb1360SIngo Molnar static struct sched_domain_topology_level default_topology[] = {
1635f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_SMT
1636f2cb1360SIngo Molnar 	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
1637f2cb1360SIngo Molnar #endif
1638778c558fSBarry Song 
1639778c558fSBarry Song #ifdef CONFIG_SCHED_CLUSTER
1640778c558fSBarry Song 	{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
1641778c558fSBarry Song #endif
1642778c558fSBarry Song 
1643f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_MC
1644f2cb1360SIngo Molnar 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
1645f2cb1360SIngo Molnar #endif
1646f2cb1360SIngo Molnar 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1647f2cb1360SIngo Molnar 	{ NULL, },
1648f2cb1360SIngo Molnar };
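
/*
 * Illustrative sketch (editor's addition): architectures may install their
 * own table with set_sched_topology() before SMP bring-up; the table below
 * is hypothetical, e.g. for a platform without SMT:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */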
1649f2cb1360SIngo Molnar 
1650f2cb1360SIngo Molnar static struct sched_domain_topology_level *sched_domain_topology =
1651f2cb1360SIngo Molnar 	default_topology;
16520fb3978bSHuang Ying static struct sched_domain_topology_level *sched_domain_topology_saved;
1653f2cb1360SIngo Molnar 
1654f2cb1360SIngo Molnar #define for_each_sd_topology(tl)			\
1655f2cb1360SIngo Molnar 	for (tl = sched_domain_topology; tl->mask; tl++)
1656f2cb1360SIngo Molnar 
1657f2cb1360SIngo Molnar void set_sched_topology(struct sched_domain_topology_level *tl)
1658f2cb1360SIngo Molnar {
1659f2cb1360SIngo Molnar 	if (WARN_ON_ONCE(sched_smp_initialized))
1660f2cb1360SIngo Molnar 		return;
1661f2cb1360SIngo Molnar 
1662f2cb1360SIngo Molnar 	sched_domain_topology = tl;
16630fb3978bSHuang Ying 	sched_domain_topology_saved = NULL;
1664f2cb1360SIngo Molnar }
1665f2cb1360SIngo Molnar 
1666f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1667f2cb1360SIngo Molnar 
1668f2cb1360SIngo Molnar static const struct cpumask *sd_numa_mask(int cpu)
1669f2cb1360SIngo Molnar {
1670f2cb1360SIngo Molnar 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1671f2cb1360SIngo Molnar }
1672f2cb1360SIngo Molnar 
1673f2cb1360SIngo Molnar static void sched_numa_warn(const char *str)
1674f2cb1360SIngo Molnar {
1675f2cb1360SIngo Molnar 	static int done = false;
1676f2cb1360SIngo Molnar 	int i,j;
1677f2cb1360SIngo Molnar 
1678f2cb1360SIngo Molnar 	if (done)
1679f2cb1360SIngo Molnar 		return;
1680f2cb1360SIngo Molnar 
1681f2cb1360SIngo Molnar 	done = true;
1682f2cb1360SIngo Molnar 
1683f2cb1360SIngo Molnar 	printk(KERN_WARNING "ERROR: %s\n\n", str);
1684f2cb1360SIngo Molnar 
1685f2cb1360SIngo Molnar 	for (i = 0; i < nr_node_ids; i++) {
1686f2cb1360SIngo Molnar 		printk(KERN_WARNING "  ");
16870fb3978bSHuang Ying 		for (j = 0; j < nr_node_ids; j++) {
16880fb3978bSHuang Ying 			if (!node_state(i, N_CPU) || !node_state(j, N_CPU))
16890fb3978bSHuang Ying 				printk(KERN_CONT "(%02d) ", node_distance(i,j));
16900fb3978bSHuang Ying 			else
1691f2cb1360SIngo Molnar 				printk(KERN_CONT " %02d  ", node_distance(i,j));
16920fb3978bSHuang Ying 		}
1693f2cb1360SIngo Molnar 		printk(KERN_CONT "\n");
1694f2cb1360SIngo Molnar 	}
1695f2cb1360SIngo Molnar 	printk(KERN_WARNING "\n");
1696f2cb1360SIngo Molnar }
1697f2cb1360SIngo Molnar 
1698f2cb1360SIngo Molnar bool find_numa_distance(int distance)
1699f2cb1360SIngo Molnar {
17000fb3978bSHuang Ying 	bool found = false;
17010fb3978bSHuang Ying 	int i, *distances;
1702f2cb1360SIngo Molnar 
1703f2cb1360SIngo Molnar 	if (distance == node_distance(0, 0))
1704f2cb1360SIngo Molnar 		return true;
1705f2cb1360SIngo Molnar 
17060fb3978bSHuang Ying 	rcu_read_lock();
17070fb3978bSHuang Ying 	distances = rcu_dereference(sched_domains_numa_distance);
17080fb3978bSHuang Ying 	if (!distances)
17090fb3978bSHuang Ying 		goto unlock;
1710f2cb1360SIngo Molnar 	for (i = 0; i < sched_domains_numa_levels; i++) {
17110fb3978bSHuang Ying 		if (distances[i] == distance) {
17120fb3978bSHuang Ying 			found = true;
17130fb3978bSHuang Ying 			break;
17140fb3978bSHuang Ying 		}
17150fb3978bSHuang Ying 	}
17160fb3978bSHuang Ying unlock:
17170fb3978bSHuang Ying 	rcu_read_unlock();
17180fb3978bSHuang Ying 
17190fb3978bSHuang Ying 	return found;
1720f2cb1360SIngo Molnar }
1721f2cb1360SIngo Molnar 
17220fb3978bSHuang Ying #define for_each_cpu_node_but(n, nbut)		\
17230fb3978bSHuang Ying 	for_each_node_state(n, N_CPU)		\
17240fb3978bSHuang Ying 		if (n == nbut)			\
17250fb3978bSHuang Ying 			continue;		\
17260fb3978bSHuang Ying 		else
1727f2cb1360SIngo Molnar 
1728f2cb1360SIngo Molnar /*
1729f2cb1360SIngo Molnar  * A system can have three types of NUMA topology:
1730f2cb1360SIngo Molnar  * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
1731f2cb1360SIngo Molnar  * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
1732f2cb1360SIngo Molnar  * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
1733f2cb1360SIngo Molnar  *
1734f2cb1360SIngo Molnar  * The difference between a glueless mesh topology and a backplane
1735f2cb1360SIngo Molnar  * topology lies in whether communication between not directly
1736f2cb1360SIngo Molnar  * connected nodes goes through intermediary nodes (where programs
1737f2cb1360SIngo Molnar  * could run), or through backplane controllers. This affects
1738f2cb1360SIngo Molnar  * placement of programs.
1739f2cb1360SIngo Molnar  *
1740f2cb1360SIngo Molnar  * The type of topology can be discerned with the following tests:
1741f2cb1360SIngo Molnar  * - If the maximum distance between any nodes is 1 hop, the system
1742f2cb1360SIngo Molnar  *   is directly connected.
1743f2cb1360SIngo Molnar  * - If for two nodes A and B, located N > 1 hops away from each other,
1744f2cb1360SIngo Molnar  *   there is an intermediary node C, which is < N hops away from both
1745f2cb1360SIngo Molnar  *   nodes A and B, the system is a glueless mesh.
1746f2cb1360SIngo Molnar  */
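/*
 * Worked example (editor's addition): with the diameter=3 ring used in the
 * overlap comment further up,
 *
 *   node   0   1   2   3
 *     0:  10  20  30  40
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  40  30  20  10
 *
 * nodes 0 and 3 are the furthest apart (distance 40), and node 1 (or 2) is
 * closer than 40 to both of them, so the function below classifies this as
 * NUMA_GLUELESS_MESH. Without such an intermediary CPU node it would fall
 * through to NUMA_BACKPLANE.
 */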
17470fb3978bSHuang Ying static void init_numa_topology_type(int offline_node)
1748f2cb1360SIngo Molnar {
1749f2cb1360SIngo Molnar 	int a, b, c, n;
1750f2cb1360SIngo Molnar 
1751f2cb1360SIngo Molnar 	n = sched_max_numa_distance;
1752f2cb1360SIngo Molnar 
1753e5e96fafSSrikar Dronamraju 	if (sched_domains_numa_levels <= 2) {
1754f2cb1360SIngo Molnar 		sched_numa_topology_type = NUMA_DIRECT;
1755f2cb1360SIngo Molnar 		return;
1756f2cb1360SIngo Molnar 	}
1757f2cb1360SIngo Molnar 
17580fb3978bSHuang Ying 	for_each_cpu_node_but(a, offline_node) {
17590fb3978bSHuang Ying 		for_each_cpu_node_but(b, offline_node) {
1760f2cb1360SIngo Molnar 			/* Find two nodes furthest removed from each other. */
1761f2cb1360SIngo Molnar 			if (node_distance(a, b) < n)
1762f2cb1360SIngo Molnar 				continue;
1763f2cb1360SIngo Molnar 
1764f2cb1360SIngo Molnar 			/* Is there an intermediary node between a and b? */
17650fb3978bSHuang Ying 			for_each_cpu_node_but(c, offline_node) {
1766f2cb1360SIngo Molnar 				if (node_distance(a, c) < n &&
1767f2cb1360SIngo Molnar 				    node_distance(b, c) < n) {
1768f2cb1360SIngo Molnar 					sched_numa_topology_type =
1769f2cb1360SIngo Molnar 							NUMA_GLUELESS_MESH;
1770f2cb1360SIngo Molnar 					return;
1771f2cb1360SIngo Molnar 				}
1772f2cb1360SIngo Molnar 			}
1773f2cb1360SIngo Molnar 
1774f2cb1360SIngo Molnar 			sched_numa_topology_type = NUMA_BACKPLANE;
1775f2cb1360SIngo Molnar 			return;
1776f2cb1360SIngo Molnar 		}
1777f2cb1360SIngo Molnar 	}
17780fb3978bSHuang Ying 
17790fb3978bSHuang Ying 	pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n");
17800fb3978bSHuang Ying 	sched_numa_topology_type = NUMA_DIRECT;
1781f2cb1360SIngo Molnar }
1782f2cb1360SIngo Molnar 
1783620a6dc4SValentin Schneider 
1784620a6dc4SValentin Schneider #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
1785620a6dc4SValentin Schneider 
17860fb3978bSHuang Ying void sched_init_numa(int offline_node)
1787f2cb1360SIngo Molnar {
1788f2cb1360SIngo Molnar 	struct sched_domain_topology_level *tl;
1789620a6dc4SValentin Schneider 	unsigned long *distance_map;
1790620a6dc4SValentin Schneider 	int nr_levels = 0;
1791620a6dc4SValentin Schneider 	int i, j;
17920fb3978bSHuang Ying 	int *distances;
17930fb3978bSHuang Ying 	struct cpumask ***masks;
1794051f3ca0SSuravee Suthikulpanit 
1795f2cb1360SIngo Molnar 	/*
1796f2cb1360SIngo Molnar 	 * O(nr_nodes^2) pass over the node_distance() table, collecting the
1797f2cb1360SIngo Molnar 	 * set of unique distances into a bitmap.
1798f2cb1360SIngo Molnar 	 */
1799620a6dc4SValentin Schneider 	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
1800620a6dc4SValentin Schneider 	if (!distance_map)
1801620a6dc4SValentin Schneider 		return;
1802620a6dc4SValentin Schneider 
1803620a6dc4SValentin Schneider 	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
18040fb3978bSHuang Ying 	for_each_cpu_node_but(i, offline_node) {
18050fb3978bSHuang Ying 		for_each_cpu_node_but(j, offline_node) {
1806620a6dc4SValentin Schneider 			int distance = node_distance(i, j);
1807f2cb1360SIngo Molnar 
1808620a6dc4SValentin Schneider 			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
1809620a6dc4SValentin Schneider 				sched_numa_warn("Invalid distance value range");
18100fb3978bSHuang Ying 				bitmap_free(distance_map);
1811620a6dc4SValentin Schneider 				return;
1812620a6dc4SValentin Schneider 			}
1813f2cb1360SIngo Molnar 
1814620a6dc4SValentin Schneider 			bitmap_set(distance_map, distance, 1);
1815620a6dc4SValentin Schneider 		}
1816620a6dc4SValentin Schneider 	}
1817f2cb1360SIngo Molnar 	/*
1818620a6dc4SValentin Schneider 	 * We can now figure out how many unique distance values there are and
1819620a6dc4SValentin Schneider 	 * allocate memory accordingly.
1820f2cb1360SIngo Molnar 	 */
1821620a6dc4SValentin Schneider 	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
1822f2cb1360SIngo Molnar 
18230fb3978bSHuang Ying 	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
18240fb3978bSHuang Ying 	if (!distances) {
1825620a6dc4SValentin Schneider 		bitmap_free(distance_map);
1826620a6dc4SValentin Schneider 		return;
1827f2cb1360SIngo Molnar 	}
1828620a6dc4SValentin Schneider 
1829620a6dc4SValentin Schneider 	for (i = 0, j = 0; i < nr_levels; i++, j++) {
1830620a6dc4SValentin Schneider 		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
18310fb3978bSHuang Ying 		distances[i] = j;
1832f2cb1360SIngo Molnar 	}
18330fb3978bSHuang Ying 	rcu_assign_pointer(sched_domains_numa_distance, distances);
1834f2cb1360SIngo Molnar 
1835620a6dc4SValentin Schneider 	bitmap_free(distance_map);
1836620a6dc4SValentin Schneider 
1837f2cb1360SIngo Molnar 	/*
1838620a6dc4SValentin Schneider 	 * 'nr_levels' contains the number of unique distances
1839f2cb1360SIngo Molnar 	 *
1840f2cb1360SIngo Molnar 	 * The sched_domains_numa_distance[] array includes the actual distance
1841f2cb1360SIngo Molnar 	 * numbers.
1842f2cb1360SIngo Molnar 	 */
1843f2cb1360SIngo Molnar 
1844f2cb1360SIngo Molnar 	/*
1845f2cb1360SIngo Molnar 	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
1846f2cb1360SIngo Molnar 	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
1847620a6dc4SValentin Schneider 	 * the array will contain fewer than 'nr_levels' members. This could be
1848f2cb1360SIngo Molnar 	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
1849f2cb1360SIngo Molnar 	 * in other functions.
1850f2cb1360SIngo Molnar 	 *
1851620a6dc4SValentin Schneider 	 * We reset it to 'nr_levels' at the end of this function.
1852f2cb1360SIngo Molnar 	 */
1853f2cb1360SIngo Molnar 	sched_domains_numa_levels = 0;
1854f2cb1360SIngo Molnar 
18550fb3978bSHuang Ying 	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
18560fb3978bSHuang Ying 	if (!masks)
1857f2cb1360SIngo Molnar 		return;
1858f2cb1360SIngo Molnar 
1859f2cb1360SIngo Molnar 	/*
1860f2cb1360SIngo Molnar 	 * Now for each level, construct a mask per node which contains all
1861f2cb1360SIngo Molnar 	 * CPUs of nodes that are that many hops away from us.
1862f2cb1360SIngo Molnar 	 */
1863620a6dc4SValentin Schneider 	for (i = 0; i < nr_levels; i++) {
18640fb3978bSHuang Ying 		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
18650fb3978bSHuang Ying 		if (!masks[i])
1866f2cb1360SIngo Molnar 			return;
1867f2cb1360SIngo Molnar 
18680fb3978bSHuang Ying 		for_each_cpu_node_but(j, offline_node) {
1869f2cb1360SIngo Molnar 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
1870620a6dc4SValentin Schneider 			int k;
1871620a6dc4SValentin Schneider 
1872f2cb1360SIngo Molnar 			if (!mask)
1873f2cb1360SIngo Molnar 				return;
1874f2cb1360SIngo Molnar 
18750fb3978bSHuang Ying 			masks[i][j] = mask;
1876f2cb1360SIngo Molnar 
18770fb3978bSHuang Ying 			for_each_cpu_node_but(k, offline_node) {
1878620a6dc4SValentin Schneider 				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
1879620a6dc4SValentin Schneider 					sched_numa_warn("Node-distance not symmetric");
1880620a6dc4SValentin Schneider 
1881f2cb1360SIngo Molnar 				if (node_distance(j, k) > sched_domains_numa_distance[i])
1882f2cb1360SIngo Molnar 					continue;
1883f2cb1360SIngo Molnar 
1884f2cb1360SIngo Molnar 				cpumask_or(mask, mask, cpumask_of_node(k));
1885f2cb1360SIngo Molnar 			}
1886f2cb1360SIngo Molnar 		}
1887f2cb1360SIngo Molnar 	}
18880fb3978bSHuang Ying 	rcu_assign_pointer(sched_domains_numa_masks, masks);
1889f2cb1360SIngo Molnar 
1890f2cb1360SIngo Molnar 	/* Compute default topology size */
1891f2cb1360SIngo Molnar 	for (i = 0; sched_domain_topology[i].mask; i++);
1892f2cb1360SIngo Molnar 
189371e5f664SDietmar Eggemann 	tl = kzalloc((i + nr_levels + 1) *
1894f2cb1360SIngo Molnar 			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
1895f2cb1360SIngo Molnar 	if (!tl)
1896f2cb1360SIngo Molnar 		return;
1897f2cb1360SIngo Molnar 
1898f2cb1360SIngo Molnar 	/*
1899f2cb1360SIngo Molnar 	 * Copy the default topology bits..
1900f2cb1360SIngo Molnar 	 */
1901f2cb1360SIngo Molnar 	for (i = 0; sched_domain_topology[i].mask; i++)
1902f2cb1360SIngo Molnar 		tl[i] = sched_domain_topology[i];
1903f2cb1360SIngo Molnar 
1904f2cb1360SIngo Molnar 	/*
1905051f3ca0SSuravee Suthikulpanit 	 * Add the NUMA identity distance, aka single NODE.
1906051f3ca0SSuravee Suthikulpanit 	 */
1907051f3ca0SSuravee Suthikulpanit 	tl[i++] = (struct sched_domain_topology_level){
1908051f3ca0SSuravee Suthikulpanit 		.mask = sd_numa_mask,
1909051f3ca0SSuravee Suthikulpanit 		.numa_level = 0,
1910051f3ca0SSuravee Suthikulpanit 		SD_INIT_NAME(NODE)
1911051f3ca0SSuravee Suthikulpanit 	};
1912051f3ca0SSuravee Suthikulpanit 
1913051f3ca0SSuravee Suthikulpanit 	/*
1914f2cb1360SIngo Molnar 	 * .. and append 'j' levels of NUMA goodness.
1915f2cb1360SIngo Molnar 	 */
1916620a6dc4SValentin Schneider 	for (j = 1; j < nr_levels; i++, j++) {
1917f2cb1360SIngo Molnar 		tl[i] = (struct sched_domain_topology_level){
1918f2cb1360SIngo Molnar 			.mask = sd_numa_mask,
1919f2cb1360SIngo Molnar 			.sd_flags = cpu_numa_flags,
1920f2cb1360SIngo Molnar 			.flags = SDTL_OVERLAP,
1921f2cb1360SIngo Molnar 			.numa_level = j,
1922f2cb1360SIngo Molnar 			SD_INIT_NAME(NUMA)
1923f2cb1360SIngo Molnar 		};
1924f2cb1360SIngo Molnar 	}
1925f2cb1360SIngo Molnar 
19260fb3978bSHuang Ying 	sched_domain_topology_saved = sched_domain_topology;
1927f2cb1360SIngo Molnar 	sched_domain_topology = tl;
1928f2cb1360SIngo Molnar 
1929620a6dc4SValentin Schneider 	sched_domains_numa_levels = nr_levels;
19300fb3978bSHuang Ying 	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
1931f2cb1360SIngo Molnar 
19320fb3978bSHuang Ying 	init_numa_topology_type(offline_node);
19330083242cSValentin Schneider }
19340083242cSValentin Schneider 
19350fb3978bSHuang Ying 
19360fb3978bSHuang Ying static void sched_reset_numa(void)
19370083242cSValentin Schneider {
19380fb3978bSHuang Ying 	int nr_levels, *distances;
19390fb3978bSHuang Ying 	struct cpumask ***masks;
19400fb3978bSHuang Ying 
19410fb3978bSHuang Ying 	nr_levels = sched_domains_numa_levels;
19420fb3978bSHuang Ying 	sched_domains_numa_levels = 0;
19430fb3978bSHuang Ying 	sched_max_numa_distance = 0;
19440fb3978bSHuang Ying 	sched_numa_topology_type = NUMA_DIRECT;
19450fb3978bSHuang Ying 	distances = sched_domains_numa_distance;
19460fb3978bSHuang Ying 	rcu_assign_pointer(sched_domains_numa_distance, NULL);
19470fb3978bSHuang Ying 	masks = sched_domains_numa_masks;
19480fb3978bSHuang Ying 	rcu_assign_pointer(sched_domains_numa_masks, NULL);
19490fb3978bSHuang Ying 	if (distances || masks) {
19500083242cSValentin Schneider 		int i, j;
19510083242cSValentin Schneider 
19520fb3978bSHuang Ying 		synchronize_rcu();
19530fb3978bSHuang Ying 		kfree(distances);
19540fb3978bSHuang Ying 		for (i = 0; i < nr_levels && masks; i++) {
19550fb3978bSHuang Ying 			if (!masks[i])
19560fb3978bSHuang Ying 				continue;
19570fb3978bSHuang Ying 			for_each_node(j)
19580fb3978bSHuang Ying 				kfree(masks[i][j]);
19590fb3978bSHuang Ying 			kfree(masks[i]);
19600fb3978bSHuang Ying 		}
19610fb3978bSHuang Ying 		kfree(masks);
19620fb3978bSHuang Ying 	}
19630fb3978bSHuang Ying 	if (sched_domain_topology_saved) {
19640fb3978bSHuang Ying 		kfree(sched_domain_topology);
19650fb3978bSHuang Ying 		sched_domain_topology = sched_domain_topology_saved;
19660fb3978bSHuang Ying 		sched_domain_topology_saved = NULL;
19670fb3978bSHuang Ying 	}
19680fb3978bSHuang Ying }
19690fb3978bSHuang Ying 
19700083242cSValentin Schneider /*
19710fb3978bSHuang Ying  * Call with hotplug lock held
19720083242cSValentin Schneider  */
19730fb3978bSHuang Ying void sched_update_numa(int cpu, bool online)
19740fb3978bSHuang Ying {
19750fb3978bSHuang Ying 	int node;
19760fb3978bSHuang Ying 
19770fb3978bSHuang Ying 	node = cpu_to_node(cpu);
19780fb3978bSHuang Ying 	/*
19790fb3978bSHuang Ying 	 * Scheduler NUMA topology is updated when the first CPU of a
19800fb3978bSHuang Ying 	 * node is onlined or the last CPU of a node is offlined.
19810fb3978bSHuang Ying 	 */
19820fb3978bSHuang Ying 	if (cpumask_weight(cpumask_of_node(node)) != 1)
19830083242cSValentin Schneider 		return;
19840083242cSValentin Schneider 
19850fb3978bSHuang Ying 	sched_reset_numa();
19860fb3978bSHuang Ying 	sched_init_numa(online ? NUMA_NO_NODE : node);
1987f2cb1360SIngo Molnar }
1988f2cb1360SIngo Molnar 
1989f2cb1360SIngo Molnar void sched_domains_numa_masks_set(unsigned int cpu)
1990f2cb1360SIngo Molnar {
1991f2cb1360SIngo Molnar 	int node = cpu_to_node(cpu);
1992f2cb1360SIngo Molnar 	int i, j;
1993f2cb1360SIngo Molnar 
1994f2cb1360SIngo Molnar 	for (i = 0; i < sched_domains_numa_levels; i++) {
1995f2cb1360SIngo Molnar 		for (j = 0; j < nr_node_ids; j++) {
19960fb3978bSHuang Ying 			if (!node_state(j, N_CPU))
19970083242cSValentin Schneider 				continue;
19980083242cSValentin Schneider 
19990083242cSValentin Schneider 			/* Set ourselves in the remote node's masks */
2000f2cb1360SIngo Molnar 			if (node_distance(j, node) <= sched_domains_numa_distance[i])
2001f2cb1360SIngo Molnar 				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
2002f2cb1360SIngo Molnar 		}
2003f2cb1360SIngo Molnar 	}
2004f2cb1360SIngo Molnar }
2005f2cb1360SIngo Molnar 
2006f2cb1360SIngo Molnar void sched_domains_numa_masks_clear(unsigned int cpu)
2007f2cb1360SIngo Molnar {
2008f2cb1360SIngo Molnar 	int i, j;
2009f2cb1360SIngo Molnar 
2010f2cb1360SIngo Molnar 	for (i = 0; i < sched_domains_numa_levels; i++) {
20110fb3978bSHuang Ying 		for (j = 0; j < nr_node_ids; j++) {
20120fb3978bSHuang Ying 			if (sched_domains_numa_masks[i][j])
2013f2cb1360SIngo Molnar 				cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
2014f2cb1360SIngo Molnar 		}
2015f2cb1360SIngo Molnar 	}
20160fb3978bSHuang Ying }
2017f2cb1360SIngo Molnar 
2018e0e8d491SWanpeng Li /*
2019e0e8d491SWanpeng Li  * sched_numa_find_closest() - given the NUMA topology, find the cpu
2020e0e8d491SWanpeng Li  *                             closest to @cpu from @cpus.
2021e0e8d491SWanpeng Li  * cpus: cpumask to find a cpu from
2022e0e8d491SWanpeng Li  * cpu: cpu to be close to
2023e0e8d491SWanpeng Li  *
2024e0e8d491SWanpeng Li  * returns: cpu, or nr_cpu_ids when nothing found.
2025e0e8d491SWanpeng Li  */
2026e0e8d491SWanpeng Li int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
2027e0e8d491SWanpeng Li {
20280fb3978bSHuang Ying 	int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
20290fb3978bSHuang Ying 	struct cpumask ***masks;
2030e0e8d491SWanpeng Li 
20310fb3978bSHuang Ying 	rcu_read_lock();
20320fb3978bSHuang Ying 	masks = rcu_dereference(sched_domains_numa_masks);
20330fb3978bSHuang Ying 	if (!masks)
20340fb3978bSHuang Ying 		goto unlock;
2035e0e8d491SWanpeng Li 	for (i = 0; i < sched_domains_numa_levels; i++) {
20360fb3978bSHuang Ying 		if (!masks[i][j])
20370fb3978bSHuang Ying 			break;
20380fb3978bSHuang Ying 		cpu = cpumask_any_and(cpus, masks[i][j]);
20390fb3978bSHuang Ying 		if (cpu < nr_cpu_ids) {
20400fb3978bSHuang Ying 			found = cpu;
20410fb3978bSHuang Ying 			break;
2042e0e8d491SWanpeng Li 		}
20430fb3978bSHuang Ying 	}
20440fb3978bSHuang Ying unlock:
20450fb3978bSHuang Ying 	rcu_read_unlock();
20460fb3978bSHuang Ying 
20470fb3978bSHuang Ying 	return found;
2048e0e8d491SWanpeng Li }
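
/*
 * Usage note (editor's addition): callers typically pass a mask of candidate
 * CPUs and fall back to an arbitrary one when nr_cpu_ids is returned, e.g.
 * (prefer_cpu is a hypothetical variable):
 *
 *	cpu = sched_numa_find_closest(cpu_online_mask, prefer_cpu);
 *	if (cpu >= nr_cpu_ids)
 *		cpu = cpumask_any(cpu_online_mask);
 */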
2049e0e8d491SWanpeng Li 
2050f2cb1360SIngo Molnar #endif /* CONFIG_NUMA */
2051f2cb1360SIngo Molnar 
2052f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map)
2053f2cb1360SIngo Molnar {
2054f2cb1360SIngo Molnar 	struct sched_domain_topology_level *tl;
2055f2cb1360SIngo Molnar 	int j;
2056f2cb1360SIngo Molnar 
2057f2cb1360SIngo Molnar 	for_each_sd_topology(tl) {
2058f2cb1360SIngo Molnar 		struct sd_data *sdd = &tl->data;
2059f2cb1360SIngo Molnar 
2060f2cb1360SIngo Molnar 		sdd->sd = alloc_percpu(struct sched_domain *);
2061f2cb1360SIngo Molnar 		if (!sdd->sd)
2062f2cb1360SIngo Molnar 			return -ENOMEM;
2063f2cb1360SIngo Molnar 
2064f2cb1360SIngo Molnar 		sdd->sds = alloc_percpu(struct sched_domain_shared *);
2065f2cb1360SIngo Molnar 		if (!sdd->sds)
2066f2cb1360SIngo Molnar 			return -ENOMEM;
2067f2cb1360SIngo Molnar 
2068f2cb1360SIngo Molnar 		sdd->sg = alloc_percpu(struct sched_group *);
2069f2cb1360SIngo Molnar 		if (!sdd->sg)
2070f2cb1360SIngo Molnar 			return -ENOMEM;
2071f2cb1360SIngo Molnar 
2072f2cb1360SIngo Molnar 		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
2073f2cb1360SIngo Molnar 		if (!sdd->sgc)
2074f2cb1360SIngo Molnar 			return -ENOMEM;
2075f2cb1360SIngo Molnar 
2076f2cb1360SIngo Molnar 		for_each_cpu(j, cpu_map) {
2077f2cb1360SIngo Molnar 			struct sched_domain *sd;
2078f2cb1360SIngo Molnar 			struct sched_domain_shared *sds;
2079f2cb1360SIngo Molnar 			struct sched_group *sg;
2080f2cb1360SIngo Molnar 			struct sched_group_capacity *sgc;
2081f2cb1360SIngo Molnar 
2082f2cb1360SIngo Molnar 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
2083f2cb1360SIngo Molnar 					GFP_KERNEL, cpu_to_node(j));
2084f2cb1360SIngo Molnar 			if (!sd)
2085f2cb1360SIngo Molnar 				return -ENOMEM;
2086f2cb1360SIngo Molnar 
2087f2cb1360SIngo Molnar 			*per_cpu_ptr(sdd->sd, j) = sd;
2088f2cb1360SIngo Molnar 
2089f2cb1360SIngo Molnar 			sds = kzalloc_node(sizeof(struct sched_domain_shared),
2090f2cb1360SIngo Molnar 					GFP_KERNEL, cpu_to_node(j));
2091f2cb1360SIngo Molnar 			if (!sds)
2092f2cb1360SIngo Molnar 				return -ENOMEM;
2093f2cb1360SIngo Molnar 
2094f2cb1360SIngo Molnar 			*per_cpu_ptr(sdd->sds, j) = sds;
2095f2cb1360SIngo Molnar 
2096f2cb1360SIngo Molnar 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
2097f2cb1360SIngo Molnar 					GFP_KERNEL, cpu_to_node(j));
2098f2cb1360SIngo Molnar 			if (!sg)
2099f2cb1360SIngo Molnar 				return -ENOMEM;
2100f2cb1360SIngo Molnar 
2101f2cb1360SIngo Molnar 			sg->next = sg;
2102f2cb1360SIngo Molnar 
2103f2cb1360SIngo Molnar 			*per_cpu_ptr(sdd->sg, j) = sg;
2104f2cb1360SIngo Molnar 
2105f2cb1360SIngo Molnar 			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
2106f2cb1360SIngo Molnar 					GFP_KERNEL, cpu_to_node(j));
2107f2cb1360SIngo Molnar 			if (!sgc)
2108f2cb1360SIngo Molnar 				return -ENOMEM;
2109f2cb1360SIngo Molnar 
2110005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
2111005f874dSPeter Zijlstra 			sgc->id = j;
2112005f874dSPeter Zijlstra #endif
2113005f874dSPeter Zijlstra 
2114f2cb1360SIngo Molnar 			*per_cpu_ptr(sdd->sgc, j) = sgc;
2115f2cb1360SIngo Molnar 		}
2116f2cb1360SIngo Molnar 	}
2117f2cb1360SIngo Molnar 
2118f2cb1360SIngo Molnar 	return 0;
2119f2cb1360SIngo Molnar }
2120f2cb1360SIngo Molnar 
2121f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map)
2122f2cb1360SIngo Molnar {
2123f2cb1360SIngo Molnar 	struct sched_domain_topology_level *tl;
2124f2cb1360SIngo Molnar 	int j;
2125f2cb1360SIngo Molnar 
2126f2cb1360SIngo Molnar 	for_each_sd_topology(tl) {
2127f2cb1360SIngo Molnar 		struct sd_data *sdd = &tl->data;
2128f2cb1360SIngo Molnar 
2129f2cb1360SIngo Molnar 		for_each_cpu(j, cpu_map) {
2130f2cb1360SIngo Molnar 			struct sched_domain *sd;
2131f2cb1360SIngo Molnar 
2132f2cb1360SIngo Molnar 			if (sdd->sd) {
2133f2cb1360SIngo Molnar 				sd = *per_cpu_ptr(sdd->sd, j);
2134f2cb1360SIngo Molnar 				if (sd && (sd->flags & SD_OVERLAP))
2135f2cb1360SIngo Molnar 					free_sched_groups(sd->groups, 0);
2136f2cb1360SIngo Molnar 				kfree(*per_cpu_ptr(sdd->sd, j));
2137f2cb1360SIngo Molnar 			}
2138f2cb1360SIngo Molnar 
2139f2cb1360SIngo Molnar 			if (sdd->sds)
2140f2cb1360SIngo Molnar 				kfree(*per_cpu_ptr(sdd->sds, j));
2141f2cb1360SIngo Molnar 			if (sdd->sg)
2142f2cb1360SIngo Molnar 				kfree(*per_cpu_ptr(sdd->sg, j));
2143f2cb1360SIngo Molnar 			if (sdd->sgc)
2144f2cb1360SIngo Molnar 				kfree(*per_cpu_ptr(sdd->sgc, j));
2145f2cb1360SIngo Molnar 		}
2146f2cb1360SIngo Molnar 		free_percpu(sdd->sd);
2147f2cb1360SIngo Molnar 		sdd->sd = NULL;
2148f2cb1360SIngo Molnar 		free_percpu(sdd->sds);
2149f2cb1360SIngo Molnar 		sdd->sds = NULL;
2150f2cb1360SIngo Molnar 		free_percpu(sdd->sg);
2151f2cb1360SIngo Molnar 		sdd->sg = NULL;
2152f2cb1360SIngo Molnar 		free_percpu(sdd->sgc);
2153f2cb1360SIngo Molnar 		sdd->sgc = NULL;
2154f2cb1360SIngo Molnar 	}
2155f2cb1360SIngo Molnar }
2156f2cb1360SIngo Molnar 
2157181a80d1SViresh Kumar static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
2158f2cb1360SIngo Molnar 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
2159c744dc4aSBeata Michalska 		struct sched_domain *child, int cpu)
2160f2cb1360SIngo Molnar {
2161c744dc4aSBeata Michalska 	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
2162f2cb1360SIngo Molnar 
2163f2cb1360SIngo Molnar 	if (child) {
2164f2cb1360SIngo Molnar 		sd->level = child->level + 1;
2165f2cb1360SIngo Molnar 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
2166f2cb1360SIngo Molnar 		child->parent = sd;
2167f2cb1360SIngo Molnar 
2168f2cb1360SIngo Molnar 		if (!cpumask_subset(sched_domain_span(child),
2169f2cb1360SIngo Molnar 				    sched_domain_span(sd))) {
2170f2cb1360SIngo Molnar 			pr_err("BUG: arch topology borken\n");
2171f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
2172f2cb1360SIngo Molnar 			pr_err("     the %s domain not a subset of the %s domain\n",
2173f2cb1360SIngo Molnar 					child->name, sd->name);
2174f2cb1360SIngo Molnar #endif
217597fb7a0aSIngo Molnar 			/* Fixup, ensure @sd has at least @child CPUs. */
2176f2cb1360SIngo Molnar 			cpumask_or(sched_domain_span(sd),
2177f2cb1360SIngo Molnar 				   sched_domain_span(sd),
2178f2cb1360SIngo Molnar 				   sched_domain_span(child));
2179f2cb1360SIngo Molnar 		}
2180f2cb1360SIngo Molnar 
2181f2cb1360SIngo Molnar 	}
2182f2cb1360SIngo Molnar 	set_domain_attribute(sd, attr);
2183f2cb1360SIngo Molnar 
2184f2cb1360SIngo Molnar 	return sd;
2185f2cb1360SIngo Molnar }
2186f2cb1360SIngo Molnar 
2187f2cb1360SIngo Molnar /*
2188ccf74128SValentin Schneider  * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
2189ccf74128SValentin Schneider  * any two given CPUs at this (non-NUMA) topology level.
2190ccf74128SValentin Schneider  */
2191ccf74128SValentin Schneider static bool topology_span_sane(struct sched_domain_topology_level *tl,
2192ccf74128SValentin Schneider 			      const struct cpumask *cpu_map, int cpu)
2193ccf74128SValentin Schneider {
2194ccf74128SValentin Schneider 	int i;
2195ccf74128SValentin Schneider 
2196ccf74128SValentin Schneider 	/* NUMA levels are allowed to overlap */
2197ccf74128SValentin Schneider 	if (tl->flags & SDTL_OVERLAP)
2198ccf74128SValentin Schneider 		return true;
2199ccf74128SValentin Schneider 
2200ccf74128SValentin Schneider 	/*
2201ccf74128SValentin Schneider 	 * Non-NUMA levels cannot partially overlap - they must be either
2202ccf74128SValentin Schneider 	 * completely equal or completely disjoint. Otherwise we can end up
2203ccf74128SValentin Schneider 	 * breaking the sched_group lists - i.e. a later get_group() pass
2204ccf74128SValentin Schneider 	 * breaks the linking done for an earlier span.
2205ccf74128SValentin Schneider 	 */
2206ccf74128SValentin Schneider 	for_each_cpu(i, cpu_map) {
2207ccf74128SValentin Schneider 		if (i == cpu)
2208ccf74128SValentin Schneider 			continue;
2209ccf74128SValentin Schneider 		/*
2210ccf74128SValentin Schneider 		 * We should 'and' all those masks with 'cpu_map' to exactly
2211ccf74128SValentin Schneider 		 * match the topology we're about to build, but that can only
2212ccf74128SValentin Schneider 		 * remove CPUs, which only lessens our ability to detect
2213ccf74128SValentin Schneider 		 * overlaps
2214ccf74128SValentin Schneider 		 * overlaps.
2215ccf74128SValentin Schneider 		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
2216ccf74128SValentin Schneider 		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
2217ccf74128SValentin Schneider 			return false;
2218ccf74128SValentin Schneider 	}
2219ccf74128SValentin Schneider 
2220ccf74128SValentin Schneider 	return true;
2221ccf74128SValentin Schneider }
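
/*
 * Illustrative example (hypothetical masks): at a non-NUMA level such as MC
 * on a 4-CPU system,
 *
 *	tl->mask(0) == tl->mask(1) == {0,1}
 *	tl->mask(2) == tl->mask(3) == {2,3}
 *
 * is sane (every pair of masks is either equal or disjoint), whereas
 *
 *	tl->mask(0) == {0,1}, tl->mask(1) == {1,2}
 *
 * is rejected: the masks intersect without being equal, so a later
 * get_group() pass would corrupt the sched_group links built for the
 * earlier span.
 */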
2222ccf74128SValentin Schneider 
2223ccf74128SValentin Schneider /*
2224f2cb1360SIngo Molnar  * Build sched domains for a given set of CPUs and attach them to the
2225f2cb1360SIngo Molnar  * individual CPUs.
2226f2cb1360SIngo Molnar  */
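/*
 * In outline, the steps below are: allocate the per-CPU sched_domain /
 * sched_group storage, build each CPU's domain hierarchy one topology
 * level at a time, link the sched_groups for every domain, derive the
 * allowed NUMA imbalance (imb_numa_nr), initialize group capacities, and
 * finally attach each CPU's lowest domain to the root domain under RCU.
 */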
2227f2cb1360SIngo Molnar static int
2228f2cb1360SIngo Molnar build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
2229f2cb1360SIngo Molnar {
2230cd1cb335SValentin Schneider 	enum s_alloc alloc_state = sa_none;
2231f2cb1360SIngo Molnar 	struct sched_domain *sd;
2232f2cb1360SIngo Molnar 	struct s_data d;
2233f2cb1360SIngo Molnar 	struct rq *rq = NULL;
2234f2cb1360SIngo Molnar 	int i, ret = -ENOMEM;
2235df054e84SMorten Rasmussen 	bool has_asym = false;
2236f2cb1360SIngo Molnar 
2237cd1cb335SValentin Schneider 	if (WARN_ON(cpumask_empty(cpu_map)))
2238cd1cb335SValentin Schneider 		goto error;
2239cd1cb335SValentin Schneider 
2240f2cb1360SIngo Molnar 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
2241f2cb1360SIngo Molnar 	if (alloc_state != sa_rootdomain)
2242f2cb1360SIngo Molnar 		goto error;
2243f2cb1360SIngo Molnar 
2244f2cb1360SIngo Molnar 	/* Set up domains for CPUs specified by the cpu_map: */
2245f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
2246f2cb1360SIngo Molnar 		struct sched_domain_topology_level *tl;
2247f2cb1360SIngo Molnar 
2248f2cb1360SIngo Molnar 		sd = NULL;
2249f2cb1360SIngo Molnar 		for_each_sd_topology(tl) {
225005484e09SMorten Rasmussen 
2251ccf74128SValentin Schneider 			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
2252ccf74128SValentin Schneider 				goto error;
2253ccf74128SValentin Schneider 
2254c744dc4aSBeata Michalska 			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
2255c744dc4aSBeata Michalska 
2256c744dc4aSBeata Michalska 			has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
225705484e09SMorten Rasmussen 
2258f2cb1360SIngo Molnar 			if (tl == sched_domain_topology)
2259f2cb1360SIngo Molnar 				*per_cpu_ptr(d.sd, i) = sd;
2260af85596cSPeter Zijlstra 			if (tl->flags & SDTL_OVERLAP)
2261f2cb1360SIngo Molnar 				sd->flags |= SD_OVERLAP;
2262f2cb1360SIngo Molnar 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
2263f2cb1360SIngo Molnar 				break;
2264f2cb1360SIngo Molnar 		}
2265f2cb1360SIngo Molnar 	}
2266f2cb1360SIngo Molnar 
2267f2cb1360SIngo Molnar 	/* Build the groups for the domains */
2268f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
2269f2cb1360SIngo Molnar 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2270f2cb1360SIngo Molnar 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
2271f2cb1360SIngo Molnar 			if (sd->flags & SD_OVERLAP) {
2272f2cb1360SIngo Molnar 				if (build_overlap_sched_groups(sd, i))
2273f2cb1360SIngo Molnar 					goto error;
2274f2cb1360SIngo Molnar 			} else {
2275f2cb1360SIngo Molnar 				if (build_sched_groups(sd, i))
2276f2cb1360SIngo Molnar 					goto error;
2277f2cb1360SIngo Molnar 			}
2278f2cb1360SIngo Molnar 		}
2279f2cb1360SIngo Molnar 	}
2280f2cb1360SIngo Molnar 
2281e496132eSMel Gorman 	/*
2282e496132eSMel Gorman 	 * Calculate an allowed NUMA imbalance such that LLCs do not get
2283e496132eSMel Gorman 	 * imbalanced.
2284e496132eSMel Gorman 	 */
2285e496132eSMel Gorman 	for_each_cpu(i, cpu_map) {
2286e496132eSMel Gorman 		unsigned int imb = 0;
2287e496132eSMel Gorman 		unsigned int imb_span = 1;
2288e496132eSMel Gorman 
2289e496132eSMel Gorman 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2290e496132eSMel Gorman 			struct sched_domain *child = sd->child;
2291e496132eSMel Gorman 
2292e496132eSMel Gorman 			if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child &&
2293e496132eSMel Gorman 			    (child->flags & SD_SHARE_PKG_RESOURCES)) {
2294e496132eSMel Gorman 				struct sched_domain *top, *top_p;
2295e496132eSMel Gorman 				unsigned int nr_llcs;
2296e496132eSMel Gorman 
2297e496132eSMel Gorman 				/*
2298e496132eSMel Gorman 				 * For a single LLC per node, allow an
2299e496132eSMel Gorman 				 * imbalance up to 25% of the node. This is an
2300e496132eSMel Gorman 				 * arbitrary cutoff based on SMT-2 to balance
2301e496132eSMel Gorman 				 * between memory bandwidth and avoiding
2302e496132eSMel Gorman 				 * premature sharing of HT resources; SMT-4
2303e496132eSMel Gorman 				 * or SMT-8 *may* benefit from a different
2304e496132eSMel Gorman 				 * cutoff.
2305e496132eSMel Gorman 				 *
2306e496132eSMel Gorman 				 * For multiple LLCs, allow an imbalance
2307e496132eSMel Gorman 				 * until multiple tasks would share an LLC
2308e496132eSMel Gorman 				 * on one node while LLCs on another node
2309e496132eSMel Gorman 				 * remain idle.
2310e496132eSMel Gorman 				 */
2311e496132eSMel Gorman 				nr_llcs = sd->span_weight / child->span_weight;
2312e496132eSMel Gorman 				if (nr_llcs == 1)
2313e496132eSMel Gorman 					imb = sd->span_weight >> 2;
2314e496132eSMel Gorman 				else
2315e496132eSMel Gorman 					imb = nr_llcs;
2316e496132eSMel Gorman 				sd->imb_numa_nr = imb;
2317e496132eSMel Gorman 
2318e496132eSMel Gorman 				/* Set span based on the first NUMA domain. */
2319e496132eSMel Gorman 				top = sd;
2320e496132eSMel Gorman 				top_p = top->parent;
2321e496132eSMel Gorman 				while (top_p && !(top_p->flags & SD_NUMA)) {
2322e496132eSMel Gorman 					top = top->parent;
2323e496132eSMel Gorman 					top_p = top->parent;
2324e496132eSMel Gorman 				}
2325e496132eSMel Gorman 				imb_span = top_p ? top_p->span_weight : sd->span_weight;
2326e496132eSMel Gorman 			} else {
2327e496132eSMel Gorman 				int factor = max(1U, (sd->span_weight / imb_span));
2328e496132eSMel Gorman 
2329e496132eSMel Gorman 				sd->imb_numa_nr = imb * factor;
2330e496132eSMel Gorman 			}
2331e496132eSMel Gorman 		}
2332e496132eSMel Gorman 	}
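
	/*
	 * Worked example (illustrative, hypothetical machine): two nodes,
	 * 64 CPUs each, one LLC per node.  At the first level without
	 * SD_SHARE_PKG_RESOURCES whose child is the LLC, nr_llcs = 64/64 = 1,
	 * so imb = 64 >> 2 = 16 and imb_span = 128 (the first NUMA span).
	 * The NUMA domain spanning all 128 CPUs then takes the else branch:
	 * factor = max(1, 128/128) = 1, so its imb_numa_nr is also 16.
	 * With eight LLCs per node instead, imb = nr_llcs = 8 at that level.
	 */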
2333e496132eSMel Gorman 
2334f2cb1360SIngo Molnar 	/* Calculate CPU capacity for physical packages and nodes */
2335f2cb1360SIngo Molnar 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
2336f2cb1360SIngo Molnar 		if (!cpumask_test_cpu(i, cpu_map))
2337f2cb1360SIngo Molnar 			continue;
2338f2cb1360SIngo Molnar 
2339f2cb1360SIngo Molnar 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2340f2cb1360SIngo Molnar 			claim_allocations(i, sd);
2341f2cb1360SIngo Molnar 			init_sched_groups_capacity(i, sd);
2342f2cb1360SIngo Molnar 		}
2343f2cb1360SIngo Molnar 	}
2344f2cb1360SIngo Molnar 
2345f2cb1360SIngo Molnar 	/* Attach the domains */
2346f2cb1360SIngo Molnar 	rcu_read_lock();
2347f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
2348f2cb1360SIngo Molnar 		rq = cpu_rq(i);
2349f2cb1360SIngo Molnar 		sd = *per_cpu_ptr(d.sd, i);
2350f2cb1360SIngo Molnar 
2351f2cb1360SIngo Molnar 		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
2352f2cb1360SIngo Molnar 		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
2353f2cb1360SIngo Molnar 			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
2354f2cb1360SIngo Molnar 
2355f2cb1360SIngo Molnar 		cpu_attach_domain(sd, d.rd, i);
2356f2cb1360SIngo Molnar 	}
2357f2cb1360SIngo Molnar 	rcu_read_unlock();
2358f2cb1360SIngo Molnar 
2359df054e84SMorten Rasmussen 	if (has_asym)
2360e284df70SValentin Schneider 		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
2361df054e84SMorten Rasmussen 
23629406415fSPeter Zijlstra 	if (rq && sched_debug_verbose) {
2363bf5015a5SJuri Lelli 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
2364f2cb1360SIngo Molnar 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
2365f2cb1360SIngo Molnar 	}
2366f2cb1360SIngo Molnar 
2367f2cb1360SIngo Molnar 	ret = 0;
2368f2cb1360SIngo Molnar error:
2369f2cb1360SIngo Molnar 	__free_domain_allocs(&d, alloc_state, cpu_map);
237097fb7a0aSIngo Molnar 
2371f2cb1360SIngo Molnar 	return ret;
2372f2cb1360SIngo Molnar }
2373f2cb1360SIngo Molnar 
2374f2cb1360SIngo Molnar /* Current sched domains: */
2375f2cb1360SIngo Molnar static cpumask_var_t			*doms_cur;
2376f2cb1360SIngo Molnar 
2377f2cb1360SIngo Molnar /* Number of sched domains in 'doms_cur': */
2378f2cb1360SIngo Molnar static int				ndoms_cur;
2379f2cb1360SIngo Molnar 
23803b03706fSIngo Molnar /* Attributes of custom domains in 'doms_cur' */
2381f2cb1360SIngo Molnar static struct sched_domain_attr		*dattr_cur;
2382f2cb1360SIngo Molnar 
2383f2cb1360SIngo Molnar /*
2384f2cb1360SIngo Molnar  * Special case: If a kmalloc() of a doms_cur partition (array of
2385f2cb1360SIngo Molnar  * cpumask) fails, then fall back to a single sched domain,
2386f2cb1360SIngo Molnar  * as determined by the single cpumask fallback_doms.
2387f2cb1360SIngo Molnar  */
23888d5dc512SPeter Zijlstra static cpumask_var_t			fallback_doms;
2389f2cb1360SIngo Molnar 
2390f2cb1360SIngo Molnar /*
2391f2cb1360SIngo Molnar  * arch_update_cpu_topology lets virtualized architectures update the
2392f2cb1360SIngo Molnar  * CPU core maps. It is supposed to return 1 if the topology changed
2393f2cb1360SIngo Molnar  * or 0 if it stayed the same.
2394f2cb1360SIngo Molnar  */
2395f2cb1360SIngo Molnar int __weak arch_update_cpu_topology(void)
2396f2cb1360SIngo Molnar {
2397f2cb1360SIngo Molnar 	return 0;
2398f2cb1360SIngo Molnar }
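
/*
 * Minimal override sketch (illustrative only; refresh_core_maps() is a
 * hypothetical helper, no real architecture's implementation is quoted):
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return refresh_core_maps() ? 1 : 0;
 *	}
 *
 * Returning 1 makes partition_sched_domains_locked() treat the topology as
 * new, so matching old/new cpumasks are not reused and all domains are
 * rebuilt.
 */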
2399f2cb1360SIngo Molnar 
2400f2cb1360SIngo Molnar cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
2401f2cb1360SIngo Molnar {
2402f2cb1360SIngo Molnar 	int i;
2403f2cb1360SIngo Molnar 	cpumask_var_t *doms;
2404f2cb1360SIngo Molnar 
24056da2ec56SKees Cook 	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
2406f2cb1360SIngo Molnar 	if (!doms)
2407f2cb1360SIngo Molnar 		return NULL;
2408f2cb1360SIngo Molnar 	for (i = 0; i < ndoms; i++) {
2409f2cb1360SIngo Molnar 		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
2410f2cb1360SIngo Molnar 			free_sched_domains(doms, i);
2411f2cb1360SIngo Molnar 			return NULL;
2412f2cb1360SIngo Molnar 		}
2413f2cb1360SIngo Molnar 	}
2414f2cb1360SIngo Molnar 	return doms;
2415f2cb1360SIngo Molnar }
2416f2cb1360SIngo Molnar 
2417f2cb1360SIngo Molnar void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2418f2cb1360SIngo Molnar {
2419f2cb1360SIngo Molnar 	unsigned int i;
2420f2cb1360SIngo Molnar 	for (i = 0; i < ndoms; i++)
2421f2cb1360SIngo Molnar 		free_cpumask_var(doms[i]);
2422f2cb1360SIngo Molnar 	kfree(doms);
2423f2cb1360SIngo Molnar }
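
/*
 * Usage sketch (illustrative; mask_a and mask_b are hypothetical,
 * non-overlapping cpumasks): a caller wanting two load-balancing partitions
 * allocates the array, fills it and hands ownership to
 * partition_sched_domains(), which will free_sched_domains() it once the
 * partitioning is replaced:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_a);
 *		cpumask_copy(doms[1], mask_b);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 */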
2424f2cb1360SIngo Molnar 
2425f2cb1360SIngo Molnar /*
2426cb0c0414SJuri Lelli  * Set up scheduler domains and groups.  For now this just excludes isolated
2427cb0c0414SJuri Lelli  * CPUs, but could be used to exclude other special cases in the future.
2428f2cb1360SIngo Molnar  */
24298d5dc512SPeter Zijlstra int sched_init_domains(const struct cpumask *cpu_map)
2430f2cb1360SIngo Molnar {
2431f2cb1360SIngo Molnar 	int err;
2432f2cb1360SIngo Molnar 
24338d5dc512SPeter Zijlstra 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
24341676330eSPeter Zijlstra 	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
24358d5dc512SPeter Zijlstra 	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
24368d5dc512SPeter Zijlstra 
2437f2cb1360SIngo Molnar 	arch_update_cpu_topology();
2438c744dc4aSBeata Michalska 	asym_cpu_capacity_scan();
2439f2cb1360SIngo Molnar 	ndoms_cur = 1;
2440f2cb1360SIngo Molnar 	doms_cur = alloc_sched_domains(ndoms_cur);
2441f2cb1360SIngo Molnar 	if (!doms_cur)
2442f2cb1360SIngo Molnar 		doms_cur = &fallback_doms;
2443*04d4e665SFrederic Weisbecker 	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
2444f2cb1360SIngo Molnar 	err = build_sched_domains(doms_cur[0], NULL);
2445f2cb1360SIngo Molnar 
2446f2cb1360SIngo Molnar 	return err;
2447f2cb1360SIngo Molnar }
2448f2cb1360SIngo Molnar 
2449f2cb1360SIngo Molnar /*
2450f2cb1360SIngo Molnar  * Detach sched domains from a group of CPUs specified in cpu_map;
2451f2cb1360SIngo Molnar  * these CPUs will now be attached to the NULL domain.
2452f2cb1360SIngo Molnar  */
2453f2cb1360SIngo Molnar static void detach_destroy_domains(const struct cpumask *cpu_map)
2454f2cb1360SIngo Molnar {
2455e284df70SValentin Schneider 	unsigned int cpu = cpumask_any(cpu_map);
2456f2cb1360SIngo Molnar 	int i;
2457f2cb1360SIngo Molnar 
2458e284df70SValentin Schneider 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
2459e284df70SValentin Schneider 		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
2460e284df70SValentin Schneider 
2461f2cb1360SIngo Molnar 	rcu_read_lock();
2462f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map)
2463f2cb1360SIngo Molnar 		cpu_attach_domain(NULL, &def_root_domain, i);
2464f2cb1360SIngo Molnar 	rcu_read_unlock();
2465f2cb1360SIngo Molnar }
2466f2cb1360SIngo Molnar 
2467f2cb1360SIngo Molnar /* handle null as "default" */
2468f2cb1360SIngo Molnar static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
2469f2cb1360SIngo Molnar 			struct sched_domain_attr *new, int idx_new)
2470f2cb1360SIngo Molnar {
2471f2cb1360SIngo Molnar 	struct sched_domain_attr tmp;
2472f2cb1360SIngo Molnar 
2473f2cb1360SIngo Molnar 	/* Fast path: */
2474f2cb1360SIngo Molnar 	if (!new && !cur)
2475f2cb1360SIngo Molnar 		return 1;
2476f2cb1360SIngo Molnar 
2477f2cb1360SIngo Molnar 	tmp = SD_ATTR_INIT;
247897fb7a0aSIngo Molnar 
2479f2cb1360SIngo Molnar 	return !memcmp(cur ? (cur + idx_cur) : &tmp,
2480f2cb1360SIngo Molnar 			new ? (new + idx_new) : &tmp,
2481f2cb1360SIngo Molnar 			sizeof(struct sched_domain_attr));
2482f2cb1360SIngo Molnar }
2483f2cb1360SIngo Molnar 
2484f2cb1360SIngo Molnar /*
2485f2cb1360SIngo Molnar  * Partition sched domains as specified by the 'ndoms_new'
2486f2cb1360SIngo Molnar  * cpumasks in the array doms_new[]. This compares
2487f2cb1360SIngo Molnar  * doms_new[] to the current sched domain partitioning, doms_cur[].
2488f2cb1360SIngo Molnar  * It destroys each deleted domain and builds each new domain.
2489f2cb1360SIngo Molnar  *
2490f2cb1360SIngo Molnar  * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
2491f2cb1360SIngo Molnar  * The masks don't intersect (don't overlap). We should set up one
2492f2cb1360SIngo Molnar  * sched domain for each mask. CPUs not in any of the cpumasks will
2493f2cb1360SIngo Molnar  * not be load balanced. If the same cpumask appears both in the
2494f2cb1360SIngo Molnar  * current 'doms_cur' domains and in the new 'doms_new', we can leave
2495f2cb1360SIngo Molnar  * it as it is.
2496f2cb1360SIngo Molnar  *
2497f2cb1360SIngo Molnar  * The passed-in 'doms_new' should be allocated using
2498f2cb1360SIngo Molnar  * alloc_sched_domains().  This routine takes ownership of it and will
2499f2cb1360SIngo Molnar  * free_sched_domains() it when done with it. If the caller failed the
2500f2cb1360SIngo Molnar  * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2501f2cb1360SIngo Molnar  * and partition_sched_domains() will fall back to the single partition
2502f2cb1360SIngo Molnar  * 'fallback_doms'; this also forces the domains to be rebuilt.
2503f2cb1360SIngo Molnar  *
2504f2cb1360SIngo Molnar  * If doms_new == NULL it will be replaced with the active housekeeping CPUs.
2505f2cb1360SIngo Molnar  * ndoms_new == 0 is a special case for destroying existing domains,
2506f2cb1360SIngo Molnar  * and it will not create the default domain.
2507f2cb1360SIngo Molnar  *
2508c22645f4SMathieu Poirier  * Call with hotplug lock and sched_domains_mutex held
2509f2cb1360SIngo Molnar  */
2510c22645f4SMathieu Poirier void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
2511f2cb1360SIngo Molnar 				    struct sched_domain_attr *dattr_new)
2512f2cb1360SIngo Molnar {
25131f74de87SQuentin Perret 	bool __maybe_unused has_eas = false;
2514f2cb1360SIngo Molnar 	int i, j, n;
2515f2cb1360SIngo Molnar 	int new_topology;
2516f2cb1360SIngo Molnar 
2517c22645f4SMathieu Poirier 	lockdep_assert_held(&sched_domains_mutex);
2518f2cb1360SIngo Molnar 
2519f2cb1360SIngo Molnar 	/* Let the architecture update CPU core mappings: */
2520f2cb1360SIngo Molnar 	new_topology = arch_update_cpu_topology();
2521c744dc4aSBeata Michalska 	/* Trigger rebuilding CPU capacity asymmetry data */
2522c744dc4aSBeata Michalska 	if (new_topology)
2523c744dc4aSBeata Michalska 		asym_cpu_capacity_scan();
2524f2cb1360SIngo Molnar 
252509e0dd8eSPeter Zijlstra 	if (!doms_new) {
252609e0dd8eSPeter Zijlstra 		WARN_ON_ONCE(dattr_new);
252709e0dd8eSPeter Zijlstra 		n = 0;
252809e0dd8eSPeter Zijlstra 		doms_new = alloc_sched_domains(1);
252909e0dd8eSPeter Zijlstra 		if (doms_new) {
253009e0dd8eSPeter Zijlstra 			n = 1;
2531edb93821SFrederic Weisbecker 			cpumask_and(doms_new[0], cpu_active_mask,
2532*04d4e665SFrederic Weisbecker 				    housekeeping_cpumask(HK_TYPE_DOMAIN));
253309e0dd8eSPeter Zijlstra 		}
253409e0dd8eSPeter Zijlstra 	} else {
253509e0dd8eSPeter Zijlstra 		n = ndoms_new;
253609e0dd8eSPeter Zijlstra 	}
2537f2cb1360SIngo Molnar 
2538f2cb1360SIngo Molnar 	/* Destroy deleted domains: */
2539f2cb1360SIngo Molnar 	for (i = 0; i < ndoms_cur; i++) {
2540f2cb1360SIngo Molnar 		for (j = 0; j < n && !new_topology; j++) {
25416aa140faSQuentin Perret 			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
2542f9a25f77SMathieu Poirier 			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
2543f9a25f77SMathieu Poirier 				struct root_domain *rd;
2544f9a25f77SMathieu Poirier 
2545f9a25f77SMathieu Poirier 				/*
2546f9a25f77SMathieu Poirier 				 * This domain won't be destroyed and as such
2547f9a25f77SMathieu Poirier 				 * its dl_bw->total_bw needs to be cleared.  It
2548f9a25f77SMathieu Poirier 				 * will be recomputed in function
2549f9a25f77SMathieu Poirier 				 * update_tasks_root_domain().
2550f9a25f77SMathieu Poirier 				 */
2551f9a25f77SMathieu Poirier 				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2552f9a25f77SMathieu Poirier 				dl_clear_root_domain(rd);
2553f2cb1360SIngo Molnar 				goto match1;
2554f2cb1360SIngo Molnar 			}
2555f9a25f77SMathieu Poirier 		}
2556f2cb1360SIngo Molnar 		/* No match - a current sched domain not in new doms_new[] */
2557f2cb1360SIngo Molnar 		detach_destroy_domains(doms_cur[i]);
2558f2cb1360SIngo Molnar match1:
2559f2cb1360SIngo Molnar 		;
2560f2cb1360SIngo Molnar 	}
2561f2cb1360SIngo Molnar 
2562f2cb1360SIngo Molnar 	n = ndoms_cur;
256309e0dd8eSPeter Zijlstra 	if (!doms_new) {
2564f2cb1360SIngo Molnar 		n = 0;
2565f2cb1360SIngo Molnar 		doms_new = &fallback_doms;
2566edb93821SFrederic Weisbecker 		cpumask_and(doms_new[0], cpu_active_mask,
2567*04d4e665SFrederic Weisbecker 			    housekeeping_cpumask(HK_TYPE_DOMAIN));
2568f2cb1360SIngo Molnar 	}
2569f2cb1360SIngo Molnar 
2570f2cb1360SIngo Molnar 	/* Build new domains: */
2571f2cb1360SIngo Molnar 	for (i = 0; i < ndoms_new; i++) {
2572f2cb1360SIngo Molnar 		for (j = 0; j < n && !new_topology; j++) {
25736aa140faSQuentin Perret 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
25746aa140faSQuentin Perret 			    dattrs_equal(dattr_new, i, dattr_cur, j))
2575f2cb1360SIngo Molnar 				goto match2;
2576f2cb1360SIngo Molnar 		}
2577f2cb1360SIngo Molnar 		/* No match - add a new doms_new */
2578f2cb1360SIngo Molnar 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2579f2cb1360SIngo Molnar match2:
2580f2cb1360SIngo Molnar 		;
2581f2cb1360SIngo Molnar 	}
2582f2cb1360SIngo Molnar 
2583531b5c9fSQuentin Perret #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
25846aa140faSQuentin Perret 	/* Build perf. domains: */
25856aa140faSQuentin Perret 	for (i = 0; i < ndoms_new; i++) {
2586531b5c9fSQuentin Perret 		for (j = 0; j < n && !sched_energy_update; j++) {
25876aa140faSQuentin Perret 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
25881f74de87SQuentin Perret 			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
25891f74de87SQuentin Perret 				has_eas = true;
25906aa140faSQuentin Perret 				goto match3;
25916aa140faSQuentin Perret 			}
25921f74de87SQuentin Perret 		}
25936aa140faSQuentin Perret 		/* No match - add perf. domains for a new rd */
25941f74de87SQuentin Perret 		has_eas |= build_perf_domains(doms_new[i]);
25956aa140faSQuentin Perret match3:
25966aa140faSQuentin Perret 		;
25976aa140faSQuentin Perret 	}
25981f74de87SQuentin Perret 	sched_energy_set(has_eas);
25996aa140faSQuentin Perret #endif
26006aa140faSQuentin Perret 
2601f2cb1360SIngo Molnar 	/* Remember the new sched domains: */
2602f2cb1360SIngo Molnar 	if (doms_cur != &fallback_doms)
2603f2cb1360SIngo Molnar 		free_sched_domains(doms_cur, ndoms_cur);
2604f2cb1360SIngo Molnar 
2605f2cb1360SIngo Molnar 	kfree(dattr_cur);
2606f2cb1360SIngo Molnar 	doms_cur = doms_new;
2607f2cb1360SIngo Molnar 	dattr_cur = dattr_new;
2608f2cb1360SIngo Molnar 	ndoms_cur = ndoms_new;
2609f2cb1360SIngo Molnar 
26103b87f136SPeter Zijlstra 	update_sched_domain_debugfs();
2611c22645f4SMathieu Poirier }
2612f2cb1360SIngo Molnar 
2613c22645f4SMathieu Poirier /*
2614c22645f4SMathieu Poirier  * Call with hotplug lock held
2615c22645f4SMathieu Poirier  */
2616c22645f4SMathieu Poirier void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2617c22645f4SMathieu Poirier 			     struct sched_domain_attr *dattr_new)
2618c22645f4SMathieu Poirier {
2619c22645f4SMathieu Poirier 	mutex_lock(&sched_domains_mutex);
2620c22645f4SMathieu Poirier 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
2621f2cb1360SIngo Molnar 	mutex_unlock(&sched_domains_mutex);
2622f2cb1360SIngo Molnar }
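
/*
 * Call-pattern sketch (illustrative): passing doms_new == NULL asks for the
 * single default partition spanning the active housekeeping CPUs:
 *
 *	partition_sched_domains(1, NULL, NULL);
 *
 * The caller must hold the hotplug lock; partition_sched_domains() then
 * takes sched_domains_mutex itself, as above.
 */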
2623