xref: /openbmc/linux/kernel/sched/topology.c (revision 994aeb7a93e43d28f6074195ccb03a384342e1bf)
// SPDX-License-Identifier: GPL-2.0
/*
 * Scheduler topology setup/handling methods
 */
#include "sched.h"

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2;

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = true;

	return 0;
}
early_param("sched_debug", sched_debug_setup);
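/*
 * Usage note (editorial addition): with the early_param() hook above,
 * booting with "sched_debug" on the kernel command line enables the
 * sched-domain debug printouts below.
 */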

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
		return -1;
	}

	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
				group->sgc->id,
				cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
				cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_enabled 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES |
			 SD_SHARE_POWERDOMAIN)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
				SD_BALANCE_NEWIDLE |
				SD_BALANCE_FORK |
				SD_BALANCE_EXEC |
				SD_ASYM_CPUCAPACITY |
				SD_SHARE_CPUCAPACITY |
				SD_SHARE_PKG_RESOURCES |
				SD_PREFER_SIBLING |
				SD_SHARE_POWERDOMAIN);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}
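/*
 * Worked example (editorial addition): on a machine without SMT, an
 * SMT-level domain would span exactly one CPU, so sd_degenerate() above
 * returns 1 for it and cpu_attach_domain() splices that level out of the
 * tree. Likewise, if a parent spans the same CPUs as its child and, having
 * a single group, retains no group-needing flags the child lacks,
 * sd_parent_degenerate() lets the parent be removed.
 */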

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
DEFINE_STATIC_KEY_FALSE(sched_energy_present);
unsigned int sysctl_sched_energy_aware = 1;
DEFINE_MUTEX(sched_energy_mutex);
bool sched_energy_update;

#ifdef CONFIG_PROC_SYSCTL
int sched_energy_aware_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, state;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		state = static_branch_unlikely(&sched_energy_present);
		if (state != sysctl_sched_energy_aware) {
			mutex_lock(&sched_energy_mutex);
			sched_energy_update = 1;
			rebuild_sched_domains();
			sched_energy_update = 0;
			mutex_unlock(&sched_energy_mutex);
		}
	}

	return ret;
}
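/*
 * Usage sketch (editorial addition): the handler above backs the
 * "sched_energy_aware" sysctl, so EAS can be toggled at run time from
 * user space, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/sched_energy_aware	# disable EAS
 *	echo 1 > /proc/sys/kernel/sched_energy_aware	# re-enable EAS
 *
 * A write that actually changes the state triggers a rebuild of the
 * sched domains under sched_energy_mutex.
 */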
#endif

static void free_pd(struct perf_domain *pd)
{
	struct perf_domain *tmp;

	while (pd) {
		tmp = pd->next;
		kfree(pd);
		pd = tmp;
	}
}

static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
{
	while (pd) {
		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
			return pd;
		pd = pd->next;
	}

	return NULL;
}

static struct perf_domain *pd_init(int cpu)
{
	struct em_perf_domain *obj = em_cpu_get(cpu);
	struct perf_domain *pd;

	if (!obj) {
		if (sched_debug())
			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
		return NULL;
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return NULL;
	pd->em_pd = obj;

	return pd;
}

static void perf_domain_debug(const struct cpumask *cpu_map,
						struct perf_domain *pd)
{
	if (!sched_debug() || !pd)
		return;

	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));

	while (pd) {
		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }",
				cpumask_first(perf_domain_span(pd)),
				cpumask_pr_args(perf_domain_span(pd)),
				em_pd_nr_cap_states(pd->em_pd));
		pd = pd->next;
	}

	printk(KERN_CONT "\n");
}

static void destroy_perf_domain_rcu(struct rcu_head *rp)
{
	struct perf_domain *pd;

	pd = container_of(rp, struct perf_domain, rcu);
	free_pd(pd);
}

static void sched_energy_set(bool has_eas)
{
	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: stopping EAS\n", __func__);
		static_branch_disable_cpuslocked(&sched_energy_present);
	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: starting EAS\n", __func__);
		static_branch_enable_cpuslocked(&sched_energy_present);
	}
}

/*
 * EAS can be used on a root domain if it meets all the following conditions:
 *    1. an Energy Model (EM) is available;
 *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
 *    3. the EM complexity is low enough to keep scheduling overheads low;
 *    4. schedutil is driving the frequency of all CPUs of the rd.
 *
 * The complexity of the Energy Model is defined as:
 *
 *              C = nr_pd * (nr_cpus + nr_cs)
 *
 * with parameters defined as:
 *  - nr_pd:    the number of performance domains
 *  - nr_cpus:  the number of CPUs
 *  - nr_cs:    the sum of the number of capacity states of all performance
 *              domains (for example, on a system with 2 performance domains,
 *              with 10 capacity states each, nr_cs = 2 * 10 = 20).
 *
 * It is generally not a good idea to use such a model in the wake-up path on
 * very complex platforms because of the associated scheduling overheads. The
 * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
 * with per-CPU DVFS and fewer than 8 capacity states each, for example.
 */
#define EM_MAX_COMPLEXITY 2048
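/*
 * Worked example for the bound above (editorial addition): 16 CPUs with
 * per-CPU DVFS means nr_pd = 16 and nr_cpus = 16; with 7 capacity states
 * per domain, nr_cs = 16 * 7 = 112, so:
 *
 *	C = 16 * (16 + 112) = 2048
 *
 * which sits exactly at EM_MAX_COMPLEXITY. One more capacity state per
 * domain would push C to 16 * (16 + 128) = 2304 and EAS would not start.
 */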

extern struct cpufreq_governor schedutil_gov;
static bool build_perf_domains(const struct cpumask *cpu_map)
{
	int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
	struct perf_domain *pd = NULL, *tmp;
	int cpu = cpumask_first(cpu_map);
	struct root_domain *rd = cpu_rq(cpu)->rd;
	struct cpufreq_policy *policy;
	struct cpufreq_governor *gov;

	if (!sysctl_sched_energy_aware)
		goto free;

	/* EAS is enabled for asymmetric CPU capacity topologies. */
	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
		if (sched_debug()) {
			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
					cpumask_pr_args(cpu_map));
		}
		goto free;
	}

	for_each_cpu(i, cpu_map) {
		/* Skip already covered CPUs. */
		if (find_pd(pd, i))
			continue;

		/* Do not attempt EAS if schedutil is not being used. */
		policy = cpufreq_cpu_get(i);
		if (!policy)
			goto free;
		gov = policy->governor;
		cpufreq_cpu_put(policy);
		if (gov != &schedutil_gov) {
			if (rd->pd)
				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
						cpumask_pr_args(cpu_map));
			goto free;
		}

		/* Create the new pd and add it to the local list. */
		tmp = pd_init(i);
		if (!tmp)
			goto free;
		tmp->next = pd;
		pd = tmp;

		/*
		 * Count performance domains and capacity states for the
		 * complexity check.
		 */
		nr_pd++;
		nr_cs += em_pd_nr_cap_states(pd->em_pd);
	}

	/* Bail out if the Energy Model complexity is too high. */
	if (nr_pd * (nr_cs + nr_cpus) > EM_MAX_COMPLEXITY) {
		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
						cpumask_pr_args(cpu_map));
		goto free;
	}

	perf_domain_debug(cpu_map, pd);

	/* Attach the new list of performance domains to the root domain. */
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, pd);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return !!pd;

free:
	free_pd(pd);
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, NULL);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return false;
}
#else
static void free_pd(struct perf_domain *pd) { }
#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	free_pd(rd->pd);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu(&old_rd->rcu, free_rootdomain);
}

void sched_get_rd(struct root_domain *rd)
{
	atomic_inc(&rd->refcount);
}

void sched_put_rd(struct root_domain *rd)
{
	if (!atomic_dec_and_test(&rd->refcount))
		return;

	call_rcu(&rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
#endif

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		if (atomic_dec_and_test(&sg->ref))
			kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * A normal sched domain may have multiple group references; an
	 * overlapping domain, having private groups, has only one. Iterate,
	 * dropping group/capacity references, freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain) for this CPU; this
 * allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two CPUs are in the same cache domain, see cpus_share_cache().
 */
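/*
 * Illustration (editorial addition): the LLC ID kept below is what makes
 * the cache-sharing test a pair of per-CPU loads instead of a domain-tree
 * walk; cpus_share_cache() in core.c is roughly:
 *
 *	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 */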
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

struct s_data {
	struct sched_domain * __percpu *sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask contains all those CPUs that could actually end up at
 * this group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}


/*
 * NUMA topology (first read the regular topology blurb below)
 *
 * Given a node-distance table, for example:
 *
 *   node   0   1   2   3
 *     0:  10  20  30  20
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  20  30  20  10
 *
 * which represents a 4 node ring topology like:
 *
 *   0 ----- 1
 *   |       |
 *   |       |
 *   |       |
 *   3 ----- 2
 *
 * We want to construct domains and groups to represent this. The way we go
 * about doing this is to build the domains on 'hops'. For each NUMA level we
 * construct the mask of all nodes reachable in @level hops.
 *
 * For the above NUMA topology that gives 3 levels:
 *
 * NUMA-2	0-3		0-3		0-3		0-3
 *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
 *
 * NUMA-1	0-1,3		0-2		1-3		0,2-3
 *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
 *
 * NUMA-0	0		1		2		3
 *
 *
 * As can be seen, things don't line up as nicely as with the regular
 * topology. When we iterate a domain in child domain chunks some nodes can
 * be represented multiple times -- hence the "overlap" naming for this part
 * of the topology.
 *
 * In order to minimize this overlap, we only build enough groups to cover the
 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
 *
 * Because:
 *
 *  - the first group of each domain is its child domain; this
 *    gets us the first 0-1,3
 *  - the only uncovered node is 2, whose child domain is 1-3.
 *
 * However, because of the overlap, computing a unique CPU for each group is
 * more complicated. Consider for instance the groups of Node-1 NUMA-2: both
 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
 * end up at those groups (they would end up in group: 0-1,3).
 *
 * To correct this we have to introduce the group balance mask. This mask
 * will contain those CPUs in the group that can reach this group given the
 * (child) domain tree.
 *
 * With this we can once again compute balance_cpu and sched_group_capacity
 * relations.
 *
 * XXX include words on how balance_cpu is unique and therefore can be
 * used for sched_group_capacity links.
 *
 *
 * Another 'interesting' topology is:
 *
 *   node   0   1   2   3
 *     0:  10  20  20  30
 *     1:  20  10  20  20
 *     2:  20  20  10  20
 *     3:  30  20  20  10
 *
 * Which looks a little like:
 *
 *   0 ----- 1
 *   |     / |
 *   |   /   |
 *   | /     |
 *   2 ----- 3
 *
 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
 * are not.
 *
 * This leads to a few particularly weird cases where the sched_domains are
 * not of the same number for each CPU. Consider:
 *
 * NUMA-2	0-3						0-3
 *  groups:	{0-2},{1-3}					{1-3},{0-2}
 *
 * NUMA-1	0-2		0-3		0-3		1-3
 *
 * NUMA-0	0		1		2		3
 *
 */


/*
 * Build the balance mask; it contains only those CPUs that can arrive at this
 * group and should be considered to continue balancing.
 *
 * We do this during the group creation pass, therefore the group information
 * isn't complete yet, however since each group represents a (child) domain we
 * can fully construct this using the sched_domain bits (which are already
 * complete).
 */
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
	const struct cpumask *sg_span = sched_group_span(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(mask);

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, mask);
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(mask));
}

/*
 * XXX: This creates per-node group entries; since the load-balancer will
 * immediately access remote memory to construct this group's load-balance
 * statistics, having the groups node-local is of dubious benefit.
 */
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_span(sg);
	if (sd->child)
		cpumask_copy(sg_span, sched_domain_span(sd->child));
	else
		cpumask_copy(sg_span, sched_domain_span(sd));

	atomic_inc(&sg->ref);
	return sg;
}

static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg)
{
	struct cpumask *mask = sched_domains_tmpmask2;
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;
	int cpu;

	build_balance_mask(sd, sg, mask);
	cpu = cpumask_first_and(sched_group_span(sg), mask);

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		cpumask_copy(group_balance_mask(sg), mask);
	else
		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
	sg_span = sched_group_span(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sd, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}


/*
 * Package topology (also see the load-balance blurb in fair.c)
 *
 * The scheduler builds a tree structure to represent a number of important
 * topology features. By default (default_topology[]) these include:
 *
 *  - Simultaneous multithreading (SMT)
 *  - Multi-Core Cache (MC)
 *  - Package (DIE)
 *
 * Where the last one more or less denotes everything up to a NUMA node.
 *
 * The tree consists of 3 primary data structures:
 *
 *	sched_domain -> sched_group -> sched_group_capacity
 *	    ^ ^             ^ ^
 *          `-'             `-'
 *
 * The sched_domains are per-CPU and have a two way link (parent & child) and
 * denote the ever growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's, each
 * denoting the domains of the level below (or individual CPUs in case of the
 * first domain level). The sched_group linked by a sched_domain includes the
 * CPU of that sched_domain [*].
 *
 * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * DIE  [                             ]
 * MC   [             ] [             ]
 * SMT  [     ] [     ] [     ] [     ]
 *
 *  - or -
 *
 * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
 * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
 * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * One way to think about it is: sched_domain moves you up and down among these
 * topology levels, while sched_group moves you sideways through it, at child
 * domain granularity.
 *
 * sched_group_capacity ensures each unique sched_group has shared storage.
 *
 * There are two related construction problems, both of which require a CPU
 * that uniquely identifies each group (for a given domain):
 *
 *  - The first is the balance_cpu (see should_we_balance() and the
 *    load-balance blurb in fair.c); for each group we only want 1 CPU to
 *    continue balancing at a higher domain.
 *
 *  - The second is the sched_group_capacity; we want all identical groups
 *    to share a single sched_group_capacity.
 *
 * These topologies are exclusive by construction: it is impossible for an
 * SMT thread to belong to multiple cores, or for a core to be part of
 * multiple caches. There is a very clear and unique location for each CPU
 * in the hierarchy.
 *
 * Therefore computing a unique CPU for each group is trivial (the iteration
 * mask is redundant and set all 1s; all CPUs in a group will end up at _that_
 * group), we can simply pick the first CPU in each group.
 *
 *
 * [*] in other words, the first group of each domain is its child domain.
 */
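/*
 * For reference (editorial addition): the default_topology[] table that the
 * blurb above mentions is defined later in this file and, modulo config
 * options, looks roughly like:
 *
 *	static struct sched_domain_topology_level default_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 */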
105635a566e6SPeter Zijlstra 
10570c0e776aSPeter Zijlstra static struct sched_group *get_group(int cpu, struct sd_data *sdd)
1058f2cb1360SIngo Molnar {
1059f2cb1360SIngo Molnar 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1060f2cb1360SIngo Molnar 	struct sched_domain *child = sd->child;
10610c0e776aSPeter Zijlstra 	struct sched_group *sg;
1062f2cb1360SIngo Molnar 
1063f2cb1360SIngo Molnar 	if (child)
1064f2cb1360SIngo Molnar 		cpu = cpumask_first(sched_domain_span(child));
1065f2cb1360SIngo Molnar 
10660c0e776aSPeter Zijlstra 	sg = *per_cpu_ptr(sdd->sg, cpu);
10670c0e776aSPeter Zijlstra 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1068f2cb1360SIngo Molnar 
1069f2cb1360SIngo Molnar 	/* For claim_allocations: */
10700c0e776aSPeter Zijlstra 	atomic_inc(&sg->ref);
10710c0e776aSPeter Zijlstra 	atomic_inc(&sg->sgc->ref);
10720c0e776aSPeter Zijlstra 
10730c0e776aSPeter Zijlstra 	if (child) {
1074ae4df9d6SPeter Zijlstra 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
1075ae4df9d6SPeter Zijlstra 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
10760c0e776aSPeter Zijlstra 	} else {
1077ae4df9d6SPeter Zijlstra 		cpumask_set_cpu(cpu, sched_group_span(sg));
1078e5c14b1fSPeter Zijlstra 		cpumask_set_cpu(cpu, group_balance_mask(sg));
1079f2cb1360SIngo Molnar 	}
1080f2cb1360SIngo Molnar 
1081ae4df9d6SPeter Zijlstra 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
10820c0e776aSPeter Zijlstra 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1083e3d6d0cbSMorten Rasmussen 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
10840c0e776aSPeter Zijlstra 
10850c0e776aSPeter Zijlstra 	return sg;
1086f2cb1360SIngo Molnar }
1087f2cb1360SIngo Molnar 
1088f2cb1360SIngo Molnar /*
1089f2cb1360SIngo Molnar  * build_sched_groups will build a circular linked list of the groups
1090f2cb1360SIngo Molnar  * covered by the given span, and will set each group's ->cpumask correctly,
1091f2cb1360SIngo Molnar  * and ->cpu_capacity to 0.
1092f2cb1360SIngo Molnar  *
1093f2cb1360SIngo Molnar  * Assumes the sched_domain tree is fully constructed
1094f2cb1360SIngo Molnar  */
1095f2cb1360SIngo Molnar static int
1096f2cb1360SIngo Molnar build_sched_groups(struct sched_domain *sd, int cpu)
1097f2cb1360SIngo Molnar {
1098f2cb1360SIngo Molnar 	struct sched_group *first = NULL, *last = NULL;
1099f2cb1360SIngo Molnar 	struct sd_data *sdd = sd->private;
1100f2cb1360SIngo Molnar 	const struct cpumask *span = sched_domain_span(sd);
1101f2cb1360SIngo Molnar 	struct cpumask *covered;
1102f2cb1360SIngo Molnar 	int i;
1103f2cb1360SIngo Molnar 
1104f2cb1360SIngo Molnar 	lockdep_assert_held(&sched_domains_mutex);
1105f2cb1360SIngo Molnar 	covered = sched_domains_tmpmask;
1106f2cb1360SIngo Molnar 
1107f2cb1360SIngo Molnar 	cpumask_clear(covered);
1108f2cb1360SIngo Molnar 
11090c0e776aSPeter Zijlstra 	for_each_cpu_wrap(i, span, cpu) {
1110f2cb1360SIngo Molnar 		struct sched_group *sg;
1111f2cb1360SIngo Molnar 
1112f2cb1360SIngo Molnar 		if (cpumask_test_cpu(i, covered))
1113f2cb1360SIngo Molnar 			continue;
1114f2cb1360SIngo Molnar 
11150c0e776aSPeter Zijlstra 		sg = get_group(i, sdd);
1116f2cb1360SIngo Molnar 
1117ae4df9d6SPeter Zijlstra 		cpumask_or(covered, covered, sched_group_span(sg));
1118f2cb1360SIngo Molnar 
1119f2cb1360SIngo Molnar 		if (!first)
1120f2cb1360SIngo Molnar 			first = sg;
1121f2cb1360SIngo Molnar 		if (last)
1122f2cb1360SIngo Molnar 			last->next = sg;
1123f2cb1360SIngo Molnar 		last = sg;
1124f2cb1360SIngo Molnar 	}
1125f2cb1360SIngo Molnar 	last->next = first;
11260c0e776aSPeter Zijlstra 	sd->groups = first;
1127f2cb1360SIngo Molnar 
1128f2cb1360SIngo Molnar 	return 0;
1129f2cb1360SIngo Molnar }
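/*
 * Illustrative sketch: the circular list built by build_sched_groups() is
 * always walked with a do/while loop anchored at sd->groups, as in
 * init_sched_groups_capacity() below. The helper here is hypothetical and
 * only demonstrates the traversal idiom.
 */
static inline unsigned int count_sched_groups(struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;
	unsigned int n = 0;

	do {
		n++;
		sg = sg->next;
	} while (sg != sd->groups);

	return n;
}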
1130f2cb1360SIngo Molnar 
1131f2cb1360SIngo Molnar /*
1132f2cb1360SIngo Molnar  * Initialize sched groups cpu_capacity.
1133f2cb1360SIngo Molnar  *
1134f2cb1360SIngo Molnar  * cpu_capacity indicates the capacity of a sched group, which is used while
1135f2cb1360SIngo Molnar  * distributing the load between different sched groups in a sched domain.
1136f2cb1360SIngo Molnar  * Typically cpu_capacity will be the same for all the groups in a sched domain
1137f2cb1360SIngo Molnar  * unless there are asymmetries in the topology. If there are asymmetries,
1138f2cb1360SIngo Molnar  * the group having more cpu_capacity will pick up more load compared to the
1139f2cb1360SIngo Molnar  * group having less cpu_capacity.
1140f2cb1360SIngo Molnar  */
1141f2cb1360SIngo Molnar static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
1142f2cb1360SIngo Molnar {
1143f2cb1360SIngo Molnar 	struct sched_group *sg = sd->groups;
1144f2cb1360SIngo Molnar 
1145f2cb1360SIngo Molnar 	WARN_ON(!sg);
1146f2cb1360SIngo Molnar 
1147f2cb1360SIngo Molnar 	do {
1148f2cb1360SIngo Molnar 		int cpu, max_cpu = -1;
1149f2cb1360SIngo Molnar 
1150ae4df9d6SPeter Zijlstra 		sg->group_weight = cpumask_weight(sched_group_span(sg));
1151f2cb1360SIngo Molnar 
1152f2cb1360SIngo Molnar 		if (!(sd->flags & SD_ASYM_PACKING))
1153f2cb1360SIngo Molnar 			goto next;
1154f2cb1360SIngo Molnar 
1155ae4df9d6SPeter Zijlstra 		for_each_cpu(cpu, sched_group_span(sg)) {
1156f2cb1360SIngo Molnar 			if (max_cpu < 0)
1157f2cb1360SIngo Molnar 				max_cpu = cpu;
1158f2cb1360SIngo Molnar 			else if (sched_asym_prefer(cpu, max_cpu))
1159f2cb1360SIngo Molnar 				max_cpu = cpu;
1160f2cb1360SIngo Molnar 		}
1161f2cb1360SIngo Molnar 		sg->asym_prefer_cpu = max_cpu;
1162f2cb1360SIngo Molnar 
1163f2cb1360SIngo Molnar next:
1164f2cb1360SIngo Molnar 		sg = sg->next;
1165f2cb1360SIngo Molnar 	} while (sg != sd->groups);
1166f2cb1360SIngo Molnar 
1167f2cb1360SIngo Molnar 	if (cpu != group_balance_cpu(sg))
1168f2cb1360SIngo Molnar 		return;
1169f2cb1360SIngo Molnar 
1170f2cb1360SIngo Molnar 	update_group_capacity(sd, cpu);
1171f2cb1360SIngo Molnar }
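/*
 * Illustrative sketch: the SD_ASYM_PACKING branch above is an argmax over
 * the group's CPUs using sched_asym_prefer() from sched.h. The helper name
 * is hypothetical.
 */
static inline int pick_asym_prefer_cpu(const struct cpumask *span)
{
	int cpu, max_cpu = -1;

	for_each_cpu(cpu, span) {
		if (max_cpu < 0 || sched_asym_prefer(cpu, max_cpu))
			max_cpu = cpu;
	}

	return max_cpu;
}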
1172f2cb1360SIngo Molnar 
1173f2cb1360SIngo Molnar /*
1174f2cb1360SIngo Molnar  * Initializers for sched domains
1175f2cb1360SIngo Molnar  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1176f2cb1360SIngo Molnar  */
1177f2cb1360SIngo Molnar 
1178f2cb1360SIngo Molnar static int default_relax_domain_level = -1;
1179f2cb1360SIngo Molnar int sched_domain_level_max;
1180f2cb1360SIngo Molnar 
1181f2cb1360SIngo Molnar static int __init setup_relax_domain_level(char *str)
1182f2cb1360SIngo Molnar {
1183f2cb1360SIngo Molnar 	if (kstrtoint(str, 0, &default_relax_domain_level))
1184f2cb1360SIngo Molnar 		pr_warn("Unable to set relax_domain_level\n");
1185f2cb1360SIngo Molnar 
1186f2cb1360SIngo Molnar 	return 1;
1187f2cb1360SIngo Molnar }
1188f2cb1360SIngo Molnar __setup("relax_domain_level=", setup_relax_domain_level);
1189f2cb1360SIngo Molnar 
1190f2cb1360SIngo Molnar static void set_domain_attribute(struct sched_domain *sd,
1191f2cb1360SIngo Molnar 				 struct sched_domain_attr *attr)
1192f2cb1360SIngo Molnar {
1193f2cb1360SIngo Molnar 	int request;
1194f2cb1360SIngo Molnar 
1195f2cb1360SIngo Molnar 	if (!attr || attr->relax_domain_level < 0) {
1196f2cb1360SIngo Molnar 		if (default_relax_domain_level < 0)
1197f2cb1360SIngo Molnar 			return;
1198f2cb1360SIngo Molnar 		else
1199f2cb1360SIngo Molnar 			request = default_relax_domain_level;
1200f2cb1360SIngo Molnar 	} else
1201f2cb1360SIngo Molnar 		request = attr->relax_domain_level;
1202f2cb1360SIngo Molnar 	if (request < sd->level) {
1203f2cb1360SIngo Molnar 		/* Turn off idle balance on this domain: */
1204f2cb1360SIngo Molnar 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1205f2cb1360SIngo Molnar 	} else {
1206f2cb1360SIngo Molnar 		/* Turn on idle balance on this domain: */
1207f2cb1360SIngo Molnar 		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1208f2cb1360SIngo Molnar 	}
1209f2cb1360SIngo Molnar }
1210f2cb1360SIngo Molnar 
1211f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map);
1212f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map);
1213f2cb1360SIngo Molnar 
1214f2cb1360SIngo Molnar static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
1215f2cb1360SIngo Molnar 				 const struct cpumask *cpu_map)
1216f2cb1360SIngo Molnar {
1217f2cb1360SIngo Molnar 	switch (what) {
1218f2cb1360SIngo Molnar 	case sa_rootdomain:
1219f2cb1360SIngo Molnar 		if (!atomic_read(&d->rd->refcount))
1220f2cb1360SIngo Molnar 			free_rootdomain(&d->rd->rcu);
1221f2cb1360SIngo Molnar 		/* Fall through */
1222f2cb1360SIngo Molnar 	case sa_sd:
1223f2cb1360SIngo Molnar 		free_percpu(d->sd);
1224f2cb1360SIngo Molnar 		/* Fall through */
1225f2cb1360SIngo Molnar 	case sa_sd_storage:
1226f2cb1360SIngo Molnar 		__sdt_free(cpu_map);
1227f2cb1360SIngo Molnar 		/* Fall through */
1228f2cb1360SIngo Molnar 	case sa_none:
1229f2cb1360SIngo Molnar 		break;
1230f2cb1360SIngo Molnar 	}
1231f2cb1360SIngo Molnar }
1232f2cb1360SIngo Molnar 
1233f2cb1360SIngo Molnar static enum s_alloc
1234f2cb1360SIngo Molnar __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
1235f2cb1360SIngo Molnar {
1236f2cb1360SIngo Molnar 	memset(d, 0, sizeof(*d));
1237f2cb1360SIngo Molnar 
1238f2cb1360SIngo Molnar 	if (__sdt_alloc(cpu_map))
1239f2cb1360SIngo Molnar 		return sa_sd_storage;
1240f2cb1360SIngo Molnar 	d->sd = alloc_percpu(struct sched_domain *);
1241f2cb1360SIngo Molnar 	if (!d->sd)
1242f2cb1360SIngo Molnar 		return sa_sd_storage;
1243f2cb1360SIngo Molnar 	d->rd = alloc_rootdomain();
1244f2cb1360SIngo Molnar 	if (!d->rd)
1245f2cb1360SIngo Molnar 		return sa_sd;
124697fb7a0aSIngo Molnar 
1247f2cb1360SIngo Molnar 	return sa_rootdomain;
1248f2cb1360SIngo Molnar }
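/*
 * Usage sketch (hypothetical wrapper), mirroring build_sched_domains()
 * below: allocate all stages, and on failure unwind exactly the stages
 * that succeeded via the fall-through switch in __free_domain_allocs().
 */
static inline int alloc_domain_storage(struct s_data *d,
				       const struct cpumask *cpu_map)
{
	enum s_alloc state = __visit_domain_allocation_hell(d, cpu_map);

	if (state != sa_rootdomain) {
		__free_domain_allocs(d, state, cpu_map);
		return -ENOMEM;
	}

	return 0;
}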
1249f2cb1360SIngo Molnar 
1250f2cb1360SIngo Molnar /*
1251f2cb1360SIngo Molnar  * NULL the sd_data elements we've used to build the sched_domain and
1252f2cb1360SIngo Molnar  * sched_group structure so that the subsequent __free_domain_allocs()
1253f2cb1360SIngo Molnar  * will not free the data we're using.
1254f2cb1360SIngo Molnar  */
1255f2cb1360SIngo Molnar static void claim_allocations(int cpu, struct sched_domain *sd)
1256f2cb1360SIngo Molnar {
1257f2cb1360SIngo Molnar 	struct sd_data *sdd = sd->private;
1258f2cb1360SIngo Molnar 
1259f2cb1360SIngo Molnar 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1260f2cb1360SIngo Molnar 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
1261f2cb1360SIngo Molnar 
1262f2cb1360SIngo Molnar 	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
1263f2cb1360SIngo Molnar 		*per_cpu_ptr(sdd->sds, cpu) = NULL;
1264f2cb1360SIngo Molnar 
1265f2cb1360SIngo Molnar 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
1266f2cb1360SIngo Molnar 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
1267f2cb1360SIngo Molnar 
1268f2cb1360SIngo Molnar 	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
1269f2cb1360SIngo Molnar 		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
1270f2cb1360SIngo Molnar }
1271f2cb1360SIngo Molnar 
1272f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1273f2cb1360SIngo Molnar enum numa_topology_type sched_numa_topology_type;
127497fb7a0aSIngo Molnar 
127597fb7a0aSIngo Molnar static int			sched_domains_numa_levels;
1276f2cb1360SIngo Molnar static int			sched_domains_curr_level;
127797fb7a0aSIngo Molnar 
127897fb7a0aSIngo Molnar int				sched_max_numa_distance;
127997fb7a0aSIngo Molnar static int			*sched_domains_numa_distance;
128097fb7a0aSIngo Molnar static struct cpumask		***sched_domains_numa_masks;
1281f2cb1360SIngo Molnar #endif
1282f2cb1360SIngo Molnar 
1283f2cb1360SIngo Molnar /*
1284f2cb1360SIngo Molnar  * SD_flags allowed in topology descriptions.
1285f2cb1360SIngo Molnar  *
1286f2cb1360SIngo Molnar  * These flags are purely descriptive of the topology and do not prescribe
1287f2cb1360SIngo Molnar  * behaviour. Behaviour is artificial and mapped in the sd_init()
1288f2cb1360SIngo Molnar  * function below:
1289f2cb1360SIngo Molnar  *
1290f2cb1360SIngo Molnar  *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
1291f2cb1360SIngo Molnar  *   SD_SHARE_PKG_RESOURCES - describes shared caches
1292f2cb1360SIngo Molnar  *   SD_NUMA                - describes NUMA topologies
1293f2cb1360SIngo Molnar  *   SD_SHARE_POWERDOMAIN   - describes shared power domain
1294f2cb1360SIngo Molnar  *
1295f2cb1360SIngo Molnar  * The odd one out: besides describing the topology, this flag also
1296f2cb1360SIngo Molnar  * prescribes the desired behaviour that goes along with it:
1297f2cb1360SIngo Molnar  *
1298f2cb1360SIngo Molnar  *   SD_ASYM_PACKING        - describes SMT quirks
1299f2cb1360SIngo Molnar  */
1300f2cb1360SIngo Molnar #define TOPOLOGY_SD_FLAGS		\
1301f2cb1360SIngo Molnar 	(SD_SHARE_CPUCAPACITY	|	\
1302f2cb1360SIngo Molnar 	 SD_SHARE_PKG_RESOURCES |	\
1303f2cb1360SIngo Molnar 	 SD_NUMA		|	\
1304f2cb1360SIngo Molnar 	 SD_ASYM_PACKING	|	\
1305f2cb1360SIngo Molnar 	 SD_SHARE_POWERDOMAIN)
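/*
 * Example: a topology level's ->sd_flags callback returns a mask drawn
 * from TOPOLOGY_SD_FLAGS. The stock SMT level in default_topology[] below
 * uses cpu_smt_flags() from <linux/sched/topology.h>, which is essentially:
 */
static inline int example_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_POWERDOMAIN;
}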
1306f2cb1360SIngo Molnar 
1307f2cb1360SIngo Molnar static struct sched_domain *
1308f2cb1360SIngo Molnar sd_init(struct sched_domain_topology_level *tl,
1309f2cb1360SIngo Molnar 	const struct cpumask *cpu_map,
131005484e09SMorten Rasmussen 	struct sched_domain *child, int dflags, int cpu)
1311f2cb1360SIngo Molnar {
1312f2cb1360SIngo Molnar 	struct sd_data *sdd = &tl->data;
1313f2cb1360SIngo Molnar 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1314f2cb1360SIngo Molnar 	int sd_id, sd_weight, sd_flags = 0;
1315f2cb1360SIngo Molnar 
1316f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1317f2cb1360SIngo Molnar 	/*
1318f2cb1360SIngo Molnar 	 * Ugly hack to pass state to sd_numa_mask()...
1319f2cb1360SIngo Molnar 	 */
1320f2cb1360SIngo Molnar 	sched_domains_curr_level = tl->numa_level;
1321f2cb1360SIngo Molnar #endif
1322f2cb1360SIngo Molnar 
1323f2cb1360SIngo Molnar 	sd_weight = cpumask_weight(tl->mask(cpu));
1324f2cb1360SIngo Molnar 
1325f2cb1360SIngo Molnar 	if (tl->sd_flags)
1326f2cb1360SIngo Molnar 		sd_flags = (*tl->sd_flags)();
1327f2cb1360SIngo Molnar 	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
1328f2cb1360SIngo Molnar 			"wrong sd_flags in topology description\n"))
1329f2cb1360SIngo Molnar 		sd_flags &= ~TOPOLOGY_SD_FLAGS;
1330f2cb1360SIngo Molnar 
133105484e09SMorten Rasmussen 	/* Apply detected topology flags */
133205484e09SMorten Rasmussen 	sd_flags |= dflags;
133305484e09SMorten Rasmussen 
1334f2cb1360SIngo Molnar 	*sd = (struct sched_domain){
1335f2cb1360SIngo Molnar 		.min_interval		= sd_weight,
1336f2cb1360SIngo Molnar 		.max_interval		= 2*sd_weight,
1337f2cb1360SIngo Molnar 		.busy_factor		= 32,
1338f2cb1360SIngo Molnar 		.imbalance_pct		= 125,
1339f2cb1360SIngo Molnar 
1340f2cb1360SIngo Molnar 		.cache_nice_tries	= 0,
1341f2cb1360SIngo Molnar 		.busy_idx		= 0,
1342f2cb1360SIngo Molnar 		.idle_idx		= 0,
1343f2cb1360SIngo Molnar 		.newidle_idx		= 0,
1344f2cb1360SIngo Molnar 		.wake_idx		= 0,
1345f2cb1360SIngo Molnar 		.forkexec_idx		= 0,
1346f2cb1360SIngo Molnar 
1347f2cb1360SIngo Molnar 		.flags			= 1*SD_LOAD_BALANCE
1348f2cb1360SIngo Molnar 					| 1*SD_BALANCE_NEWIDLE
1349f2cb1360SIngo Molnar 					| 1*SD_BALANCE_EXEC
1350f2cb1360SIngo Molnar 					| 1*SD_BALANCE_FORK
1351f2cb1360SIngo Molnar 					| 0*SD_BALANCE_WAKE
1352f2cb1360SIngo Molnar 					| 1*SD_WAKE_AFFINE
1353f2cb1360SIngo Molnar 					| 0*SD_SHARE_CPUCAPACITY
1354f2cb1360SIngo Molnar 					| 0*SD_SHARE_PKG_RESOURCES
1355f2cb1360SIngo Molnar 					| 0*SD_SERIALIZE
13569c63e84dSMorten Rasmussen 					| 1*SD_PREFER_SIBLING
1357f2cb1360SIngo Molnar 					| 0*SD_NUMA
1358f2cb1360SIngo Molnar 					| sd_flags
1359f2cb1360SIngo Molnar 					,
1360f2cb1360SIngo Molnar 
1361f2cb1360SIngo Molnar 		.last_balance		= jiffies,
1362f2cb1360SIngo Molnar 		.balance_interval	= sd_weight,
1363f2cb1360SIngo Molnar 		.max_newidle_lb_cost	= 0,
1364f2cb1360SIngo Molnar 		.next_decay_max_lb_cost	= jiffies,
1365f2cb1360SIngo Molnar 		.child			= child,
1366f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
1367f2cb1360SIngo Molnar 		.name			= tl->name,
1368f2cb1360SIngo Molnar #endif
1369f2cb1360SIngo Molnar 	};
1370f2cb1360SIngo Molnar 
1371f2cb1360SIngo Molnar 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
1372f2cb1360SIngo Molnar 	sd_id = cpumask_first(sched_domain_span(sd));
1373f2cb1360SIngo Molnar 
1374f2cb1360SIngo Molnar 	/*
1375f2cb1360SIngo Molnar 	 * Convert topological properties into behaviour.
1376f2cb1360SIngo Molnar 	 */
1377f2cb1360SIngo Molnar 
1378f2cb1360SIngo Molnar 	if (sd->flags & SD_ASYM_CPUCAPACITY) {
1379f2cb1360SIngo Molnar 		struct sched_domain *t = sd;
1380f2cb1360SIngo Molnar 
13819c63e84dSMorten Rasmussen 		/*
13829c63e84dSMorten Rasmussen 		 * Don't attempt to spread across CPUs of different capacities.
13839c63e84dSMorten Rasmussen 		 */
13849c63e84dSMorten Rasmussen 		if (sd->child)
13859c63e84dSMorten Rasmussen 			sd->child->flags &= ~SD_PREFER_SIBLING;
13869c63e84dSMorten Rasmussen 
1387f2cb1360SIngo Molnar 		for_each_lower_domain(t)
1388f2cb1360SIngo Molnar 			t->flags |= SD_BALANCE_WAKE;
1389f2cb1360SIngo Molnar 	}
1390f2cb1360SIngo Molnar 
1391f2cb1360SIngo Molnar 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
1392f2cb1360SIngo Molnar 		sd->imbalance_pct = 110;
1393f2cb1360SIngo Molnar 
1394f2cb1360SIngo Molnar 	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1395f2cb1360SIngo Molnar 		sd->imbalance_pct = 117;
1396f2cb1360SIngo Molnar 		sd->cache_nice_tries = 1;
1397f2cb1360SIngo Molnar 		sd->busy_idx = 2;
1398f2cb1360SIngo Molnar 
1399f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1400f2cb1360SIngo Molnar 	} else if (sd->flags & SD_NUMA) {
1401f2cb1360SIngo Molnar 		sd->cache_nice_tries = 2;
1402f2cb1360SIngo Molnar 		sd->busy_idx = 3;
1403f2cb1360SIngo Molnar 		sd->idle_idx = 2;
1404f2cb1360SIngo Molnar 
14059c63e84dSMorten Rasmussen 		sd->flags &= ~SD_PREFER_SIBLING;
1406f2cb1360SIngo Molnar 		sd->flags |= SD_SERIALIZE;
1407f2cb1360SIngo Molnar 		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
1408f2cb1360SIngo Molnar 			sd->flags &= ~(SD_BALANCE_EXEC |
1409f2cb1360SIngo Molnar 				       SD_BALANCE_FORK |
1410f2cb1360SIngo Molnar 				       SD_WAKE_AFFINE);
1411f2cb1360SIngo Molnar 		}
1412f2cb1360SIngo Molnar 
1413f2cb1360SIngo Molnar #endif
1414f2cb1360SIngo Molnar 	} else {
1415f2cb1360SIngo Molnar 		sd->cache_nice_tries = 1;
1416f2cb1360SIngo Molnar 		sd->busy_idx = 2;
1417f2cb1360SIngo Molnar 		sd->idle_idx = 1;
1418f2cb1360SIngo Molnar 	}
1419f2cb1360SIngo Molnar 
1420f2cb1360SIngo Molnar 	/*
1421f2cb1360SIngo Molnar 	 * For all levels sharing cache; connect a sched_domain_shared
1422f2cb1360SIngo Molnar 	 * instance.
1423f2cb1360SIngo Molnar 	 */
1424f2cb1360SIngo Molnar 	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1425f2cb1360SIngo Molnar 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1426f2cb1360SIngo Molnar 		atomic_inc(&sd->shared->ref);
1427f2cb1360SIngo Molnar 		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1428f2cb1360SIngo Molnar 	}
1429f2cb1360SIngo Molnar 
1430f2cb1360SIngo Molnar 	sd->private = sdd;
1431f2cb1360SIngo Molnar 
1432f2cb1360SIngo Molnar 	return sd;
1433f2cb1360SIngo Molnar }
1434f2cb1360SIngo Molnar 
1435f2cb1360SIngo Molnar /*
1436f2cb1360SIngo Molnar  * Topology list, bottom-up.
1437f2cb1360SIngo Molnar  */
1438f2cb1360SIngo Molnar static struct sched_domain_topology_level default_topology[] = {
1439f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_SMT
1440f2cb1360SIngo Molnar 	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
1441f2cb1360SIngo Molnar #endif
1442f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_MC
1443f2cb1360SIngo Molnar 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
1444f2cb1360SIngo Molnar #endif
1445f2cb1360SIngo Molnar 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1446f2cb1360SIngo Molnar 	{ NULL, },
1447f2cb1360SIngo Molnar };
1448f2cb1360SIngo Molnar 
1449f2cb1360SIngo Molnar static struct sched_domain_topology_level *sched_domain_topology =
1450f2cb1360SIngo Molnar 	default_topology;
1451f2cb1360SIngo Molnar 
1452f2cb1360SIngo Molnar #define for_each_sd_topology(tl)			\
1453f2cb1360SIngo Molnar 	for (tl = sched_domain_topology; tl->mask; tl++)
1454f2cb1360SIngo Molnar 
1455f2cb1360SIngo Molnar void set_sched_topology(struct sched_domain_topology_level *tl)
1456f2cb1360SIngo Molnar {
1457f2cb1360SIngo Molnar 	if (WARN_ON_ONCE(sched_smp_initialized))
1458f2cb1360SIngo Molnar 		return;
1459f2cb1360SIngo Molnar 
1460f2cb1360SIngo Molnar 	sched_domain_topology = tl;
1461f2cb1360SIngo Molnar }
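/*
 * Usage sketch (hypothetical table): an architecture may install its own
 * bottom-up table before SMP init, here assuming CONFIG_SCHED_SMT. The
 * table must be terminated by an entry with a NULL ->mask.
 */
static struct sched_domain_topology_level my_arch_topology[] = {
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
/* Arch code would then call: set_sched_topology(my_arch_topology); */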
1462f2cb1360SIngo Molnar 
1463f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1464f2cb1360SIngo Molnar 
1465f2cb1360SIngo Molnar static const struct cpumask *sd_numa_mask(int cpu)
1466f2cb1360SIngo Molnar {
1467f2cb1360SIngo Molnar 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1468f2cb1360SIngo Molnar }
1469f2cb1360SIngo Molnar 
1470f2cb1360SIngo Molnar static void sched_numa_warn(const char *str)
1471f2cb1360SIngo Molnar {
1472f2cb1360SIngo Molnar 	static bool done = false;
1473f2cb1360SIngo Molnar 	int i, j;
1474f2cb1360SIngo Molnar 
1475f2cb1360SIngo Molnar 	if (done)
1476f2cb1360SIngo Molnar 		return;
1477f2cb1360SIngo Molnar 
1478f2cb1360SIngo Molnar 	done = true;
1479f2cb1360SIngo Molnar 
1480f2cb1360SIngo Molnar 	printk(KERN_WARNING "ERROR: %s\n\n", str);
1481f2cb1360SIngo Molnar 
1482f2cb1360SIngo Molnar 	for (i = 0; i < nr_node_ids; i++) {
1483f2cb1360SIngo Molnar 		printk(KERN_WARNING "  ");
1484f2cb1360SIngo Molnar 		for (j = 0; j < nr_node_ids; j++)
1485f2cb1360SIngo Molnar 			printk(KERN_CONT "%02d ", node_distance(i, j));
1486f2cb1360SIngo Molnar 		printk(KERN_CONT "\n");
1487f2cb1360SIngo Molnar 	}
1488f2cb1360SIngo Molnar 	printk(KERN_WARNING "\n");
1489f2cb1360SIngo Molnar }
1490f2cb1360SIngo Molnar 
1491f2cb1360SIngo Molnar bool find_numa_distance(int distance)
1492f2cb1360SIngo Molnar {
1493f2cb1360SIngo Molnar 	int i;
1494f2cb1360SIngo Molnar 
1495f2cb1360SIngo Molnar 	if (distance == node_distance(0, 0))
1496f2cb1360SIngo Molnar 		return true;
1497f2cb1360SIngo Molnar 
1498f2cb1360SIngo Molnar 	for (i = 0; i < sched_domains_numa_levels; i++) {
1499f2cb1360SIngo Molnar 		if (sched_domains_numa_distance[i] == distance)
1500f2cb1360SIngo Molnar 			return true;
1501f2cb1360SIngo Molnar 	}
1502f2cb1360SIngo Molnar 
1503f2cb1360SIngo Molnar 	return false;
1504f2cb1360SIngo Molnar }
1505f2cb1360SIngo Molnar 
1506f2cb1360SIngo Molnar /*
1507f2cb1360SIngo Molnar  * A system can have three types of NUMA topology:
1508f2cb1360SIngo Molnar  * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
1509f2cb1360SIngo Molnar  * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
1510f2cb1360SIngo Molnar  * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
1511f2cb1360SIngo Molnar  *
1512f2cb1360SIngo Molnar  * The difference between a glueless mesh topology and a backplane
1513f2cb1360SIngo Molnar  * topology lies in whether communication between not directly
1514f2cb1360SIngo Molnar  * connected nodes goes through intermediary nodes (where programs
1515f2cb1360SIngo Molnar  * could run), or through backplane controllers. This affects
1516f2cb1360SIngo Molnar  * placement of programs.
1517f2cb1360SIngo Molnar  *
1518f2cb1360SIngo Molnar  * The type of topology can be discerned with the following tests:
1519f2cb1360SIngo Molnar  * - If the maximum distance between any nodes is 1 hop, the system
1520f2cb1360SIngo Molnar  *   is directly connected.
1521f2cb1360SIngo Molnar  * - If for two nodes A and B, located N > 1 hops away from each other,
1522f2cb1360SIngo Molnar  *   there is an intermediary node C, which is < N hops away from both
1523f2cb1360SIngo Molnar  *   nodes A and B, the system is a glueless mesh.
1524f2cb1360SIngo Molnar  */
1525f2cb1360SIngo Molnar static void init_numa_topology_type(void)
1526f2cb1360SIngo Molnar {
1527f2cb1360SIngo Molnar 	int a, b, c, n;
1528f2cb1360SIngo Molnar 
1529f2cb1360SIngo Molnar 	n = sched_max_numa_distance;
1530f2cb1360SIngo Molnar 
1531e5e96fafSSrikar Dronamraju 	if (sched_domains_numa_levels <= 2) {
1532f2cb1360SIngo Molnar 		sched_numa_topology_type = NUMA_DIRECT;
1533f2cb1360SIngo Molnar 		return;
1534f2cb1360SIngo Molnar 	}
1535f2cb1360SIngo Molnar 
1536f2cb1360SIngo Molnar 	for_each_online_node(a) {
1537f2cb1360SIngo Molnar 		for_each_online_node(b) {
1538f2cb1360SIngo Molnar 			/* Find two nodes furthest removed from each other. */
1539f2cb1360SIngo Molnar 			if (node_distance(a, b) < n)
1540f2cb1360SIngo Molnar 				continue;
1541f2cb1360SIngo Molnar 
1542f2cb1360SIngo Molnar 			/* Is there an intermediary node between a and b? */
1543f2cb1360SIngo Molnar 			for_each_online_node(c) {
1544f2cb1360SIngo Molnar 				if (node_distance(a, c) < n &&
1545f2cb1360SIngo Molnar 				    node_distance(b, c) < n) {
1546f2cb1360SIngo Molnar 					sched_numa_topology_type =
1547f2cb1360SIngo Molnar 							NUMA_GLUELESS_MESH;
1548f2cb1360SIngo Molnar 					return;
1549f2cb1360SIngo Molnar 				}
1550f2cb1360SIngo Molnar 			}
1551f2cb1360SIngo Molnar 
1552f2cb1360SIngo Molnar 			sched_numa_topology_type = NUMA_BACKPLANE;
1553f2cb1360SIngo Molnar 			return;
1554f2cb1360SIngo Molnar 		}
1555f2cb1360SIngo Molnar 	}
1556f2cb1360SIngo Molnar }
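/*
 * Worked example (hypothetical, symmetric distance table): with three
 * unique distances {10, 20, 30}, nodes 0 and 3 are the furthest pair, but
 * node 1 sits at distance 20 (< 30) from both, so init_numa_topology_type()
 * classifies the system as NUMA_GLUELESS_MESH; without such an intermediary
 * node it would fall back to NUMA_BACKPLANE.
 */
static const int example_node_distance[4][4] = {
	{ 10, 20, 20, 30 },
	{ 20, 10, 20, 20 },
	{ 20, 20, 10, 20 },
	{ 30, 20, 20, 10 },
};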
1557f2cb1360SIngo Molnar 
1558f2cb1360SIngo Molnar void sched_init_numa(void)
1559f2cb1360SIngo Molnar {
1560f2cb1360SIngo Molnar 	int next_distance, curr_distance = node_distance(0, 0);
1561f2cb1360SIngo Molnar 	struct sched_domain_topology_level *tl;
1562f2cb1360SIngo Molnar 	int level = 0;
1563f2cb1360SIngo Molnar 	int i, j, k;
1564f2cb1360SIngo Molnar 
1565993f0b05SPeter Zijlstra 	sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL);
1566f2cb1360SIngo Molnar 	if (!sched_domains_numa_distance)
1567f2cb1360SIngo Molnar 		return;
1568f2cb1360SIngo Molnar 
1569051f3ca0SSuravee Suthikulpanit 	/* Includes NUMA identity node at level 0. */
1570051f3ca0SSuravee Suthikulpanit 	sched_domains_numa_distance[level++] = curr_distance;
1571051f3ca0SSuravee Suthikulpanit 	sched_domains_numa_levels = level;
1572051f3ca0SSuravee Suthikulpanit 
1573f2cb1360SIngo Molnar 	/*
1574f2cb1360SIngo Molnar 	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
1575f2cb1360SIngo Molnar 	 * unique distances in the node_distance() table.
1576f2cb1360SIngo Molnar 	 *
1577f2cb1360SIngo Molnar 	 * Assumes node_distance(0,j) includes all distances in
1578f2cb1360SIngo Molnar 	 * node_distance(i,j) in order to avoid cubic time.
1579f2cb1360SIngo Molnar 	 */
1580f2cb1360SIngo Molnar 	next_distance = curr_distance;
1581f2cb1360SIngo Molnar 	for (i = 0; i < nr_node_ids; i++) {
1582f2cb1360SIngo Molnar 		for (j = 0; j < nr_node_ids; j++) {
1583f2cb1360SIngo Molnar 			for (k = 0; k < nr_node_ids; k++) {
1584f2cb1360SIngo Molnar 				int distance = node_distance(i, k);
1585f2cb1360SIngo Molnar 
1586f2cb1360SIngo Molnar 				if (distance > curr_distance &&
1587f2cb1360SIngo Molnar 				    (distance < next_distance ||
1588f2cb1360SIngo Molnar 				     next_distance == curr_distance))
1589f2cb1360SIngo Molnar 					next_distance = distance;
1590f2cb1360SIngo Molnar 
1591f2cb1360SIngo Molnar 				/*
1592f2cb1360SIngo Molnar 				 * While not a strong assumption, it would be nice to know
1593f2cb1360SIngo Molnar 				 * about cases where node A is connected to B but B is not
1594f2cb1360SIngo Molnar 				 * equally connected to A.
1595f2cb1360SIngo Molnar 				 */
1596f2cb1360SIngo Molnar 				if (sched_debug() && node_distance(k, i) != distance)
1597f2cb1360SIngo Molnar 					sched_numa_warn("Node-distance not symmetric");
1598f2cb1360SIngo Molnar 
1599f2cb1360SIngo Molnar 				if (sched_debug() && i && !find_numa_distance(distance))
1600f2cb1360SIngo Molnar 					sched_numa_warn("Node-0 not representative");
1601f2cb1360SIngo Molnar 			}
1602f2cb1360SIngo Molnar 			if (next_distance != curr_distance) {
1603f2cb1360SIngo Molnar 				sched_domains_numa_distance[level++] = next_distance;
1604f2cb1360SIngo Molnar 				sched_domains_numa_levels = level;
1605f2cb1360SIngo Molnar 				curr_distance = next_distance;
1606f2cb1360SIngo Molnar 			} else break;
1607f2cb1360SIngo Molnar 		}
1608f2cb1360SIngo Molnar 
1609f2cb1360SIngo Molnar 		/*
1610f2cb1360SIngo Molnar 		 * In case of sched_debug() we verify the above assumption.
1611f2cb1360SIngo Molnar 		 */
1612f2cb1360SIngo Molnar 		if (!sched_debug())
1613f2cb1360SIngo Molnar 			break;
1614f2cb1360SIngo Molnar 	}
1615f2cb1360SIngo Molnar 
1616f2cb1360SIngo Molnar 	/*
1617051f3ca0SSuravee Suthikulpanit 	 * 'level' contains the number of unique distances
1618f2cb1360SIngo Molnar 	 *
1619f2cb1360SIngo Molnar 	 * The sched_domains_numa_distance[] array includes the actual distance
1620f2cb1360SIngo Molnar 	 * numbers.
1621f2cb1360SIngo Molnar 	 */
1622f2cb1360SIngo Molnar 
1623f2cb1360SIngo Molnar 	/*
1624f2cb1360SIngo Molnar 	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
1625f2cb1360SIngo Molnar 	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
1626f2cb1360SIngo Molnar  * the array will contain fewer than 'level' members. This could be
1627f2cb1360SIngo Molnar  * dangerous when we use it to iterate the sched_domains_numa_masks[][] array
1628f2cb1360SIngo Molnar 	 * in other functions.
1629f2cb1360SIngo Molnar 	 *
1630f2cb1360SIngo Molnar 	 * We reset it to 'level' at the end of this function.
1631f2cb1360SIngo Molnar 	 */
1632f2cb1360SIngo Molnar 	sched_domains_numa_levels = 0;
1633f2cb1360SIngo Molnar 
1634f2cb1360SIngo Molnar 	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
1635f2cb1360SIngo Molnar 	if (!sched_domains_numa_masks)
1636f2cb1360SIngo Molnar 		return;
1637f2cb1360SIngo Molnar 
1638f2cb1360SIngo Molnar 	/*
1639f2cb1360SIngo Molnar 	 * Now for each level, construct a mask per node which contains all
1640f2cb1360SIngo Molnar 	 * CPUs of nodes that are that many hops away from us.
1641f2cb1360SIngo Molnar 	 */
1642f2cb1360SIngo Molnar 	for (i = 0; i < level; i++) {
1643f2cb1360SIngo Molnar 		sched_domains_numa_masks[i] =
1644f2cb1360SIngo Molnar 			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
1645f2cb1360SIngo Molnar 		if (!sched_domains_numa_masks[i])
1646f2cb1360SIngo Molnar 			return;
1647f2cb1360SIngo Molnar 
1648f2cb1360SIngo Molnar 		for (j = 0; j < nr_node_ids; j++) {
1649f2cb1360SIngo Molnar 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
1650f2cb1360SIngo Molnar 			if (!mask)
1651f2cb1360SIngo Molnar 				return;
1652f2cb1360SIngo Molnar 
1653f2cb1360SIngo Molnar 			sched_domains_numa_masks[i][j] = mask;
1654f2cb1360SIngo Molnar 
1655f2cb1360SIngo Molnar 			for_each_node(k) {
1656f2cb1360SIngo Molnar 				if (node_distance(j, k) > sched_domains_numa_distance[i])
1657f2cb1360SIngo Molnar 					continue;
1658f2cb1360SIngo Molnar 
1659f2cb1360SIngo Molnar 				cpumask_or(mask, mask, cpumask_of_node(k));
1660f2cb1360SIngo Molnar 			}
1661f2cb1360SIngo Molnar 		}
1662f2cb1360SIngo Molnar 	}
1663f2cb1360SIngo Molnar 
1664f2cb1360SIngo Molnar 	/* Compute default topology size */
1665f2cb1360SIngo Molnar 	for (i = 0; sched_domain_topology[i].mask; i++);
1666f2cb1360SIngo Molnar 
1667f2cb1360SIngo Molnar 	tl = kzalloc((i + level + 1) *
1668f2cb1360SIngo Molnar 			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
1669f2cb1360SIngo Molnar 	if (!tl)
1670f2cb1360SIngo Molnar 		return;
1671f2cb1360SIngo Molnar 
1672f2cb1360SIngo Molnar 	/*
1673f2cb1360SIngo Molnar 	 * Copy the default topology bits..
1674f2cb1360SIngo Molnar 	 */
1675f2cb1360SIngo Molnar 	for (i = 0; sched_domain_topology[i].mask; i++)
1676f2cb1360SIngo Molnar 		tl[i] = sched_domain_topology[i];
1677f2cb1360SIngo Molnar 
1678f2cb1360SIngo Molnar 	/*
1679051f3ca0SSuravee Suthikulpanit 	 * Add the NUMA identity distance, aka single NODE.
1680051f3ca0SSuravee Suthikulpanit 	 */
1681051f3ca0SSuravee Suthikulpanit 	tl[i++] = (struct sched_domain_topology_level){
1682051f3ca0SSuravee Suthikulpanit 		.mask = sd_numa_mask,
1683051f3ca0SSuravee Suthikulpanit 		.numa_level = 0,
1684051f3ca0SSuravee Suthikulpanit 		SD_INIT_NAME(NODE)
1685051f3ca0SSuravee Suthikulpanit 	};
1686051f3ca0SSuravee Suthikulpanit 
1687051f3ca0SSuravee Suthikulpanit 	/*
1688f2cb1360SIngo Molnar 	 * .. and append 'j' levels of NUMA goodness.
1689f2cb1360SIngo Molnar 	 */
1690051f3ca0SSuravee Suthikulpanit 	for (j = 1; j < level; i++, j++) {
1691f2cb1360SIngo Molnar 		tl[i] = (struct sched_domain_topology_level){
1692f2cb1360SIngo Molnar 			.mask = sd_numa_mask,
1693f2cb1360SIngo Molnar 			.sd_flags = cpu_numa_flags,
1694f2cb1360SIngo Molnar 			.flags = SDTL_OVERLAP,
1695f2cb1360SIngo Molnar 			.numa_level = j,
1696f2cb1360SIngo Molnar 			SD_INIT_NAME(NUMA)
1697f2cb1360SIngo Molnar 		};
1698f2cb1360SIngo Molnar 	}
1699f2cb1360SIngo Molnar 
1700f2cb1360SIngo Molnar 	sched_domain_topology = tl;
1701f2cb1360SIngo Molnar 
1702f2cb1360SIngo Molnar 	sched_domains_numa_levels = level;
1703f2cb1360SIngo Molnar 	sched_max_numa_distance = sched_domains_numa_distance[level - 1];
1704f2cb1360SIngo Molnar 
1705f2cb1360SIngo Molnar 	init_numa_topology_type();
1706f2cb1360SIngo Molnar }
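/*
 * Illustrative result, assuming the example distance table above on a
 * CONFIG_SCHED_SMT + CONFIG_SCHED_MC kernel: with unique distances
 * {10, 20, 30}, sched_init_numa() rewrites the topology into
 * { SMT, MC, DIE, NODE, NUMA, NUMA, NULL }, where NODE is numa_level 0
 * and the two NUMA entries carry SDTL_OVERLAP with numa_level 1 and 2.
 */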
1707f2cb1360SIngo Molnar 
1708f2cb1360SIngo Molnar void sched_domains_numa_masks_set(unsigned int cpu)
1709f2cb1360SIngo Molnar {
1710f2cb1360SIngo Molnar 	int node = cpu_to_node(cpu);
1711f2cb1360SIngo Molnar 	int i, j;
1712f2cb1360SIngo Molnar 
1713f2cb1360SIngo Molnar 	for (i = 0; i < sched_domains_numa_levels; i++) {
1714f2cb1360SIngo Molnar 		for (j = 0; j < nr_node_ids; j++) {
1715f2cb1360SIngo Molnar 			if (node_distance(j, node) <= sched_domains_numa_distance[i])
1716f2cb1360SIngo Molnar 				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
1717f2cb1360SIngo Molnar 		}
1718f2cb1360SIngo Molnar 	}
1719f2cb1360SIngo Molnar }
1720f2cb1360SIngo Molnar 
1721f2cb1360SIngo Molnar void sched_domains_numa_masks_clear(unsigned int cpu)
1722f2cb1360SIngo Molnar {
1723f2cb1360SIngo Molnar 	int i, j;
1724f2cb1360SIngo Molnar 
1725f2cb1360SIngo Molnar 	for (i = 0; i < sched_domains_numa_levels; i++) {
1726f2cb1360SIngo Molnar 		for (j = 0; j < nr_node_ids; j++)
1727f2cb1360SIngo Molnar 			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
1728f2cb1360SIngo Molnar 	}
1729f2cb1360SIngo Molnar }
1730f2cb1360SIngo Molnar 
1731f2cb1360SIngo Molnar #endif /* CONFIG_NUMA */
1732f2cb1360SIngo Molnar 
1733f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map)
1734f2cb1360SIngo Molnar {
1735f2cb1360SIngo Molnar 	struct sched_domain_topology_level *tl;
1736f2cb1360SIngo Molnar 	int j;
1737f2cb1360SIngo Molnar 
1738f2cb1360SIngo Molnar 	for_each_sd_topology(tl) {
1739f2cb1360SIngo Molnar 		struct sd_data *sdd = &tl->data;
1740f2cb1360SIngo Molnar 
1741f2cb1360SIngo Molnar 		sdd->sd = alloc_percpu(struct sched_domain *);
1742f2cb1360SIngo Molnar 		if (!sdd->sd)
1743f2cb1360SIngo Molnar 			return -ENOMEM;
1744f2cb1360SIngo Molnar 
1745f2cb1360SIngo Molnar 		sdd->sds = alloc_percpu(struct sched_domain_shared *);
1746f2cb1360SIngo Molnar 		if (!sdd->sds)
1747f2cb1360SIngo Molnar 			return -ENOMEM;
1748f2cb1360SIngo Molnar 
1749f2cb1360SIngo Molnar 		sdd->sg = alloc_percpu(struct sched_group *);
1750f2cb1360SIngo Molnar 		if (!sdd->sg)
1751f2cb1360SIngo Molnar 			return -ENOMEM;
1752f2cb1360SIngo Molnar 
1753f2cb1360SIngo Molnar 		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
1754f2cb1360SIngo Molnar 		if (!sdd->sgc)
1755f2cb1360SIngo Molnar 			return -ENOMEM;
1756f2cb1360SIngo Molnar 
1757f2cb1360SIngo Molnar 		for_each_cpu(j, cpu_map) {
1758f2cb1360SIngo Molnar 			struct sched_domain *sd;
1759f2cb1360SIngo Molnar 			struct sched_domain_shared *sds;
1760f2cb1360SIngo Molnar 			struct sched_group *sg;
1761f2cb1360SIngo Molnar 			struct sched_group_capacity *sgc;
1762f2cb1360SIngo Molnar 
1763f2cb1360SIngo Molnar 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
1764f2cb1360SIngo Molnar 					GFP_KERNEL, cpu_to_node(j));
1765f2cb1360SIngo Molnar 			if (!sd)
1766f2cb1360SIngo Molnar 				return -ENOMEM;
1767f2cb1360SIngo Molnar 
1768f2cb1360SIngo Molnar 			*per_cpu_ptr(sdd->sd, j) = sd;
1769f2cb1360SIngo Molnar 
1770f2cb1360SIngo Molnar 			sds = kzalloc_node(sizeof(struct sched_domain_shared),
1771f2cb1360SIngo Molnar 					GFP_KERNEL, cpu_to_node(j));
1772f2cb1360SIngo Molnar 			if (!sds)
1773f2cb1360SIngo Molnar 				return -ENOMEM;
1774f2cb1360SIngo Molnar 
1775f2cb1360SIngo Molnar 			*per_cpu_ptr(sdd->sds, j) = sds;
1776f2cb1360SIngo Molnar 
1777f2cb1360SIngo Molnar 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
1778f2cb1360SIngo Molnar 					GFP_KERNEL, cpu_to_node(j));
1779f2cb1360SIngo Molnar 			if (!sg)
1780f2cb1360SIngo Molnar 				return -ENOMEM;
1781f2cb1360SIngo Molnar 
1782f2cb1360SIngo Molnar 			sg->next = sg;
1783f2cb1360SIngo Molnar 
1784f2cb1360SIngo Molnar 			*per_cpu_ptr(sdd->sg, j) = sg;
1785f2cb1360SIngo Molnar 
1786f2cb1360SIngo Molnar 			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
1787f2cb1360SIngo Molnar 					GFP_KERNEL, cpu_to_node(j));
1788f2cb1360SIngo Molnar 			if (!sgc)
1789f2cb1360SIngo Molnar 				return -ENOMEM;
1790f2cb1360SIngo Molnar 
1791005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
1792005f874dSPeter Zijlstra 			sgc->id = j;
1793005f874dSPeter Zijlstra #endif
1794005f874dSPeter Zijlstra 
1795f2cb1360SIngo Molnar 			*per_cpu_ptr(sdd->sgc, j) = sgc;
1796f2cb1360SIngo Molnar 		}
1797f2cb1360SIngo Molnar 	}
1798f2cb1360SIngo Molnar 
1799f2cb1360SIngo Molnar 	return 0;
1800f2cb1360SIngo Molnar }
1801f2cb1360SIngo Molnar 
1802f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map)
1803f2cb1360SIngo Molnar {
1804f2cb1360SIngo Molnar 	struct sched_domain_topology_level *tl;
1805f2cb1360SIngo Molnar 	int j;
1806f2cb1360SIngo Molnar 
1807f2cb1360SIngo Molnar 	for_each_sd_topology(tl) {
1808f2cb1360SIngo Molnar 		struct sd_data *sdd = &tl->data;
1809f2cb1360SIngo Molnar 
1810f2cb1360SIngo Molnar 		for_each_cpu(j, cpu_map) {
1811f2cb1360SIngo Molnar 			struct sched_domain *sd;
1812f2cb1360SIngo Molnar 
1813f2cb1360SIngo Molnar 			if (sdd->sd) {
1814f2cb1360SIngo Molnar 				sd = *per_cpu_ptr(sdd->sd, j);
1815f2cb1360SIngo Molnar 				if (sd && (sd->flags & SD_OVERLAP))
1816f2cb1360SIngo Molnar 					free_sched_groups(sd->groups, 0);
1817f2cb1360SIngo Molnar 				kfree(*per_cpu_ptr(sdd->sd, j));
1818f2cb1360SIngo Molnar 			}
1819f2cb1360SIngo Molnar 
1820f2cb1360SIngo Molnar 			if (sdd->sds)
1821f2cb1360SIngo Molnar 				kfree(*per_cpu_ptr(sdd->sds, j));
1822f2cb1360SIngo Molnar 			if (sdd->sg)
1823f2cb1360SIngo Molnar 				kfree(*per_cpu_ptr(sdd->sg, j));
1824f2cb1360SIngo Molnar 			if (sdd->sgc)
1825f2cb1360SIngo Molnar 				kfree(*per_cpu_ptr(sdd->sgc, j));
1826f2cb1360SIngo Molnar 		}
1827f2cb1360SIngo Molnar 		free_percpu(sdd->sd);
1828f2cb1360SIngo Molnar 		sdd->sd = NULL;
1829f2cb1360SIngo Molnar 		free_percpu(sdd->sds);
1830f2cb1360SIngo Molnar 		sdd->sds = NULL;
1831f2cb1360SIngo Molnar 		free_percpu(sdd->sg);
1832f2cb1360SIngo Molnar 		sdd->sg = NULL;
1833f2cb1360SIngo Molnar 		free_percpu(sdd->sgc);
1834f2cb1360SIngo Molnar 		sdd->sgc = NULL;
1835f2cb1360SIngo Molnar 	}
1836f2cb1360SIngo Molnar }
1837f2cb1360SIngo Molnar 
1838181a80d1SViresh Kumar static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
1839f2cb1360SIngo Molnar 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
184005484e09SMorten Rasmussen 		struct sched_domain *child, int dflags, int cpu)
1841f2cb1360SIngo Molnar {
184205484e09SMorten Rasmussen 	struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
1843f2cb1360SIngo Molnar 
1844f2cb1360SIngo Molnar 	if (child) {
1845f2cb1360SIngo Molnar 		sd->level = child->level + 1;
1846f2cb1360SIngo Molnar 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
1847f2cb1360SIngo Molnar 		child->parent = sd;
1848f2cb1360SIngo Molnar 
1849f2cb1360SIngo Molnar 		if (!cpumask_subset(sched_domain_span(child),
1850f2cb1360SIngo Molnar 				    sched_domain_span(sd))) {
1851f2cb1360SIngo Molnar 			pr_err("BUG: arch topology borken\n");
1852f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
1853f2cb1360SIngo Molnar 			pr_err("     the %s domain not a subset of the %s domain\n",
1854f2cb1360SIngo Molnar 					child->name, sd->name);
1855f2cb1360SIngo Molnar #endif
185697fb7a0aSIngo Molnar 			/* Fixup, ensure @sd has at least @child CPUs. */
1857f2cb1360SIngo Molnar 			cpumask_or(sched_domain_span(sd),
1858f2cb1360SIngo Molnar 				   sched_domain_span(sd),
1859f2cb1360SIngo Molnar 				   sched_domain_span(child));
1860f2cb1360SIngo Molnar 		}
1861f2cb1360SIngo Molnar 
1862f2cb1360SIngo Molnar 	}
1863f2cb1360SIngo Molnar 	set_domain_attribute(sd, attr);
1864f2cb1360SIngo Molnar 
1865f2cb1360SIngo Molnar 	return sd;
1866f2cb1360SIngo Molnar }
1867f2cb1360SIngo Molnar 
1868f2cb1360SIngo Molnar /*
186905484e09SMorten Rasmussen  * Find the sched_domain_topology_level where all CPU capacities are visible
187005484e09SMorten Rasmussen  * for all CPUs.
187105484e09SMorten Rasmussen  */
187205484e09SMorten Rasmussen static struct sched_domain_topology_level
187305484e09SMorten Rasmussen *asym_cpu_capacity_level(const struct cpumask *cpu_map)
187405484e09SMorten Rasmussen {
187505484e09SMorten Rasmussen 	int i, j, asym_level = 0;
187605484e09SMorten Rasmussen 	bool asym = false;
187705484e09SMorten Rasmussen 	struct sched_domain_topology_level *tl, *asym_tl = NULL;
187805484e09SMorten Rasmussen 	unsigned long cap;
187905484e09SMorten Rasmussen 
188005484e09SMorten Rasmussen 	/* Is there any asymmetry? */
188105484e09SMorten Rasmussen 	cap = arch_scale_cpu_capacity(NULL, cpumask_first(cpu_map));
188205484e09SMorten Rasmussen 
188305484e09SMorten Rasmussen 	for_each_cpu(i, cpu_map) {
188405484e09SMorten Rasmussen 		if (arch_scale_cpu_capacity(NULL, i) != cap) {
188505484e09SMorten Rasmussen 			asym = true;
188605484e09SMorten Rasmussen 			break;
188705484e09SMorten Rasmussen 		}
188805484e09SMorten Rasmussen 	}
188905484e09SMorten Rasmussen 
189005484e09SMorten Rasmussen 	if (!asym)
189105484e09SMorten Rasmussen 		return NULL;
189205484e09SMorten Rasmussen 
189305484e09SMorten Rasmussen 	/*
189405484e09SMorten Rasmussen 	 * Examine the topology from each CPU's point of view to detect the lowest
189505484e09SMorten Rasmussen 	 * sched_domain_topology_level where a highest capacity CPU is visible
189605484e09SMorten Rasmussen 	 * to everyone.
189705484e09SMorten Rasmussen 	 */
189805484e09SMorten Rasmussen 	for_each_cpu(i, cpu_map) {
189905484e09SMorten Rasmussen 		unsigned long max_capacity = arch_scale_cpu_capacity(NULL, i);
190005484e09SMorten Rasmussen 		int tl_id = 0;
190105484e09SMorten Rasmussen 
190205484e09SMorten Rasmussen 		for_each_sd_topology(tl) {
190305484e09SMorten Rasmussen 			if (tl_id < asym_level)
190405484e09SMorten Rasmussen 				goto next_level;
190505484e09SMorten Rasmussen 
190605484e09SMorten Rasmussen 			for_each_cpu_and(j, tl->mask(i), cpu_map) {
190705484e09SMorten Rasmussen 				unsigned long capacity;
190805484e09SMorten Rasmussen 
190905484e09SMorten Rasmussen 				capacity = arch_scale_cpu_capacity(NULL, j);
191005484e09SMorten Rasmussen 
191105484e09SMorten Rasmussen 				if (capacity <= max_capacity)
191205484e09SMorten Rasmussen 					continue;
191305484e09SMorten Rasmussen 
191405484e09SMorten Rasmussen 				max_capacity = capacity;
191505484e09SMorten Rasmussen 				asym_level = tl_id;
191605484e09SMorten Rasmussen 				asym_tl = tl;
191705484e09SMorten Rasmussen 			}
191805484e09SMorten Rasmussen next_level:
191905484e09SMorten Rasmussen 			tl_id++;
192005484e09SMorten Rasmussen 		}
192105484e09SMorten Rasmussen 	}
192205484e09SMorten Rasmussen 
192305484e09SMorten Rasmussen 	return asym_tl;
192405484e09SMorten Rasmussen }
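/*
 * Worked example (hypothetical big.LITTLE system): CPUs 0-3 have capacity
 * 512 and CPUs 4-7 have capacity 1024, each cluster forming its own MC
 * level while DIE spans all eight CPUs. Seen from a little CPU, the first
 * level whose mask contains a 1024-capacity CPU is DIE, so the DIE level
 * is returned and later tagged SD_ASYM_CPUCAPACITY in
 * build_sched_domains().
 */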
192505484e09SMorten Rasmussen 
192605484e09SMorten Rasmussen 
192705484e09SMorten Rasmussen /*
1928f2cb1360SIngo Molnar  * Build sched domains for a given set of CPUs and attach the sched domains
1929f2cb1360SIngo Molnar  * to the individual CPUs
1930f2cb1360SIngo Molnar  */
1931f2cb1360SIngo Molnar static int
1932f2cb1360SIngo Molnar build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
1933f2cb1360SIngo Molnar {
1934f2cb1360SIngo Molnar 	enum s_alloc alloc_state;
1935f2cb1360SIngo Molnar 	struct sched_domain *sd;
1936f2cb1360SIngo Molnar 	struct s_data d;
1937f2cb1360SIngo Molnar 	struct rq *rq = NULL;
1938f2cb1360SIngo Molnar 	int i, ret = -ENOMEM;
193905484e09SMorten Rasmussen 	struct sched_domain_topology_level *tl_asym;
1940df054e84SMorten Rasmussen 	bool has_asym = false;
1941f2cb1360SIngo Molnar 
1942f2cb1360SIngo Molnar 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
1943f2cb1360SIngo Molnar 	if (alloc_state != sa_rootdomain)
1944f2cb1360SIngo Molnar 		goto error;
1945f2cb1360SIngo Molnar 
194605484e09SMorten Rasmussen 	tl_asym = asym_cpu_capacity_level(cpu_map);
194705484e09SMorten Rasmussen 
1948f2cb1360SIngo Molnar 	/* Set up domains for CPUs specified by the cpu_map: */
1949f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
1950f2cb1360SIngo Molnar 		struct sched_domain_topology_level *tl;
1951f2cb1360SIngo Molnar 
1952f2cb1360SIngo Molnar 		sd = NULL;
1953f2cb1360SIngo Molnar 		for_each_sd_topology(tl) {
195405484e09SMorten Rasmussen 			int dflags = 0;
195505484e09SMorten Rasmussen 
1956df054e84SMorten Rasmussen 			if (tl == tl_asym) {
195705484e09SMorten Rasmussen 				dflags |= SD_ASYM_CPUCAPACITY;
1958df054e84SMorten Rasmussen 				has_asym = true;
1959df054e84SMorten Rasmussen 			}
196005484e09SMorten Rasmussen 
196105484e09SMorten Rasmussen 			sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
196205484e09SMorten Rasmussen 
1963f2cb1360SIngo Molnar 			if (tl == sched_domain_topology)
1964f2cb1360SIngo Molnar 				*per_cpu_ptr(d.sd, i) = sd;
1965af85596cSPeter Zijlstra 			if (tl->flags & SDTL_OVERLAP)
1966f2cb1360SIngo Molnar 				sd->flags |= SD_OVERLAP;
1967f2cb1360SIngo Molnar 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
1968f2cb1360SIngo Molnar 				break;
1969f2cb1360SIngo Molnar 		}
1970f2cb1360SIngo Molnar 	}
1971f2cb1360SIngo Molnar 
1972f2cb1360SIngo Molnar 	/* Build the groups for the domains */
1973f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
1974f2cb1360SIngo Molnar 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
1975f2cb1360SIngo Molnar 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
1976f2cb1360SIngo Molnar 			if (sd->flags & SD_OVERLAP) {
1977f2cb1360SIngo Molnar 				if (build_overlap_sched_groups(sd, i))
1978f2cb1360SIngo Molnar 					goto error;
1979f2cb1360SIngo Molnar 			} else {
1980f2cb1360SIngo Molnar 				if (build_sched_groups(sd, i))
1981f2cb1360SIngo Molnar 					goto error;
1982f2cb1360SIngo Molnar 			}
1983f2cb1360SIngo Molnar 		}
1984f2cb1360SIngo Molnar 	}
1985f2cb1360SIngo Molnar 
1986f2cb1360SIngo Molnar 	/* Calculate CPU capacity for physical packages and nodes */
1987f2cb1360SIngo Molnar 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
1988f2cb1360SIngo Molnar 		if (!cpumask_test_cpu(i, cpu_map))
1989f2cb1360SIngo Molnar 			continue;
1990f2cb1360SIngo Molnar 
1991f2cb1360SIngo Molnar 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
1992f2cb1360SIngo Molnar 			claim_allocations(i, sd);
1993f2cb1360SIngo Molnar 			init_sched_groups_capacity(i, sd);
1994f2cb1360SIngo Molnar 		}
1995f2cb1360SIngo Molnar 	}
1996f2cb1360SIngo Molnar 
1997f2cb1360SIngo Molnar 	/* Attach the domains */
1998f2cb1360SIngo Molnar 	rcu_read_lock();
1999f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
2000f2cb1360SIngo Molnar 		rq = cpu_rq(i);
2001f2cb1360SIngo Molnar 		sd = *per_cpu_ptr(d.sd, i);
2002f2cb1360SIngo Molnar 
2003f2cb1360SIngo Molnar 		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
2004f2cb1360SIngo Molnar 		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
2005f2cb1360SIngo Molnar 			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
2006f2cb1360SIngo Molnar 
2007f2cb1360SIngo Molnar 		cpu_attach_domain(sd, d.rd, i);
2008f2cb1360SIngo Molnar 	}
2009f2cb1360SIngo Molnar 	rcu_read_unlock();
2010f2cb1360SIngo Molnar 
2011df054e84SMorten Rasmussen 	if (has_asym)
2012df054e84SMorten Rasmussen 		static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
2013df054e84SMorten Rasmussen 
2014f2cb1360SIngo Molnar 	if (rq && sched_debug_enabled) {
2015bf5015a5SJuri Lelli 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
2016f2cb1360SIngo Molnar 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
2017f2cb1360SIngo Molnar 	}
2018f2cb1360SIngo Molnar 
2019f2cb1360SIngo Molnar 	ret = 0;
2020f2cb1360SIngo Molnar error:
2021f2cb1360SIngo Molnar 	__free_domain_allocs(&d, alloc_state, cpu_map);
202297fb7a0aSIngo Molnar 
2023f2cb1360SIngo Molnar 	return ret;
2024f2cb1360SIngo Molnar }
2025f2cb1360SIngo Molnar 
2026f2cb1360SIngo Molnar /* Current sched domains: */
2027f2cb1360SIngo Molnar static cpumask_var_t			*doms_cur;
2028f2cb1360SIngo Molnar 
2029f2cb1360SIngo Molnar /* Number of sched domains in 'doms_cur': */
2030f2cb1360SIngo Molnar static int				ndoms_cur;
2031f2cb1360SIngo Molnar 
2032f2cb1360SIngo Molnar /* Attributes of custom domains in 'doms_cur': */
2033f2cb1360SIngo Molnar static struct sched_domain_attr		*dattr_cur;
2034f2cb1360SIngo Molnar 
2035f2cb1360SIngo Molnar /*
2036f2cb1360SIngo Molnar  * Special case: If a kmalloc() of a doms_cur partition (array of
2037f2cb1360SIngo Molnar  * cpumask) fails, then fall back to a single sched domain,
2038f2cb1360SIngo Molnar  * as determined by the single cpumask fallback_doms.
2039f2cb1360SIngo Molnar  */
20408d5dc512SPeter Zijlstra static cpumask_var_t			fallback_doms;
2041f2cb1360SIngo Molnar 
2042f2cb1360SIngo Molnar /*
2043f2cb1360SIngo Molnar  * arch_update_cpu_topology lets virtualized architectures update the
2044f2cb1360SIngo Molnar  * CPU core maps. It is supposed to return 1 if the topology changed
2045f2cb1360SIngo Molnar  * or 0 if it stayed the same.
2046f2cb1360SIngo Molnar  */
2047f2cb1360SIngo Molnar int __weak arch_update_cpu_topology(void)
2048f2cb1360SIngo Molnar {
2049f2cb1360SIngo Molnar 	return 0;
2050f2cb1360SIngo Molnar }
2051f2cb1360SIngo Molnar 
2052f2cb1360SIngo Molnar cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
2053f2cb1360SIngo Molnar {
2054f2cb1360SIngo Molnar 	int i;
2055f2cb1360SIngo Molnar 	cpumask_var_t *doms;
2056f2cb1360SIngo Molnar 
20576da2ec56SKees Cook 	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
2058f2cb1360SIngo Molnar 	if (!doms)
2059f2cb1360SIngo Molnar 		return NULL;
2060f2cb1360SIngo Molnar 	for (i = 0; i < ndoms; i++) {
2061f2cb1360SIngo Molnar 		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
2062f2cb1360SIngo Molnar 			free_sched_domains(doms, i);
2063f2cb1360SIngo Molnar 			return NULL;
2064f2cb1360SIngo Molnar 		}
2065f2cb1360SIngo Molnar 	}
2066f2cb1360SIngo Molnar 	return doms;
2067f2cb1360SIngo Molnar }
2068f2cb1360SIngo Molnar 
2069f2cb1360SIngo Molnar void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2070f2cb1360SIngo Molnar {
2071f2cb1360SIngo Molnar 	unsigned int i;
2072f2cb1360SIngo Molnar 	for (i = 0; i < ndoms; i++)
2073f2cb1360SIngo Molnar 		free_cpumask_var(doms[i]);
2074f2cb1360SIngo Molnar 	kfree(doms);
2075f2cb1360SIngo Molnar }
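/*
 * Usage sketch (hypothetical helper): callers pair these two functions
 * when preparing a partition layout for partition_sched_domains() below.
 */
static cpumask_var_t *example_two_partitions(void)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms)
		return NULL;

	/* Fill doms[0] and doms[1] with disjoint CPU sets here... */
	return doms;
}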
2076f2cb1360SIngo Molnar 
2077f2cb1360SIngo Molnar /*
2078f2cb1360SIngo Molnar  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
2079f2cb1360SIngo Molnar  * For now this just excludes isolated CPUs, but could be used to
2080f2cb1360SIngo Molnar  * exclude other special cases in the future.
2081f2cb1360SIngo Molnar  */
20828d5dc512SPeter Zijlstra int sched_init_domains(const struct cpumask *cpu_map)
2083f2cb1360SIngo Molnar {
2084f2cb1360SIngo Molnar 	int err;
2085f2cb1360SIngo Molnar 
20868d5dc512SPeter Zijlstra 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
20871676330eSPeter Zijlstra 	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
20888d5dc512SPeter Zijlstra 	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
20898d5dc512SPeter Zijlstra 
2090f2cb1360SIngo Molnar 	arch_update_cpu_topology();
2091f2cb1360SIngo Molnar 	ndoms_cur = 1;
2092f2cb1360SIngo Molnar 	doms_cur = alloc_sched_domains(ndoms_cur);
2093f2cb1360SIngo Molnar 	if (!doms_cur)
2094f2cb1360SIngo Molnar 		doms_cur = &fallback_doms;
2095edb93821SFrederic Weisbecker 	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN));
2096f2cb1360SIngo Molnar 	err = build_sched_domains(doms_cur[0], NULL);
2097f2cb1360SIngo Molnar 	register_sched_domain_sysctl();
2098f2cb1360SIngo Molnar 
2099f2cb1360SIngo Molnar 	return err;
2100f2cb1360SIngo Molnar }
2101f2cb1360SIngo Molnar 
2102f2cb1360SIngo Molnar /*
2103f2cb1360SIngo Molnar  * Detach sched domains from a group of CPUs specified in cpu_map.
2104f2cb1360SIngo Molnar  * These CPUs will now be attached to the NULL domain.
2105f2cb1360SIngo Molnar  */
2106f2cb1360SIngo Molnar static void detach_destroy_domains(const struct cpumask *cpu_map)
2107f2cb1360SIngo Molnar {
2108f2cb1360SIngo Molnar 	int i;
2109f2cb1360SIngo Molnar 
2110f2cb1360SIngo Molnar 	rcu_read_lock();
2111f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map)
2112f2cb1360SIngo Molnar 		cpu_attach_domain(NULL, &def_root_domain, i);
2113f2cb1360SIngo Molnar 	rcu_read_unlock();
2114f2cb1360SIngo Molnar }
2115f2cb1360SIngo Molnar 
2116f2cb1360SIngo Molnar /* handle null as "default" */
2117f2cb1360SIngo Molnar static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
2118f2cb1360SIngo Molnar 			struct sched_domain_attr *new, int idx_new)
2119f2cb1360SIngo Molnar {
2120f2cb1360SIngo Molnar 	struct sched_domain_attr tmp;
2121f2cb1360SIngo Molnar 
2122f2cb1360SIngo Molnar 	/* Fast path: */
2123f2cb1360SIngo Molnar 	if (!new && !cur)
2124f2cb1360SIngo Molnar 		return 1;
2125f2cb1360SIngo Molnar 
2126f2cb1360SIngo Molnar 	tmp = SD_ATTR_INIT;
212797fb7a0aSIngo Molnar 
2128f2cb1360SIngo Molnar 	return !memcmp(cur ? (cur + idx_cur) : &tmp,
2129f2cb1360SIngo Molnar 			new ? (new + idx_new) : &tmp,
2130f2cb1360SIngo Molnar 			sizeof(struct sched_domain_attr));
2131f2cb1360SIngo Molnar }
2132f2cb1360SIngo Molnar 
2133f2cb1360SIngo Molnar /*
2134f2cb1360SIngo Molnar  * Partition sched domains as specified by the 'ndoms_new'
2135f2cb1360SIngo Molnar  * cpumasks in the array doms_new[] of cpumasks. This compares
2136f2cb1360SIngo Molnar  * doms_new[] to the current sched domain partitioning, doms_cur[].
2137f2cb1360SIngo Molnar  * It destroys each deleted domain and builds each new domain.
2138f2cb1360SIngo Molnar  *
2139f2cb1360SIngo Molnar  * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
2140f2cb1360SIngo Molnar  * The masks don't intersect (don't overlap). We should set up one
2141f2cb1360SIngo Molnar  * sched domain for each mask. CPUs not in any of the cpumasks will
2142f2cb1360SIngo Molnar  * not be load balanced. If the same cpumask appears both in the
2143f2cb1360SIngo Molnar  * current 'doms_cur' domains and in the new 'doms_new', we can leave
2144f2cb1360SIngo Molnar  * it as it is.
2145f2cb1360SIngo Molnar  *
2146f2cb1360SIngo Molnar  * The passed-in 'doms_new' should be allocated using
2147f2cb1360SIngo Molnar  * alloc_sched_domains.  This routine takes ownership of it and will
2148f2cb1360SIngo Molnar  * free_sched_domains it when done with it. If the caller failed the
2149f2cb1360SIngo Molnar  * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2150f2cb1360SIngo Molnar  * and partition_sched_domains() will fall back to the single partition
2151f2cb1360SIngo Molnar  * 'fallback_doms'; it also forces the domains to be rebuilt.
2152f2cb1360SIngo Molnar  *
2153f2cb1360SIngo Molnar  * If doms_new == NULL it will be replaced with cpu_online_mask.
2154f2cb1360SIngo Molnar  * ndoms_new == 0 is a special case for destroying existing domains,
2155f2cb1360SIngo Molnar  * and it will not create the default domain.
2156f2cb1360SIngo Molnar  *
2157f2cb1360SIngo Molnar  * Call with hotplug lock held
2158f2cb1360SIngo Molnar  */
2159f2cb1360SIngo Molnar void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2160f2cb1360SIngo Molnar 			     struct sched_domain_attr *dattr_new)
2161f2cb1360SIngo Molnar {
21621f74de87SQuentin Perret 	bool __maybe_unused has_eas = false;
2163f2cb1360SIngo Molnar 	int i, j, n;
2164f2cb1360SIngo Molnar 	int new_topology;
2165f2cb1360SIngo Molnar 
2166f2cb1360SIngo Molnar 	mutex_lock(&sched_domains_mutex);
2167f2cb1360SIngo Molnar 
2168f2cb1360SIngo Molnar 	/* Always unregister in case we don't destroy any domains: */
2169f2cb1360SIngo Molnar 	unregister_sched_domain_sysctl();
2170f2cb1360SIngo Molnar 
2171f2cb1360SIngo Molnar 	/* Let the architecture update CPU core mappings: */
2172f2cb1360SIngo Molnar 	new_topology = arch_update_cpu_topology();
2173f2cb1360SIngo Molnar 
217409e0dd8eSPeter Zijlstra 	if (!doms_new) {
217509e0dd8eSPeter Zijlstra 		WARN_ON_ONCE(dattr_new);
217609e0dd8eSPeter Zijlstra 		n = 0;
217709e0dd8eSPeter Zijlstra 		doms_new = alloc_sched_domains(1);
217809e0dd8eSPeter Zijlstra 		if (doms_new) {
217909e0dd8eSPeter Zijlstra 			n = 1;
2180edb93821SFrederic Weisbecker 			cpumask_and(doms_new[0], cpu_active_mask,
2181edb93821SFrederic Weisbecker 				    housekeeping_cpumask(HK_FLAG_DOMAIN));
218209e0dd8eSPeter Zijlstra 		}
218309e0dd8eSPeter Zijlstra 	} else {
218409e0dd8eSPeter Zijlstra 		n = ndoms_new;
218509e0dd8eSPeter Zijlstra 	}
2186f2cb1360SIngo Molnar 
2187f2cb1360SIngo Molnar 	/* Destroy deleted domains: */
2188f2cb1360SIngo Molnar 	for (i = 0; i < ndoms_cur; i++) {
2189f2cb1360SIngo Molnar 		for (j = 0; j < n && !new_topology; j++) {
21906aa140faSQuentin Perret 			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
21916aa140faSQuentin Perret 			    dattrs_equal(dattr_cur, i, dattr_new, j))
2192f2cb1360SIngo Molnar 				goto match1;
2193f2cb1360SIngo Molnar 		}
2194f2cb1360SIngo Molnar 		/* No match - a current sched domain not in new doms_new[] */
2195f2cb1360SIngo Molnar 		detach_destroy_domains(doms_cur[i]);
2196f2cb1360SIngo Molnar match1:
2197f2cb1360SIngo Molnar 		;
2198f2cb1360SIngo Molnar 	}
2199f2cb1360SIngo Molnar 
2200f2cb1360SIngo Molnar 	n = ndoms_cur;
220109e0dd8eSPeter Zijlstra 	if (!doms_new) {
2202f2cb1360SIngo Molnar 		n = 0;
2203f2cb1360SIngo Molnar 		doms_new = &fallback_doms;
2204edb93821SFrederic Weisbecker 		cpumask_and(doms_new[0], cpu_active_mask,
2205edb93821SFrederic Weisbecker 			    housekeeping_cpumask(HK_FLAG_DOMAIN));
2206f2cb1360SIngo Molnar 	}
2207f2cb1360SIngo Molnar 
2208f2cb1360SIngo Molnar 	/* Build new domains: */
2209f2cb1360SIngo Molnar 	for (i = 0; i < ndoms_new; i++) {
2210f2cb1360SIngo Molnar 		for (j = 0; j < n && !new_topology; j++) {
22116aa140faSQuentin Perret 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
22126aa140faSQuentin Perret 			    dattrs_equal(dattr_new, i, dattr_cur, j))
2213f2cb1360SIngo Molnar 				goto match2;
2214f2cb1360SIngo Molnar 		}
2215f2cb1360SIngo Molnar 		/* No match - add a new doms_new */
2216f2cb1360SIngo Molnar 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2217f2cb1360SIngo Molnar match2:
2218f2cb1360SIngo Molnar 		;
2219f2cb1360SIngo Molnar 	}
2220f2cb1360SIngo Molnar 
2221531b5c9fSQuentin Perret #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
22226aa140faSQuentin Perret 	/* Build perf. domains: */
22236aa140faSQuentin Perret 	for (i = 0; i < ndoms_new; i++) {
2224531b5c9fSQuentin Perret 		for (j = 0; j < n && !sched_energy_update; j++) {
22256aa140faSQuentin Perret 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
22261f74de87SQuentin Perret 			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
22271f74de87SQuentin Perret 				has_eas = true;
22286aa140faSQuentin Perret 				goto match3;
22296aa140faSQuentin Perret 			}
22301f74de87SQuentin Perret 		}
22316aa140faSQuentin Perret 		/* No match - add perf. domains for a new rd */
22321f74de87SQuentin Perret 		has_eas |= build_perf_domains(doms_new[i]);
22336aa140faSQuentin Perret match3:
22346aa140faSQuentin Perret 		;
22356aa140faSQuentin Perret 	}
22361f74de87SQuentin Perret 	sched_energy_set(has_eas);
22376aa140faSQuentin Perret #endif
22386aa140faSQuentin Perret 
2239f2cb1360SIngo Molnar 	/* Remember the new sched domains: */
2240f2cb1360SIngo Molnar 	if (doms_cur != &fallback_doms)
2241f2cb1360SIngo Molnar 		free_sched_domains(doms_cur, ndoms_cur);
2242f2cb1360SIngo Molnar 
2243f2cb1360SIngo Molnar 	kfree(dattr_cur);
2244f2cb1360SIngo Molnar 	doms_cur = doms_new;
2245f2cb1360SIngo Molnar 	dattr_cur = dattr_new;
2246f2cb1360SIngo Molnar 	ndoms_cur = ndoms_new;
2247f2cb1360SIngo Molnar 
2248f2cb1360SIngo Molnar 	register_sched_domain_sysctl();
2249f2cb1360SIngo Molnar 
2250f2cb1360SIngo Molnar 	mutex_unlock(&sched_domains_mutex);
2251f2cb1360SIngo Molnar }
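/*
 * Usage sketch: passing doms_new == NULL with ndoms_new == 1 forces a
 * rebuild of the single default domain, e.g.:
 *
 *	get_online_cpus();
 *	partition_sched_domains(1, NULL, NULL);
 *	put_online_cpus();
 */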
2252