// SPDX-License-Identifier: GPL-2.0
/*
 * Scheduler topology setup/handling methods
 */
#include "sched.h"

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2;

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = true;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
		return -1;
	}

	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
		       group->sgc->id,
		       cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
			       cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
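/*
 * With "sched_debug" on the command line, the printk()s above produce
 * output along these lines for a dual-core SMT-2 machine (group ids
 * and capacities are illustrative, not taken from a real boot log):
 *
 *	CPU0 attaching sched-domain(s):
 *	 domain-0: span=0-1 level=SMT
 *	  groups: 0:{ span=0 }, 1:{ span=1 }
 *	 domain-1: span=0-3 level=MC
 *	  groups: 0:{ span=0-1 cap=2048 }, 2:{ span=2-3 cap=2048 }
 */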
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_enabled 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES |
			 SD_SHARE_POWERDOMAIN)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
			    SD_BALANCE_NEWIDLE |
			    SD_BALANCE_FORK |
			    SD_BALANCE_EXEC |
			    SD_ASYM_CPUCAPACITY |
			    SD_SHARE_CPUCAPACITY |
			    SD_SHARE_PKG_RESOURCES |
			    SD_PREFER_SIBLING |
			    SD_SHARE_POWERDOMAIN);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}
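/*
 * Example: on a machine without hyperthreading the SMT level spans a
 * single CPU, so sd_degenerate() returns 1 and cpu_attach_domain()
 * below snips that level out of the hierarchy. Similarly, a parent
 * whose span equals its child's and which has only a single group
 * typically carries no flags the child lacks, so
 * sd_parent_degenerate() lets the parent be collapsed as well.
 */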
#ifdef CONFIG_ENERGY_MODEL
static void free_pd(struct perf_domain *pd)
{
	struct perf_domain *tmp;

	while (pd) {
		tmp = pd->next;
		kfree(pd);
		pd = tmp;
	}
}

static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
{
	while (pd) {
		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
			return pd;
		pd = pd->next;
	}

	return NULL;
}

static struct perf_domain *pd_init(int cpu)
{
	struct em_perf_domain *obj = em_cpu_get(cpu);
	struct perf_domain *pd;

	if (!obj) {
		if (sched_debug())
			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
		return NULL;
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return NULL;
	pd->em_pd = obj;

	return pd;
}

static void perf_domain_debug(const struct cpumask *cpu_map,
			      struct perf_domain *pd)
{
	if (!sched_debug() || !pd)
		return;

	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));

	while (pd) {
		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }",
				cpumask_first(perf_domain_span(pd)),
				cpumask_pr_args(perf_domain_span(pd)),
				em_pd_nr_cap_states(pd->em_pd));
		pd = pd->next;
	}

	printk(KERN_CONT "\n");
}

static void destroy_perf_domain_rcu(struct rcu_head *rp)
{
	struct perf_domain *pd;

	pd = container_of(rp, struct perf_domain, rcu);
	free_pd(pd);
}

/*
 * EAS can be used on a root domain if it meets all the following conditions:
 *    1. an Energy Model (EM) is available;
 *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
 *    3. the EM complexity is low enough to keep scheduling overheads low.
 *
 * The complexity of the Energy Model is defined as:
 *
 *              C = nr_pd * (nr_cpus + nr_cs)
 *
 * with parameters defined as:
 *  - nr_pd:    the number of performance domains
 *  - nr_cpus:  the number of CPUs
 *  - nr_cs:    the sum of the number of capacity states of all performance
 *              domains (for example, on a system with 2 performance domains,
 *              with 10 capacity states each, nr_cs = 2 * 10 = 20).
 *
 * It is generally not a good idea to use such a model in the wake-up path on
 * very complex platforms because of the associated scheduling overheads. The
 * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
 * with per-CPU DVFS and less than 8 capacity states each, for example.
 */
#define EM_MAX_COMPLEXITY 2048
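/*
 * Worked example of the formula above (the platforms are made up): a
 * big.LITTLE system with 8 CPUs, 2 performance domains and 10 capacity
 * states per domain gives C = 2 * (8 + 20) = 56, comfortably below
 * EM_MAX_COMPLEXITY. A 64-CPU platform with per-CPU DVFS and 8 capacity
 * states per domain gives C = 64 * (64 + 512) = 36864, so
 * build_perf_domains() below refuses to enable EAS there.
 */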
static void build_perf_domains(const struct cpumask *cpu_map)
{
	int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
	struct perf_domain *pd = NULL, *tmp;
	int cpu = cpumask_first(cpu_map);
	struct root_domain *rd = cpu_rq(cpu)->rd;

	/* EAS is enabled for asymmetric CPU capacity topologies. */
	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
		if (sched_debug()) {
			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
					cpumask_pr_args(cpu_map));
		}
		goto free;
	}

	for_each_cpu(i, cpu_map) {
		/* Skip already covered CPUs. */
		if (find_pd(pd, i))
			continue;

		/* Create the new pd and add it to the local list. */
		tmp = pd_init(i);
		if (!tmp)
			goto free;
		tmp->next = pd;
		pd = tmp;

		/*
		 * Count performance domains and capacity states for the
		 * complexity check.
		 */
		nr_pd++;
		nr_cs += em_pd_nr_cap_states(pd->em_pd);
	}

	/* Bail out if the Energy Model complexity is too high. */
	if (nr_pd * (nr_cs + nr_cpus) > EM_MAX_COMPLEXITY) {
		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
						cpumask_pr_args(cpu_map));
		goto free;
	}

	perf_domain_debug(cpu_map, pd);

	/* Attach the new list of performance domains to the root domain. */
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, pd);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return;

free:
	free_pd(pd);
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, NULL);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
}
#else
static void free_pd(struct perf_domain *pd) { }
#endif /* CONFIG_ENERGY_MODEL */

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	free_pd(rd->pd);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}

void sched_get_rd(struct root_domain *rd)
{
	atomic_inc(&rd->refcount);
}

void sched_put_rd(struct root_domain *rd)
{
	if (!atomic_dec_and_test(&rd->refcount))
		return;

	call_rcu_sched(&rd->rcu, free_rootdomain);
}
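/*
 * sched_get_rd()/sched_put_rd() are meant to be used in pairs by code
 * that holds on to a root_domain outside of rq->lock; the RT push-IPI
 * machinery, for instance, pins the root_domain this way while an
 * irq_work still refers to it. The final put frees the root_domain via
 * RCU, just as rq_attach_root() above does for the displaced one.
 */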
static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
#endif

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		if (atomic_dec_and_test(&sg->ref))
			kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * A normal sched domain may have multiple group references; an
	 * overlapping domain, having private groups, has only one. Iterate,
	 * dropping group/capacity references, freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain); this allows us
 * to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two CPUs are in the same cache domain, see cpus_share_cache().
 */
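/*
 * For instance, cpus_share_cache() (in kernel/sched/core.c) boils down
 * to comparing the per-CPU LLC ids set up here:
 *
 *	bool cpus_share_cache(int this_cpu, int that_cpu)
 *	{
 *		return per_cpu(sd_llc_id, this_cpu) ==
 *		       per_cpu(sd_llc_id, that_cpu);
 *	}
 */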
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask holds all those CPUs that could actually end up at this
 * group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}


/*
 * NUMA topology (first read the regular topology blurb below)
 *
 * Given a node-distance table, for example:
 *
 *   node   0   1   2   3
 *     0:  10  20  30  20
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  20  30  20  10
 *
 * which represents a 4 node ring topology like:
 *
 *   0 ----- 1
 *   |       |
 *   |       |
 *   |       |
 *   3 ----- 2
 *
 * We want to construct domains and groups to represent this. The way we go
 * about doing this is to build the domains on 'hops'. For each NUMA level we
 * construct the mask of all nodes reachable in @level hops.
 *
 * For the above NUMA topology that gives 3 levels:
 *
 * NUMA-2	0-3		0-3		0-3		0-3
 *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
 *
 * NUMA-1	0-1,3		0-2		1-3		0,2-3
 *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
 *
 * NUMA-0	0		1		2		3
 *
 *
 * As can be seen; things don't nicely line up as with the regular topology.
 * When we iterate a domain in child domain chunks some nodes can be
 * represented multiple times -- hence the "overlap" naming for this part of
 * the topology.
 *
 * In order to minimize this overlap, we only build enough groups to cover the
 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
 *
 * Because:
 *
 *  - the first group of each domain is its child domain; this
 *    gets us the first 0-1,3
 *  - the only uncovered node is 2, whose child domain is 1-3.
 *
 * However, because of the overlap, computing a unique CPU for each group is
 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
 * end up at those groups (they would end up in group: 0-1,3).
 *
 * To correct this we have to introduce the group balance mask. This mask
 * will contain those CPUs in the group that can reach this group given the
 * (child) domain tree.
 *
 * With this we can once again compute balance_cpu and sched_group_capacity
 * relations.
 *
 * XXX include words on how balance_cpu is unique and therefore can be
 * used for sched_group_capacity links.
 *
 *
 * Another 'interesting' topology is:
 *
 *   node   0   1   2   3
 *     0:  10  20  20  30
 *     1:  20  10  20  20
 *     2:  20  20  10  20
 *     3:  30  20  20  10
 *
 * Which looks a little like:
 *
 *   0 ----- 1
 *   |     / |
 *   |   /   |
 *   | /     |
 *   2 ----- 3
 *
 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
 * are not.
 *
 * This leads to a few particularly weird cases where the sched_domain's are
 * not of the same number for each CPU. Consider:
 *
 * NUMA-2	0-3						0-3
 *  groups:	{0-2},{1-3}					{1-3},{0-2}
 *
 * NUMA-1	0-2		0-3	0-3		1-3
 *
 * NUMA-0	0		1	2		3
 *
 */


/*
 * Build the balance mask; it contains only those CPUs that can arrive at this
 * group and should be considered to continue balancing.
 *
 * We do this during the group creation pass, therefore the group information
 * isn't complete yet, however since each group represents a (child) domain we
 * can fully construct this using the sched_domain bits (which are already
 * complete).
 */
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
	const struct cpumask *sg_span = sched_group_span(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(mask);

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, mask);
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(mask));
}
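/*
 * To make this concrete with the 4 node ring from the blurb above: the
 * NUMA-2 group spanning 1-3 (node 2's child domain) ends up with a
 * balance mask holding only node 2's CPUs. Nodes 1 and 3 have child
 * domains 0-2 and 0,2-3, neither of which equals the group span, so
 * only node 2 can arrive at, and continue balancing from, this group.
 */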
/*
 * XXX: This creates per-node group entries; since the load-balancer will
 * immediately access remote memory to construct this group's load-balance
 * statistics having the groups node local is of dubious benefit.
 */
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_span(sg);
	if (sd->child)
		cpumask_copy(sg_span, sched_domain_span(sd->child));
	else
		cpumask_copy(sg_span, sched_domain_span(sd));

	atomic_inc(&sg->ref);
	return sg;
}

static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg)
{
	struct cpumask *mask = sched_domains_tmpmask2;
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;
	int cpu;

	build_balance_mask(sd, sg, mask);
	cpu = cpumask_first_and(sched_group_span(sg), mask);

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		cpumask_copy(group_balance_mask(sg), mask);
	else
		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
	sg_span = sched_group_span(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sd, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}
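/*
 * Running this for node 0's NUMA-2 domain in the ring example yields
 * exactly the two groups promised there: starting at CPU 0 it installs
 * the group 0-1,3 (node 0's child domain), skips node 1 as already
 * covered, then adds 1-3 (node 2's child domain), at which point every
 * node is covered.
 */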
/*
 * Package topology (also see the load-balance blurb in fair.c)
 *
 * The scheduler builds a tree structure to represent a number of important
 * topology features. By default (default_topology[]) these include:
 *
 *  - Simultaneous multithreading (SMT)
 *  - Multi-Core Cache (MC)
 *  - Package (DIE)
 *
 * Where the last one more or less denotes everything up to a NUMA node.
 *
 * The tree consists of 3 primary data structures:
 *
 *	sched_domain -> sched_group -> sched_group_capacity
 *	    ^ ^             ^ ^
 *          `-'             `-'
 *
 * The sched_domains are per-CPU and have a two way link (parent & child) and
 * denote the ever growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's, each
 * denoting the domains of the level below (or individual CPUs in case of the
 * first domain level). The sched_group linked by a sched_domain includes the
 * CPU of that sched_domain [*].
 *
 * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * DIE  [                             ]
 * MC   [             ] [             ]
 * SMT  [     ] [     ] [     ] [     ]
 *
 *  - or -
 *
 * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
 * MC   0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
 * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * One way to think about it is: sched_domain moves you up and down among these
 * topology levels, while sched_group moves you sideways through it, at child
 * domain granularity.
 *
 * sched_group_capacity ensures each unique sched_group has shared storage.
 *
 * There are two related construction problems, both of which require a CPU
 * that uniquely identifies each group (for a given domain):
 *
 *  - The first is the balance_cpu (see should_we_balance() and the
 *    load-balance blurb in fair.c); for each group we only want 1 CPU to
 *    continue balancing at a higher domain.
 *
 *  - The second is the sched_group_capacity; we want all identical groups
 *    to share a single sched_group_capacity.
 *
 * These topologies are exclusive by construction: it is impossible for an
 * SMT thread to belong to multiple cores, and for cores to be part of
 * multiple caches. There is a very clear and unique location for each CPU
 * in the hierarchy.
 *
 * Therefore computing a unique CPU for each group is trivial (the iteration
 * mask is redundant and set to all 1s; all CPUs in a group will end up at
 * _that_ group): we can simply pick the first CPU in each group.
 *
 *
 * [*] in other words, the first group of each domain is its child domain.
 */

static struct sched_group *get_group(int cpu, struct sd_data *sdd)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;
	struct sched_group *sg;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	sg = *per_cpu_ptr(sdd->sg, cpu);
	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

	/* For claim_allocations: */
	atomic_inc(&sg->ref);
	atomic_inc(&sg->sgc->ref);

	if (child) {
		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
	} else {
		cpumask_set_cpu(cpu, sched_group_span(sg));
		cpumask_set_cpu(cpu, group_balance_mask(sg));
	}

	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;

	return sg;
}
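/*
 * E.g. for the MC domain of CPU 1 in the 8 CPU part drawn above: the
 * child is the SMT domain 0-1, so get_group() hands back the group
 * stored for CPU 0, spanning 0-1 with a balance mask equal to that
 * span; CPUs 0 and 1 thus share one group and one
 * sched_group_capacity.
 */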
/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_capacity to 0.
 *
 * Assumes the sched_domain tree is fully constructed
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct sched_group *sg;

		if (cpumask_test_cpu(i, covered))
			continue;

		sg = get_group(i, sdd);

		cpumask_or(covered, covered, sched_group_span(sg));

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;
	sd->groups = first;

	return 0;
}
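/*
 * E.g. for the MC domain 0-3 of CPU 0 above this walks CPUs 0..3:
 * get_group(0) yields the group spanning SMT pair 0-1, CPU 1 is then
 * already covered, get_group(2) yields the group spanning 2-3, and the
 * two groups are linked into a circular list starting at sd->groups.
 */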
1075f2cb1360SIngo Molnar */ 1076f2cb1360SIngo Molnar static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 1077f2cb1360SIngo Molnar { 1078f2cb1360SIngo Molnar struct sched_group *sg = sd->groups; 1079f2cb1360SIngo Molnar 1080f2cb1360SIngo Molnar WARN_ON(!sg); 1081f2cb1360SIngo Molnar 1082f2cb1360SIngo Molnar do { 1083f2cb1360SIngo Molnar int cpu, max_cpu = -1; 1084f2cb1360SIngo Molnar 1085ae4df9d6SPeter Zijlstra sg->group_weight = cpumask_weight(sched_group_span(sg)); 1086f2cb1360SIngo Molnar 1087f2cb1360SIngo Molnar if (!(sd->flags & SD_ASYM_PACKING)) 1088f2cb1360SIngo Molnar goto next; 1089f2cb1360SIngo Molnar 1090ae4df9d6SPeter Zijlstra for_each_cpu(cpu, sched_group_span(sg)) { 1091f2cb1360SIngo Molnar if (max_cpu < 0) 1092f2cb1360SIngo Molnar max_cpu = cpu; 1093f2cb1360SIngo Molnar else if (sched_asym_prefer(cpu, max_cpu)) 1094f2cb1360SIngo Molnar max_cpu = cpu; 1095f2cb1360SIngo Molnar } 1096f2cb1360SIngo Molnar sg->asym_prefer_cpu = max_cpu; 1097f2cb1360SIngo Molnar 1098f2cb1360SIngo Molnar next: 1099f2cb1360SIngo Molnar sg = sg->next; 1100f2cb1360SIngo Molnar } while (sg != sd->groups); 1101f2cb1360SIngo Molnar 1102f2cb1360SIngo Molnar if (cpu != group_balance_cpu(sg)) 1103f2cb1360SIngo Molnar return; 1104f2cb1360SIngo Molnar 1105f2cb1360SIngo Molnar update_group_capacity(sd, cpu); 1106f2cb1360SIngo Molnar } 1107f2cb1360SIngo Molnar 1108f2cb1360SIngo Molnar /* 1109f2cb1360SIngo Molnar * Initializers for schedule domains 1110f2cb1360SIngo Molnar * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 1111f2cb1360SIngo Molnar */ 1112f2cb1360SIngo Molnar 1113f2cb1360SIngo Molnar static int default_relax_domain_level = -1; 1114f2cb1360SIngo Molnar int sched_domain_level_max; 1115f2cb1360SIngo Molnar 1116f2cb1360SIngo Molnar static int __init setup_relax_domain_level(char *str) 1117f2cb1360SIngo Molnar { 1118f2cb1360SIngo Molnar if (kstrtoint(str, 0, &default_relax_domain_level)) 1119f2cb1360SIngo Molnar pr_warn("Unable to set relax_domain_level\n"); 1120f2cb1360SIngo Molnar 1121f2cb1360SIngo Molnar return 1; 1122f2cb1360SIngo Molnar } 1123f2cb1360SIngo Molnar __setup("relax_domain_level=", setup_relax_domain_level); 1124f2cb1360SIngo Molnar 1125f2cb1360SIngo Molnar static void set_domain_attribute(struct sched_domain *sd, 1126f2cb1360SIngo Molnar struct sched_domain_attr *attr) 1127f2cb1360SIngo Molnar { 1128f2cb1360SIngo Molnar int request; 1129f2cb1360SIngo Molnar 1130f2cb1360SIngo Molnar if (!attr || attr->relax_domain_level < 0) { 1131f2cb1360SIngo Molnar if (default_relax_domain_level < 0) 1132f2cb1360SIngo Molnar return; 1133f2cb1360SIngo Molnar else 1134f2cb1360SIngo Molnar request = default_relax_domain_level; 1135f2cb1360SIngo Molnar } else 1136f2cb1360SIngo Molnar request = attr->relax_domain_level; 1137f2cb1360SIngo Molnar if (request < sd->level) { 1138f2cb1360SIngo Molnar /* Turn off idle balance on this domain: */ 1139f2cb1360SIngo Molnar sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 1140f2cb1360SIngo Molnar } else { 1141f2cb1360SIngo Molnar /* Turn on idle balance on this domain: */ 1142f2cb1360SIngo Molnar sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 1143f2cb1360SIngo Molnar } 1144f2cb1360SIngo Molnar } 1145f2cb1360SIngo Molnar 1146f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map); 1147f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map); 1148f2cb1360SIngo Molnar 1149f2cb1360SIngo Molnar static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 1150f2cb1360SIngo 
Molnar const struct cpumask *cpu_map) 1151f2cb1360SIngo Molnar { 1152f2cb1360SIngo Molnar switch (what) { 1153f2cb1360SIngo Molnar case sa_rootdomain: 1154f2cb1360SIngo Molnar if (!atomic_read(&d->rd->refcount)) 1155f2cb1360SIngo Molnar free_rootdomain(&d->rd->rcu); 1156f2cb1360SIngo Molnar /* Fall through */ 1157f2cb1360SIngo Molnar case sa_sd: 1158f2cb1360SIngo Molnar free_percpu(d->sd); 1159f2cb1360SIngo Molnar /* Fall through */ 1160f2cb1360SIngo Molnar case sa_sd_storage: 1161f2cb1360SIngo Molnar __sdt_free(cpu_map); 1162f2cb1360SIngo Molnar /* Fall through */ 1163f2cb1360SIngo Molnar case sa_none: 1164f2cb1360SIngo Molnar break; 1165f2cb1360SIngo Molnar } 1166f2cb1360SIngo Molnar } 1167f2cb1360SIngo Molnar 1168f2cb1360SIngo Molnar static enum s_alloc 1169f2cb1360SIngo Molnar __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) 1170f2cb1360SIngo Molnar { 1171f2cb1360SIngo Molnar memset(d, 0, sizeof(*d)); 1172f2cb1360SIngo Molnar 1173f2cb1360SIngo Molnar if (__sdt_alloc(cpu_map)) 1174f2cb1360SIngo Molnar return sa_sd_storage; 1175f2cb1360SIngo Molnar d->sd = alloc_percpu(struct sched_domain *); 1176f2cb1360SIngo Molnar if (!d->sd) 1177f2cb1360SIngo Molnar return sa_sd_storage; 1178f2cb1360SIngo Molnar d->rd = alloc_rootdomain(); 1179f2cb1360SIngo Molnar if (!d->rd) 1180f2cb1360SIngo Molnar return sa_sd; 118197fb7a0aSIngo Molnar 1182f2cb1360SIngo Molnar return sa_rootdomain; 1183f2cb1360SIngo Molnar } 1184f2cb1360SIngo Molnar 1185f2cb1360SIngo Molnar /* 1186f2cb1360SIngo Molnar * NULL the sd_data elements we've used to build the sched_domain and 1187f2cb1360SIngo Molnar * sched_group structure so that the subsequent __free_domain_allocs() 1188f2cb1360SIngo Molnar * will not free the data we're using. 1189f2cb1360SIngo Molnar */ 1190f2cb1360SIngo Molnar static void claim_allocations(int cpu, struct sched_domain *sd) 1191f2cb1360SIngo Molnar { 1192f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 1193f2cb1360SIngo Molnar 1194f2cb1360SIngo Molnar WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 1195f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, cpu) = NULL; 1196f2cb1360SIngo Molnar 1197f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) 1198f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, cpu) = NULL; 1199f2cb1360SIngo Molnar 1200f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 1201f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, cpu) = NULL; 1202f2cb1360SIngo Molnar 1203f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 1204f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, cpu) = NULL; 1205f2cb1360SIngo Molnar } 1206f2cb1360SIngo Molnar 1207f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1208f2cb1360SIngo Molnar enum numa_topology_type sched_numa_topology_type; 120997fb7a0aSIngo Molnar 121097fb7a0aSIngo Molnar static int sched_domains_numa_levels; 1211f2cb1360SIngo Molnar static int sched_domains_curr_level; 121297fb7a0aSIngo Molnar 121397fb7a0aSIngo Molnar int sched_max_numa_distance; 121497fb7a0aSIngo Molnar static int *sched_domains_numa_distance; 121597fb7a0aSIngo Molnar static struct cpumask ***sched_domains_numa_masks; 1216f2cb1360SIngo Molnar #endif 1217f2cb1360SIngo Molnar 1218f2cb1360SIngo Molnar /* 1219f2cb1360SIngo Molnar * SD_flags allowed in topology descriptions. 1220f2cb1360SIngo Molnar * 1221f2cb1360SIngo Molnar * These flags are purely descriptive of the topology and do not prescribe 1222f2cb1360SIngo Molnar * behaviour. 
Behaviour is artificial and mapped in the
1223f2cb1360SIngo Molnar  * sd_init() function below:
1224f2cb1360SIngo Molnar  *
1225f2cb1360SIngo Molnar  *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
1226f2cb1360SIngo Molnar  *   SD_SHARE_PKG_RESOURCES - describes shared caches
1227f2cb1360SIngo Molnar  *   SD_NUMA                - describes NUMA topologies
1228f2cb1360SIngo Molnar  *   SD_SHARE_POWERDOMAIN   - describes shared power domain
1229f2cb1360SIngo Molnar  *
1230f2cb1360SIngo Molnar  * Odd one out, which besides describing the topology also prescribes
1231f2cb1360SIngo Molnar  * the desired behaviour that goes along with it:
1232f2cb1360SIngo Molnar  *
1233f2cb1360SIngo Molnar  *   SD_ASYM_PACKING        - describes SMT quirks
1234f2cb1360SIngo Molnar  */
1235f2cb1360SIngo Molnar #define TOPOLOGY_SD_FLAGS		\
1236f2cb1360SIngo Molnar 	(SD_SHARE_CPUCAPACITY |		\
1237f2cb1360SIngo Molnar 	 SD_SHARE_PKG_RESOURCES |	\
1238f2cb1360SIngo Molnar 	 SD_NUMA |			\
1239f2cb1360SIngo Molnar 	 SD_ASYM_PACKING |		\
1240f2cb1360SIngo Molnar 	 SD_SHARE_POWERDOMAIN)
1241f2cb1360SIngo Molnar 
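/*
 * Editor's note -- illustrative sketch, not part of the kernel source:
 * a topology level supplies its descriptive flags through its ->sd_flags
 * callback, and sd_init() below WARNs about and discards anything outside
 * TOPOLOGY_SD_FLAGS. The example_ name is hypothetical; the returned bits
 * mirror what an SMT level conventionally reports (cf. cpu_smt_flags()).
 */
static inline int example_smt_topology_flags(void)
{
	/* Both bits are within TOPOLOGY_SD_FLAGS, so sd_init() accepts them. */
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}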
1242f2cb1360SIngo Molnar static struct sched_domain *
1243f2cb1360SIngo Molnar sd_init(struct sched_domain_topology_level *tl,
1244f2cb1360SIngo Molnar 	const struct cpumask *cpu_map,
124505484e09SMorten Rasmussen 	struct sched_domain *child, int dflags, int cpu)
1246f2cb1360SIngo Molnar {
1247f2cb1360SIngo Molnar 	struct sd_data *sdd = &tl->data;
1248f2cb1360SIngo Molnar 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1249f2cb1360SIngo Molnar 	int sd_id, sd_weight, sd_flags = 0;
1250f2cb1360SIngo Molnar 
1251f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1252f2cb1360SIngo Molnar 	/*
1253f2cb1360SIngo Molnar 	 * Ugly hack to pass state to sd_numa_mask()...
1254f2cb1360SIngo Molnar 	 */
1255f2cb1360SIngo Molnar 	sched_domains_curr_level = tl->numa_level;
1256f2cb1360SIngo Molnar #endif
1257f2cb1360SIngo Molnar 
1258f2cb1360SIngo Molnar 	sd_weight = cpumask_weight(tl->mask(cpu));
1259f2cb1360SIngo Molnar 
1260f2cb1360SIngo Molnar 	if (tl->sd_flags)
1261f2cb1360SIngo Molnar 		sd_flags = (*tl->sd_flags)();
1262f2cb1360SIngo Molnar 	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
1263f2cb1360SIngo Molnar 			"wrong sd_flags in topology description\n"))
1264f2cb1360SIngo Molnar 		sd_flags &= ~TOPOLOGY_SD_FLAGS;
1265f2cb1360SIngo Molnar 
126605484e09SMorten Rasmussen 	/* Apply detected topology flags */
126705484e09SMorten Rasmussen 	sd_flags |= dflags;
126805484e09SMorten Rasmussen 
1269f2cb1360SIngo Molnar 	*sd = (struct sched_domain){
1270f2cb1360SIngo Molnar 		.min_interval		= sd_weight,
1271f2cb1360SIngo Molnar 		.max_interval		= 2*sd_weight,
1272f2cb1360SIngo Molnar 		.busy_factor		= 32,
1273f2cb1360SIngo Molnar 		.imbalance_pct		= 125,
1274f2cb1360SIngo Molnar 
1275f2cb1360SIngo Molnar 		.cache_nice_tries	= 0,
1276f2cb1360SIngo Molnar 		.busy_idx		= 0,
1277f2cb1360SIngo Molnar 		.idle_idx		= 0,
1278f2cb1360SIngo Molnar 		.newidle_idx		= 0,
1279f2cb1360SIngo Molnar 		.wake_idx		= 0,
1280f2cb1360SIngo Molnar 		.forkexec_idx		= 0,
1281f2cb1360SIngo Molnar 
1282f2cb1360SIngo Molnar 		.flags			= 1*SD_LOAD_BALANCE
1283f2cb1360SIngo Molnar 					| 1*SD_BALANCE_NEWIDLE
1284f2cb1360SIngo Molnar 					| 1*SD_BALANCE_EXEC
1285f2cb1360SIngo Molnar 					| 1*SD_BALANCE_FORK
1286f2cb1360SIngo Molnar 					| 0*SD_BALANCE_WAKE
1287f2cb1360SIngo Molnar 					| 1*SD_WAKE_AFFINE
1288f2cb1360SIngo Molnar 					| 0*SD_SHARE_CPUCAPACITY
1289f2cb1360SIngo Molnar 					| 0*SD_SHARE_PKG_RESOURCES
1290f2cb1360SIngo Molnar 					| 0*SD_SERIALIZE
12919c63e84dSMorten Rasmussen 					| 1*SD_PREFER_SIBLING
1292f2cb1360SIngo Molnar 					| 0*SD_NUMA
1293f2cb1360SIngo Molnar 					| sd_flags
1294f2cb1360SIngo Molnar 					,
1295f2cb1360SIngo Molnar 
1296f2cb1360SIngo Molnar 		.last_balance		= jiffies,
1297f2cb1360SIngo Molnar 		.balance_interval	= sd_weight,
1298f2cb1360SIngo Molnar 		.max_newidle_lb_cost	= 0,
1299f2cb1360SIngo Molnar 		.next_decay_max_lb_cost	= jiffies,
1300f2cb1360SIngo Molnar 		.child			= child,
1301f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
1302f2cb1360SIngo Molnar 		.name			= tl->name,
1303f2cb1360SIngo Molnar #endif
1304f2cb1360SIngo Molnar 	};
1305f2cb1360SIngo Molnar 
1306f2cb1360SIngo Molnar 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
1307f2cb1360SIngo Molnar 	sd_id = cpumask_first(sched_domain_span(sd));
1308f2cb1360SIngo Molnar 
1309f2cb1360SIngo Molnar 	/*
1310f2cb1360SIngo Molnar 	 * Convert topological properties into behaviour.
1311f2cb1360SIngo Molnar 	 */
1312f2cb1360SIngo Molnar 
1313f2cb1360SIngo Molnar 	if (sd->flags & SD_ASYM_CPUCAPACITY) {
1314f2cb1360SIngo Molnar 		struct sched_domain *t = sd;
1315f2cb1360SIngo Molnar 
13169c63e84dSMorten Rasmussen 		/*
13179c63e84dSMorten Rasmussen 		 * Don't attempt to spread across CPUs of different capacities.
13189c63e84dSMorten Rasmussen 		 */
13199c63e84dSMorten Rasmussen 		if (sd->child)
13209c63e84dSMorten Rasmussen 			sd->child->flags &= ~SD_PREFER_SIBLING;
13219c63e84dSMorten Rasmussen 
1322f2cb1360SIngo Molnar 		for_each_lower_domain(t)
1323f2cb1360SIngo Molnar 			t->flags |= SD_BALANCE_WAKE;
1324f2cb1360SIngo Molnar 	}
1325f2cb1360SIngo Molnar 
1326f2cb1360SIngo Molnar 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
1327f2cb1360SIngo Molnar 		sd->imbalance_pct = 110;
1328f2cb1360SIngo Molnar 
1329f2cb1360SIngo Molnar 	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1330f2cb1360SIngo Molnar 		sd->imbalance_pct = 117;
1331f2cb1360SIngo Molnar 		sd->cache_nice_tries = 1;
1332f2cb1360SIngo Molnar 		sd->busy_idx = 2;
1333f2cb1360SIngo Molnar 
1334f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1335f2cb1360SIngo Molnar 	} else if (sd->flags & SD_NUMA) {
1336f2cb1360SIngo Molnar 		sd->cache_nice_tries = 2;
1337f2cb1360SIngo Molnar 		sd->busy_idx = 3;
1338f2cb1360SIngo Molnar 		sd->idle_idx = 2;
1339f2cb1360SIngo Molnar 
13409c63e84dSMorten Rasmussen 		sd->flags &= ~SD_PREFER_SIBLING;
1341f2cb1360SIngo Molnar 		sd->flags |= SD_SERIALIZE;
1342f2cb1360SIngo Molnar 		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
1343f2cb1360SIngo Molnar 			sd->flags &= ~(SD_BALANCE_EXEC |
1344f2cb1360SIngo Molnar 				       SD_BALANCE_FORK |
1345f2cb1360SIngo Molnar 				       SD_WAKE_AFFINE);
1346f2cb1360SIngo Molnar 		}
1347f2cb1360SIngo Molnar 
1348f2cb1360SIngo Molnar #endif
1349f2cb1360SIngo Molnar 	} else {
1350f2cb1360SIngo Molnar 		sd->cache_nice_tries = 1;
1351f2cb1360SIngo Molnar 		sd->busy_idx = 2;
1352f2cb1360SIngo Molnar 		sd->idle_idx = 1;
1353f2cb1360SIngo Molnar 	}
1354f2cb1360SIngo Molnar 
1355f2cb1360SIngo Molnar 	/*
1356f2cb1360SIngo Molnar 	 * For all levels sharing cache, connect a sched_domain_shared
1357f2cb1360SIngo Molnar 	 * instance.
1358f2cb1360SIngo Molnar 	 */
1359f2cb1360SIngo Molnar 	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1360f2cb1360SIngo Molnar 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1361f2cb1360SIngo Molnar 		atomic_inc(&sd->shared->ref);
1362f2cb1360SIngo Molnar 		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1363f2cb1360SIngo Molnar 	}
1364f2cb1360SIngo Molnar 
1365f2cb1360SIngo Molnar 	sd->private = sdd;
1366f2cb1360SIngo Molnar 
1367f2cb1360SIngo Molnar 	return sd;
1368f2cb1360SIngo Molnar }
1369f2cb1360SIngo Molnar 
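/*
 * Editor's note -- illustrative sketch, not kernel code: the flag ->
 * behaviour mapping that sd_init() above performs, reduced to a single
 * pure function for the imbalance_pct knob. The example_ name is
 * hypothetical; the constants are the ones sd_init() actually uses.
 */
static inline int example_imbalance_pct(int sd_flags)
{
	if (sd_flags & SD_SHARE_CPUCAPACITY)	/* SMT siblings: migration is cheap */
		return 110;
	if (sd_flags & SD_SHARE_PKG_RESOURCES)	/* shared cache: still fairly cheap */
		return 117;
	return 125;				/* default from the initializer above */
}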
1370f2cb1360SIngo Molnar /*
1371f2cb1360SIngo Molnar  * Topology list, bottom-up.
1372f2cb1360SIngo Molnar  */
1373f2cb1360SIngo Molnar static struct sched_domain_topology_level default_topology[] = {
1374f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_SMT
1375f2cb1360SIngo Molnar 	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
1376f2cb1360SIngo Molnar #endif
1377f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_MC
1378f2cb1360SIngo Molnar 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
1379f2cb1360SIngo Molnar #endif
1380f2cb1360SIngo Molnar 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1381f2cb1360SIngo Molnar 	{ NULL, },
1382f2cb1360SIngo Molnar };
1383f2cb1360SIngo Molnar 
1384f2cb1360SIngo Molnar static struct sched_domain_topology_level *sched_domain_topology =
1385f2cb1360SIngo Molnar 	default_topology;
1386f2cb1360SIngo Molnar 
1387f2cb1360SIngo Molnar #define for_each_sd_topology(tl)			\
1388f2cb1360SIngo Molnar 	for (tl = sched_domain_topology; tl->mask; tl++)
1389f2cb1360SIngo Molnar 
1390f2cb1360SIngo Molnar void set_sched_topology(struct sched_domain_topology_level *tl)
1391f2cb1360SIngo Molnar {
1392f2cb1360SIngo Molnar 	if (WARN_ON_ONCE(sched_smp_initialized))
1393f2cb1360SIngo Molnar 		return;
1394f2cb1360SIngo Molnar 
1395f2cb1360SIngo Molnar 	sched_domain_topology = tl;
1396f2cb1360SIngo Molnar }
1397f2cb1360SIngo Molnar 
1398f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1399f2cb1360SIngo Molnar 
1400f2cb1360SIngo Molnar static const struct cpumask *sd_numa_mask(int cpu)
1401f2cb1360SIngo Molnar {
1402f2cb1360SIngo Molnar 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1403f2cb1360SIngo Molnar }
1404f2cb1360SIngo Molnar 
1405f2cb1360SIngo Molnar static void sched_numa_warn(const char *str)
1406f2cb1360SIngo Molnar {
1407f2cb1360SIngo Molnar 	static bool done = false;
1408f2cb1360SIngo Molnar 	int i, j;
1409f2cb1360SIngo Molnar 
1410f2cb1360SIngo Molnar 	if (done)
1411f2cb1360SIngo Molnar 		return;
1412f2cb1360SIngo Molnar 
1413f2cb1360SIngo Molnar 	done = true;
1414f2cb1360SIngo Molnar 
1415f2cb1360SIngo Molnar 	printk(KERN_WARNING "ERROR: %s\n\n", str);
1416f2cb1360SIngo Molnar 
1417f2cb1360SIngo Molnar 	for (i = 0; i < nr_node_ids; i++) {
1418f2cb1360SIngo Molnar 		printk(KERN_WARNING "  ");
1419f2cb1360SIngo Molnar 		for (j = 0; j < nr_node_ids; j++)
1420f2cb1360SIngo Molnar 			printk(KERN_CONT "%02d ", node_distance(i, j));
1421f2cb1360SIngo Molnar 		printk(KERN_CONT "\n");
1422f2cb1360SIngo Molnar 	}
1423f2cb1360SIngo Molnar 	printk(KERN_WARNING "\n");
1424f2cb1360SIngo Molnar }
1425f2cb1360SIngo Molnar 
1426f2cb1360SIngo Molnar bool find_numa_distance(int distance)
1427f2cb1360SIngo Molnar {
1428f2cb1360SIngo Molnar 	int i;
1429f2cb1360SIngo Molnar 
1430f2cb1360SIngo Molnar 	if (distance == node_distance(0, 0))
1431f2cb1360SIngo Molnar 		return true;
1432f2cb1360SIngo Molnar 
1433f2cb1360SIngo Molnar 	for (i = 0; i < sched_domains_numa_levels; i++) {
1434f2cb1360SIngo Molnar 		if (sched_domains_numa_distance[i] == distance)
1435f2cb1360SIngo Molnar 			return true;
1436f2cb1360SIngo Molnar 	}
1437f2cb1360SIngo Molnar 
1438f2cb1360SIngo Molnar 	return false;
1439f2cb1360SIngo Molnar }
1440f2cb1360SIngo Molnar 
1441f2cb1360SIngo Molnar /*
1442f2cb1360SIngo Molnar  * A system can have three types of NUMA topology:
1443f2cb1360SIngo Molnar  * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
1444f2cb1360SIngo Molnar  * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
1445f2cb1360SIngo Molnar  * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
1446f2cb1360SIngo Molnar  *
1447f2cb1360SIngo Molnar  * The difference between a glueless mesh topology and a backplane
1448f2cb1360SIngo Molnar  * topology lies in whether communication between nodes that are not
1449f2cb1360SIngo Molnar  * directly connected goes through intermediary nodes (where programs
1450f2cb1360SIngo Molnar  * could run), or through backplane controllers. This affects
1451f2cb1360SIngo Molnar  * placement of programs.
1452f2cb1360SIngo Molnar  *
1453f2cb1360SIngo Molnar  * The type of topology can be discerned with the following tests:
1454f2cb1360SIngo Molnar  * - If the maximum distance between any nodes is 1 hop, the system
1455f2cb1360SIngo Molnar  *   is directly connected.
1456f2cb1360SIngo Molnar  * - If for two nodes A and B, located N > 1 hops away from each other,
1457f2cb1360SIngo Molnar  *   there is an intermediary node C, which is < N hops away from both
1458f2cb1360SIngo Molnar  *   nodes A and B, the system is a glueless mesh.
1459f2cb1360SIngo Molnar  */
1460f2cb1360SIngo Molnar static void init_numa_topology_type(void)
1461f2cb1360SIngo Molnar {
1462f2cb1360SIngo Molnar 	int a, b, c, n;
1463f2cb1360SIngo Molnar 
1464f2cb1360SIngo Molnar 	n = sched_max_numa_distance;
1465f2cb1360SIngo Molnar 
1466e5e96fafSSrikar Dronamraju 	if (sched_domains_numa_levels <= 2) {
1467f2cb1360SIngo Molnar 		sched_numa_topology_type = NUMA_DIRECT;
1468f2cb1360SIngo Molnar 		return;
1469f2cb1360SIngo Molnar 	}
1470f2cb1360SIngo Molnar 
1471f2cb1360SIngo Molnar 	for_each_online_node(a) {
1472f2cb1360SIngo Molnar 		for_each_online_node(b) {
1473f2cb1360SIngo Molnar 			/* Find two nodes furthest removed from each other. */
1474f2cb1360SIngo Molnar 			if (node_distance(a, b) < n)
1475f2cb1360SIngo Molnar 				continue;
1476f2cb1360SIngo Molnar 
1477f2cb1360SIngo Molnar 			/* Is there an intermediary node between a and b? */
1478f2cb1360SIngo Molnar 			for_each_online_node(c) {
1479f2cb1360SIngo Molnar 				if (node_distance(a, c) < n &&
1480f2cb1360SIngo Molnar 				    node_distance(b, c) < n) {
1481f2cb1360SIngo Molnar 					sched_numa_topology_type =
1482f2cb1360SIngo Molnar 							NUMA_GLUELESS_MESH;
1483f2cb1360SIngo Molnar 					return;
1484f2cb1360SIngo Molnar 				}
1485f2cb1360SIngo Molnar 			}
1486f2cb1360SIngo Molnar 
1487f2cb1360SIngo Molnar 			sched_numa_topology_type = NUMA_BACKPLANE;
1488f2cb1360SIngo Molnar 			return;
1489f2cb1360SIngo Molnar 		}
1490f2cb1360SIngo Molnar 	}
1491f2cb1360SIngo Molnar }
1492f2cb1360SIngo Molnar 
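/*
 * Editor's note -- illustrative sketch, not kernel code: the same
 * classification as init_numa_topology_type() above, run over a plain
 * distance table so the three outcomes are easy to see. All example_
 * names are hypothetical; 'max' plays the role of sched_max_numa_distance
 * and 'levels' that of sched_domains_numa_levels.
 */
enum example_numa_type { EXAMPLE_DIRECT, EXAMPLE_GLUELESS_MESH, EXAMPLE_BACKPLANE };

static inline enum example_numa_type
example_classify_numa(int nr, const int dist[][4], int levels, int max)
{
	int a, b, c;

	if (levels <= 2)			/* only local + one remote distance */
		return EXAMPLE_DIRECT;

	for (a = 0; a < nr; a++) {
		for (b = 0; b < nr; b++) {
			if (dist[a][b] < max)	/* look for a furthest-apart pair */
				continue;
			for (c = 0; c < nr; c++) {
				/* An intermediary node closer to both? */
				if (dist[a][c] < max && dist[b][c] < max)
					return EXAMPLE_GLUELESS_MESH;
			}
			return EXAMPLE_BACKPLANE;
		}
	}
	return EXAMPLE_DIRECT;
}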
1493f2cb1360SIngo Molnar void sched_init_numa(void)
1494f2cb1360SIngo Molnar {
1495f2cb1360SIngo Molnar 	int next_distance, curr_distance = node_distance(0, 0);
1496f2cb1360SIngo Molnar 	struct sched_domain_topology_level *tl;
1497f2cb1360SIngo Molnar 	int level = 0;
1498f2cb1360SIngo Molnar 	int i, j, k;
1499f2cb1360SIngo Molnar 
1500993f0b05SPeter Zijlstra 	sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL);
1501f2cb1360SIngo Molnar 	if (!sched_domains_numa_distance)
1502f2cb1360SIngo Molnar 		return;
1503f2cb1360SIngo Molnar 
1504051f3ca0SSuravee Suthikulpanit 	/* Includes NUMA identity node at level 0. */
1505051f3ca0SSuravee Suthikulpanit 	sched_domains_numa_distance[level++] = curr_distance;
1506051f3ca0SSuravee Suthikulpanit 	sched_domains_numa_levels = level;
1507051f3ca0SSuravee Suthikulpanit 
1508f2cb1360SIngo Molnar 	/*
1509f2cb1360SIngo Molnar 	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
1510f2cb1360SIngo Molnar 	 * unique distances in the node_distance() table.
1511f2cb1360SIngo Molnar 	 *
1512f2cb1360SIngo Molnar 	 * Assumes node_distance(0,j) includes all distances in
1513f2cb1360SIngo Molnar 	 * node_distance(i,j) in order to avoid cubic time.
1514f2cb1360SIngo Molnar 	 */
1515f2cb1360SIngo Molnar 	next_distance = curr_distance;
1516f2cb1360SIngo Molnar 	for (i = 0; i < nr_node_ids; i++) {
1517f2cb1360SIngo Molnar 		for (j = 0; j < nr_node_ids; j++) {
1518f2cb1360SIngo Molnar 			for (k = 0; k < nr_node_ids; k++) {
1519f2cb1360SIngo Molnar 				int distance = node_distance(i, k);
1520f2cb1360SIngo Molnar 
1521f2cb1360SIngo Molnar 				if (distance > curr_distance &&
1522f2cb1360SIngo Molnar 				    (distance < next_distance ||
1523f2cb1360SIngo Molnar 				     next_distance == curr_distance))
1524f2cb1360SIngo Molnar 					next_distance = distance;
1525f2cb1360SIngo Molnar 
1526f2cb1360SIngo Molnar 				/*
1527f2cb1360SIngo Molnar 				 * While not a strong assumption it would be nice to know
1528f2cb1360SIngo Molnar 				 * about cases where if node A is connected to B, B is not
1529f2cb1360SIngo Molnar 				 * equally connected to A.
1530f2cb1360SIngo Molnar 				 */
1531f2cb1360SIngo Molnar 				if (sched_debug() && node_distance(k, i) != distance)
1532f2cb1360SIngo Molnar 					sched_numa_warn("Node-distance not symmetric");
1533f2cb1360SIngo Molnar 
1534f2cb1360SIngo Molnar 				if (sched_debug() && i && !find_numa_distance(distance))
1535f2cb1360SIngo Molnar 					sched_numa_warn("Node-0 not representative");
1536f2cb1360SIngo Molnar 			}
1537f2cb1360SIngo Molnar 			if (next_distance != curr_distance) {
1538f2cb1360SIngo Molnar 				sched_domains_numa_distance[level++] = next_distance;
1539f2cb1360SIngo Molnar 				sched_domains_numa_levels = level;
1540f2cb1360SIngo Molnar 				curr_distance = next_distance;
1541f2cb1360SIngo Molnar 			} else break;
1542f2cb1360SIngo Molnar 		}
1543f2cb1360SIngo Molnar 
1544f2cb1360SIngo Molnar 		/*
1545f2cb1360SIngo Molnar 		 * When sched_debug() is enabled, we verify the above assumption.
1546f2cb1360SIngo Molnar 		 */
1547f2cb1360SIngo Molnar 		if (!sched_debug())
1548f2cb1360SIngo Molnar 			break;
1549f2cb1360SIngo Molnar 	}
1550f2cb1360SIngo Molnar 
1551f2cb1360SIngo Molnar 	/*
1552051f3ca0SSuravee Suthikulpanit 	 * 'level' contains the number of unique distances
1553f2cb1360SIngo Molnar 	 *
1554f2cb1360SIngo Molnar 	 * The sched_domains_numa_distance[] array includes the actual distance
1555f2cb1360SIngo Molnar 	 * numbers.
1556f2cb1360SIngo Molnar 	 */
1557f2cb1360SIngo Molnar 
1558f2cb1360SIngo Molnar 	/*
1559f2cb1360SIngo Molnar 	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
1560f2cb1360SIngo Molnar 	 * If allocating memory for the array sched_domains_numa_masks[][] fails,
1561f2cb1360SIngo Molnar 	 * the array will contain fewer than 'level' members. This could be
1562f2cb1360SIngo Molnar 	 * dangerous when we use it to iterate the array sched_domains_numa_masks[][]
1563f2cb1360SIngo Molnar 	 * in other functions.
1564f2cb1360SIngo Molnar 	 *
1565f2cb1360SIngo Molnar 	 * We reset it to 'level' at the end of this function.
1566f2cb1360SIngo Molnar 	 */
1567f2cb1360SIngo Molnar 	sched_domains_numa_levels = 0;
1568f2cb1360SIngo Molnar 
1569f2cb1360SIngo Molnar 	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
1570f2cb1360SIngo Molnar 	if (!sched_domains_numa_masks)
1571f2cb1360SIngo Molnar 		return;
1572f2cb1360SIngo Molnar 
1573f2cb1360SIngo Molnar 	/*
1574f2cb1360SIngo Molnar 	 * Now for each level, construct a mask per node which contains all
1575f2cb1360SIngo Molnar 	 * CPUs of nodes that are that many hops away from us.
1576f2cb1360SIngo Molnar */ 1577f2cb1360SIngo Molnar for (i = 0; i < level; i++) { 1578f2cb1360SIngo Molnar sched_domains_numa_masks[i] = 1579f2cb1360SIngo Molnar kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 1580f2cb1360SIngo Molnar if (!sched_domains_numa_masks[i]) 1581f2cb1360SIngo Molnar return; 1582f2cb1360SIngo Molnar 1583f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) { 1584f2cb1360SIngo Molnar struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 1585f2cb1360SIngo Molnar if (!mask) 1586f2cb1360SIngo Molnar return; 1587f2cb1360SIngo Molnar 1588f2cb1360SIngo Molnar sched_domains_numa_masks[i][j] = mask; 1589f2cb1360SIngo Molnar 1590f2cb1360SIngo Molnar for_each_node(k) { 1591f2cb1360SIngo Molnar if (node_distance(j, k) > sched_domains_numa_distance[i]) 1592f2cb1360SIngo Molnar continue; 1593f2cb1360SIngo Molnar 1594f2cb1360SIngo Molnar cpumask_or(mask, mask, cpumask_of_node(k)); 1595f2cb1360SIngo Molnar } 1596f2cb1360SIngo Molnar } 1597f2cb1360SIngo Molnar } 1598f2cb1360SIngo Molnar 1599f2cb1360SIngo Molnar /* Compute default topology size */ 1600f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++); 1601f2cb1360SIngo Molnar 1602f2cb1360SIngo Molnar tl = kzalloc((i + level + 1) * 1603f2cb1360SIngo Molnar sizeof(struct sched_domain_topology_level), GFP_KERNEL); 1604f2cb1360SIngo Molnar if (!tl) 1605f2cb1360SIngo Molnar return; 1606f2cb1360SIngo Molnar 1607f2cb1360SIngo Molnar /* 1608f2cb1360SIngo Molnar * Copy the default topology bits.. 1609f2cb1360SIngo Molnar */ 1610f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++) 1611f2cb1360SIngo Molnar tl[i] = sched_domain_topology[i]; 1612f2cb1360SIngo Molnar 1613f2cb1360SIngo Molnar /* 1614051f3ca0SSuravee Suthikulpanit * Add the NUMA identity distance, aka single NODE. 1615051f3ca0SSuravee Suthikulpanit */ 1616051f3ca0SSuravee Suthikulpanit tl[i++] = (struct sched_domain_topology_level){ 1617051f3ca0SSuravee Suthikulpanit .mask = sd_numa_mask, 1618051f3ca0SSuravee Suthikulpanit .numa_level = 0, 1619051f3ca0SSuravee Suthikulpanit SD_INIT_NAME(NODE) 1620051f3ca0SSuravee Suthikulpanit }; 1621051f3ca0SSuravee Suthikulpanit 1622051f3ca0SSuravee Suthikulpanit /* 1623f2cb1360SIngo Molnar * .. and append 'j' levels of NUMA goodness. 
1624f2cb1360SIngo Molnar */ 1625051f3ca0SSuravee Suthikulpanit for (j = 1; j < level; i++, j++) { 1626f2cb1360SIngo Molnar tl[i] = (struct sched_domain_topology_level){ 1627f2cb1360SIngo Molnar .mask = sd_numa_mask, 1628f2cb1360SIngo Molnar .sd_flags = cpu_numa_flags, 1629f2cb1360SIngo Molnar .flags = SDTL_OVERLAP, 1630f2cb1360SIngo Molnar .numa_level = j, 1631f2cb1360SIngo Molnar SD_INIT_NAME(NUMA) 1632f2cb1360SIngo Molnar }; 1633f2cb1360SIngo Molnar } 1634f2cb1360SIngo Molnar 1635f2cb1360SIngo Molnar sched_domain_topology = tl; 1636f2cb1360SIngo Molnar 1637f2cb1360SIngo Molnar sched_domains_numa_levels = level; 1638f2cb1360SIngo Molnar sched_max_numa_distance = sched_domains_numa_distance[level - 1]; 1639f2cb1360SIngo Molnar 1640f2cb1360SIngo Molnar init_numa_topology_type(); 1641f2cb1360SIngo Molnar } 1642f2cb1360SIngo Molnar 1643f2cb1360SIngo Molnar void sched_domains_numa_masks_set(unsigned int cpu) 1644f2cb1360SIngo Molnar { 1645f2cb1360SIngo Molnar int node = cpu_to_node(cpu); 1646f2cb1360SIngo Molnar int i, j; 1647f2cb1360SIngo Molnar 1648f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 1649f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) { 1650f2cb1360SIngo Molnar if (node_distance(j, node) <= sched_domains_numa_distance[i]) 1651f2cb1360SIngo Molnar cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 1652f2cb1360SIngo Molnar } 1653f2cb1360SIngo Molnar } 1654f2cb1360SIngo Molnar } 1655f2cb1360SIngo Molnar 1656f2cb1360SIngo Molnar void sched_domains_numa_masks_clear(unsigned int cpu) 1657f2cb1360SIngo Molnar { 1658f2cb1360SIngo Molnar int i, j; 1659f2cb1360SIngo Molnar 1660f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 1661f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) 1662f2cb1360SIngo Molnar cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 1663f2cb1360SIngo Molnar } 1664f2cb1360SIngo Molnar } 1665f2cb1360SIngo Molnar 1666f2cb1360SIngo Molnar #endif /* CONFIG_NUMA */ 1667f2cb1360SIngo Molnar 1668f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map) 1669f2cb1360SIngo Molnar { 1670f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1671f2cb1360SIngo Molnar int j; 1672f2cb1360SIngo Molnar 1673f2cb1360SIngo Molnar for_each_sd_topology(tl) { 1674f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 1675f2cb1360SIngo Molnar 1676f2cb1360SIngo Molnar sdd->sd = alloc_percpu(struct sched_domain *); 1677f2cb1360SIngo Molnar if (!sdd->sd) 1678f2cb1360SIngo Molnar return -ENOMEM; 1679f2cb1360SIngo Molnar 1680f2cb1360SIngo Molnar sdd->sds = alloc_percpu(struct sched_domain_shared *); 1681f2cb1360SIngo Molnar if (!sdd->sds) 1682f2cb1360SIngo Molnar return -ENOMEM; 1683f2cb1360SIngo Molnar 1684f2cb1360SIngo Molnar sdd->sg = alloc_percpu(struct sched_group *); 1685f2cb1360SIngo Molnar if (!sdd->sg) 1686f2cb1360SIngo Molnar return -ENOMEM; 1687f2cb1360SIngo Molnar 1688f2cb1360SIngo Molnar sdd->sgc = alloc_percpu(struct sched_group_capacity *); 1689f2cb1360SIngo Molnar if (!sdd->sgc) 1690f2cb1360SIngo Molnar return -ENOMEM; 1691f2cb1360SIngo Molnar 1692f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 1693f2cb1360SIngo Molnar struct sched_domain *sd; 1694f2cb1360SIngo Molnar struct sched_domain_shared *sds; 1695f2cb1360SIngo Molnar struct sched_group *sg; 1696f2cb1360SIngo Molnar struct sched_group_capacity *sgc; 1697f2cb1360SIngo Molnar 1698f2cb1360SIngo Molnar sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 1699f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 1700f2cb1360SIngo Molnar if 
(!sd) 1701f2cb1360SIngo Molnar return -ENOMEM; 1702f2cb1360SIngo Molnar 1703f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, j) = sd; 1704f2cb1360SIngo Molnar 1705f2cb1360SIngo Molnar sds = kzalloc_node(sizeof(struct sched_domain_shared), 1706f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 1707f2cb1360SIngo Molnar if (!sds) 1708f2cb1360SIngo Molnar return -ENOMEM; 1709f2cb1360SIngo Molnar 1710f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, j) = sds; 1711f2cb1360SIngo Molnar 1712f2cb1360SIngo Molnar sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 1713f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 1714f2cb1360SIngo Molnar if (!sg) 1715f2cb1360SIngo Molnar return -ENOMEM; 1716f2cb1360SIngo Molnar 1717f2cb1360SIngo Molnar sg->next = sg; 1718f2cb1360SIngo Molnar 1719f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, j) = sg; 1720f2cb1360SIngo Molnar 1721f2cb1360SIngo Molnar sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 1722f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 1723f2cb1360SIngo Molnar if (!sgc) 1724f2cb1360SIngo Molnar return -ENOMEM; 1725f2cb1360SIngo Molnar 1726005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 1727005f874dSPeter Zijlstra sgc->id = j; 1728005f874dSPeter Zijlstra #endif 1729005f874dSPeter Zijlstra 1730f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, j) = sgc; 1731f2cb1360SIngo Molnar } 1732f2cb1360SIngo Molnar } 1733f2cb1360SIngo Molnar 1734f2cb1360SIngo Molnar return 0; 1735f2cb1360SIngo Molnar } 1736f2cb1360SIngo Molnar 1737f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map) 1738f2cb1360SIngo Molnar { 1739f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1740f2cb1360SIngo Molnar int j; 1741f2cb1360SIngo Molnar 1742f2cb1360SIngo Molnar for_each_sd_topology(tl) { 1743f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 1744f2cb1360SIngo Molnar 1745f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 1746f2cb1360SIngo Molnar struct sched_domain *sd; 1747f2cb1360SIngo Molnar 1748f2cb1360SIngo Molnar if (sdd->sd) { 1749f2cb1360SIngo Molnar sd = *per_cpu_ptr(sdd->sd, j); 1750f2cb1360SIngo Molnar if (sd && (sd->flags & SD_OVERLAP)) 1751f2cb1360SIngo Molnar free_sched_groups(sd->groups, 0); 1752f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sd, j)); 1753f2cb1360SIngo Molnar } 1754f2cb1360SIngo Molnar 1755f2cb1360SIngo Molnar if (sdd->sds) 1756f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sds, j)); 1757f2cb1360SIngo Molnar if (sdd->sg) 1758f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sg, j)); 1759f2cb1360SIngo Molnar if (sdd->sgc) 1760f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sgc, j)); 1761f2cb1360SIngo Molnar } 1762f2cb1360SIngo Molnar free_percpu(sdd->sd); 1763f2cb1360SIngo Molnar sdd->sd = NULL; 1764f2cb1360SIngo Molnar free_percpu(sdd->sds); 1765f2cb1360SIngo Molnar sdd->sds = NULL; 1766f2cb1360SIngo Molnar free_percpu(sdd->sg); 1767f2cb1360SIngo Molnar sdd->sg = NULL; 1768f2cb1360SIngo Molnar free_percpu(sdd->sgc); 1769f2cb1360SIngo Molnar sdd->sgc = NULL; 1770f2cb1360SIngo Molnar } 1771f2cb1360SIngo Molnar } 1772f2cb1360SIngo Molnar 1773181a80d1SViresh Kumar static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 1774f2cb1360SIngo Molnar const struct cpumask *cpu_map, struct sched_domain_attr *attr, 177505484e09SMorten Rasmussen struct sched_domain *child, int dflags, int cpu) 1776f2cb1360SIngo Molnar { 177705484e09SMorten Rasmussen struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu); 1778f2cb1360SIngo Molnar 1779f2cb1360SIngo Molnar if (child) { 1780f2cb1360SIngo 
Molnar 		sd->level = child->level + 1;
1781f2cb1360SIngo Molnar 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
1782f2cb1360SIngo Molnar 		child->parent = sd;
1783f2cb1360SIngo Molnar 
1784f2cb1360SIngo Molnar 		if (!cpumask_subset(sched_domain_span(child),
1785f2cb1360SIngo Molnar 				    sched_domain_span(sd))) {
1786f2cb1360SIngo Molnar 			pr_err("BUG: arch topology broken\n");
1787f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
1788f2cb1360SIngo Molnar 			pr_err("     the %s domain is not a subset of the %s domain\n",
1789f2cb1360SIngo Molnar 			       child->name, sd->name);
1790f2cb1360SIngo Molnar #endif
179197fb7a0aSIngo Molnar 			/* Fixup, ensure @sd has at least @child CPUs. */
1792f2cb1360SIngo Molnar 			cpumask_or(sched_domain_span(sd),
1793f2cb1360SIngo Molnar 				   sched_domain_span(sd),
1794f2cb1360SIngo Molnar 				   sched_domain_span(child));
1795f2cb1360SIngo Molnar 		}
1796f2cb1360SIngo Molnar 
1797f2cb1360SIngo Molnar 	}
1798f2cb1360SIngo Molnar 	set_domain_attribute(sd, attr);
1799f2cb1360SIngo Molnar 
1800f2cb1360SIngo Molnar 	return sd;
1801f2cb1360SIngo Molnar }
1802f2cb1360SIngo Molnar 
1803f2cb1360SIngo Molnar /*
180405484e09SMorten Rasmussen  * Find the sched_domain_topology_level where all CPU capacities are visible
180505484e09SMorten Rasmussen  * for all CPUs.
180605484e09SMorten Rasmussen  */
180705484e09SMorten Rasmussen static struct sched_domain_topology_level
180805484e09SMorten Rasmussen *asym_cpu_capacity_level(const struct cpumask *cpu_map)
180905484e09SMorten Rasmussen {
181005484e09SMorten Rasmussen 	int i, j, asym_level = 0;
181105484e09SMorten Rasmussen 	bool asym = false;
181205484e09SMorten Rasmussen 	struct sched_domain_topology_level *tl, *asym_tl = NULL;
181305484e09SMorten Rasmussen 	unsigned long cap;
181405484e09SMorten Rasmussen 
181505484e09SMorten Rasmussen 	/* Is there any asymmetry? */
181605484e09SMorten Rasmussen 	cap = arch_scale_cpu_capacity(NULL, cpumask_first(cpu_map));
181705484e09SMorten Rasmussen 
181805484e09SMorten Rasmussen 	for_each_cpu(i, cpu_map) {
181905484e09SMorten Rasmussen 		if (arch_scale_cpu_capacity(NULL, i) != cap) {
182005484e09SMorten Rasmussen 			asym = true;
182105484e09SMorten Rasmussen 			break;
182205484e09SMorten Rasmussen 		}
182305484e09SMorten Rasmussen 	}
182405484e09SMorten Rasmussen 
182505484e09SMorten Rasmussen 	if (!asym)
182605484e09SMorten Rasmussen 		return NULL;
182705484e09SMorten Rasmussen 
182805484e09SMorten Rasmussen 	/*
182905484e09SMorten Rasmussen 	 * Examine topology from all CPUs' points of view to detect the lowest
183005484e09SMorten Rasmussen 	 * sched_domain_topology_level where a highest capacity CPU is visible
183105484e09SMorten Rasmussen 	 * to everyone.
183205484e09SMorten Rasmussen */ 183305484e09SMorten Rasmussen for_each_cpu(i, cpu_map) { 183405484e09SMorten Rasmussen unsigned long max_capacity = arch_scale_cpu_capacity(NULL, i); 183505484e09SMorten Rasmussen int tl_id = 0; 183605484e09SMorten Rasmussen 183705484e09SMorten Rasmussen for_each_sd_topology(tl) { 183805484e09SMorten Rasmussen if (tl_id < asym_level) 183905484e09SMorten Rasmussen goto next_level; 184005484e09SMorten Rasmussen 184105484e09SMorten Rasmussen for_each_cpu_and(j, tl->mask(i), cpu_map) { 184205484e09SMorten Rasmussen unsigned long capacity; 184305484e09SMorten Rasmussen 184405484e09SMorten Rasmussen capacity = arch_scale_cpu_capacity(NULL, j); 184505484e09SMorten Rasmussen 184605484e09SMorten Rasmussen if (capacity <= max_capacity) 184705484e09SMorten Rasmussen continue; 184805484e09SMorten Rasmussen 184905484e09SMorten Rasmussen max_capacity = capacity; 185005484e09SMorten Rasmussen asym_level = tl_id; 185105484e09SMorten Rasmussen asym_tl = tl; 185205484e09SMorten Rasmussen } 185305484e09SMorten Rasmussen next_level: 185405484e09SMorten Rasmussen tl_id++; 185505484e09SMorten Rasmussen } 185605484e09SMorten Rasmussen } 185705484e09SMorten Rasmussen 185805484e09SMorten Rasmussen return asym_tl; 185905484e09SMorten Rasmussen } 186005484e09SMorten Rasmussen 186105484e09SMorten Rasmussen 186205484e09SMorten Rasmussen /* 1863f2cb1360SIngo Molnar * Build sched domains for a given set of CPUs and attach the sched domains 1864f2cb1360SIngo Molnar * to the individual CPUs 1865f2cb1360SIngo Molnar */ 1866f2cb1360SIngo Molnar static int 1867f2cb1360SIngo Molnar build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) 1868f2cb1360SIngo Molnar { 1869f2cb1360SIngo Molnar enum s_alloc alloc_state; 1870f2cb1360SIngo Molnar struct sched_domain *sd; 1871f2cb1360SIngo Molnar struct s_data d; 1872f2cb1360SIngo Molnar struct rq *rq = NULL; 1873f2cb1360SIngo Molnar int i, ret = -ENOMEM; 187405484e09SMorten Rasmussen struct sched_domain_topology_level *tl_asym; 1875df054e84SMorten Rasmussen bool has_asym = false; 1876f2cb1360SIngo Molnar 1877f2cb1360SIngo Molnar alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 1878f2cb1360SIngo Molnar if (alloc_state != sa_rootdomain) 1879f2cb1360SIngo Molnar goto error; 1880f2cb1360SIngo Molnar 188105484e09SMorten Rasmussen tl_asym = asym_cpu_capacity_level(cpu_map); 188205484e09SMorten Rasmussen 1883f2cb1360SIngo Molnar /* Set up domains for CPUs specified by the cpu_map: */ 1884f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) { 1885f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1886f2cb1360SIngo Molnar 1887f2cb1360SIngo Molnar sd = NULL; 1888f2cb1360SIngo Molnar for_each_sd_topology(tl) { 188905484e09SMorten Rasmussen int dflags = 0; 189005484e09SMorten Rasmussen 1891df054e84SMorten Rasmussen if (tl == tl_asym) { 189205484e09SMorten Rasmussen dflags |= SD_ASYM_CPUCAPACITY; 1893df054e84SMorten Rasmussen has_asym = true; 1894df054e84SMorten Rasmussen } 189505484e09SMorten Rasmussen 189605484e09SMorten Rasmussen sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i); 189705484e09SMorten Rasmussen 1898f2cb1360SIngo Molnar if (tl == sched_domain_topology) 1899f2cb1360SIngo Molnar *per_cpu_ptr(d.sd, i) = sd; 1900af85596cSPeter Zijlstra if (tl->flags & SDTL_OVERLAP) 1901f2cb1360SIngo Molnar sd->flags |= SD_OVERLAP; 1902f2cb1360SIngo Molnar if (cpumask_equal(cpu_map, sched_domain_span(sd))) 1903f2cb1360SIngo Molnar break; 1904f2cb1360SIngo Molnar } 1905f2cb1360SIngo Molnar } 1906f2cb1360SIngo Molnar 
1907f2cb1360SIngo Molnar 	/* Build the groups for the domains */
1908f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
1909f2cb1360SIngo Molnar 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
1910f2cb1360SIngo Molnar 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
1911f2cb1360SIngo Molnar 			if (sd->flags & SD_OVERLAP) {
1912f2cb1360SIngo Molnar 				if (build_overlap_sched_groups(sd, i))
1913f2cb1360SIngo Molnar 					goto error;
1914f2cb1360SIngo Molnar 			} else {
1915f2cb1360SIngo Molnar 				if (build_sched_groups(sd, i))
1916f2cb1360SIngo Molnar 					goto error;
1917f2cb1360SIngo Molnar 			}
1918f2cb1360SIngo Molnar 		}
1919f2cb1360SIngo Molnar 	}
1920f2cb1360SIngo Molnar 
1921f2cb1360SIngo Molnar 	/* Calculate CPU capacity for physical packages and nodes */
1922f2cb1360SIngo Molnar 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
1923f2cb1360SIngo Molnar 		if (!cpumask_test_cpu(i, cpu_map))
1924f2cb1360SIngo Molnar 			continue;
1925f2cb1360SIngo Molnar 
1926f2cb1360SIngo Molnar 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
1927f2cb1360SIngo Molnar 			claim_allocations(i, sd);
1928f2cb1360SIngo Molnar 			init_sched_groups_capacity(i, sd);
1929f2cb1360SIngo Molnar 		}
1930f2cb1360SIngo Molnar 	}
1931f2cb1360SIngo Molnar 
1932f2cb1360SIngo Molnar 	/* Attach the domains */
1933f2cb1360SIngo Molnar 	rcu_read_lock();
1934f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
1935f2cb1360SIngo Molnar 		rq = cpu_rq(i);
1936f2cb1360SIngo Molnar 		sd = *per_cpu_ptr(d.sd, i);
1937f2cb1360SIngo Molnar 
1938f2cb1360SIngo Molnar 		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
1939f2cb1360SIngo Molnar 		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
1940f2cb1360SIngo Molnar 			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
1941f2cb1360SIngo Molnar 
1942f2cb1360SIngo Molnar 		cpu_attach_domain(sd, d.rd, i);
1943f2cb1360SIngo Molnar 	}
1944f2cb1360SIngo Molnar 	rcu_read_unlock();
1945f2cb1360SIngo Molnar 
1946df054e84SMorten Rasmussen 	if (has_asym)
1947df054e84SMorten Rasmussen 		static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
1948df054e84SMorten Rasmussen 
1949f2cb1360SIngo Molnar 	if (rq && sched_debug_enabled) {
1950bf5015a5SJuri Lelli 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
1951f2cb1360SIngo Molnar 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
1952f2cb1360SIngo Molnar 	}
1953f2cb1360SIngo Molnar 
1954f2cb1360SIngo Molnar 	ret = 0;
1955f2cb1360SIngo Molnar error:
1956f2cb1360SIngo Molnar 	__free_domain_allocs(&d, alloc_state, cpu_map);
195797fb7a0aSIngo Molnar 
1958f2cb1360SIngo Molnar 	return ret;
1959f2cb1360SIngo Molnar }
1960f2cb1360SIngo Molnar 
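/*
 * Editor's note -- illustrative sketch, not kernel code: the lock-free
 * "track the maximum" idiom used when attaching domains above. READ_ONCE()
 * and WRITE_ONCE() are the real kernel macros; they prevent load/store
 * tearing but deliberately tolerate the benign race between concurrent
 * updaters. The example_ name is hypothetical.
 */
static inline void example_track_max(unsigned long *max, unsigned long val)
{
	if (val > READ_ONCE(*max))
		WRITE_ONCE(*max, val);
}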
1961f2cb1360SIngo Molnar /* Current sched domains: */
1962f2cb1360SIngo Molnar static cpumask_var_t *doms_cur;
1963f2cb1360SIngo Molnar 
1964f2cb1360SIngo Molnar /* Number of sched domains in 'doms_cur': */
1965f2cb1360SIngo Molnar static int ndoms_cur;
1966f2cb1360SIngo Molnar 
1967f2cb1360SIngo Molnar /* Attributes of custom domains in 'doms_cur' */
1968f2cb1360SIngo Molnar static struct sched_domain_attr *dattr_cur;
1969f2cb1360SIngo Molnar 
1970f2cb1360SIngo Molnar /*
1971f2cb1360SIngo Molnar  * Special case: If a kmalloc() of a doms_cur partition (array of
1972f2cb1360SIngo Molnar  * cpumask) fails, then fall back to a single sched domain,
1973f2cb1360SIngo Molnar  * as determined by the single cpumask fallback_doms.
1974f2cb1360SIngo Molnar  */
19758d5dc512SPeter Zijlstra static cpumask_var_t fallback_doms;
1976f2cb1360SIngo Molnar 
1977f2cb1360SIngo Molnar /*
1978f2cb1360SIngo Molnar  * arch_update_cpu_topology lets virtualized architectures update the
1979f2cb1360SIngo Molnar  * CPU core maps. It is supposed to return 1 if the topology changed
1980f2cb1360SIngo Molnar  * or 0 if it stayed the same.
1981f2cb1360SIngo Molnar  */
1982f2cb1360SIngo Molnar int __weak arch_update_cpu_topology(void)
1983f2cb1360SIngo Molnar {
1984f2cb1360SIngo Molnar 	return 0;
1985f2cb1360SIngo Molnar }
1986f2cb1360SIngo Molnar 
1987f2cb1360SIngo Molnar cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
1988f2cb1360SIngo Molnar {
1989f2cb1360SIngo Molnar 	int i;
1990f2cb1360SIngo Molnar 	cpumask_var_t *doms;
1991f2cb1360SIngo Molnar 
19926da2ec56SKees Cook 	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
1993f2cb1360SIngo Molnar 	if (!doms)
1994f2cb1360SIngo Molnar 		return NULL;
1995f2cb1360SIngo Molnar 	for (i = 0; i < ndoms; i++) {
1996f2cb1360SIngo Molnar 		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
1997f2cb1360SIngo Molnar 			free_sched_domains(doms, i);
1998f2cb1360SIngo Molnar 			return NULL;
1999f2cb1360SIngo Molnar 		}
2000f2cb1360SIngo Molnar 	}
2001f2cb1360SIngo Molnar 	return doms;
2002f2cb1360SIngo Molnar }
2003f2cb1360SIngo Molnar 
2004f2cb1360SIngo Molnar void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2005f2cb1360SIngo Molnar {
2006f2cb1360SIngo Molnar 	unsigned int i;
2007f2cb1360SIngo Molnar 	for (i = 0; i < ndoms; i++)
2008f2cb1360SIngo Molnar 		free_cpumask_var(doms[i]);
2009f2cb1360SIngo Molnar 	kfree(doms);
2010f2cb1360SIngo Molnar }
2011f2cb1360SIngo Molnar 
2012f2cb1360SIngo Molnar /*
2013f2cb1360SIngo Molnar  * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
2014f2cb1360SIngo Molnar  * For now this just excludes isolated CPUs, but could be used to
2015f2cb1360SIngo Molnar  * exclude other special cases in the future.
2016f2cb1360SIngo Molnar */ 20178d5dc512SPeter Zijlstra int sched_init_domains(const struct cpumask *cpu_map) 2018f2cb1360SIngo Molnar { 2019f2cb1360SIngo Molnar int err; 2020f2cb1360SIngo Molnar 20218d5dc512SPeter Zijlstra zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL); 20221676330eSPeter Zijlstra zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL); 20238d5dc512SPeter Zijlstra zalloc_cpumask_var(&fallback_doms, GFP_KERNEL); 20248d5dc512SPeter Zijlstra 2025f2cb1360SIngo Molnar arch_update_cpu_topology(); 2026f2cb1360SIngo Molnar ndoms_cur = 1; 2027f2cb1360SIngo Molnar doms_cur = alloc_sched_domains(ndoms_cur); 2028f2cb1360SIngo Molnar if (!doms_cur) 2029f2cb1360SIngo Molnar doms_cur = &fallback_doms; 2030edb93821SFrederic Weisbecker cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN)); 2031f2cb1360SIngo Molnar err = build_sched_domains(doms_cur[0], NULL); 2032f2cb1360SIngo Molnar register_sched_domain_sysctl(); 2033f2cb1360SIngo Molnar 2034f2cb1360SIngo Molnar return err; 2035f2cb1360SIngo Molnar } 2036f2cb1360SIngo Molnar 2037f2cb1360SIngo Molnar /* 2038f2cb1360SIngo Molnar * Detach sched domains from a group of CPUs specified in cpu_map 2039f2cb1360SIngo Molnar * These CPUs will now be attached to the NULL domain 2040f2cb1360SIngo Molnar */ 2041f2cb1360SIngo Molnar static void detach_destroy_domains(const struct cpumask *cpu_map) 2042f2cb1360SIngo Molnar { 2043f2cb1360SIngo Molnar int i; 2044f2cb1360SIngo Molnar 2045f2cb1360SIngo Molnar rcu_read_lock(); 2046f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) 2047f2cb1360SIngo Molnar cpu_attach_domain(NULL, &def_root_domain, i); 2048f2cb1360SIngo Molnar rcu_read_unlock(); 2049f2cb1360SIngo Molnar } 2050f2cb1360SIngo Molnar 2051f2cb1360SIngo Molnar /* handle null as "default" */ 2052f2cb1360SIngo Molnar static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 2053f2cb1360SIngo Molnar struct sched_domain_attr *new, int idx_new) 2054f2cb1360SIngo Molnar { 2055f2cb1360SIngo Molnar struct sched_domain_attr tmp; 2056f2cb1360SIngo Molnar 2057f2cb1360SIngo Molnar /* Fast path: */ 2058f2cb1360SIngo Molnar if (!new && !cur) 2059f2cb1360SIngo Molnar return 1; 2060f2cb1360SIngo Molnar 2061f2cb1360SIngo Molnar tmp = SD_ATTR_INIT; 206297fb7a0aSIngo Molnar 2063f2cb1360SIngo Molnar return !memcmp(cur ? (cur + idx_cur) : &tmp, 2064f2cb1360SIngo Molnar new ? (new + idx_new) : &tmp, 2065f2cb1360SIngo Molnar sizeof(struct sched_domain_attr)); 2066f2cb1360SIngo Molnar } 2067f2cb1360SIngo Molnar 2068f2cb1360SIngo Molnar /* 2069f2cb1360SIngo Molnar * Partition sched domains as specified by the 'ndoms_new' 2070f2cb1360SIngo Molnar * cpumasks in the array doms_new[] of cpumasks. This compares 2071f2cb1360SIngo Molnar * doms_new[] to the current sched domain partitioning, doms_cur[]. 2072f2cb1360SIngo Molnar * It destroys each deleted domain and builds each new domain. 2073f2cb1360SIngo Molnar * 2074f2cb1360SIngo Molnar * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 2075f2cb1360SIngo Molnar * The masks don't intersect (don't overlap.) We should setup one 2076f2cb1360SIngo Molnar * sched domain for each mask. CPUs not in any of the cpumasks will 2077f2cb1360SIngo Molnar * not be load balanced. If the same cpumask appears both in the 2078f2cb1360SIngo Molnar * current 'doms_cur' domains and in the new 'doms_new', we can leave 2079f2cb1360SIngo Molnar * it as it is. 
2080f2cb1360SIngo Molnar  *
2081f2cb1360SIngo Molnar  * The passed in 'doms_new' should be allocated using
2082f2cb1360SIngo Molnar  * alloc_sched_domains.  This routine takes ownership of it and will
2083f2cb1360SIngo Molnar  * free_sched_domains it when done with it.  If the caller failed the
2084f2cb1360SIngo Molnar  * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2085f2cb1360SIngo Molnar  * and partition_sched_domains() will fall back to the single partition
2086f2cb1360SIngo Molnar  * 'fallback_doms'; this also forces the domains to be rebuilt.
2087f2cb1360SIngo Molnar  *
2088f2cb1360SIngo Molnar  * If doms_new == NULL it will be replaced with cpu_online_mask.
2089f2cb1360SIngo Molnar  * ndoms_new == 0 is a special case for destroying existing domains,
2090f2cb1360SIngo Molnar  * and it will not create the default domain.
2091f2cb1360SIngo Molnar  *
2092f2cb1360SIngo Molnar  * Call with hotplug lock held.
2093f2cb1360SIngo Molnar  */
2094f2cb1360SIngo Molnar void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2095f2cb1360SIngo Molnar 			     struct sched_domain_attr *dattr_new)
2096f2cb1360SIngo Molnar {
2097f2cb1360SIngo Molnar 	int i, j, n;
2098f2cb1360SIngo Molnar 	int new_topology;
2099f2cb1360SIngo Molnar 
2100f2cb1360SIngo Molnar 	mutex_lock(&sched_domains_mutex);
2101f2cb1360SIngo Molnar 
2102f2cb1360SIngo Molnar 	/* Always unregister in case we don't destroy any domains: */
2103f2cb1360SIngo Molnar 	unregister_sched_domain_sysctl();
2104f2cb1360SIngo Molnar 
2105f2cb1360SIngo Molnar 	/* Let the architecture update CPU core mappings: */
2106f2cb1360SIngo Molnar 	new_topology = arch_update_cpu_topology();
2107f2cb1360SIngo Molnar 
210809e0dd8eSPeter Zijlstra 	if (!doms_new) {
210909e0dd8eSPeter Zijlstra 		WARN_ON_ONCE(dattr_new);
211009e0dd8eSPeter Zijlstra 		n = 0;
211109e0dd8eSPeter Zijlstra 		doms_new = alloc_sched_domains(1);
211209e0dd8eSPeter Zijlstra 		if (doms_new) {
211309e0dd8eSPeter Zijlstra 			n = 1;
2114edb93821SFrederic Weisbecker 			cpumask_and(doms_new[0], cpu_active_mask,
2115edb93821SFrederic Weisbecker 				    housekeeping_cpumask(HK_FLAG_DOMAIN));
211609e0dd8eSPeter Zijlstra 		}
211709e0dd8eSPeter Zijlstra 	} else {
211809e0dd8eSPeter Zijlstra 		n = ndoms_new;
211909e0dd8eSPeter Zijlstra 	}
2120f2cb1360SIngo Molnar 
2121f2cb1360SIngo Molnar 	/* Destroy deleted domains: */
2122f2cb1360SIngo Molnar 	for (i = 0; i < ndoms_cur; i++) {
2123f2cb1360SIngo Molnar 		for (j = 0; j < n && !new_topology; j++) {
21246aa140faSQuentin Perret 			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
21256aa140faSQuentin Perret 			    dattrs_equal(dattr_cur, i, dattr_new, j))
2126f2cb1360SIngo Molnar 				goto match1;
2127f2cb1360SIngo Molnar 		}
2128f2cb1360SIngo Molnar 		/* No match - a current sched domain not in new doms_new[] */
2129f2cb1360SIngo Molnar 		detach_destroy_domains(doms_cur[i]);
2130f2cb1360SIngo Molnar match1:
2131f2cb1360SIngo Molnar 		;
2132f2cb1360SIngo Molnar 	}
2133f2cb1360SIngo Molnar 
2134f2cb1360SIngo Molnar 	n = ndoms_cur;
213509e0dd8eSPeter Zijlstra 	if (!doms_new) {
2136f2cb1360SIngo Molnar 		n = 0;
2137f2cb1360SIngo Molnar 		doms_new = &fallback_doms;
2138edb93821SFrederic Weisbecker 		cpumask_and(doms_new[0], cpu_active_mask,
2139edb93821SFrederic Weisbecker 			    housekeeping_cpumask(HK_FLAG_DOMAIN));
2140f2cb1360SIngo Molnar 	}
2141f2cb1360SIngo Molnar 
2142f2cb1360SIngo Molnar 	/* Build new domains: */
2143f2cb1360SIngo Molnar 	for (i = 0; i < ndoms_new; i++) {
2144f2cb1360SIngo Molnar 		for (j = 0; j < n && !new_topology; j++) {
21456aa140faSQuentin Perret 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
21466aa140faSQuentin Perret 			    dattrs_equal(dattr_new, i, dattr_cur, j))
2147f2cb1360SIngo Molnar 				goto match2;
2148f2cb1360SIngo Molnar 		}
2149f2cb1360SIngo Molnar 		/* No match - add a new doms_new */
2150f2cb1360SIngo Molnar 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2151f2cb1360SIngo Molnar match2:
2152f2cb1360SIngo Molnar 		;
2153f2cb1360SIngo Molnar 	}
2154f2cb1360SIngo Molnar 
21556aa140faSQuentin Perret #ifdef CONFIG_ENERGY_MODEL
21566aa140faSQuentin Perret 	/* Build perf. domains: */
21576aa140faSQuentin Perret 	for (i = 0; i < ndoms_new; i++) {
21586aa140faSQuentin Perret 		for (j = 0; j < n; j++) {
21596aa140faSQuentin Perret 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
21606aa140faSQuentin Perret 			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd)
21616aa140faSQuentin Perret 				goto match3;
21626aa140faSQuentin Perret 		}
21636aa140faSQuentin Perret 		/* No match - add perf. domains for a new rd */
21646aa140faSQuentin Perret 		build_perf_domains(doms_new[i]);
21656aa140faSQuentin Perret match3:
21666aa140faSQuentin Perret 		;
21676aa140faSQuentin Perret 	}
21686aa140faSQuentin Perret #endif
21696aa140faSQuentin Perret 
2170f2cb1360SIngo Molnar 	/* Remember the new sched domains: */
2171f2cb1360SIngo Molnar 	if (doms_cur != &fallback_doms)
2172f2cb1360SIngo Molnar 		free_sched_domains(doms_cur, ndoms_cur);
2173f2cb1360SIngo Molnar 
2174f2cb1360SIngo Molnar 	kfree(dattr_cur);
2175f2cb1360SIngo Molnar 	doms_cur = doms_new;
2176f2cb1360SIngo Molnar 	dattr_cur = dattr_new;
2177f2cb1360SIngo Molnar 	ndoms_cur = ndoms_new;
2178f2cb1360SIngo Molnar 
2179f2cb1360SIngo Molnar 	register_sched_domain_sysctl();
2180f2cb1360SIngo Molnar 
2181f2cb1360SIngo Molnar 	mutex_unlock(&sched_domains_mutex);
2182f2cb1360SIngo Molnar }
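/*
 * Editor's note -- illustrative sketch of the caller contract documented
 * above partition_sched_domains(); this hypothetical helper is not part of
 * the kernel source. The caller allocates with alloc_sched_domains(), then
 * hands ownership over and must not free the array itself. Like any other
 * caller, it would have to hold the hotplug lock.
 */
static inline void example_repartition(const struct cpumask *set_a,
				       const struct cpumask *set_b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms) {
		/* Failed alloc: fall back to the single 'fallback_doms' partition. */
		partition_sched_domains(1, NULL, NULL);
		return;
	}

	cpumask_copy(doms[0], set_a);
	cpumask_copy(doms[1], set_b);

	/* Takes ownership of 'doms'; it must not be freed here. */
	partition_sched_domains(2, doms, NULL);
}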