1f2cb1360SIngo Molnar /* 2f2cb1360SIngo Molnar * Scheduler topology setup/handling methods 3f2cb1360SIngo Molnar */ 4f2cb1360SIngo Molnar #include <linux/sched.h> 5f2cb1360SIngo Molnar #include <linux/mutex.h> 6f2cb1360SIngo Molnar 7f2cb1360SIngo Molnar #include "sched.h" 8f2cb1360SIngo Molnar 9f2cb1360SIngo Molnar DEFINE_MUTEX(sched_domains_mutex); 10f2cb1360SIngo Molnar 11f2cb1360SIngo Molnar /* Protected by sched_domains_mutex: */ 12f2cb1360SIngo Molnar cpumask_var_t sched_domains_tmpmask; 13f2cb1360SIngo Molnar 14f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG 15f2cb1360SIngo Molnar 16f2cb1360SIngo Molnar static __read_mostly int sched_debug_enabled; 17f2cb1360SIngo Molnar 18f2cb1360SIngo Molnar static int __init sched_debug_setup(char *str) 19f2cb1360SIngo Molnar { 20f2cb1360SIngo Molnar sched_debug_enabled = 1; 21f2cb1360SIngo Molnar 22f2cb1360SIngo Molnar return 0; 23f2cb1360SIngo Molnar } 24f2cb1360SIngo Molnar early_param("sched_debug", sched_debug_setup); 25f2cb1360SIngo Molnar 26f2cb1360SIngo Molnar static inline bool sched_debug(void) 27f2cb1360SIngo Molnar { 28f2cb1360SIngo Molnar return sched_debug_enabled; 29f2cb1360SIngo Molnar } 30f2cb1360SIngo Molnar 31f2cb1360SIngo Molnar static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 32f2cb1360SIngo Molnar struct cpumask *groupmask) 33f2cb1360SIngo Molnar { 34f2cb1360SIngo Molnar struct sched_group *group = sd->groups; 35f2cb1360SIngo Molnar 36f2cb1360SIngo Molnar cpumask_clear(groupmask); 37f2cb1360SIngo Molnar 38f2cb1360SIngo Molnar printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 39f2cb1360SIngo Molnar 40f2cb1360SIngo Molnar if (!(sd->flags & SD_LOAD_BALANCE)) { 41f2cb1360SIngo Molnar printk("does not load-balance\n"); 42f2cb1360SIngo Molnar if (sd->parent) 43f2cb1360SIngo Molnar printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" 44f2cb1360SIngo Molnar " has parent"); 45f2cb1360SIngo Molnar return -1; 46f2cb1360SIngo Molnar } 47f2cb1360SIngo Molnar 48f2cb1360SIngo Molnar printk(KERN_CONT "span %*pbl level %s\n", 49f2cb1360SIngo Molnar cpumask_pr_args(sched_domain_span(sd)), sd->name); 50f2cb1360SIngo Molnar 51f2cb1360SIngo Molnar if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { 52f2cb1360SIngo Molnar printk(KERN_ERR "ERROR: domain->span does not contain " 53f2cb1360SIngo Molnar "CPU%d\n", cpu); 54f2cb1360SIngo Molnar } 55f2cb1360SIngo Molnar if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { 56f2cb1360SIngo Molnar printk(KERN_ERR "ERROR: domain->groups does not contain" 57f2cb1360SIngo Molnar " CPU%d\n", cpu); 58f2cb1360SIngo Molnar } 59f2cb1360SIngo Molnar 60f2cb1360SIngo Molnar printk(KERN_DEBUG "%*s groups:", level + 1, ""); 61f2cb1360SIngo Molnar do { 62f2cb1360SIngo Molnar if (!group) { 63f2cb1360SIngo Molnar printk("\n"); 64f2cb1360SIngo Molnar printk(KERN_ERR "ERROR: group is NULL\n"); 65f2cb1360SIngo Molnar break; 66f2cb1360SIngo Molnar } 67f2cb1360SIngo Molnar 68f2cb1360SIngo Molnar if (!cpumask_weight(sched_group_cpus(group))) { 69f2cb1360SIngo Molnar printk(KERN_CONT "\n"); 70f2cb1360SIngo Molnar printk(KERN_ERR "ERROR: empty group\n"); 71f2cb1360SIngo Molnar break; 72f2cb1360SIngo Molnar } 73f2cb1360SIngo Molnar 74f2cb1360SIngo Molnar if (!(sd->flags & SD_OVERLAP) && 75f2cb1360SIngo Molnar cpumask_intersects(groupmask, sched_group_cpus(group))) { 76f2cb1360SIngo Molnar printk(KERN_CONT "\n"); 77f2cb1360SIngo Molnar printk(KERN_ERR "ERROR: repeated CPUs\n"); 78f2cb1360SIngo Molnar break; 79f2cb1360SIngo Molnar } 80f2cb1360SIngo Molnar 81f2cb1360SIngo Molnar cpumask_or(groupmask, groupmask, sched_group_cpus(group)); 82f2cb1360SIngo Molnar 83f2cb1360SIngo Molnar printk(KERN_CONT " %*pbl", 84f2cb1360SIngo Molnar cpumask_pr_args(sched_group_cpus(group))); 85*b0151c25SPeter Zijlstra 86*b0151c25SPeter Zijlstra if ((sd->flags & SD_OVERLAP) && !cpumask_full(sched_group_mask(group))) { 87*b0151c25SPeter Zijlstra printk(KERN_CONT " (mask: %*pbl)", 88*b0151c25SPeter Zijlstra cpumask_pr_args(sched_group_mask(group))); 89*b0151c25SPeter Zijlstra } 90*b0151c25SPeter Zijlstra 91f2cb1360SIngo Molnar if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { 92*b0151c25SPeter Zijlstra printk(KERN_CONT " (cpu_capacity: %lu)", 93f2cb1360SIngo Molnar group->sgc->capacity); 94f2cb1360SIngo Molnar } 95f2cb1360SIngo Molnar 96f2cb1360SIngo Molnar group = group->next; 97*b0151c25SPeter Zijlstra 98*b0151c25SPeter Zijlstra if (group != sd->groups) 99*b0151c25SPeter Zijlstra printk(KERN_CONT ","); 100*b0151c25SPeter Zijlstra 101f2cb1360SIngo Molnar } while (group != sd->groups); 102f2cb1360SIngo Molnar printk(KERN_CONT "\n"); 103f2cb1360SIngo Molnar 104f2cb1360SIngo Molnar if (!cpumask_equal(sched_domain_span(sd), groupmask)) 105f2cb1360SIngo Molnar printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 106f2cb1360SIngo Molnar 107f2cb1360SIngo Molnar if (sd->parent && 108f2cb1360SIngo Molnar !cpumask_subset(groupmask, sched_domain_span(sd->parent))) 109f2cb1360SIngo Molnar printk(KERN_ERR "ERROR: parent span is not a superset " 110f2cb1360SIngo Molnar "of domain->span\n"); 111f2cb1360SIngo Molnar return 0; 112f2cb1360SIngo Molnar } 113f2cb1360SIngo Molnar 114f2cb1360SIngo Molnar static void sched_domain_debug(struct sched_domain *sd, int cpu) 115f2cb1360SIngo Molnar { 116f2cb1360SIngo Molnar int level = 0; 117f2cb1360SIngo Molnar 118f2cb1360SIngo Molnar if (!sched_debug_enabled) 119f2cb1360SIngo Molnar return; 120f2cb1360SIngo Molnar 121f2cb1360SIngo Molnar if (!sd) { 122f2cb1360SIngo Molnar printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 123f2cb1360SIngo Molnar return; 124f2cb1360SIngo Molnar } 125f2cb1360SIngo Molnar 126f2cb1360SIngo Molnar printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 127f2cb1360SIngo Molnar 128f2cb1360SIngo Molnar for (;;) { 129f2cb1360SIngo Molnar if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) 130f2cb1360SIngo Molnar break; 131f2cb1360SIngo Molnar level++; 132f2cb1360SIngo Molnar sd = sd->parent; 133f2cb1360SIngo Molnar if (!sd) 134f2cb1360SIngo Molnar break; 135f2cb1360SIngo Molnar } 136f2cb1360SIngo Molnar } 137f2cb1360SIngo Molnar #else /* !CONFIG_SCHED_DEBUG */ 138f2cb1360SIngo Molnar 139f2cb1360SIngo Molnar # define sched_debug_enabled 0 140f2cb1360SIngo Molnar # define sched_domain_debug(sd, cpu) do { } while (0) 141f2cb1360SIngo Molnar static inline bool sched_debug(void) 142f2cb1360SIngo Molnar { 143f2cb1360SIngo Molnar return false; 144f2cb1360SIngo Molnar } 145f2cb1360SIngo Molnar #endif /* CONFIG_SCHED_DEBUG */ 146f2cb1360SIngo Molnar 147f2cb1360SIngo Molnar static int sd_degenerate(struct sched_domain *sd) 148f2cb1360SIngo Molnar { 149f2cb1360SIngo Molnar if (cpumask_weight(sched_domain_span(sd)) == 1) 150f2cb1360SIngo Molnar return 1; 151f2cb1360SIngo Molnar 152f2cb1360SIngo Molnar /* Following flags need at least 2 groups */ 153f2cb1360SIngo Molnar if (sd->flags & (SD_LOAD_BALANCE | 154f2cb1360SIngo Molnar SD_BALANCE_NEWIDLE | 155f2cb1360SIngo Molnar SD_BALANCE_FORK | 156f2cb1360SIngo Molnar SD_BALANCE_EXEC | 157f2cb1360SIngo Molnar SD_SHARE_CPUCAPACITY | 158f2cb1360SIngo Molnar SD_ASYM_CPUCAPACITY | 159f2cb1360SIngo Molnar SD_SHARE_PKG_RESOURCES | 160f2cb1360SIngo Molnar SD_SHARE_POWERDOMAIN)) { 161f2cb1360SIngo Molnar if (sd->groups != sd->groups->next) 162f2cb1360SIngo Molnar return 0; 163f2cb1360SIngo Molnar } 164f2cb1360SIngo Molnar 165f2cb1360SIngo Molnar /* Following flags don't use groups */ 166f2cb1360SIngo Molnar if (sd->flags & (SD_WAKE_AFFINE)) 167f2cb1360SIngo Molnar return 0; 168f2cb1360SIngo Molnar 169f2cb1360SIngo Molnar return 1; 170f2cb1360SIngo Molnar } 171f2cb1360SIngo Molnar 172f2cb1360SIngo Molnar static int 173f2cb1360SIngo Molnar sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) 174f2cb1360SIngo Molnar { 175f2cb1360SIngo Molnar unsigned long cflags = sd->flags, pflags = parent->flags; 176f2cb1360SIngo Molnar 177f2cb1360SIngo Molnar if (sd_degenerate(parent)) 178f2cb1360SIngo Molnar return 1; 179f2cb1360SIngo Molnar 180f2cb1360SIngo Molnar if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) 181f2cb1360SIngo Molnar return 0; 182f2cb1360SIngo Molnar 183f2cb1360SIngo Molnar /* Flags needing groups don't count if only 1 group in parent */ 184f2cb1360SIngo Molnar if (parent->groups == parent->groups->next) { 185f2cb1360SIngo Molnar pflags &= ~(SD_LOAD_BALANCE | 186f2cb1360SIngo Molnar SD_BALANCE_NEWIDLE | 187f2cb1360SIngo Molnar SD_BALANCE_FORK | 188f2cb1360SIngo Molnar SD_BALANCE_EXEC | 189f2cb1360SIngo Molnar SD_ASYM_CPUCAPACITY | 190f2cb1360SIngo Molnar SD_SHARE_CPUCAPACITY | 191f2cb1360SIngo Molnar SD_SHARE_PKG_RESOURCES | 192f2cb1360SIngo Molnar SD_PREFER_SIBLING | 193f2cb1360SIngo Molnar SD_SHARE_POWERDOMAIN); 194f2cb1360SIngo Molnar if (nr_node_ids == 1) 195f2cb1360SIngo Molnar pflags &= ~SD_SERIALIZE; 196f2cb1360SIngo Molnar } 197f2cb1360SIngo Molnar if (~cflags & pflags) 198f2cb1360SIngo Molnar return 0; 199f2cb1360SIngo Molnar 200f2cb1360SIngo Molnar return 1; 201f2cb1360SIngo Molnar } 202f2cb1360SIngo Molnar 203f2cb1360SIngo Molnar static void free_rootdomain(struct rcu_head *rcu) 204f2cb1360SIngo Molnar { 205f2cb1360SIngo Molnar struct root_domain *rd = container_of(rcu, struct root_domain, rcu); 206f2cb1360SIngo Molnar 207f2cb1360SIngo Molnar cpupri_cleanup(&rd->cpupri); 208f2cb1360SIngo Molnar cpudl_cleanup(&rd->cpudl); 209f2cb1360SIngo Molnar free_cpumask_var(rd->dlo_mask); 210f2cb1360SIngo Molnar free_cpumask_var(rd->rto_mask); 211f2cb1360SIngo Molnar free_cpumask_var(rd->online); 212f2cb1360SIngo Molnar free_cpumask_var(rd->span); 213f2cb1360SIngo Molnar kfree(rd); 214f2cb1360SIngo Molnar } 215f2cb1360SIngo Molnar 216f2cb1360SIngo Molnar void rq_attach_root(struct rq *rq, struct root_domain *rd) 217f2cb1360SIngo Molnar { 218f2cb1360SIngo Molnar struct root_domain *old_rd = NULL; 219f2cb1360SIngo Molnar unsigned long flags; 220f2cb1360SIngo Molnar 221f2cb1360SIngo Molnar raw_spin_lock_irqsave(&rq->lock, flags); 222f2cb1360SIngo Molnar 223f2cb1360SIngo Molnar if (rq->rd) { 224f2cb1360SIngo Molnar old_rd = rq->rd; 225f2cb1360SIngo Molnar 226f2cb1360SIngo Molnar if (cpumask_test_cpu(rq->cpu, old_rd->online)) 227f2cb1360SIngo Molnar set_rq_offline(rq); 228f2cb1360SIngo Molnar 229f2cb1360SIngo Molnar cpumask_clear_cpu(rq->cpu, old_rd->span); 230f2cb1360SIngo Molnar 231f2cb1360SIngo Molnar /* 232f2cb1360SIngo Molnar * If we dont want to free the old_rd yet then 233f2cb1360SIngo Molnar * set old_rd to NULL to skip the freeing later 234f2cb1360SIngo Molnar * in this function: 235f2cb1360SIngo Molnar */ 236f2cb1360SIngo Molnar if (!atomic_dec_and_test(&old_rd->refcount)) 237f2cb1360SIngo Molnar old_rd = NULL; 238f2cb1360SIngo Molnar } 239f2cb1360SIngo Molnar 240f2cb1360SIngo Molnar atomic_inc(&rd->refcount); 241f2cb1360SIngo Molnar rq->rd = rd; 242f2cb1360SIngo Molnar 243f2cb1360SIngo Molnar cpumask_set_cpu(rq->cpu, rd->span); 244f2cb1360SIngo Molnar if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 245f2cb1360SIngo Molnar set_rq_online(rq); 246f2cb1360SIngo Molnar 247f2cb1360SIngo Molnar raw_spin_unlock_irqrestore(&rq->lock, flags); 248f2cb1360SIngo Molnar 249f2cb1360SIngo Molnar if (old_rd) 250f2cb1360SIngo Molnar call_rcu_sched(&old_rd->rcu, free_rootdomain); 251f2cb1360SIngo Molnar } 252f2cb1360SIngo Molnar 253f2cb1360SIngo Molnar static int init_rootdomain(struct root_domain *rd) 254f2cb1360SIngo Molnar { 255f2cb1360SIngo Molnar memset(rd, 0, sizeof(*rd)); 256f2cb1360SIngo Molnar 257f2cb1360SIngo Molnar if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) 258f2cb1360SIngo Molnar goto out; 259f2cb1360SIngo Molnar if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) 260f2cb1360SIngo Molnar goto free_span; 261f2cb1360SIngo Molnar if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 262f2cb1360SIngo Molnar goto free_online; 263f2cb1360SIngo Molnar if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 264f2cb1360SIngo Molnar goto free_dlo_mask; 265f2cb1360SIngo Molnar 266f2cb1360SIngo Molnar init_dl_bw(&rd->dl_bw); 267f2cb1360SIngo Molnar if (cpudl_init(&rd->cpudl) != 0) 268f2cb1360SIngo Molnar goto free_rto_mask; 269f2cb1360SIngo Molnar 270f2cb1360SIngo Molnar if (cpupri_init(&rd->cpupri) != 0) 271f2cb1360SIngo Molnar goto free_cpudl; 272f2cb1360SIngo Molnar return 0; 273f2cb1360SIngo Molnar 274f2cb1360SIngo Molnar free_cpudl: 275f2cb1360SIngo Molnar cpudl_cleanup(&rd->cpudl); 276f2cb1360SIngo Molnar free_rto_mask: 277f2cb1360SIngo Molnar free_cpumask_var(rd->rto_mask); 278f2cb1360SIngo Molnar free_dlo_mask: 279f2cb1360SIngo Molnar free_cpumask_var(rd->dlo_mask); 280f2cb1360SIngo Molnar free_online: 281f2cb1360SIngo Molnar free_cpumask_var(rd->online); 282f2cb1360SIngo Molnar free_span: 283f2cb1360SIngo Molnar free_cpumask_var(rd->span); 284f2cb1360SIngo Molnar out: 285f2cb1360SIngo Molnar return -ENOMEM; 286f2cb1360SIngo Molnar } 287f2cb1360SIngo Molnar 288f2cb1360SIngo Molnar /* 289f2cb1360SIngo Molnar * By default the system creates a single root-domain with all CPUs as 290f2cb1360SIngo Molnar * members (mimicking the global state we have today). 291f2cb1360SIngo Molnar */ 292f2cb1360SIngo Molnar struct root_domain def_root_domain; 293f2cb1360SIngo Molnar 294f2cb1360SIngo Molnar void init_defrootdomain(void) 295f2cb1360SIngo Molnar { 296f2cb1360SIngo Molnar init_rootdomain(&def_root_domain); 297f2cb1360SIngo Molnar 298f2cb1360SIngo Molnar atomic_set(&def_root_domain.refcount, 1); 299f2cb1360SIngo Molnar } 300f2cb1360SIngo Molnar 301f2cb1360SIngo Molnar static struct root_domain *alloc_rootdomain(void) 302f2cb1360SIngo Molnar { 303f2cb1360SIngo Molnar struct root_domain *rd; 304f2cb1360SIngo Molnar 305f2cb1360SIngo Molnar rd = kmalloc(sizeof(*rd), GFP_KERNEL); 306f2cb1360SIngo Molnar if (!rd) 307f2cb1360SIngo Molnar return NULL; 308f2cb1360SIngo Molnar 309f2cb1360SIngo Molnar if (init_rootdomain(rd) != 0) { 310f2cb1360SIngo Molnar kfree(rd); 311f2cb1360SIngo Molnar return NULL; 312f2cb1360SIngo Molnar } 313f2cb1360SIngo Molnar 314f2cb1360SIngo Molnar return rd; 315f2cb1360SIngo Molnar } 316f2cb1360SIngo Molnar 317f2cb1360SIngo Molnar static void free_sched_groups(struct sched_group *sg, int free_sgc) 318f2cb1360SIngo Molnar { 319f2cb1360SIngo Molnar struct sched_group *tmp, *first; 320f2cb1360SIngo Molnar 321f2cb1360SIngo Molnar if (!sg) 322f2cb1360SIngo Molnar return; 323f2cb1360SIngo Molnar 324f2cb1360SIngo Molnar first = sg; 325f2cb1360SIngo Molnar do { 326f2cb1360SIngo Molnar tmp = sg->next; 327f2cb1360SIngo Molnar 328f2cb1360SIngo Molnar if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) 329f2cb1360SIngo Molnar kfree(sg->sgc); 330f2cb1360SIngo Molnar 331f2cb1360SIngo Molnar kfree(sg); 332f2cb1360SIngo Molnar sg = tmp; 333f2cb1360SIngo Molnar } while (sg != first); 334f2cb1360SIngo Molnar } 335f2cb1360SIngo Molnar 336f2cb1360SIngo Molnar static void destroy_sched_domain(struct sched_domain *sd) 337f2cb1360SIngo Molnar { 338f2cb1360SIngo Molnar /* 339f2cb1360SIngo Molnar * If its an overlapping domain it has private groups, iterate and 340f2cb1360SIngo Molnar * nuke them all. 341f2cb1360SIngo Molnar */ 342f2cb1360SIngo Molnar if (sd->flags & SD_OVERLAP) { 343f2cb1360SIngo Molnar free_sched_groups(sd->groups, 1); 344f2cb1360SIngo Molnar } else if (atomic_dec_and_test(&sd->groups->ref)) { 345f2cb1360SIngo Molnar kfree(sd->groups->sgc); 346f2cb1360SIngo Molnar kfree(sd->groups); 347f2cb1360SIngo Molnar } 348f2cb1360SIngo Molnar if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) 349f2cb1360SIngo Molnar kfree(sd->shared); 350f2cb1360SIngo Molnar kfree(sd); 351f2cb1360SIngo Molnar } 352f2cb1360SIngo Molnar 353f2cb1360SIngo Molnar static void destroy_sched_domains_rcu(struct rcu_head *rcu) 354f2cb1360SIngo Molnar { 355f2cb1360SIngo Molnar struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); 356f2cb1360SIngo Molnar 357f2cb1360SIngo Molnar while (sd) { 358f2cb1360SIngo Molnar struct sched_domain *parent = sd->parent; 359f2cb1360SIngo Molnar destroy_sched_domain(sd); 360f2cb1360SIngo Molnar sd = parent; 361f2cb1360SIngo Molnar } 362f2cb1360SIngo Molnar } 363f2cb1360SIngo Molnar 364f2cb1360SIngo Molnar static void destroy_sched_domains(struct sched_domain *sd) 365f2cb1360SIngo Molnar { 366f2cb1360SIngo Molnar if (sd) 367f2cb1360SIngo Molnar call_rcu(&sd->rcu, destroy_sched_domains_rcu); 368f2cb1360SIngo Molnar } 369f2cb1360SIngo Molnar 370f2cb1360SIngo Molnar /* 371f2cb1360SIngo Molnar * Keep a special pointer to the highest sched_domain that has 372f2cb1360SIngo Molnar * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this 373f2cb1360SIngo Molnar * allows us to avoid some pointer chasing select_idle_sibling(). 374f2cb1360SIngo Molnar * 375f2cb1360SIngo Molnar * Also keep a unique ID per domain (we use the first CPU number in 376f2cb1360SIngo Molnar * the cpumask of the domain), this allows us to quickly tell if 377f2cb1360SIngo Molnar * two CPUs are in the same cache domain, see cpus_share_cache(). 378f2cb1360SIngo Molnar */ 379f2cb1360SIngo Molnar DEFINE_PER_CPU(struct sched_domain *, sd_llc); 380f2cb1360SIngo Molnar DEFINE_PER_CPU(int, sd_llc_size); 381f2cb1360SIngo Molnar DEFINE_PER_CPU(int, sd_llc_id); 382f2cb1360SIngo Molnar DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); 383f2cb1360SIngo Molnar DEFINE_PER_CPU(struct sched_domain *, sd_numa); 384f2cb1360SIngo Molnar DEFINE_PER_CPU(struct sched_domain *, sd_asym); 385f2cb1360SIngo Molnar 386f2cb1360SIngo Molnar static void update_top_cache_domain(int cpu) 387f2cb1360SIngo Molnar { 388f2cb1360SIngo Molnar struct sched_domain_shared *sds = NULL; 389f2cb1360SIngo Molnar struct sched_domain *sd; 390f2cb1360SIngo Molnar int id = cpu; 391f2cb1360SIngo Molnar int size = 1; 392f2cb1360SIngo Molnar 393f2cb1360SIngo Molnar sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); 394f2cb1360SIngo Molnar if (sd) { 395f2cb1360SIngo Molnar id = cpumask_first(sched_domain_span(sd)); 396f2cb1360SIngo Molnar size = cpumask_weight(sched_domain_span(sd)); 397f2cb1360SIngo Molnar sds = sd->shared; 398f2cb1360SIngo Molnar } 399f2cb1360SIngo Molnar 400f2cb1360SIngo Molnar rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); 401f2cb1360SIngo Molnar per_cpu(sd_llc_size, cpu) = size; 402f2cb1360SIngo Molnar per_cpu(sd_llc_id, cpu) = id; 403f2cb1360SIngo Molnar rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); 404f2cb1360SIngo Molnar 405f2cb1360SIngo Molnar sd = lowest_flag_domain(cpu, SD_NUMA); 406f2cb1360SIngo Molnar rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); 407f2cb1360SIngo Molnar 408f2cb1360SIngo Molnar sd = highest_flag_domain(cpu, SD_ASYM_PACKING); 409f2cb1360SIngo Molnar rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); 410f2cb1360SIngo Molnar } 411f2cb1360SIngo Molnar 412f2cb1360SIngo Molnar /* 413f2cb1360SIngo Molnar * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 414f2cb1360SIngo Molnar * hold the hotplug lock. 415f2cb1360SIngo Molnar */ 416f2cb1360SIngo Molnar static void 417f2cb1360SIngo Molnar cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) 418f2cb1360SIngo Molnar { 419f2cb1360SIngo Molnar struct rq *rq = cpu_rq(cpu); 420f2cb1360SIngo Molnar struct sched_domain *tmp; 421f2cb1360SIngo Molnar 422f2cb1360SIngo Molnar /* Remove the sched domains which do not contribute to scheduling. */ 423f2cb1360SIngo Molnar for (tmp = sd; tmp; ) { 424f2cb1360SIngo Molnar struct sched_domain *parent = tmp->parent; 425f2cb1360SIngo Molnar if (!parent) 426f2cb1360SIngo Molnar break; 427f2cb1360SIngo Molnar 428f2cb1360SIngo Molnar if (sd_parent_degenerate(tmp, parent)) { 429f2cb1360SIngo Molnar tmp->parent = parent->parent; 430f2cb1360SIngo Molnar if (parent->parent) 431f2cb1360SIngo Molnar parent->parent->child = tmp; 432f2cb1360SIngo Molnar /* 433f2cb1360SIngo Molnar * Transfer SD_PREFER_SIBLING down in case of a 434f2cb1360SIngo Molnar * degenerate parent; the spans match for this 435f2cb1360SIngo Molnar * so the property transfers. 436f2cb1360SIngo Molnar */ 437f2cb1360SIngo Molnar if (parent->flags & SD_PREFER_SIBLING) 438f2cb1360SIngo Molnar tmp->flags |= SD_PREFER_SIBLING; 439f2cb1360SIngo Molnar destroy_sched_domain(parent); 440f2cb1360SIngo Molnar } else 441f2cb1360SIngo Molnar tmp = tmp->parent; 442f2cb1360SIngo Molnar } 443f2cb1360SIngo Molnar 444f2cb1360SIngo Molnar if (sd && sd_degenerate(sd)) { 445f2cb1360SIngo Molnar tmp = sd; 446f2cb1360SIngo Molnar sd = sd->parent; 447f2cb1360SIngo Molnar destroy_sched_domain(tmp); 448f2cb1360SIngo Molnar if (sd) 449f2cb1360SIngo Molnar sd->child = NULL; 450f2cb1360SIngo Molnar } 451f2cb1360SIngo Molnar 452f2cb1360SIngo Molnar sched_domain_debug(sd, cpu); 453f2cb1360SIngo Molnar 454f2cb1360SIngo Molnar rq_attach_root(rq, rd); 455f2cb1360SIngo Molnar tmp = rq->sd; 456f2cb1360SIngo Molnar rcu_assign_pointer(rq->sd, sd); 457f2cb1360SIngo Molnar destroy_sched_domains(tmp); 458f2cb1360SIngo Molnar 459f2cb1360SIngo Molnar update_top_cache_domain(cpu); 460f2cb1360SIngo Molnar } 461f2cb1360SIngo Molnar 462f2cb1360SIngo Molnar /* Setup the mask of CPUs configured for isolated domains */ 463f2cb1360SIngo Molnar static int __init isolated_cpu_setup(char *str) 464f2cb1360SIngo Molnar { 465f2cb1360SIngo Molnar int ret; 466f2cb1360SIngo Molnar 467f2cb1360SIngo Molnar alloc_bootmem_cpumask_var(&cpu_isolated_map); 468f2cb1360SIngo Molnar ret = cpulist_parse(str, cpu_isolated_map); 469f2cb1360SIngo Molnar if (ret) { 470f2cb1360SIngo Molnar pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids); 471f2cb1360SIngo Molnar return 0; 472f2cb1360SIngo Molnar } 473f2cb1360SIngo Molnar return 1; 474f2cb1360SIngo Molnar } 475f2cb1360SIngo Molnar __setup("isolcpus=", isolated_cpu_setup); 476f2cb1360SIngo Molnar 477f2cb1360SIngo Molnar struct s_data { 478f2cb1360SIngo Molnar struct sched_domain ** __percpu sd; 479f2cb1360SIngo Molnar struct root_domain *rd; 480f2cb1360SIngo Molnar }; 481f2cb1360SIngo Molnar 482f2cb1360SIngo Molnar enum s_alloc { 483f2cb1360SIngo Molnar sa_rootdomain, 484f2cb1360SIngo Molnar sa_sd, 485f2cb1360SIngo Molnar sa_sd_storage, 486f2cb1360SIngo Molnar sa_none, 487f2cb1360SIngo Molnar }; 488f2cb1360SIngo Molnar 489f2cb1360SIngo Molnar /* 490f2cb1360SIngo Molnar * Build an iteration mask that can exclude certain CPUs from the upwards 491f2cb1360SIngo Molnar * domain traversal. 492f2cb1360SIngo Molnar * 493f2cb1360SIngo Molnar * Asymmetric node setups can result in situations where the domain tree is of 494f2cb1360SIngo Molnar * unequal depth, make sure to skip domains that already cover the entire 495f2cb1360SIngo Molnar * range. 496f2cb1360SIngo Molnar * 497f2cb1360SIngo Molnar * In that case build_sched_domains() will have terminated the iteration early 498f2cb1360SIngo Molnar * and our sibling sd spans will be empty. Domains should always include the 499f2cb1360SIngo Molnar * CPU they're built on, so check that. 500f2cb1360SIngo Molnar */ 501f2cb1360SIngo Molnar static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) 502f2cb1360SIngo Molnar { 503f2cb1360SIngo Molnar const struct cpumask *span = sched_domain_span(sd); 504f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 505f2cb1360SIngo Molnar struct sched_domain *sibling; 506f2cb1360SIngo Molnar int i; 507f2cb1360SIngo Molnar 508f2cb1360SIngo Molnar for_each_cpu(i, span) { 509f2cb1360SIngo Molnar sibling = *per_cpu_ptr(sdd->sd, i); 510f2cb1360SIngo Molnar if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 511f2cb1360SIngo Molnar continue; 512f2cb1360SIngo Molnar 513f2cb1360SIngo Molnar cpumask_set_cpu(i, sched_group_mask(sg)); 514f2cb1360SIngo Molnar } 515f2cb1360SIngo Molnar } 516f2cb1360SIngo Molnar 517f2cb1360SIngo Molnar /* 518f2cb1360SIngo Molnar * Return the canonical balance CPU for this group, this is the first CPU 519f2cb1360SIngo Molnar * of this group that's also in the iteration mask. 520f2cb1360SIngo Molnar */ 521f2cb1360SIngo Molnar int group_balance_cpu(struct sched_group *sg) 522f2cb1360SIngo Molnar { 523f2cb1360SIngo Molnar return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); 524f2cb1360SIngo Molnar } 525f2cb1360SIngo Molnar 5268c033469SLauro Ramos Venancio static struct sched_group * 5278c033469SLauro Ramos Venancio build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) 5288c033469SLauro Ramos Venancio { 5298c033469SLauro Ramos Venancio struct sched_group *sg; 5308c033469SLauro Ramos Venancio struct cpumask *sg_span; 5318c033469SLauro Ramos Venancio 5328c033469SLauro Ramos Venancio sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 5338c033469SLauro Ramos Venancio GFP_KERNEL, cpu_to_node(cpu)); 5348c033469SLauro Ramos Venancio 5358c033469SLauro Ramos Venancio if (!sg) 5368c033469SLauro Ramos Venancio return NULL; 5378c033469SLauro Ramos Venancio 5388c033469SLauro Ramos Venancio sg_span = sched_group_cpus(sg); 5398c033469SLauro Ramos Venancio if (sd->child) 5408c033469SLauro Ramos Venancio cpumask_copy(sg_span, sched_domain_span(sd->child)); 5418c033469SLauro Ramos Venancio else 5428c033469SLauro Ramos Venancio cpumask_copy(sg_span, sched_domain_span(sd)); 5438c033469SLauro Ramos Venancio 5448c033469SLauro Ramos Venancio return sg; 5458c033469SLauro Ramos Venancio } 5468c033469SLauro Ramos Venancio 5478c033469SLauro Ramos Venancio static void init_overlap_sched_group(struct sched_domain *sd, 5488c033469SLauro Ramos Venancio struct sched_group *sg, int cpu) 5498c033469SLauro Ramos Venancio { 5508c033469SLauro Ramos Venancio struct sd_data *sdd = sd->private; 5518c033469SLauro Ramos Venancio struct cpumask *sg_span; 5528c033469SLauro Ramos Venancio 5538c033469SLauro Ramos Venancio sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); 5548c033469SLauro Ramos Venancio if (atomic_inc_return(&sg->sgc->ref) == 1) 5558c033469SLauro Ramos Venancio build_group_mask(sd, sg); 5568c033469SLauro Ramos Venancio 5578c033469SLauro Ramos Venancio /* 5588c033469SLauro Ramos Venancio * Initialize sgc->capacity such that even if we mess up the 5598c033469SLauro Ramos Venancio * domains and no possible iteration will get us here, we won't 5608c033469SLauro Ramos Venancio * die on a /0 trap. 5618c033469SLauro Ramos Venancio */ 5628c033469SLauro Ramos Venancio sg_span = sched_group_cpus(sg); 5638c033469SLauro Ramos Venancio sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); 5648c033469SLauro Ramos Venancio sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; 5658c033469SLauro Ramos Venancio } 5668c033469SLauro Ramos Venancio 567f2cb1360SIngo Molnar static int 568f2cb1360SIngo Molnar build_overlap_sched_groups(struct sched_domain *sd, int cpu) 569f2cb1360SIngo Molnar { 57091eaed0dSPeter Zijlstra struct sched_group *first = NULL, *last = NULL, *sg; 571f2cb1360SIngo Molnar const struct cpumask *span = sched_domain_span(sd); 572f2cb1360SIngo Molnar struct cpumask *covered = sched_domains_tmpmask; 573f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 574f2cb1360SIngo Molnar struct sched_domain *sibling; 575f2cb1360SIngo Molnar int i; 576f2cb1360SIngo Molnar 577f2cb1360SIngo Molnar cpumask_clear(covered); 578f2cb1360SIngo Molnar 5790372dd27SPeter Zijlstra for_each_cpu_wrap(i, span, cpu) { 580f2cb1360SIngo Molnar struct cpumask *sg_span; 581f2cb1360SIngo Molnar 582f2cb1360SIngo Molnar if (cpumask_test_cpu(i, covered)) 583f2cb1360SIngo Molnar continue; 584f2cb1360SIngo Molnar 585f2cb1360SIngo Molnar sibling = *per_cpu_ptr(sdd->sd, i); 586f2cb1360SIngo Molnar 587f2cb1360SIngo Molnar /* See the comment near build_group_mask(). */ 588f2cb1360SIngo Molnar if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 589f2cb1360SIngo Molnar continue; 590f2cb1360SIngo Molnar 5918c033469SLauro Ramos Venancio sg = build_group_from_child_sched_domain(sibling, cpu); 592f2cb1360SIngo Molnar if (!sg) 593f2cb1360SIngo Molnar goto fail; 594f2cb1360SIngo Molnar 595f2cb1360SIngo Molnar sg_span = sched_group_cpus(sg); 596f2cb1360SIngo Molnar cpumask_or(covered, covered, sg_span); 597f2cb1360SIngo Molnar 5988c033469SLauro Ramos Venancio init_overlap_sched_group(sd, sg, i); 599f2cb1360SIngo Molnar 600f2cb1360SIngo Molnar if (!first) 601f2cb1360SIngo Molnar first = sg; 602f2cb1360SIngo Molnar if (last) 603f2cb1360SIngo Molnar last->next = sg; 604f2cb1360SIngo Molnar last = sg; 605f2cb1360SIngo Molnar last->next = first; 606f2cb1360SIngo Molnar } 60791eaed0dSPeter Zijlstra sd->groups = first; 608f2cb1360SIngo Molnar 609f2cb1360SIngo Molnar return 0; 610f2cb1360SIngo Molnar 611f2cb1360SIngo Molnar fail: 612f2cb1360SIngo Molnar free_sched_groups(first, 0); 613f2cb1360SIngo Molnar 614f2cb1360SIngo Molnar return -ENOMEM; 615f2cb1360SIngo Molnar } 616f2cb1360SIngo Molnar 617f2cb1360SIngo Molnar static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) 618f2cb1360SIngo Molnar { 619f2cb1360SIngo Molnar struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 620f2cb1360SIngo Molnar struct sched_domain *child = sd->child; 621f2cb1360SIngo Molnar 622f2cb1360SIngo Molnar if (child) 623f2cb1360SIngo Molnar cpu = cpumask_first(sched_domain_span(child)); 624f2cb1360SIngo Molnar 625f2cb1360SIngo Molnar if (sg) { 626f2cb1360SIngo Molnar *sg = *per_cpu_ptr(sdd->sg, cpu); 627f2cb1360SIngo Molnar (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); 628f2cb1360SIngo Molnar 629f2cb1360SIngo Molnar /* For claim_allocations: */ 630f2cb1360SIngo Molnar atomic_set(&(*sg)->sgc->ref, 1); 631f2cb1360SIngo Molnar } 632f2cb1360SIngo Molnar 633f2cb1360SIngo Molnar return cpu; 634f2cb1360SIngo Molnar } 635f2cb1360SIngo Molnar 636f2cb1360SIngo Molnar /* 637f2cb1360SIngo Molnar * build_sched_groups will build a circular linked list of the groups 638f2cb1360SIngo Molnar * covered by the given span, and will set each group's ->cpumask correctly, 639f2cb1360SIngo Molnar * and ->cpu_capacity to 0. 640f2cb1360SIngo Molnar * 641f2cb1360SIngo Molnar * Assumes the sched_domain tree is fully constructed 642f2cb1360SIngo Molnar */ 643f2cb1360SIngo Molnar static int 644f2cb1360SIngo Molnar build_sched_groups(struct sched_domain *sd, int cpu) 645f2cb1360SIngo Molnar { 646f2cb1360SIngo Molnar struct sched_group *first = NULL, *last = NULL; 647f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 648f2cb1360SIngo Molnar const struct cpumask *span = sched_domain_span(sd); 649f2cb1360SIngo Molnar struct cpumask *covered; 650f2cb1360SIngo Molnar int i; 651f2cb1360SIngo Molnar 652f2cb1360SIngo Molnar get_group(cpu, sdd, &sd->groups); 653f2cb1360SIngo Molnar atomic_inc(&sd->groups->ref); 654f2cb1360SIngo Molnar 655f2cb1360SIngo Molnar if (cpu != cpumask_first(span)) 656f2cb1360SIngo Molnar return 0; 657f2cb1360SIngo Molnar 658f2cb1360SIngo Molnar lockdep_assert_held(&sched_domains_mutex); 659f2cb1360SIngo Molnar covered = sched_domains_tmpmask; 660f2cb1360SIngo Molnar 661f2cb1360SIngo Molnar cpumask_clear(covered); 662f2cb1360SIngo Molnar 663f2cb1360SIngo Molnar for_each_cpu(i, span) { 664f2cb1360SIngo Molnar struct sched_group *sg; 665f2cb1360SIngo Molnar int group, j; 666f2cb1360SIngo Molnar 667f2cb1360SIngo Molnar if (cpumask_test_cpu(i, covered)) 668f2cb1360SIngo Molnar continue; 669f2cb1360SIngo Molnar 670f2cb1360SIngo Molnar group = get_group(i, sdd, &sg); 671f2cb1360SIngo Molnar cpumask_setall(sched_group_mask(sg)); 672f2cb1360SIngo Molnar 673f2cb1360SIngo Molnar for_each_cpu(j, span) { 674f2cb1360SIngo Molnar if (get_group(j, sdd, NULL) != group) 675f2cb1360SIngo Molnar continue; 676f2cb1360SIngo Molnar 677f2cb1360SIngo Molnar cpumask_set_cpu(j, covered); 678f2cb1360SIngo Molnar cpumask_set_cpu(j, sched_group_cpus(sg)); 679f2cb1360SIngo Molnar } 680f2cb1360SIngo Molnar 681f2cb1360SIngo Molnar if (!first) 682f2cb1360SIngo Molnar first = sg; 683f2cb1360SIngo Molnar if (last) 684f2cb1360SIngo Molnar last->next = sg; 685f2cb1360SIngo Molnar last = sg; 686f2cb1360SIngo Molnar } 687f2cb1360SIngo Molnar last->next = first; 688f2cb1360SIngo Molnar 689f2cb1360SIngo Molnar return 0; 690f2cb1360SIngo Molnar } 691f2cb1360SIngo Molnar 692f2cb1360SIngo Molnar /* 693f2cb1360SIngo Molnar * Initialize sched groups cpu_capacity. 694f2cb1360SIngo Molnar * 695f2cb1360SIngo Molnar * cpu_capacity indicates the capacity of sched group, which is used while 696f2cb1360SIngo Molnar * distributing the load between different sched groups in a sched domain. 697f2cb1360SIngo Molnar * Typically cpu_capacity for all the groups in a sched domain will be same 698f2cb1360SIngo Molnar * unless there are asymmetries in the topology. If there are asymmetries, 699f2cb1360SIngo Molnar * group having more cpu_capacity will pickup more load compared to the 700f2cb1360SIngo Molnar * group having less cpu_capacity. 701f2cb1360SIngo Molnar */ 702f2cb1360SIngo Molnar static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 703f2cb1360SIngo Molnar { 704f2cb1360SIngo Molnar struct sched_group *sg = sd->groups; 705f2cb1360SIngo Molnar 706f2cb1360SIngo Molnar WARN_ON(!sg); 707f2cb1360SIngo Molnar 708f2cb1360SIngo Molnar do { 709f2cb1360SIngo Molnar int cpu, max_cpu = -1; 710f2cb1360SIngo Molnar 711f2cb1360SIngo Molnar sg->group_weight = cpumask_weight(sched_group_cpus(sg)); 712f2cb1360SIngo Molnar 713f2cb1360SIngo Molnar if (!(sd->flags & SD_ASYM_PACKING)) 714f2cb1360SIngo Molnar goto next; 715f2cb1360SIngo Molnar 716f2cb1360SIngo Molnar for_each_cpu(cpu, sched_group_cpus(sg)) { 717f2cb1360SIngo Molnar if (max_cpu < 0) 718f2cb1360SIngo Molnar max_cpu = cpu; 719f2cb1360SIngo Molnar else if (sched_asym_prefer(cpu, max_cpu)) 720f2cb1360SIngo Molnar max_cpu = cpu; 721f2cb1360SIngo Molnar } 722f2cb1360SIngo Molnar sg->asym_prefer_cpu = max_cpu; 723f2cb1360SIngo Molnar 724f2cb1360SIngo Molnar next: 725f2cb1360SIngo Molnar sg = sg->next; 726f2cb1360SIngo Molnar } while (sg != sd->groups); 727f2cb1360SIngo Molnar 728f2cb1360SIngo Molnar if (cpu != group_balance_cpu(sg)) 729f2cb1360SIngo Molnar return; 730f2cb1360SIngo Molnar 731f2cb1360SIngo Molnar update_group_capacity(sd, cpu); 732f2cb1360SIngo Molnar } 733f2cb1360SIngo Molnar 734f2cb1360SIngo Molnar /* 735f2cb1360SIngo Molnar * Initializers for schedule domains 736f2cb1360SIngo Molnar * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 737f2cb1360SIngo Molnar */ 738f2cb1360SIngo Molnar 739f2cb1360SIngo Molnar static int default_relax_domain_level = -1; 740f2cb1360SIngo Molnar int sched_domain_level_max; 741f2cb1360SIngo Molnar 742f2cb1360SIngo Molnar static int __init setup_relax_domain_level(char *str) 743f2cb1360SIngo Molnar { 744f2cb1360SIngo Molnar if (kstrtoint(str, 0, &default_relax_domain_level)) 745f2cb1360SIngo Molnar pr_warn("Unable to set relax_domain_level\n"); 746f2cb1360SIngo Molnar 747f2cb1360SIngo Molnar return 1; 748f2cb1360SIngo Molnar } 749f2cb1360SIngo Molnar __setup("relax_domain_level=", setup_relax_domain_level); 750f2cb1360SIngo Molnar 751f2cb1360SIngo Molnar static void set_domain_attribute(struct sched_domain *sd, 752f2cb1360SIngo Molnar struct sched_domain_attr *attr) 753f2cb1360SIngo Molnar { 754f2cb1360SIngo Molnar int request; 755f2cb1360SIngo Molnar 756f2cb1360SIngo Molnar if (!attr || attr->relax_domain_level < 0) { 757f2cb1360SIngo Molnar if (default_relax_domain_level < 0) 758f2cb1360SIngo Molnar return; 759f2cb1360SIngo Molnar else 760f2cb1360SIngo Molnar request = default_relax_domain_level; 761f2cb1360SIngo Molnar } else 762f2cb1360SIngo Molnar request = attr->relax_domain_level; 763f2cb1360SIngo Molnar if (request < sd->level) { 764f2cb1360SIngo Molnar /* Turn off idle balance on this domain: */ 765f2cb1360SIngo Molnar sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 766f2cb1360SIngo Molnar } else { 767f2cb1360SIngo Molnar /* Turn on idle balance on this domain: */ 768f2cb1360SIngo Molnar sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 769f2cb1360SIngo Molnar } 770f2cb1360SIngo Molnar } 771f2cb1360SIngo Molnar 772f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map); 773f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map); 774f2cb1360SIngo Molnar 775f2cb1360SIngo Molnar static void __free_domain_allocs(struct s_data *d, enum s_alloc what, 776f2cb1360SIngo Molnar const struct cpumask *cpu_map) 777f2cb1360SIngo Molnar { 778f2cb1360SIngo Molnar switch (what) { 779f2cb1360SIngo Molnar case sa_rootdomain: 780f2cb1360SIngo Molnar if (!atomic_read(&d->rd->refcount)) 781f2cb1360SIngo Molnar free_rootdomain(&d->rd->rcu); 782f2cb1360SIngo Molnar /* Fall through */ 783f2cb1360SIngo Molnar case sa_sd: 784f2cb1360SIngo Molnar free_percpu(d->sd); 785f2cb1360SIngo Molnar /* Fall through */ 786f2cb1360SIngo Molnar case sa_sd_storage: 787f2cb1360SIngo Molnar __sdt_free(cpu_map); 788f2cb1360SIngo Molnar /* Fall through */ 789f2cb1360SIngo Molnar case sa_none: 790f2cb1360SIngo Molnar break; 791f2cb1360SIngo Molnar } 792f2cb1360SIngo Molnar } 793f2cb1360SIngo Molnar 794f2cb1360SIngo Molnar static enum s_alloc 795f2cb1360SIngo Molnar __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) 796f2cb1360SIngo Molnar { 797f2cb1360SIngo Molnar memset(d, 0, sizeof(*d)); 798f2cb1360SIngo Molnar 799f2cb1360SIngo Molnar if (__sdt_alloc(cpu_map)) 800f2cb1360SIngo Molnar return sa_sd_storage; 801f2cb1360SIngo Molnar d->sd = alloc_percpu(struct sched_domain *); 802f2cb1360SIngo Molnar if (!d->sd) 803f2cb1360SIngo Molnar return sa_sd_storage; 804f2cb1360SIngo Molnar d->rd = alloc_rootdomain(); 805f2cb1360SIngo Molnar if (!d->rd) 806f2cb1360SIngo Molnar return sa_sd; 807f2cb1360SIngo Molnar return sa_rootdomain; 808f2cb1360SIngo Molnar } 809f2cb1360SIngo Molnar 810f2cb1360SIngo Molnar /* 811f2cb1360SIngo Molnar * NULL the sd_data elements we've used to build the sched_domain and 812f2cb1360SIngo Molnar * sched_group structure so that the subsequent __free_domain_allocs() 813f2cb1360SIngo Molnar * will not free the data we're using. 814f2cb1360SIngo Molnar */ 815f2cb1360SIngo Molnar static void claim_allocations(int cpu, struct sched_domain *sd) 816f2cb1360SIngo Molnar { 817f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 818f2cb1360SIngo Molnar 819f2cb1360SIngo Molnar WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 820f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, cpu) = NULL; 821f2cb1360SIngo Molnar 822f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) 823f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, cpu) = NULL; 824f2cb1360SIngo Molnar 825f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 826f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, cpu) = NULL; 827f2cb1360SIngo Molnar 828f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 829f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, cpu) = NULL; 830f2cb1360SIngo Molnar } 831f2cb1360SIngo Molnar 832f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 833f2cb1360SIngo Molnar static int sched_domains_numa_levels; 834f2cb1360SIngo Molnar enum numa_topology_type sched_numa_topology_type; 835f2cb1360SIngo Molnar static int *sched_domains_numa_distance; 836f2cb1360SIngo Molnar int sched_max_numa_distance; 837f2cb1360SIngo Molnar static struct cpumask ***sched_domains_numa_masks; 838f2cb1360SIngo Molnar static int sched_domains_curr_level; 839f2cb1360SIngo Molnar #endif 840f2cb1360SIngo Molnar 841f2cb1360SIngo Molnar /* 842f2cb1360SIngo Molnar * SD_flags allowed in topology descriptions. 843f2cb1360SIngo Molnar * 844f2cb1360SIngo Molnar * These flags are purely descriptive of the topology and do not prescribe 845f2cb1360SIngo Molnar * behaviour. Behaviour is artificial and mapped in the below sd_init() 846f2cb1360SIngo Molnar * function: 847f2cb1360SIngo Molnar * 848f2cb1360SIngo Molnar * SD_SHARE_CPUCAPACITY - describes SMT topologies 849f2cb1360SIngo Molnar * SD_SHARE_PKG_RESOURCES - describes shared caches 850f2cb1360SIngo Molnar * SD_NUMA - describes NUMA topologies 851f2cb1360SIngo Molnar * SD_SHARE_POWERDOMAIN - describes shared power domain 852f2cb1360SIngo Molnar * SD_ASYM_CPUCAPACITY - describes mixed capacity topologies 853f2cb1360SIngo Molnar * 854f2cb1360SIngo Molnar * Odd one out, which beside describing the topology has a quirk also 855f2cb1360SIngo Molnar * prescribes the desired behaviour that goes along with it: 856f2cb1360SIngo Molnar * 857f2cb1360SIngo Molnar * SD_ASYM_PACKING - describes SMT quirks 858f2cb1360SIngo Molnar */ 859f2cb1360SIngo Molnar #define TOPOLOGY_SD_FLAGS \ 860f2cb1360SIngo Molnar (SD_SHARE_CPUCAPACITY | \ 861f2cb1360SIngo Molnar SD_SHARE_PKG_RESOURCES | \ 862f2cb1360SIngo Molnar SD_NUMA | \ 863f2cb1360SIngo Molnar SD_ASYM_PACKING | \ 864f2cb1360SIngo Molnar SD_ASYM_CPUCAPACITY | \ 865f2cb1360SIngo Molnar SD_SHARE_POWERDOMAIN) 866f2cb1360SIngo Molnar 867f2cb1360SIngo Molnar static struct sched_domain * 868f2cb1360SIngo Molnar sd_init(struct sched_domain_topology_level *tl, 869f2cb1360SIngo Molnar const struct cpumask *cpu_map, 870f2cb1360SIngo Molnar struct sched_domain *child, int cpu) 871f2cb1360SIngo Molnar { 872f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 873f2cb1360SIngo Molnar struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 874f2cb1360SIngo Molnar int sd_id, sd_weight, sd_flags = 0; 875f2cb1360SIngo Molnar 876f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 877f2cb1360SIngo Molnar /* 878f2cb1360SIngo Molnar * Ugly hack to pass state to sd_numa_mask()... 879f2cb1360SIngo Molnar */ 880f2cb1360SIngo Molnar sched_domains_curr_level = tl->numa_level; 881f2cb1360SIngo Molnar #endif 882f2cb1360SIngo Molnar 883f2cb1360SIngo Molnar sd_weight = cpumask_weight(tl->mask(cpu)); 884f2cb1360SIngo Molnar 885f2cb1360SIngo Molnar if (tl->sd_flags) 886f2cb1360SIngo Molnar sd_flags = (*tl->sd_flags)(); 887f2cb1360SIngo Molnar if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 888f2cb1360SIngo Molnar "wrong sd_flags in topology description\n")) 889f2cb1360SIngo Molnar sd_flags &= ~TOPOLOGY_SD_FLAGS; 890f2cb1360SIngo Molnar 891f2cb1360SIngo Molnar *sd = (struct sched_domain){ 892f2cb1360SIngo Molnar .min_interval = sd_weight, 893f2cb1360SIngo Molnar .max_interval = 2*sd_weight, 894f2cb1360SIngo Molnar .busy_factor = 32, 895f2cb1360SIngo Molnar .imbalance_pct = 125, 896f2cb1360SIngo Molnar 897f2cb1360SIngo Molnar .cache_nice_tries = 0, 898f2cb1360SIngo Molnar .busy_idx = 0, 899f2cb1360SIngo Molnar .idle_idx = 0, 900f2cb1360SIngo Molnar .newidle_idx = 0, 901f2cb1360SIngo Molnar .wake_idx = 0, 902f2cb1360SIngo Molnar .forkexec_idx = 0, 903f2cb1360SIngo Molnar 904f2cb1360SIngo Molnar .flags = 1*SD_LOAD_BALANCE 905f2cb1360SIngo Molnar | 1*SD_BALANCE_NEWIDLE 906f2cb1360SIngo Molnar | 1*SD_BALANCE_EXEC 907f2cb1360SIngo Molnar | 1*SD_BALANCE_FORK 908f2cb1360SIngo Molnar | 0*SD_BALANCE_WAKE 909f2cb1360SIngo Molnar | 1*SD_WAKE_AFFINE 910f2cb1360SIngo Molnar | 0*SD_SHARE_CPUCAPACITY 911f2cb1360SIngo Molnar | 0*SD_SHARE_PKG_RESOURCES 912f2cb1360SIngo Molnar | 0*SD_SERIALIZE 913f2cb1360SIngo Molnar | 0*SD_PREFER_SIBLING 914f2cb1360SIngo Molnar | 0*SD_NUMA 915f2cb1360SIngo Molnar | sd_flags 916f2cb1360SIngo Molnar , 917f2cb1360SIngo Molnar 918f2cb1360SIngo Molnar .last_balance = jiffies, 919f2cb1360SIngo Molnar .balance_interval = sd_weight, 920f2cb1360SIngo Molnar .smt_gain = 0, 921f2cb1360SIngo Molnar .max_newidle_lb_cost = 0, 922f2cb1360SIngo Molnar .next_decay_max_lb_cost = jiffies, 923f2cb1360SIngo Molnar .child = child, 924f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG 925f2cb1360SIngo Molnar .name = tl->name, 926f2cb1360SIngo Molnar #endif 927f2cb1360SIngo Molnar }; 928f2cb1360SIngo Molnar 929f2cb1360SIngo Molnar cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 930f2cb1360SIngo Molnar sd_id = cpumask_first(sched_domain_span(sd)); 931f2cb1360SIngo Molnar 932f2cb1360SIngo Molnar /* 933f2cb1360SIngo Molnar * Convert topological properties into behaviour. 934f2cb1360SIngo Molnar */ 935f2cb1360SIngo Molnar 936f2cb1360SIngo Molnar if (sd->flags & SD_ASYM_CPUCAPACITY) { 937f2cb1360SIngo Molnar struct sched_domain *t = sd; 938f2cb1360SIngo Molnar 939f2cb1360SIngo Molnar for_each_lower_domain(t) 940f2cb1360SIngo Molnar t->flags |= SD_BALANCE_WAKE; 941f2cb1360SIngo Molnar } 942f2cb1360SIngo Molnar 943f2cb1360SIngo Molnar if (sd->flags & SD_SHARE_CPUCAPACITY) { 944f2cb1360SIngo Molnar sd->flags |= SD_PREFER_SIBLING; 945f2cb1360SIngo Molnar sd->imbalance_pct = 110; 946f2cb1360SIngo Molnar sd->smt_gain = 1178; /* ~15% */ 947f2cb1360SIngo Molnar 948f2cb1360SIngo Molnar } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { 949f2cb1360SIngo Molnar sd->imbalance_pct = 117; 950f2cb1360SIngo Molnar sd->cache_nice_tries = 1; 951f2cb1360SIngo Molnar sd->busy_idx = 2; 952f2cb1360SIngo Molnar 953f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 954f2cb1360SIngo Molnar } else if (sd->flags & SD_NUMA) { 955f2cb1360SIngo Molnar sd->cache_nice_tries = 2; 956f2cb1360SIngo Molnar sd->busy_idx = 3; 957f2cb1360SIngo Molnar sd->idle_idx = 2; 958f2cb1360SIngo Molnar 959f2cb1360SIngo Molnar sd->flags |= SD_SERIALIZE; 960f2cb1360SIngo Molnar if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { 961f2cb1360SIngo Molnar sd->flags &= ~(SD_BALANCE_EXEC | 962f2cb1360SIngo Molnar SD_BALANCE_FORK | 963f2cb1360SIngo Molnar SD_WAKE_AFFINE); 964f2cb1360SIngo Molnar } 965f2cb1360SIngo Molnar 966f2cb1360SIngo Molnar #endif 967f2cb1360SIngo Molnar } else { 968f2cb1360SIngo Molnar sd->flags |= SD_PREFER_SIBLING; 969f2cb1360SIngo Molnar sd->cache_nice_tries = 1; 970f2cb1360SIngo Molnar sd->busy_idx = 2; 971f2cb1360SIngo Molnar sd->idle_idx = 1; 972f2cb1360SIngo Molnar } 973f2cb1360SIngo Molnar 974f2cb1360SIngo Molnar /* 975f2cb1360SIngo Molnar * For all levels sharing cache; connect a sched_domain_shared 976f2cb1360SIngo Molnar * instance. 977f2cb1360SIngo Molnar */ 978f2cb1360SIngo Molnar if (sd->flags & SD_SHARE_PKG_RESOURCES) { 979f2cb1360SIngo Molnar sd->shared = *per_cpu_ptr(sdd->sds, sd_id); 980f2cb1360SIngo Molnar atomic_inc(&sd->shared->ref); 981f2cb1360SIngo Molnar atomic_set(&sd->shared->nr_busy_cpus, sd_weight); 982f2cb1360SIngo Molnar } 983f2cb1360SIngo Molnar 984f2cb1360SIngo Molnar sd->private = sdd; 985f2cb1360SIngo Molnar 986f2cb1360SIngo Molnar return sd; 987f2cb1360SIngo Molnar } 988f2cb1360SIngo Molnar 989f2cb1360SIngo Molnar /* 990f2cb1360SIngo Molnar * Topology list, bottom-up. 991f2cb1360SIngo Molnar */ 992f2cb1360SIngo Molnar static struct sched_domain_topology_level default_topology[] = { 993f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_SMT 994f2cb1360SIngo Molnar { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 995f2cb1360SIngo Molnar #endif 996f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_MC 997f2cb1360SIngo Molnar { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 998f2cb1360SIngo Molnar #endif 999f2cb1360SIngo Molnar { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 1000f2cb1360SIngo Molnar { NULL, }, 1001f2cb1360SIngo Molnar }; 1002f2cb1360SIngo Molnar 1003f2cb1360SIngo Molnar static struct sched_domain_topology_level *sched_domain_topology = 1004f2cb1360SIngo Molnar default_topology; 1005f2cb1360SIngo Molnar 1006f2cb1360SIngo Molnar #define for_each_sd_topology(tl) \ 1007f2cb1360SIngo Molnar for (tl = sched_domain_topology; tl->mask; tl++) 1008f2cb1360SIngo Molnar 1009f2cb1360SIngo Molnar void set_sched_topology(struct sched_domain_topology_level *tl) 1010f2cb1360SIngo Molnar { 1011f2cb1360SIngo Molnar if (WARN_ON_ONCE(sched_smp_initialized)) 1012f2cb1360SIngo Molnar return; 1013f2cb1360SIngo Molnar 1014f2cb1360SIngo Molnar sched_domain_topology = tl; 1015f2cb1360SIngo Molnar } 1016f2cb1360SIngo Molnar 1017f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1018f2cb1360SIngo Molnar 1019f2cb1360SIngo Molnar static const struct cpumask *sd_numa_mask(int cpu) 1020f2cb1360SIngo Molnar { 1021f2cb1360SIngo Molnar return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 1022f2cb1360SIngo Molnar } 1023f2cb1360SIngo Molnar 1024f2cb1360SIngo Molnar static void sched_numa_warn(const char *str) 1025f2cb1360SIngo Molnar { 1026f2cb1360SIngo Molnar static int done = false; 1027f2cb1360SIngo Molnar int i,j; 1028f2cb1360SIngo Molnar 1029f2cb1360SIngo Molnar if (done) 1030f2cb1360SIngo Molnar return; 1031f2cb1360SIngo Molnar 1032f2cb1360SIngo Molnar done = true; 1033f2cb1360SIngo Molnar 1034f2cb1360SIngo Molnar printk(KERN_WARNING "ERROR: %s\n\n", str); 1035f2cb1360SIngo Molnar 1036f2cb1360SIngo Molnar for (i = 0; i < nr_node_ids; i++) { 1037f2cb1360SIngo Molnar printk(KERN_WARNING " "); 1038f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) 1039f2cb1360SIngo Molnar printk(KERN_CONT "%02d ", node_distance(i,j)); 1040f2cb1360SIngo Molnar printk(KERN_CONT "\n"); 1041f2cb1360SIngo Molnar } 1042f2cb1360SIngo Molnar printk(KERN_WARNING "\n"); 1043f2cb1360SIngo Molnar } 1044f2cb1360SIngo Molnar 1045f2cb1360SIngo Molnar bool find_numa_distance(int distance) 1046f2cb1360SIngo Molnar { 1047f2cb1360SIngo Molnar int i; 1048f2cb1360SIngo Molnar 1049f2cb1360SIngo Molnar if (distance == node_distance(0, 0)) 1050f2cb1360SIngo Molnar return true; 1051f2cb1360SIngo Molnar 1052f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 1053f2cb1360SIngo Molnar if (sched_domains_numa_distance[i] == distance) 1054f2cb1360SIngo Molnar return true; 1055f2cb1360SIngo Molnar } 1056f2cb1360SIngo Molnar 1057f2cb1360SIngo Molnar return false; 1058f2cb1360SIngo Molnar } 1059f2cb1360SIngo Molnar 1060f2cb1360SIngo Molnar /* 1061f2cb1360SIngo Molnar * A system can have three types of NUMA topology: 1062f2cb1360SIngo Molnar * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 1063f2cb1360SIngo Molnar * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 1064f2cb1360SIngo Molnar * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 1065f2cb1360SIngo Molnar * 1066f2cb1360SIngo Molnar * The difference between a glueless mesh topology and a backplane 1067f2cb1360SIngo Molnar * topology lies in whether communication between not directly 1068f2cb1360SIngo Molnar * connected nodes goes through intermediary nodes (where programs 1069f2cb1360SIngo Molnar * could run), or through backplane controllers. This affects 1070f2cb1360SIngo Molnar * placement of programs. 1071f2cb1360SIngo Molnar * 1072f2cb1360SIngo Molnar * The type of topology can be discerned with the following tests: 1073f2cb1360SIngo Molnar * - If the maximum distance between any nodes is 1 hop, the system 1074f2cb1360SIngo Molnar * is directly connected. 1075f2cb1360SIngo Molnar * - If for two nodes A and B, located N > 1 hops away from each other, 1076f2cb1360SIngo Molnar * there is an intermediary node C, which is < N hops away from both 1077f2cb1360SIngo Molnar * nodes A and B, the system is a glueless mesh. 1078f2cb1360SIngo Molnar */ 1079f2cb1360SIngo Molnar static void init_numa_topology_type(void) 1080f2cb1360SIngo Molnar { 1081f2cb1360SIngo Molnar int a, b, c, n; 1082f2cb1360SIngo Molnar 1083f2cb1360SIngo Molnar n = sched_max_numa_distance; 1084f2cb1360SIngo Molnar 1085f2cb1360SIngo Molnar if (sched_domains_numa_levels <= 1) { 1086f2cb1360SIngo Molnar sched_numa_topology_type = NUMA_DIRECT; 1087f2cb1360SIngo Molnar return; 1088f2cb1360SIngo Molnar } 1089f2cb1360SIngo Molnar 1090f2cb1360SIngo Molnar for_each_online_node(a) { 1091f2cb1360SIngo Molnar for_each_online_node(b) { 1092f2cb1360SIngo Molnar /* Find two nodes furthest removed from each other. */ 1093f2cb1360SIngo Molnar if (node_distance(a, b) < n) 1094f2cb1360SIngo Molnar continue; 1095f2cb1360SIngo Molnar 1096f2cb1360SIngo Molnar /* Is there an intermediary node between a and b? */ 1097f2cb1360SIngo Molnar for_each_online_node(c) { 1098f2cb1360SIngo Molnar if (node_distance(a, c) < n && 1099f2cb1360SIngo Molnar node_distance(b, c) < n) { 1100f2cb1360SIngo Molnar sched_numa_topology_type = 1101f2cb1360SIngo Molnar NUMA_GLUELESS_MESH; 1102f2cb1360SIngo Molnar return; 1103f2cb1360SIngo Molnar } 1104f2cb1360SIngo Molnar } 1105f2cb1360SIngo Molnar 1106f2cb1360SIngo Molnar sched_numa_topology_type = NUMA_BACKPLANE; 1107f2cb1360SIngo Molnar return; 1108f2cb1360SIngo Molnar } 1109f2cb1360SIngo Molnar } 1110f2cb1360SIngo Molnar } 1111f2cb1360SIngo Molnar 1112f2cb1360SIngo Molnar void sched_init_numa(void) 1113f2cb1360SIngo Molnar { 1114f2cb1360SIngo Molnar int next_distance, curr_distance = node_distance(0, 0); 1115f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1116f2cb1360SIngo Molnar int level = 0; 1117f2cb1360SIngo Molnar int i, j, k; 1118f2cb1360SIngo Molnar 1119f2cb1360SIngo Molnar sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 1120f2cb1360SIngo Molnar if (!sched_domains_numa_distance) 1121f2cb1360SIngo Molnar return; 1122f2cb1360SIngo Molnar 1123f2cb1360SIngo Molnar /* 1124f2cb1360SIngo Molnar * O(nr_nodes^2) deduplicating selection sort -- in order to find the 1125f2cb1360SIngo Molnar * unique distances in the node_distance() table. 1126f2cb1360SIngo Molnar * 1127f2cb1360SIngo Molnar * Assumes node_distance(0,j) includes all distances in 1128f2cb1360SIngo Molnar * node_distance(i,j) in order to avoid cubic time. 1129f2cb1360SIngo Molnar */ 1130f2cb1360SIngo Molnar next_distance = curr_distance; 1131f2cb1360SIngo Molnar for (i = 0; i < nr_node_ids; i++) { 1132f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) { 1133f2cb1360SIngo Molnar for (k = 0; k < nr_node_ids; k++) { 1134f2cb1360SIngo Molnar int distance = node_distance(i, k); 1135f2cb1360SIngo Molnar 1136f2cb1360SIngo Molnar if (distance > curr_distance && 1137f2cb1360SIngo Molnar (distance < next_distance || 1138f2cb1360SIngo Molnar next_distance == curr_distance)) 1139f2cb1360SIngo Molnar next_distance = distance; 1140f2cb1360SIngo Molnar 1141f2cb1360SIngo Molnar /* 1142f2cb1360SIngo Molnar * While not a strong assumption it would be nice to know 1143f2cb1360SIngo Molnar * about cases where if node A is connected to B, B is not 1144f2cb1360SIngo Molnar * equally connected to A. 1145f2cb1360SIngo Molnar */ 1146f2cb1360SIngo Molnar if (sched_debug() && node_distance(k, i) != distance) 1147f2cb1360SIngo Molnar sched_numa_warn("Node-distance not symmetric"); 1148f2cb1360SIngo Molnar 1149f2cb1360SIngo Molnar if (sched_debug() && i && !find_numa_distance(distance)) 1150f2cb1360SIngo Molnar sched_numa_warn("Node-0 not representative"); 1151f2cb1360SIngo Molnar } 1152f2cb1360SIngo Molnar if (next_distance != curr_distance) { 1153f2cb1360SIngo Molnar sched_domains_numa_distance[level++] = next_distance; 1154f2cb1360SIngo Molnar sched_domains_numa_levels = level; 1155f2cb1360SIngo Molnar curr_distance = next_distance; 1156f2cb1360SIngo Molnar } else break; 1157f2cb1360SIngo Molnar } 1158f2cb1360SIngo Molnar 1159f2cb1360SIngo Molnar /* 1160f2cb1360SIngo Molnar * In case of sched_debug() we verify the above assumption. 1161f2cb1360SIngo Molnar */ 1162f2cb1360SIngo Molnar if (!sched_debug()) 1163f2cb1360SIngo Molnar break; 1164f2cb1360SIngo Molnar } 1165f2cb1360SIngo Molnar 1166f2cb1360SIngo Molnar if (!level) 1167f2cb1360SIngo Molnar return; 1168f2cb1360SIngo Molnar 1169f2cb1360SIngo Molnar /* 1170f2cb1360SIngo Molnar * 'level' contains the number of unique distances, excluding the 1171f2cb1360SIngo Molnar * identity distance node_distance(i,i). 1172f2cb1360SIngo Molnar * 1173f2cb1360SIngo Molnar * The sched_domains_numa_distance[] array includes the actual distance 1174f2cb1360SIngo Molnar * numbers. 1175f2cb1360SIngo Molnar */ 1176f2cb1360SIngo Molnar 1177f2cb1360SIngo Molnar /* 1178f2cb1360SIngo Molnar * Here, we should temporarily reset sched_domains_numa_levels to 0. 1179f2cb1360SIngo Molnar * If it fails to allocate memory for array sched_domains_numa_masks[][], 1180f2cb1360SIngo Molnar * the array will contain less then 'level' members. This could be 1181f2cb1360SIngo Molnar * dangerous when we use it to iterate array sched_domains_numa_masks[][] 1182f2cb1360SIngo Molnar * in other functions. 1183f2cb1360SIngo Molnar * 1184f2cb1360SIngo Molnar * We reset it to 'level' at the end of this function. 1185f2cb1360SIngo Molnar */ 1186f2cb1360SIngo Molnar sched_domains_numa_levels = 0; 1187f2cb1360SIngo Molnar 1188f2cb1360SIngo Molnar sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); 1189f2cb1360SIngo Molnar if (!sched_domains_numa_masks) 1190f2cb1360SIngo Molnar return; 1191f2cb1360SIngo Molnar 1192f2cb1360SIngo Molnar /* 1193f2cb1360SIngo Molnar * Now for each level, construct a mask per node which contains all 1194f2cb1360SIngo Molnar * CPUs of nodes that are that many hops away from us. 1195f2cb1360SIngo Molnar */ 1196f2cb1360SIngo Molnar for (i = 0; i < level; i++) { 1197f2cb1360SIngo Molnar sched_domains_numa_masks[i] = 1198f2cb1360SIngo Molnar kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 1199f2cb1360SIngo Molnar if (!sched_domains_numa_masks[i]) 1200f2cb1360SIngo Molnar return; 1201f2cb1360SIngo Molnar 1202f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) { 1203f2cb1360SIngo Molnar struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 1204f2cb1360SIngo Molnar if (!mask) 1205f2cb1360SIngo Molnar return; 1206f2cb1360SIngo Molnar 1207f2cb1360SIngo Molnar sched_domains_numa_masks[i][j] = mask; 1208f2cb1360SIngo Molnar 1209f2cb1360SIngo Molnar for_each_node(k) { 1210f2cb1360SIngo Molnar if (node_distance(j, k) > sched_domains_numa_distance[i]) 1211f2cb1360SIngo Molnar continue; 1212f2cb1360SIngo Molnar 1213f2cb1360SIngo Molnar cpumask_or(mask, mask, cpumask_of_node(k)); 1214f2cb1360SIngo Molnar } 1215f2cb1360SIngo Molnar } 1216f2cb1360SIngo Molnar } 1217f2cb1360SIngo Molnar 1218f2cb1360SIngo Molnar /* Compute default topology size */ 1219f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++); 1220f2cb1360SIngo Molnar 1221f2cb1360SIngo Molnar tl = kzalloc((i + level + 1) * 1222f2cb1360SIngo Molnar sizeof(struct sched_domain_topology_level), GFP_KERNEL); 1223f2cb1360SIngo Molnar if (!tl) 1224f2cb1360SIngo Molnar return; 1225f2cb1360SIngo Molnar 1226f2cb1360SIngo Molnar /* 1227f2cb1360SIngo Molnar * Copy the default topology bits.. 1228f2cb1360SIngo Molnar */ 1229f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++) 1230f2cb1360SIngo Molnar tl[i] = sched_domain_topology[i]; 1231f2cb1360SIngo Molnar 1232f2cb1360SIngo Molnar /* 1233f2cb1360SIngo Molnar * .. and append 'j' levels of NUMA goodness. 1234f2cb1360SIngo Molnar */ 1235f2cb1360SIngo Molnar for (j = 0; j < level; i++, j++) { 1236f2cb1360SIngo Molnar tl[i] = (struct sched_domain_topology_level){ 1237f2cb1360SIngo Molnar .mask = sd_numa_mask, 1238f2cb1360SIngo Molnar .sd_flags = cpu_numa_flags, 1239f2cb1360SIngo Molnar .flags = SDTL_OVERLAP, 1240f2cb1360SIngo Molnar .numa_level = j, 1241f2cb1360SIngo Molnar SD_INIT_NAME(NUMA) 1242f2cb1360SIngo Molnar }; 1243f2cb1360SIngo Molnar } 1244f2cb1360SIngo Molnar 1245f2cb1360SIngo Molnar sched_domain_topology = tl; 1246f2cb1360SIngo Molnar 1247f2cb1360SIngo Molnar sched_domains_numa_levels = level; 1248f2cb1360SIngo Molnar sched_max_numa_distance = sched_domains_numa_distance[level - 1]; 1249f2cb1360SIngo Molnar 1250f2cb1360SIngo Molnar init_numa_topology_type(); 1251f2cb1360SIngo Molnar } 1252f2cb1360SIngo Molnar 1253f2cb1360SIngo Molnar void sched_domains_numa_masks_set(unsigned int cpu) 1254f2cb1360SIngo Molnar { 1255f2cb1360SIngo Molnar int node = cpu_to_node(cpu); 1256f2cb1360SIngo Molnar int i, j; 1257f2cb1360SIngo Molnar 1258f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 1259f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) { 1260f2cb1360SIngo Molnar if (node_distance(j, node) <= sched_domains_numa_distance[i]) 1261f2cb1360SIngo Molnar cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 1262f2cb1360SIngo Molnar } 1263f2cb1360SIngo Molnar } 1264f2cb1360SIngo Molnar } 1265f2cb1360SIngo Molnar 1266f2cb1360SIngo Molnar void sched_domains_numa_masks_clear(unsigned int cpu) 1267f2cb1360SIngo Molnar { 1268f2cb1360SIngo Molnar int i, j; 1269f2cb1360SIngo Molnar 1270f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 1271f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) 1272f2cb1360SIngo Molnar cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 1273f2cb1360SIngo Molnar } 1274f2cb1360SIngo Molnar } 1275f2cb1360SIngo Molnar 1276f2cb1360SIngo Molnar #endif /* CONFIG_NUMA */ 1277f2cb1360SIngo Molnar 1278f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map) 1279f2cb1360SIngo Molnar { 1280f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1281f2cb1360SIngo Molnar int j; 1282f2cb1360SIngo Molnar 1283f2cb1360SIngo Molnar for_each_sd_topology(tl) { 1284f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 1285f2cb1360SIngo Molnar 1286f2cb1360SIngo Molnar sdd->sd = alloc_percpu(struct sched_domain *); 1287f2cb1360SIngo Molnar if (!sdd->sd) 1288f2cb1360SIngo Molnar return -ENOMEM; 1289f2cb1360SIngo Molnar 1290f2cb1360SIngo Molnar sdd->sds = alloc_percpu(struct sched_domain_shared *); 1291f2cb1360SIngo Molnar if (!sdd->sds) 1292f2cb1360SIngo Molnar return -ENOMEM; 1293f2cb1360SIngo Molnar 1294f2cb1360SIngo Molnar sdd->sg = alloc_percpu(struct sched_group *); 1295f2cb1360SIngo Molnar if (!sdd->sg) 1296f2cb1360SIngo Molnar return -ENOMEM; 1297f2cb1360SIngo Molnar 1298f2cb1360SIngo Molnar sdd->sgc = alloc_percpu(struct sched_group_capacity *); 1299f2cb1360SIngo Molnar if (!sdd->sgc) 1300f2cb1360SIngo Molnar return -ENOMEM; 1301f2cb1360SIngo Molnar 1302f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 1303f2cb1360SIngo Molnar struct sched_domain *sd; 1304f2cb1360SIngo Molnar struct sched_domain_shared *sds; 1305f2cb1360SIngo Molnar struct sched_group *sg; 1306f2cb1360SIngo Molnar struct sched_group_capacity *sgc; 1307f2cb1360SIngo Molnar 1308f2cb1360SIngo Molnar sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 1309f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 1310f2cb1360SIngo Molnar if (!sd) 1311f2cb1360SIngo Molnar return -ENOMEM; 1312f2cb1360SIngo Molnar 1313f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, j) = sd; 1314f2cb1360SIngo Molnar 1315f2cb1360SIngo Molnar sds = kzalloc_node(sizeof(struct sched_domain_shared), 1316f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 1317f2cb1360SIngo Molnar if (!sds) 1318f2cb1360SIngo Molnar return -ENOMEM; 1319f2cb1360SIngo Molnar 1320f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, j) = sds; 1321f2cb1360SIngo Molnar 1322f2cb1360SIngo Molnar sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 1323f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 1324f2cb1360SIngo Molnar if (!sg) 1325f2cb1360SIngo Molnar return -ENOMEM; 1326f2cb1360SIngo Molnar 1327f2cb1360SIngo Molnar sg->next = sg; 1328f2cb1360SIngo Molnar 1329f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, j) = sg; 1330f2cb1360SIngo Molnar 1331f2cb1360SIngo Molnar sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 1332f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 1333f2cb1360SIngo Molnar if (!sgc) 1334f2cb1360SIngo Molnar return -ENOMEM; 1335f2cb1360SIngo Molnar 1336f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, j) = sgc; 1337f2cb1360SIngo Molnar } 1338f2cb1360SIngo Molnar } 1339f2cb1360SIngo Molnar 1340f2cb1360SIngo Molnar return 0; 1341f2cb1360SIngo Molnar } 1342f2cb1360SIngo Molnar 1343f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map) 1344f2cb1360SIngo Molnar { 1345f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1346f2cb1360SIngo Molnar int j; 1347f2cb1360SIngo Molnar 1348f2cb1360SIngo Molnar for_each_sd_topology(tl) { 1349f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 1350f2cb1360SIngo Molnar 1351f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 1352f2cb1360SIngo Molnar struct sched_domain *sd; 1353f2cb1360SIngo Molnar 1354f2cb1360SIngo Molnar if (sdd->sd) { 1355f2cb1360SIngo Molnar sd = *per_cpu_ptr(sdd->sd, j); 1356f2cb1360SIngo Molnar if (sd && (sd->flags & SD_OVERLAP)) 1357f2cb1360SIngo Molnar free_sched_groups(sd->groups, 0); 1358f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sd, j)); 1359f2cb1360SIngo Molnar } 1360f2cb1360SIngo Molnar 1361f2cb1360SIngo Molnar if (sdd->sds) 1362f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sds, j)); 1363f2cb1360SIngo Molnar if (sdd->sg) 1364f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sg, j)); 1365f2cb1360SIngo Molnar if (sdd->sgc) 1366f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sgc, j)); 1367f2cb1360SIngo Molnar } 1368f2cb1360SIngo Molnar free_percpu(sdd->sd); 1369f2cb1360SIngo Molnar sdd->sd = NULL; 1370f2cb1360SIngo Molnar free_percpu(sdd->sds); 1371f2cb1360SIngo Molnar sdd->sds = NULL; 1372f2cb1360SIngo Molnar free_percpu(sdd->sg); 1373f2cb1360SIngo Molnar sdd->sg = NULL; 1374f2cb1360SIngo Molnar free_percpu(sdd->sgc); 1375f2cb1360SIngo Molnar sdd->sgc = NULL; 1376f2cb1360SIngo Molnar } 1377f2cb1360SIngo Molnar } 1378f2cb1360SIngo Molnar 1379f2cb1360SIngo Molnar struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 1380f2cb1360SIngo Molnar const struct cpumask *cpu_map, struct sched_domain_attr *attr, 1381f2cb1360SIngo Molnar struct sched_domain *child, int cpu) 1382f2cb1360SIngo Molnar { 1383f2cb1360SIngo Molnar struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); 1384f2cb1360SIngo Molnar 1385f2cb1360SIngo Molnar if (child) { 1386f2cb1360SIngo Molnar sd->level = child->level + 1; 1387f2cb1360SIngo Molnar sched_domain_level_max = max(sched_domain_level_max, sd->level); 1388f2cb1360SIngo Molnar child->parent = sd; 1389f2cb1360SIngo Molnar 1390f2cb1360SIngo Molnar if (!cpumask_subset(sched_domain_span(child), 1391f2cb1360SIngo Molnar sched_domain_span(sd))) { 1392f2cb1360SIngo Molnar pr_err("BUG: arch topology borken\n"); 1393f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG 1394f2cb1360SIngo Molnar pr_err(" the %s domain not a subset of the %s domain\n", 1395f2cb1360SIngo Molnar child->name, sd->name); 1396f2cb1360SIngo Molnar #endif 1397f2cb1360SIngo Molnar /* Fixup, ensure @sd has at least @child cpus. */ 1398f2cb1360SIngo Molnar cpumask_or(sched_domain_span(sd), 1399f2cb1360SIngo Molnar sched_domain_span(sd), 1400f2cb1360SIngo Molnar sched_domain_span(child)); 1401f2cb1360SIngo Molnar } 1402f2cb1360SIngo Molnar 1403f2cb1360SIngo Molnar } 1404f2cb1360SIngo Molnar set_domain_attribute(sd, attr); 1405f2cb1360SIngo Molnar 1406f2cb1360SIngo Molnar return sd; 1407f2cb1360SIngo Molnar } 1408f2cb1360SIngo Molnar 1409f2cb1360SIngo Molnar /* 1410f2cb1360SIngo Molnar * Build sched domains for a given set of CPUs and attach the sched domains 1411f2cb1360SIngo Molnar * to the individual CPUs 1412f2cb1360SIngo Molnar */ 1413f2cb1360SIngo Molnar static int 1414f2cb1360SIngo Molnar build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) 1415f2cb1360SIngo Molnar { 1416f2cb1360SIngo Molnar enum s_alloc alloc_state; 1417f2cb1360SIngo Molnar struct sched_domain *sd; 1418f2cb1360SIngo Molnar struct s_data d; 1419f2cb1360SIngo Molnar struct rq *rq = NULL; 1420f2cb1360SIngo Molnar int i, ret = -ENOMEM; 1421f2cb1360SIngo Molnar 1422f2cb1360SIngo Molnar alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 1423f2cb1360SIngo Molnar if (alloc_state != sa_rootdomain) 1424f2cb1360SIngo Molnar goto error; 1425f2cb1360SIngo Molnar 1426f2cb1360SIngo Molnar /* Set up domains for CPUs specified by the cpu_map: */ 1427f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) { 1428f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1429f2cb1360SIngo Molnar 1430f2cb1360SIngo Molnar sd = NULL; 1431f2cb1360SIngo Molnar for_each_sd_topology(tl) { 1432f2cb1360SIngo Molnar sd = build_sched_domain(tl, cpu_map, attr, sd, i); 1433f2cb1360SIngo Molnar if (tl == sched_domain_topology) 1434f2cb1360SIngo Molnar *per_cpu_ptr(d.sd, i) = sd; 1435f2cb1360SIngo Molnar if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) 1436f2cb1360SIngo Molnar sd->flags |= SD_OVERLAP; 1437f2cb1360SIngo Molnar if (cpumask_equal(cpu_map, sched_domain_span(sd))) 1438f2cb1360SIngo Molnar break; 1439f2cb1360SIngo Molnar } 1440f2cb1360SIngo Molnar } 1441f2cb1360SIngo Molnar 1442f2cb1360SIngo Molnar /* Build the groups for the domains */ 1443f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) { 1444f2cb1360SIngo Molnar for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 1445f2cb1360SIngo Molnar sd->span_weight = cpumask_weight(sched_domain_span(sd)); 1446f2cb1360SIngo Molnar if (sd->flags & SD_OVERLAP) { 1447f2cb1360SIngo Molnar if (build_overlap_sched_groups(sd, i)) 1448f2cb1360SIngo Molnar goto error; 1449f2cb1360SIngo Molnar } else { 1450f2cb1360SIngo Molnar if (build_sched_groups(sd, i)) 1451f2cb1360SIngo Molnar goto error; 1452f2cb1360SIngo Molnar } 1453f2cb1360SIngo Molnar } 1454f2cb1360SIngo Molnar } 1455f2cb1360SIngo Molnar 1456f2cb1360SIngo Molnar /* Calculate CPU capacity for physical packages and nodes */ 1457f2cb1360SIngo Molnar for (i = nr_cpumask_bits-1; i >= 0; i--) { 1458f2cb1360SIngo Molnar if (!cpumask_test_cpu(i, cpu_map)) 1459f2cb1360SIngo Molnar continue; 1460f2cb1360SIngo Molnar 1461f2cb1360SIngo Molnar for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 1462f2cb1360SIngo Molnar claim_allocations(i, sd); 1463f2cb1360SIngo Molnar init_sched_groups_capacity(i, sd); 1464f2cb1360SIngo Molnar } 1465f2cb1360SIngo Molnar } 1466f2cb1360SIngo Molnar 1467f2cb1360SIngo Molnar /* Attach the domains */ 1468f2cb1360SIngo Molnar rcu_read_lock(); 1469f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) { 1470f2cb1360SIngo Molnar rq = cpu_rq(i); 1471f2cb1360SIngo Molnar sd = *per_cpu_ptr(d.sd, i); 1472f2cb1360SIngo Molnar 1473f2cb1360SIngo Molnar /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */ 1474f2cb1360SIngo Molnar if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity)) 1475f2cb1360SIngo Molnar WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); 1476f2cb1360SIngo Molnar 1477f2cb1360SIngo Molnar cpu_attach_domain(sd, d.rd, i); 1478f2cb1360SIngo Molnar } 1479f2cb1360SIngo Molnar rcu_read_unlock(); 1480f2cb1360SIngo Molnar 1481f2cb1360SIngo Molnar if (rq && sched_debug_enabled) { 1482f2cb1360SIngo Molnar pr_info("span: %*pbl (max cpu_capacity = %lu)\n", 1483f2cb1360SIngo Molnar cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); 1484f2cb1360SIngo Molnar } 1485f2cb1360SIngo Molnar 1486f2cb1360SIngo Molnar ret = 0; 1487f2cb1360SIngo Molnar error: 1488f2cb1360SIngo Molnar __free_domain_allocs(&d, alloc_state, cpu_map); 1489f2cb1360SIngo Molnar return ret; 1490f2cb1360SIngo Molnar } 1491f2cb1360SIngo Molnar 1492f2cb1360SIngo Molnar /* Current sched domains: */ 1493f2cb1360SIngo Molnar static cpumask_var_t *doms_cur; 1494f2cb1360SIngo Molnar 1495f2cb1360SIngo Molnar /* Number of sched domains in 'doms_cur': */ 1496f2cb1360SIngo Molnar static int ndoms_cur; 1497f2cb1360SIngo Molnar 1498f2cb1360SIngo Molnar /* Attribues of custom domains in 'doms_cur' */ 1499f2cb1360SIngo Molnar static struct sched_domain_attr *dattr_cur; 1500f2cb1360SIngo Molnar 1501f2cb1360SIngo Molnar /* 1502f2cb1360SIngo Molnar * Special case: If a kmalloc() of a doms_cur partition (array of 1503f2cb1360SIngo Molnar * cpumask) fails, then fallback to a single sched domain, 1504f2cb1360SIngo Molnar * as determined by the single cpumask fallback_doms. 1505f2cb1360SIngo Molnar */ 1506f2cb1360SIngo Molnar cpumask_var_t fallback_doms; 1507f2cb1360SIngo Molnar 1508f2cb1360SIngo Molnar /* 1509f2cb1360SIngo Molnar * arch_update_cpu_topology lets virtualized architectures update the 1510f2cb1360SIngo Molnar * CPU core maps. It is supposed to return 1 if the topology changed 1511f2cb1360SIngo Molnar * or 0 if it stayed the same. 1512f2cb1360SIngo Molnar */ 1513f2cb1360SIngo Molnar int __weak arch_update_cpu_topology(void) 1514f2cb1360SIngo Molnar { 1515f2cb1360SIngo Molnar return 0; 1516f2cb1360SIngo Molnar } 1517f2cb1360SIngo Molnar 1518f2cb1360SIngo Molnar cpumask_var_t *alloc_sched_domains(unsigned int ndoms) 1519f2cb1360SIngo Molnar { 1520f2cb1360SIngo Molnar int i; 1521f2cb1360SIngo Molnar cpumask_var_t *doms; 1522f2cb1360SIngo Molnar 1523f2cb1360SIngo Molnar doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); 1524f2cb1360SIngo Molnar if (!doms) 1525f2cb1360SIngo Molnar return NULL; 1526f2cb1360SIngo Molnar for (i = 0; i < ndoms; i++) { 1527f2cb1360SIngo Molnar if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { 1528f2cb1360SIngo Molnar free_sched_domains(doms, i); 1529f2cb1360SIngo Molnar return NULL; 1530f2cb1360SIngo Molnar } 1531f2cb1360SIngo Molnar } 1532f2cb1360SIngo Molnar return doms; 1533f2cb1360SIngo Molnar } 1534f2cb1360SIngo Molnar 1535f2cb1360SIngo Molnar void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) 1536f2cb1360SIngo Molnar { 1537f2cb1360SIngo Molnar unsigned int i; 1538f2cb1360SIngo Molnar for (i = 0; i < ndoms; i++) 1539f2cb1360SIngo Molnar free_cpumask_var(doms[i]); 1540f2cb1360SIngo Molnar kfree(doms); 1541f2cb1360SIngo Molnar } 1542f2cb1360SIngo Molnar 1543f2cb1360SIngo Molnar /* 1544f2cb1360SIngo Molnar * Set up scheduler domains and groups. Callers must hold the hotplug lock. 1545f2cb1360SIngo Molnar * For now this just excludes isolated CPUs, but could be used to 1546f2cb1360SIngo Molnar * exclude other special cases in the future. 1547f2cb1360SIngo Molnar */ 1548f2cb1360SIngo Molnar int init_sched_domains(const struct cpumask *cpu_map) 1549f2cb1360SIngo Molnar { 1550f2cb1360SIngo Molnar int err; 1551f2cb1360SIngo Molnar 1552f2cb1360SIngo Molnar arch_update_cpu_topology(); 1553f2cb1360SIngo Molnar ndoms_cur = 1; 1554f2cb1360SIngo Molnar doms_cur = alloc_sched_domains(ndoms_cur); 1555f2cb1360SIngo Molnar if (!doms_cur) 1556f2cb1360SIngo Molnar doms_cur = &fallback_doms; 1557f2cb1360SIngo Molnar cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); 1558f2cb1360SIngo Molnar err = build_sched_domains(doms_cur[0], NULL); 1559f2cb1360SIngo Molnar register_sched_domain_sysctl(); 1560f2cb1360SIngo Molnar 1561f2cb1360SIngo Molnar return err; 1562f2cb1360SIngo Molnar } 1563f2cb1360SIngo Molnar 1564f2cb1360SIngo Molnar /* 1565f2cb1360SIngo Molnar * Detach sched domains from a group of CPUs specified in cpu_map 1566f2cb1360SIngo Molnar * These CPUs will now be attached to the NULL domain 1567f2cb1360SIngo Molnar */ 1568f2cb1360SIngo Molnar static void detach_destroy_domains(const struct cpumask *cpu_map) 1569f2cb1360SIngo Molnar { 1570f2cb1360SIngo Molnar int i; 1571f2cb1360SIngo Molnar 1572f2cb1360SIngo Molnar rcu_read_lock(); 1573f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) 1574f2cb1360SIngo Molnar cpu_attach_domain(NULL, &def_root_domain, i); 1575f2cb1360SIngo Molnar rcu_read_unlock(); 1576f2cb1360SIngo Molnar } 1577f2cb1360SIngo Molnar 1578f2cb1360SIngo Molnar /* handle null as "default" */ 1579f2cb1360SIngo Molnar static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, 1580f2cb1360SIngo Molnar struct sched_domain_attr *new, int idx_new) 1581f2cb1360SIngo Molnar { 1582f2cb1360SIngo Molnar struct sched_domain_attr tmp; 1583f2cb1360SIngo Molnar 1584f2cb1360SIngo Molnar /* Fast path: */ 1585f2cb1360SIngo Molnar if (!new && !cur) 1586f2cb1360SIngo Molnar return 1; 1587f2cb1360SIngo Molnar 1588f2cb1360SIngo Molnar tmp = SD_ATTR_INIT; 1589f2cb1360SIngo Molnar return !memcmp(cur ? (cur + idx_cur) : &tmp, 1590f2cb1360SIngo Molnar new ? (new + idx_new) : &tmp, 1591f2cb1360SIngo Molnar sizeof(struct sched_domain_attr)); 1592f2cb1360SIngo Molnar } 1593f2cb1360SIngo Molnar 1594f2cb1360SIngo Molnar /* 1595f2cb1360SIngo Molnar * Partition sched domains as specified by the 'ndoms_new' 1596f2cb1360SIngo Molnar * cpumasks in the array doms_new[] of cpumasks. This compares 1597f2cb1360SIngo Molnar * doms_new[] to the current sched domain partitioning, doms_cur[]. 1598f2cb1360SIngo Molnar * It destroys each deleted domain and builds each new domain. 1599f2cb1360SIngo Molnar * 1600f2cb1360SIngo Molnar * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. 1601f2cb1360SIngo Molnar * The masks don't intersect (don't overlap.) We should setup one 1602f2cb1360SIngo Molnar * sched domain for each mask. CPUs not in any of the cpumasks will 1603f2cb1360SIngo Molnar * not be load balanced. If the same cpumask appears both in the 1604f2cb1360SIngo Molnar * current 'doms_cur' domains and in the new 'doms_new', we can leave 1605f2cb1360SIngo Molnar * it as it is. 1606f2cb1360SIngo Molnar * 1607f2cb1360SIngo Molnar * The passed in 'doms_new' should be allocated using 1608f2cb1360SIngo Molnar * alloc_sched_domains. This routine takes ownership of it and will 1609f2cb1360SIngo Molnar * free_sched_domains it when done with it. If the caller failed the 1610f2cb1360SIngo Molnar * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, 1611f2cb1360SIngo Molnar * and partition_sched_domains() will fallback to the single partition 1612f2cb1360SIngo Molnar * 'fallback_doms', it also forces the domains to be rebuilt. 1613f2cb1360SIngo Molnar * 1614f2cb1360SIngo Molnar * If doms_new == NULL it will be replaced with cpu_online_mask. 1615f2cb1360SIngo Molnar * ndoms_new == 0 is a special case for destroying existing domains, 1616f2cb1360SIngo Molnar * and it will not create the default domain. 1617f2cb1360SIngo Molnar * 1618f2cb1360SIngo Molnar * Call with hotplug lock held 1619f2cb1360SIngo Molnar */ 1620f2cb1360SIngo Molnar void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 1621f2cb1360SIngo Molnar struct sched_domain_attr *dattr_new) 1622f2cb1360SIngo Molnar { 1623f2cb1360SIngo Molnar int i, j, n; 1624f2cb1360SIngo Molnar int new_topology; 1625f2cb1360SIngo Molnar 1626f2cb1360SIngo Molnar mutex_lock(&sched_domains_mutex); 1627f2cb1360SIngo Molnar 1628f2cb1360SIngo Molnar /* Always unregister in case we don't destroy any domains: */ 1629f2cb1360SIngo Molnar unregister_sched_domain_sysctl(); 1630f2cb1360SIngo Molnar 1631f2cb1360SIngo Molnar /* Let the architecture update CPU core mappings: */ 1632f2cb1360SIngo Molnar new_topology = arch_update_cpu_topology(); 1633f2cb1360SIngo Molnar 1634f2cb1360SIngo Molnar n = doms_new ? ndoms_new : 0; 1635f2cb1360SIngo Molnar 1636f2cb1360SIngo Molnar /* Destroy deleted domains: */ 1637f2cb1360SIngo Molnar for (i = 0; i < ndoms_cur; i++) { 1638f2cb1360SIngo Molnar for (j = 0; j < n && !new_topology; j++) { 1639f2cb1360SIngo Molnar if (cpumask_equal(doms_cur[i], doms_new[j]) 1640f2cb1360SIngo Molnar && dattrs_equal(dattr_cur, i, dattr_new, j)) 1641f2cb1360SIngo Molnar goto match1; 1642f2cb1360SIngo Molnar } 1643f2cb1360SIngo Molnar /* No match - a current sched domain not in new doms_new[] */ 1644f2cb1360SIngo Molnar detach_destroy_domains(doms_cur[i]); 1645f2cb1360SIngo Molnar match1: 1646f2cb1360SIngo Molnar ; 1647f2cb1360SIngo Molnar } 1648f2cb1360SIngo Molnar 1649f2cb1360SIngo Molnar n = ndoms_cur; 1650f2cb1360SIngo Molnar if (doms_new == NULL) { 1651f2cb1360SIngo Molnar n = 0; 1652f2cb1360SIngo Molnar doms_new = &fallback_doms; 1653f2cb1360SIngo Molnar cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); 1654f2cb1360SIngo Molnar WARN_ON_ONCE(dattr_new); 1655f2cb1360SIngo Molnar } 1656f2cb1360SIngo Molnar 1657f2cb1360SIngo Molnar /* Build new domains: */ 1658f2cb1360SIngo Molnar for (i = 0; i < ndoms_new; i++) { 1659f2cb1360SIngo Molnar for (j = 0; j < n && !new_topology; j++) { 1660f2cb1360SIngo Molnar if (cpumask_equal(doms_new[i], doms_cur[j]) 1661f2cb1360SIngo Molnar && dattrs_equal(dattr_new, i, dattr_cur, j)) 1662f2cb1360SIngo Molnar goto match2; 1663f2cb1360SIngo Molnar } 1664f2cb1360SIngo Molnar /* No match - add a new doms_new */ 1665f2cb1360SIngo Molnar build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); 1666f2cb1360SIngo Molnar match2: 1667f2cb1360SIngo Molnar ; 1668f2cb1360SIngo Molnar } 1669f2cb1360SIngo Molnar 1670f2cb1360SIngo Molnar /* Remember the new sched domains: */ 1671f2cb1360SIngo Molnar if (doms_cur != &fallback_doms) 1672f2cb1360SIngo Molnar free_sched_domains(doms_cur, ndoms_cur); 1673f2cb1360SIngo Molnar 1674f2cb1360SIngo Molnar kfree(dattr_cur); 1675f2cb1360SIngo Molnar doms_cur = doms_new; 1676f2cb1360SIngo Molnar dattr_cur = dattr_new; 1677f2cb1360SIngo Molnar ndoms_cur = ndoms_new; 1678f2cb1360SIngo Molnar 1679f2cb1360SIngo Molnar register_sched_domain_sysctl(); 1680f2cb1360SIngo Molnar 1681f2cb1360SIngo Molnar mutex_unlock(&sched_domains_mutex); 1682f2cb1360SIngo Molnar } 1683f2cb1360SIngo Molnar 1684