/*
 * Scheduler topology setup/handling methods
 */
#include <linux/sched.h>
#include <linux/mutex.h>

#include "sched.h"

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
cpumask_var_t sched_domains_tmpmask;

#ifdef CONFIG_SCHED_DEBUG

static __read_mostly int sched_debug_enabled;

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = 1;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
		return -1;
	}

	printk(KERN_CONT "span %*pbl level %s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_cpus(group));

		printk(KERN_CONT " %*pbl",
		       cpumask_pr_args(sched_group_cpus(group)));
		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
			printk(KERN_CONT " (cpu_capacity = %lu)",
			       group->sgc->capacity);
		}

		group = group->next;
	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_enabled 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* The following flags need at least two groups: */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES |
			 SD_SHARE_POWERDOMAIN)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* The following flags don't use groups: */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if there is only one group in the parent: */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
			    SD_BALANCE_NEWIDLE |
			    SD_BALANCE_FORK |
			    SD_BALANCE_EXEC |
			    SD_ASYM_CPUCAPACITY |
			    SD_SHARE_CPUCAPACITY |
			    SD_SHARE_PKG_RESOURCES |
			    SD_PREFER_SIBLING |
			    SD_SHARE_POWERDOMAIN);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}
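
/*
 * Illustrative note (added, not part of the original file): the
 * "~cflags & pflags" test above is a bitwise subset check -- it is non-zero
 * exactly when the parent carries a flag the child lacks, i.e. when the
 * parent still adds behaviour and must be kept. A minimal stand-alone
 * sketch of the idea, kept out of the build on purpose:
 */
#if 0
static bool flags_are_subset(unsigned long child_flags, unsigned long parent_flags)
{
	/* Bits set in "~child & parent" are flags only the parent has. */
	return !(~child_flags & parent_flags);
}
#endif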

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
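
/*
 * Illustrative note (added): the function above is an instance of the common
 * refcount-plus-RCU teardown pattern -- the last reference holder schedules
 * the free after a grace period, so concurrent RCU readers that still see
 * the old pointer stay safe. In generic form, with a hypothetical
 * "struct obj":
 */
#if 0
static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		call_rcu_sched(&o->rcu, free_obj);	/* freed after a grace period */
}
#endif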

static int init_rootdomain(struct root_domain *rd)
{
	memset(rd, 0, sizeof(*rd));

	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		kfree(sg);
		sg = tmp;
	} while (sg != first);
}
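
/*
 * Illustrative note (added): sched_group lists are circular and singly
 * linked, so the walk above must grab ->next *before* freeing the current
 * node, and must terminate on pointer equality with the saved head rather
 * than on NULL. The same save-next-then-free idiom in miniature:
 */
#if 0
struct sched_group *pos = first, *next;
do {
	next = pos->next;	/* read the link before kfree() invalidates it */
	kfree(pos);
	pos = next;
} while (pos != first);
#endif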

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * If it's an overlapping domain it has private groups; iterate and
	 * nuke them all.
	 */
	if (sd->flags & SD_OVERLAP) {
		free_sched_groups(sd->groups, 1);
	} else if (atomic_dec_and_test(&sd->groups->ref)) {
		kfree(sd->groups->sgc);
		kfree(sd->groups);
	}
	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (the Last Level Cache Domain) for this CPU;
 * this allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in the
 * cpumask of the domain); this allows us to quickly tell if two CPUs are
 * in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
}
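
/*
 * Illustrative note (added): because sd_llc_id holds the first CPU of the
 * LLC span, a shared-cache test reduces to an integer compare. This is
 * essentially what cpus_share_cache() in kernel/sched/core.c does; a sketch
 * of that consumer:
 */
#if 0
static bool example_cpus_share_cache(int this_cpu, int that_cpu)
{
	/* Same LLC domain ID <=> same last-level cache. */
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif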

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling: */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

/* Setup the mask of CPUs configured for isolated domains: */
static int __init isolated_cpu_setup(char *str)
{
	int ret;

	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	ret = cpulist_parse(str, cpu_isolated_map);
	if (ret) {
		pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
		return 0;
	}
	return 1;
}
__setup("isolcpus=", isolated_cpu_setup);

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain *rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};
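
/*
 * Illustrative note (added): cpulist_parse() accepts the usual comma/range
 * CPU-list syntax, so a (hypothetical) boot line such as:
 *
 *	isolcpus=2,3,8-11
 *
 * leaves CPUs 2, 3 and 8..11 in cpu_isolated_map and thus outside the
 * regular sched domains. A minimal stand-alone use of the same parser:
 */
#if 0
cpumask_var_t mask;

if (zalloc_cpumask_var(&mask, GFP_KERNEL) &&
    !cpulist_parse("2,3,8-11", mask))
	pr_info("parsed %*pbl\n", cpumask_pr_args(mask));
#endif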

/*
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Asymmetric node setups can result in situations where the domain tree is of
 * unequal depth; make sure to skip domains that already cover the entire
 * range.
 *
 * In that case build_sched_domains() will have terminated the iteration early
 * and our sibling sd spans will be empty. Domains should always include the
 * CPU they're built on, so check that.
 */
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
	const struct cpumask *span = sched_domain_span(sd);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	for_each_cpu(i, span) {
		sibling = *per_cpu_ptr(sdd->sd, i);
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		cpumask_set_cpu(i, sched_group_mask(sg));
	}
}

/*
 * Return the canonical balance CPU for this group; this is the first CPU
 * of this group that's also in the iteration mask.
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/* See the comment near build_group_mask(). */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
				GFP_KERNEL, cpu_to_node(cpu));

		if (!sg)
			goto fail;

		sg_span = sched_group_cpus(sg);
		if (sibling->child)
			cpumask_copy(sg_span, sched_domain_span(sibling->child));
		else
			cpumask_set_cpu(i, sg_span);

		cpumask_or(covered, covered, sg_span);

		sg->sgc = *per_cpu_ptr(sdd->sgc, i);
		if (atomic_inc_return(&sg->sgc->ref) == 1)
			build_group_mask(sd, sg);

		/*
		 * Initialize sgc->capacity such that even if we mess up the
		 * domains and no possible iteration will get us here, we won't
		 * die on a /0 trap.
		 */
		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;

		/*
		 * Make sure the first group of this domain contains the
		 * canonical balance CPU. Otherwise the sched_domain iteration
		 * breaks. See update_sg_lb_stats().
		 */
		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
		    group_balance_cpu(sg) == cpu)
			groups = sg;

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = groups;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}

static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	if (sg) {
		*sg = *per_cpu_ptr(sdd->sg, cpu);
		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);

		/* For claim_allocations: */
		atomic_set(&(*sg)->sgc->ref, 1);
	}

	return cpu;
}

/*
 * build_sched_groups() will build a circular linked list of the groups
 * covered by the given span, set each group's ->cpumask correctly,
 * and set ->cpu_capacity to 0.
 *
 * Assumes the sched_domain tree is fully constructed.
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	get_group(cpu, sdd, &sd->groups);
	atomic_inc(&sd->groups->ref);

	if (cpu != cpumask_first(span))
		return 0;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu(i, span) {
		struct sched_group *sg;
		int group, j;

		if (cpumask_test_cpu(i, covered))
			continue;

		group = get_group(i, sdd, &sg);
		cpumask_setall(sched_group_mask(sg));

		for_each_cpu(j, span) {
			if (get_group(j, sdd, NULL) != group)
				continue;

			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, sched_group_cpus(sg));
		}

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;

	return 0;
}
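
/*
 * Illustrative note (added): both group builders above use the same
 * ring-construction idiom -- append with a first/last pair, then close the
 * ring by pointing the last element back at the first. In isolation, with a
 * hypothetical for_each_candidate() iterator:
 */
#if 0
struct sched_group *first = NULL, *last = NULL;

for_each_candidate(sg) {
	if (!first)
		first = sg;		/* remember the head */
	if (last)
		last->next = sg;	/* append */
	last = sg;
}
last->next = first;			/* close the circular list */
#endif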

/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of a sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_capacity for all the groups in a sched domain will be the
 * same unless there are asymmetries in the topology. If there are
 * asymmetries, the group having more cpu_capacity will pick up more load
 * compared to the group having less cpu_capacity.
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sg);

	do {
		int cpu, max_cpu = -1;

		sg->group_weight = cpumask_weight(sched_group_cpus(sg));

		if (!(sd->flags & SD_ASYM_PACKING))
			goto next;

		for_each_cpu(cpu, sched_group_cpus(sg)) {
			if (max_cpu < 0)
				max_cpu = cpu;
			else if (sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;

next:
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
}

/*
 * Initializers for sched domains.
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* Turn off idle balance on this domain: */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* Turn on idle balance on this domain: */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}
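
/*
 * Illustrative note (added): per the logic above, a domain keeps
 * SD_BALANCE_WAKE/SD_BALANCE_NEWIDLE only while sd->level <= request, so the
 * requested level caps how far up the hierarchy wake/newidle balancing stays
 * enabled. Hypothetically, booting with:
 *
 *	relax_domain_level=1
 *
 * on a machine whose levels are SMT (0), MC (1), DIE (2) would keep
 * idle/wake balancing within the SMT and MC domains and disable it above.
 */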

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu);
		/* Fall through */
	case sa_sd:
		free_percpu(d->sd);
		/* Fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map);
		/* Fall through */
	case sa_none:
		break;
	}
}

static enum s_alloc
__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
		*per_cpu_ptr(sdd->sds, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
}
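
/*
 * Illustrative note (added): the s_alloc enum doubles as a "how far did
 * allocation get" marker, and the fall-through switch in
 * __free_domain_allocs() unwinds exactly the stages that succeeded. A sketch
 * of the caller shape (how build_sched_domains() consumes it):
 */
#if 0
struct s_data d;
enum s_alloc alloc_state;

alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
if (alloc_state != sa_rootdomain)
	goto error;
/* ... build domains and groups ... */
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);	/* partial unwind */
#endif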

#ifdef CONFIG_NUMA
static int sched_domains_numa_levels;
enum numa_topology_type sched_numa_topology_type;
static int *sched_domains_numa_distance;
int sched_max_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
static int sched_domains_curr_level;
#endif

/*
 * SD_flags allowed in topology descriptions.
 *
 * These flags are purely descriptive of the topology and do not prescribe
 * behaviour. Behaviour is mapped onto the topology by the sd_init()
 * function below:
 *
 *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
 *   SD_SHARE_PKG_RESOURCES - describes shared caches
 *   SD_NUMA                - describes NUMA topologies
 *   SD_SHARE_POWERDOMAIN   - describes shared power domains
 *   SD_ASYM_CPUCAPACITY    - describes mixed-capacity topologies
 *
 * The odd one out, which besides describing the topology also prescribes
 * the desired behaviour that goes along with it:
 *
 *   SD_ASYM_PACKING        - describes SMT quirks
 */
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY |		\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA |			\
	 SD_ASYM_PACKING |		\
	 SD_ASYM_CPUCAPACITY |		\
	 SD_SHARE_POWERDOMAIN)
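
/*
 * Illustrative note (added): topology tables supply these flags via small
 * ->sd_flags() callbacks. For instance, the stock SMT/MC levels use helpers
 * along these lines (see cpu_smt_flags()/cpu_core_flags() in the topology
 * headers; the definitions there are authoritative):
 */
#if 0
static inline int example_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}

static inline int example_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif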

static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
	const struct cpumask *cpu_map,
	struct sched_domain *child, int cpu)
{
	struct sd_data *sdd = &tl->data;
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	int sd_id, sd_weight, sd_flags = 0;

#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
	sched_domains_curr_level = tl->numa_level;
#endif

	sd_weight = cpumask_weight(tl->mask(cpu));

	if (tl->sd_flags)
		sd_flags = (*tl->sd_flags)();
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
			"wrong sd_flags in topology description\n"))
		sd_flags &= ~TOPOLOGY_SD_FLAGS;

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 32,
		.imbalance_pct		= 125,

		.cache_nice_tries	= 0,
		.busy_idx		= 0,
		.idle_idx		= 0,
		.newidle_idx		= 0,
		.wake_idx		= 0,
		.forkexec_idx		= 0,

		.flags			= 1*SD_LOAD_BALANCE
					| 1*SD_BALANCE_NEWIDLE
					| 1*SD_BALANCE_EXEC
					| 1*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 1*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUCAPACITY
					| 0*SD_SHARE_PKG_RESOURCES
					| 0*SD_SERIALIZE
					| 0*SD_PREFER_SIBLING
					| 0*SD_NUMA
					| sd_flags
					,

		.last_balance		= jiffies,
		.balance_interval	= sd_weight,
		.smt_gain		= 0,
		.max_newidle_lb_cost	= 0,
		.next_decay_max_lb_cost	= jiffies,
		.child			= child,
#ifdef CONFIG_SCHED_DEBUG
		.name			= tl->name,
#endif
	};

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	sd_id = cpumask_first(sched_domain_span(sd));

	/*
	 * Convert topological properties into behaviour.
	 */

	if (sd->flags & SD_ASYM_CPUCAPACITY) {
		struct sched_domain *t = sd;

		for_each_lower_domain(t)
			t->flags |= SD_BALANCE_WAKE;
	}

	if (sd->flags & SD_SHARE_CPUCAPACITY) {
		sd->flags |= SD_PREFER_SIBLING;
		sd->imbalance_pct = 110;
		sd->smt_gain = 1178; /* ~15% */

	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->imbalance_pct = 117;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;

#ifdef CONFIG_NUMA
	} else if (sd->flags & SD_NUMA) {
		sd->cache_nice_tries = 2;
		sd->busy_idx = 3;
		sd->idle_idx = 2;

		sd->flags |= SD_SERIALIZE;
		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
			sd->flags &= ~(SD_BALANCE_EXEC |
				       SD_BALANCE_FORK |
				       SD_WAKE_AFFINE);
		}

#endif
	} else {
		sd->flags |= SD_PREFER_SIBLING;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;
		sd->idle_idx = 1;
	}

	/*
	 * For all levels sharing cache; connect a sched_domain_shared
	 * instance.
	 */
	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
		atomic_inc(&sd->shared->ref);
		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
	}

	sd->private = sdd;

	return sd;
}

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology =
	default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)

void set_sched_topology(struct sched_domain_topology_level *tl)
{
	if (WARN_ON_ONCE(sched_smp_initialized))
		return;

	sched_domain_topology = tl;
}
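
/*
 * Illustrative note (added): architectures with non-default topologies
 * replace the table above via set_sched_topology() before SMP bring-up.
 * A hypothetical arch table, reusing the masks and flag helpers already
 * referenced in default_topology[]:
 */
#if 0
static struct sched_domain_topology_level my_arch_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/* Early in arch init, before sched_smp_initialized is set: */
set_sched_topology(my_arch_topology);
#endif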

#ifdef CONFIG_NUMA

static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
	static bool done = false;
	int i, j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING "  ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i, j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}

/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
static void init_numa_topology_type(void)
{
	int a, b, c, n;

	n = sched_max_numa_distance;

	if (sched_domains_numa_levels <= 1) {
		sched_numa_topology_type = NUMA_DIRECT;
		return;
	}

	for_each_online_node(a) {
		for_each_online_node(b) {
			/* Find two nodes furthest removed from each other: */
			if (node_distance(a, b) < n)
				continue;

			/* Is there an intermediary node between a and b? */
			for_each_online_node(c) {
				if (node_distance(a, c) < n &&
				    node_distance(b, c) < n) {
					sched_numa_topology_type =
							NUMA_GLUELESS_MESH;
					return;
				}
			}

			sched_numa_topology_type = NUMA_BACKPLANE;
			return;
		}
	}
}

void sched_init_numa(void)
{
	int next_distance, curr_distance = node_distance(0, 0);
	struct sched_domain_topology_level *tl;
	int level = 0;
	int i, j, k;

	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
	if (!sched_domains_numa_distance)
		return;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
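
	/*
	 * Illustrative example (added): on a hypothetical 4-node machine with
	 * the distance table
	 *
	 *	10 20 30 30
	 *	20 10 30 30
	 *	30 30 10 20
	 *	30 30 20 10
	 *
	 * the unique non-identity distances are {20, 30}, so the loop below
	 * ends with level == 2 and sched_domains_numa_distance = {20, 30}.
	 */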

	next_distance = curr_distance;
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			for (k = 0; k < nr_node_ids; k++) {
				int distance = node_distance(i, k);

				if (distance > curr_distance &&
				    (distance < next_distance ||
				     next_distance == curr_distance))
					next_distance = distance;

				/*
				 * While symmetry is not a strong assumption,
				 * it would be nice to know about cases where
				 * node A is connected to B but B is not
				 * equally connected to A.
				 */
				if (sched_debug() && node_distance(k, i) != distance)
					sched_numa_warn("Node-distance not symmetric");

				if (sched_debug() && i && !find_numa_distance(distance))
					sched_numa_warn("Node-0 not representative");
			}
			if (next_distance != curr_distance) {
				sched_domains_numa_distance[level++] = next_distance;
				sched_domains_numa_levels = level;
				curr_distance = next_distance;
			} else break;
		}

		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
		if (!sched_debug())
			break;
	}

	if (!level)
		return;

	/*
	 * 'level' contains the number of unique distances, excluding the
	 * identity distance node_distance(i,i).
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If we fail to allocate memory for the sched_domains_numa_masks[][]
	 * array, it will contain fewer than 'level' members. This could be
	 * dangerous when we use it to iterate sched_domains_numa_masks[][]
	 * in other functions.
	 *
	 * We reset it to 'level' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * CPUs of nodes that are that many hops away from us.
	 */
	for (i = 0; i < level; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for_each_node(k) {
				if (node_distance(j, k) > sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	/* Compute the default topology size: */
	for (i = 0; sched_domain_topology[i].mask; i++);

	tl = kzalloc((i + level + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits...
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * ... and append 'j' levels of NUMA goodness:
	 */

	/* Compute the default topology size: */
	for (i = 0; sched_domain_topology[i].mask; i++)
		;

	tl = kzalloc((i + level + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * .. and append 'level' levels of NUMA goodness.
	 */
	for (j = 0; j < level; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.mask = sd_numa_mask,
			.sd_flags = cpu_numa_flags,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
			SD_INIT_NAME(NUMA)
		};
	}

	sched_domain_topology = tl;

	sched_domains_numa_levels = level;
	sched_max_numa_distance = sched_domains_numa_distance[level - 1];

	init_numa_topology_type();
}

void sched_domains_numa_masks_set(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

void sched_domains_numa_masks_clear(unsigned int cpu)
{
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}
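
/*
 * The two helpers above are meant to be driven from the CPU hotplug
 * path; a minimal sketch of the pairing (the callback names here are
 * illustrative, not a prescribed API):
 *
 *	online(cpu)  -> sched_domains_numa_masks_set(cpu);
 *	offline(cpu) -> sched_domains_numa_masks_clear(cpu);
 *
 * so that the per-level, per-node masks keep tracking which CPUs are
 * actually usable at each NUMA level.
 */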

#endif /* CONFIG_NUMA */

static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sds = alloc_percpu(struct sched_domain_shared *);
		if (!sdd->sds)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
		if (!sdd->sgc)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_domain_shared *sds;
			struct sched_group *sg;
			struct sched_group_capacity *sgc;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sds = kzalloc_node(sizeof(struct sched_domain_shared),
					GFP_KERNEL, cpu_to_node(j));
			if (!sds)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sds, j) = sds;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgc)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sgc, j) = sgc;
		}
	}

	return 0;
}

static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sds)
				kfree(*per_cpu_ptr(sdd->sds, j));
			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgc)
				kfree(*per_cpu_ptr(sdd->sgc, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sds);
		sdd->sds = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgc);
		sdd->sgc = NULL;
	}
}
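
/*
 * Note the asymmetry in the pair above: __sdt_alloc() can fail halfway
 * through and simply returns -ENOMEM, leaving a partially populated
 * sd_data behind. __sdt_free() is therefore written to be safe on such
 * partial state: each percpu pointer is checked before its per-CPU
 * objects are touched, and kfree()/free_percpu() both accept NULL, so
 * a single __sdt_free() call always unwinds cleanly.
 */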

struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int cpu)
{
	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);

	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;

		if (!cpumask_subset(sched_domain_span(child),
				    sched_domain_span(sd))) {
			pr_err("BUG: arch topology broken\n");
#ifdef CONFIG_SCHED_DEBUG
			pr_err("     the %s domain is not a subset of the %s domain\n",
					child->name, sd->name);
#endif
			/* Fix up: ensure @sd spans at least @child's CPUs. */
			cpumask_or(sched_domain_span(sd),
				   sched_domain_span(sd),
				   sched_domain_span(child));
		}
	}
	set_domain_attribute(sd, attr);

	return sd;
}
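
/*
 * Worked example of the fixup above (hypothetical broken firmware
 * tables): if a child MC domain spans CPUs 0-3 but its parent DIE
 * domain claims only CPUs 0-1, the cpumask_or() widens the parent's
 * span to 0-3. The child is then a subset again, so walking up via
 * sd = sd->parent stays sane, at the cost of a possibly nonsensical
 * topology.
 */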

/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs.
 */
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for CPUs specified by the cpu_map: */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for_each_sd_topology(tl) {
			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}
	}

	/* Build the groups for the domains: */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU capacity for physical packages and nodes: */
	for (i = nr_cpumask_bits - 1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_capacity(i, sd);
		}
	}

	/* Attach the domains: */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);

		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);

		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	if (rq && sched_debug_enabled) {
		pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
	}

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);

	return ret;
}

/* Current sched domains: */
static cpumask_var_t *doms_cur;

/* Number of sched domains in 'doms_cur': */
static int ndoms_cur;

/* Attributes of custom domains in 'doms_cur': */
static struct sched_domain_attr *dattr_cur;

/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
cpumask_var_t fallback_doms;
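
/*
 * Illustration of the bookkeeping above (hypothetical 8-CPU system
 * partitioned by exclusive cpusets): ndoms_cur == 2 with
 * doms_cur[0] == CPUs 0-3 and doms_cur[1] == CPUs 4-7 describes two
 * disjoint balancing partitions; dattr_cur, when not NULL, holds one
 * attribute entry per partition. A CPU that ends up in neither mask
 * is not load balanced at all.
 */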

/*
 * arch_update_cpu_topology() lets virtualized architectures update the
 * CPU core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}

cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;

	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}

	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;

	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 * For now this just excludes isolated CPUs, but could be used to
 * exclude other special cases in the future.
 */
int init_sched_domains(const struct cpumask *cpu_map)
{
	int err;

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}

/*
 * Detach sched domains from a group of CPUs specified in cpu_map.
 * These CPUs will now be attached to the NULL domain.
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}
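
/*
 * A minimal usage sketch tying the helpers above together ('my_doms'
 * and the two masks are purely illustrative names):
 *
 *	cpumask_var_t *my_doms = alloc_sched_domains(2);
 *
 *	if (my_doms) {
 *		cpumask_copy(my_doms[0], some_mask);
 *		cpumask_copy(my_doms[1], other_mask);
 *	}
 *	partition_sched_domains(my_doms ? 2 : 1, my_doms, NULL);
 *
 * Passing NULL (with ndoms == 1) falls back to a single rebuilt
 * partition. Ownership of 'my_doms' passes to
 * partition_sched_domains(), which will free_sched_domains() it once
 * it becomes the old partitioning.
 */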

/* Handle NULL as "default": */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* Fast path: */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;

	return !memcmp(cur ? (cur + idx_cur) : &tmp,
			new ? (new + idx_new) : &tmp,
			sizeof(struct sched_domain_attr));
}
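
/*
 * Example of the NULL-as-default semantics above: dattrs_equal(NULL, i,
 * new, j) compares new[j] against a freshly initialized SD_ATTR_INIT,
 * so a partition whose attributes were never set and one explicitly set
 * to the defaults compare as equal, and partition_sched_domains() below
 * will not rebuild such a domain needlessly.
 */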

/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks must not intersect (overlap); we set up one sched domain
 * per mask. CPUs not covered by any of the cpumasks will not be load
 * balanced. If the same cpumask appears both in the current 'doms_cur'
 * partitioning and in the new 'doms_new', that domain is left as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; this also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with the single
 * 'fallback_doms' partition built from cpu_active_mask minus the
 * isolated CPUs. ndoms_new == 0 is a special case for destroying
 * existing domains, and it will not create the default domain.
 *
 * Call with hotplug lock held.
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* Always unregister in case we don't destroy any domains: */
	unregister_sched_domain_sysctl();

	/* Let the architecture update CPU core mappings: */
	new_topology = arch_update_cpu_topology();

	n = doms_new ? ndoms_new : 0;

	/* Destroy deleted domains: */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* No match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	n = ndoms_cur;
	if (doms_new == NULL) {
		n = 0;
		doms_new = &fallback_doms;
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
		WARN_ON_ONCE(dattr_new);
	}

	/* Build new domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* No match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains: */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);

	kfree(dattr_cur);
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}
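
/*
 * Walk-through of the matching logic above (hypothetical masks): with
 * doms_cur == { A, B } and doms_new == { B, C }, the first loop
 * destroys the domains in A (no match in doms_new) and match1 skips B;
 * the second loop leaves B untouched (match2) and builds C. If
 * arch_update_cpu_topology() reported a change, new_topology suppresses
 * both match loops, so everything is torn down and rebuilt from
 * scratch.
 */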