// SPDX-License-Identifier: GPL-2.0
/*
 * Scheduler topology setup/handling methods
 */

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2;

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
{
	sched_debug_verbose = true;

	return 0;
}
early_param("sched_verbose", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_verbose;
}

#define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
const struct sd_flag_debug sd_flag_debug[] = {
#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	unsigned long flags = sd->flags;
	unsigned int idx;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		unsigned int flag = BIT(idx);
		unsigned int meta_flags = sd_flag_debug[idx].meta_flags;

		if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
		    !(sd->child->flags & flag))
			printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
			       sd_flag_debug[idx].name);

		if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
		    !(sd->parent->flags & flag))
			printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
			       sd_flag_debug[idx].name);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (cpumask_empty(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
		       group->sgc->id,
		       cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
			       cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_verbose)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_verbose 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

/* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
static const unsigned int SD_DEGENERATE_GROUPS_MASK =
#include <linux/sched/sd_flags.h>
0;
#undef SD_FLAG

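/*
 * A sched_domain is roughly "degenerate" when it cannot influence
 * scheduling decisions: it spans a single CPU, or none of its remaining
 * flags can do useful work with the group layout it has (see
 * SD_DEGENERATE_GROUPS_MASK above). Such domains are trimmed from the
 * hierarchy in cpu_attach_domain().
 */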
static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
	    (sd->groups != sd->groups->next))
		return 0;

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next)
		pflags &= ~SD_DEGENERATE_GROUPS_MASK;

	if (~cflags & pflags)
		return 0;

	return 1;
}

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
DEFINE_STATIC_KEY_FALSE(sched_energy_present);
static unsigned int sysctl_sched_energy_aware = 1;
DEFINE_MUTEX(sched_energy_mutex);
bool sched_energy_update;

void rebuild_sched_domains_energy(void)
{
	mutex_lock(&sched_energy_mutex);
	sched_energy_update = true;
	rebuild_sched_domains();
	sched_energy_update = false;
	mutex_unlock(&sched_energy_mutex);
}

#ifdef CONFIG_PROC_SYSCTL
static int sched_energy_aware_handler(struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, state;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		state = static_branch_unlikely(&sched_energy_present);
		if (state != sysctl_sched_energy_aware)
			rebuild_sched_domains_energy();
	}

	return ret;
}

static struct ctl_table sched_energy_aware_sysctls[] = {
	{
		.procname	= "sched_energy_aware",
		.data		= &sysctl_sched_energy_aware,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_energy_aware_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static int __init sched_energy_aware_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_energy_aware_sysctls);
	return 0;
}

late_initcall(sched_energy_aware_sysctl_init);
#endif

static void free_pd(struct perf_domain *pd)
{
	struct perf_domain *tmp;

	while (pd) {
		tmp = pd->next;
		kfree(pd);
		pd = tmp;
	}
}

static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
{
	while (pd) {
		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
			return pd;
		pd = pd->next;
	}

	return NULL;
}

static struct perf_domain *pd_init(int cpu)
{
	struct em_perf_domain *obj = em_cpu_get(cpu);
	struct perf_domain *pd;

	if (!obj) {
		if (sched_debug())
			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
		return NULL;
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return NULL;
	pd->em_pd = obj;

	return pd;
}

static void perf_domain_debug(const struct cpumask *cpu_map,
			      struct perf_domain *pd)
{
	if (!sched_debug() || !pd)
		return;

	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));

	while (pd) {
		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
				cpumask_first(perf_domain_span(pd)),
				cpumask_pr_args(perf_domain_span(pd)),
				em_pd_nr_perf_states(pd->em_pd));
		pd = pd->next;
	}

	printk(KERN_CONT "\n");
}

static void destroy_perf_domain_rcu(struct rcu_head *rp)
{
	struct perf_domain *pd;

	pd = container_of(rp, struct perf_domain, rcu);
	free_pd(pd);
}

static void sched_energy_set(bool has_eas)
{
	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: stopping EAS\n", __func__);
		static_branch_disable_cpuslocked(&sched_energy_present);
	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: starting EAS\n", __func__);
		static_branch_enable_cpuslocked(&sched_energy_present);
	}
}

/*
 * EAS can be used on a root domain if it meets all the following conditions:
 *    1. an Energy Model (EM) is available;
 *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
 *    3. no SMT is detected;
 *    4. the EM complexity is low enough to keep scheduling overheads low;
 *    5. schedutil is driving the frequency of all CPUs of the rd;
 *    6. frequency invariance support is present.
 *
 * The complexity of the Energy Model is defined as:
 *
 *              C = nr_pd * (nr_cpus + nr_ps)
 *
 * with parameters defined as:
 *  - nr_pd:    the number of performance domains
 *  - nr_cpus:  the number of CPUs
 *  - nr_ps:    the sum of the number of performance states of all performance
 *              domains (for example, on a system with 2 performance domains,
 *              with 10 performance states each, nr_ps = 2 * 10 = 20).
 *
 * It is generally not a good idea to use such a model in the wake-up path on
 * very complex platforms because of the associated scheduling overheads. The
 * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
 * with per-CPU DVFS and less than 8 performance states each, for example.
 */
#define EM_MAX_COMPLEXITY 2048
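/*
 * A worked example for the limit above: 16 single-CPU performance domains
 * with 7 performance states each give C = 16 * (16 + 112) = 2048, which is
 * still accepted by the check in build_perf_domains() below; with 8 states
 * each, C = 16 * (16 + 128) = 2304 and EAS is not enabled.
 */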
extern struct cpufreq_governor schedutil_gov;
static bool build_perf_domains(const struct cpumask *cpu_map)
{
	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
	struct perf_domain *pd = NULL, *tmp;
	int cpu = cpumask_first(cpu_map);
	struct root_domain *rd = cpu_rq(cpu)->rd;
	struct cpufreq_policy *policy;
	struct cpufreq_governor *gov;

	if (!sysctl_sched_energy_aware)
		goto free;

	/* EAS is enabled for asymmetric CPU capacity topologies. */
	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
		if (sched_debug()) {
			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
					cpumask_pr_args(cpu_map));
		}
		goto free;
	}

	/* EAS definitely does *not* handle SMT */
	if (sched_smt_active()) {
		pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
			cpumask_pr_args(cpu_map));
		goto free;
	}

	if (!arch_scale_freq_invariant()) {
		if (sched_debug()) {
			pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported",
				cpumask_pr_args(cpu_map));
		}
		goto free;
	}

	for_each_cpu(i, cpu_map) {
		/* Skip already covered CPUs. */
		if (find_pd(pd, i))
			continue;

		/* Do not attempt EAS if schedutil is not being used. */
		policy = cpufreq_cpu_get(i);
		if (!policy)
			goto free;
		gov = policy->governor;
		cpufreq_cpu_put(policy);
		if (gov != &schedutil_gov) {
			if (rd->pd)
				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
						cpumask_pr_args(cpu_map));
			goto free;
		}

		/* Create the new pd and add it to the local list. */
		tmp = pd_init(i);
		if (!tmp)
			goto free;
		tmp->next = pd;
		pd = tmp;

		/*
		 * Count performance domains and performance states for the
		 * complexity check.
		 */
		nr_pd++;
		nr_ps += em_pd_nr_perf_states(pd->em_pd);
	}

	/* Bail out if the Energy Model complexity is too high. */
	if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
						cpumask_pr_args(cpu_map));
		goto free;
	}

	perf_domain_debug(cpu_map, pd);

	/* Attach the new list of performance domains to the root domain. */
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, pd);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return !!pd;

free:
	free_pd(pd);
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, NULL);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return false;
}
#else
static void free_pd(struct perf_domain *pd) { }
#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	free_pd(rd->pd);
	kfree(rd);
}

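/*
 * Attach @rq to root domain @rd. The previous root domain, if any, is
 * detached here and freed via RCU once its last reference is dropped.
 */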
void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_rq_unlock_irqrestore(rq, flags);

	if (old_rd)
		call_rcu(&old_rd->rcu, free_rootdomain);
}

void sched_get_rd(struct root_domain *rd)
{
	atomic_inc(&rd->refcount);
}

void sched_put_rd(struct root_domain *rd)
{
	if (!atomic_dec_and_test(&rd->refcount))
		return;

	call_rcu(&rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
#endif

	rd->visit_gen = 0;
	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		if (atomic_dec_and_test(&sg->ref))
			kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * A normal sched domain may have multiple group references; an
	 * overlapping domain, having private groups, has only one. Iterate,
	 * dropping group/capacity references, freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (the Last Level Cache domain); this allows us
 * to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in the
 * cpumask of the domain); this allows us to quickly tell if two CPUs are
 * in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);

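/*
 * Cache, per CPU, the domain pointers used by the fast paths: the LLC
 * domain (plus its size, id and shared state), the lowest SD_NUMA level,
 * the highest SD_ASYM_PACKING level and the lowest
 * SD_ASYM_CPUCAPACITY_FULL level.
 */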
static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd) {
			struct sched_group *sg = sd->groups;

			/*
			 * sched groups hold the flags of the child sched
			 * domain for convenience. Clear such flags since
			 * the child is being destroyed.
			 */
			do {
				sg->flags = 0;
			} while (sg != sd->groups);

			sd->child = NULL;
		}
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

struct s_data {
	struct sched_domain * __percpu *sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask contains all those CPUs that could actually end up at
 * this group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}


/*
 * NUMA topology (first read the regular topology blurb below)
 *
 * Given a node-distance table, for example:
 *
 *   node   0   1   2   3
 *     0:  10  20  30  20
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  20  30  20  10
 *
 * which represents a 4 node ring topology like:
 *
 *   0 ----- 1
 *   |       |
 *   |       |
 *   |       |
 *   3 ----- 2
 *
 * We want to construct domains and groups to represent this. The way we go
 * about doing this is to build the domains on 'hops'. For each NUMA level we
 * construct the mask of all nodes reachable in @level hops.
 *
 * For the above NUMA topology that gives 3 levels:
 *
 * NUMA-2	0-3		0-3		0-3		0-3
 *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
 *
 * NUMA-1	0-1,3		0-2		1-3		0,2-3
 *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
 *
 * NUMA-0	0		1		2		3
 *
 *
 * As can be seen; things don't nicely line up as with the regular topology.
 * When we iterate a domain in child domain chunks some nodes can be
 * represented multiple times -- hence the "overlap" naming for this part of
 * the topology.
 *
 * In order to minimize this overlap, we only build enough groups to cover the
 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
 *
 * Because:
 *
 *  - the first group of each domain is its child domain; this
 *    gets us the first 0-1,3
 *  - the only uncovered node is 2, whose child domain is 1-3.
 *
 * However, because of the overlap, computing a unique CPU for each group is
 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
 * end up at those groups (they would end up in group: 0-1,3).
 *
 * To correct this we have to introduce the group balance mask. This mask
 * will contain those CPUs in the group that can reach this group given the
 * (child) domain tree.
 *
 * With this we can once again compute balance_cpu and sched_group_capacity
 * relations.
 *
 * XXX include words on how balance_cpu is unique and therefore can be
 * used for sched_group_capacity links.
 *
 *
 * Another 'interesting' topology is:
 *
 *   node   0   1   2   3
 *     0:  10  20  20  30
 *     1:  20  10  20  20
 *     2:  20  20  10  20
 *     3:  30  20  20  10
 *
 * Which looks a little like:
 *
 *   0 ----- 1
 *   |     / |
 *   |   /   |
 *   | /     |
 *   2 ----- 3
 *
 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
 * are not.
 *
 * This leads to a few particularly weird cases where the number of
 * sched_domains is not the same for each CPU. Consider:
 *
 * NUMA-2	0-3						0-3
 *  groups:	{0-2},{1-3}					{1-3},{0-2}
 *
 * NUMA-1	0-2		0-3		0-3		1-3
 *
 * NUMA-0	0		1		2		3
 *
 */


/*
 * Build the balance mask; it contains only those CPUs that can arrive at this
 * group and should be considered to continue balancing.
 *
 * We do this during the group creation pass, therefore the group information
 * isn't complete yet, however since each group represents a (child) domain we
 * can fully construct this using the sched_domain bits (which are already
 * complete).
 */
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
	const struct cpumask *sg_span = sched_group_span(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(mask);

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, mask);
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(mask));
}

/*
 * XXX: This creates per-node group entries; since the load-balancer will
 * immediately access remote memory to construct this group's load-balance
 * statistics having the groups node local is of dubious benefit.
 */
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			  GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_span(sg);
	if (sd->child) {
		cpumask_copy(sg_span, sched_domain_span(sd->child));
		sg->flags = sd->child->flags;
	} else {
		cpumask_copy(sg_span, sched_domain_span(sd));
	}

	atomic_inc(&sg->ref);
	return sg;
}

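/*
 * Hook @sg up to the sched_group_capacity of its balance CPU (the first
 * CPU of the balance mask built above) and give that capacity a sane
 * initial value; the real group capacities are filled in later.
 */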
static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg)
{
	struct cpumask *mask = sched_domains_tmpmask2;
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;
	int cpu;

	build_balance_mask(sd, sg, mask);
	cpu = cpumask_first(mask);

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		cpumask_copy(group_balance_mask(sg), mask);
	else
		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
	sg_span = sched_group_span(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}

static struct sched_domain *
find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
{
	/*
	 * The proper descendant would be the one whose child won't span out
	 * of sd
	 */
	while (sibling->child &&
	       !cpumask_subset(sched_domain_span(sibling->child),
			       sched_domain_span(sd)))
		sibling = sibling->child;

	/*
	 * As we are referencing sgc across different topology levels, we need
	 * to go down to skip those sched_domains which don't contribute to
	 * scheduling because they will be degenerated in cpu_attach_domain().
	 */
	while (sibling->child &&
	       cpumask_equal(sched_domain_span(sibling->child),
			     sched_domain_span(sibling)))
		sibling = sibling->child;

	return sibling;
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		/*
		 * Usually we build sched_group by sibling's child sched_domain.
		 * But for machines whose NUMA diameter is 3 or above, we move
		 * to build sched_group by sibling's proper descendant's child
		 * domain because sibling's child sched_domain will span out of
		 * the sched_domain being built as below.
		 *
		 * Smallest diameter=3 topology is:
		 *
		 *   node   0        1        2        3
		 *     0:  10       20       30       40
		 *     1:  20       10       20       30
		 *     2:  30       20       10       20
		 *     3:  40       30       20       10
		 *
		 *   0 --- 1 --- 2 --- 3
		 *
		 * NUMA-3       0-3             N/A             N/A             0-3
		 *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
		 *
		 * NUMA-2       0-2             0-3             0-3             1-3
		 *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
		 *
		 * NUMA-1       0-1             0-2             1-3             2-3
		 *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
		 *
		 * NUMA-0       0               1               2               3
		 *
		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
		 * group span isn't a subset of the domain span.
		 */
		if (sibling->child &&
		    !cpumask_subset(sched_domain_span(sibling->child), span))
			sibling = find_descended_sibling(sd, sibling);

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sibling, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}


/*
 * Package topology (also see the load-balance blurb in fair.c)
 *
 * The scheduler builds a tree structure to represent a number of important
 * topology features. By default (default_topology[]) these include:
 *
 *  - Simultaneous multithreading (SMT)
 *  - Multi-Core Cache (MC)
 *  - Package (DIE)
 *
 * Where the last one more or less denotes everything up to a NUMA node.
 *
 * The tree consists of 3 primary data structures:
 *
 *	sched_domain -> sched_group -> sched_group_capacity
 *	    ^ ^             ^ ^
 *          `-'             `-'
 *
 * The sched_domains are per-CPU and have a two way link (parent & child) and
 * denote the ever growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's, each
 * denoting the domains of the level below (or individual CPUs in case of the
 * first domain level). The sched_group linked by a sched_domain includes the
 * CPU of that sched_domain [*].
 *
 * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * DIE  [                             ]
 * MC   [             ] [             ]
 * SMT  [     ] [     ] [     ] [     ]
 *
 *  - or -
 *
 * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
 * MC   0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
 * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * One way to think about it is: sched_domain moves you up and down among these
 * topology levels, while sched_group moves you sideways through it, at child
 * domain granularity.
 *
 * sched_group_capacity ensures each unique sched_group has shared storage.
 *
 * There are two related construction problems, both of which require a CPU
 * that uniquely identifies each group (for a given domain):
 *
 *  - The first is the balance_cpu (see should_we_balance() and the
 *    load-balance blurb in fair.c); for each group we only want 1 CPU to
 *    continue balancing at a higher domain.
 *
 *  - The second is the sched_group_capacity; we want all identical groups
 *    to share a single sched_group_capacity.
 *
 * Since these topologies are exclusive by construction, i.e. it's impossible
 * for an SMT thread to belong to multiple cores, and for cores to be part of
 * multiple caches, there is a very clear and unique location for each CPU
 * in the hierarchy.
 *
 * Therefore computing a unique CPU for each group is trivial (the iteration
 * mask is redundant and set to all 1s; all CPUs in a group will end up at
 * _that_ group), we can simply pick the first CPU in each group.
 *
 *
 * [*] in other words, the first group of each domain is its child domain.
 */

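/*
 * Return the (shared) sched_group and sched_group_capacity that @cpu's
 * sched_domain at this topology level points at, initializing its span,
 * balance mask, flags and capacity the first time the group is visited.
 */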
There is a very clear and unique location 116735a566e6SPeter Zijlstra * for each CPU in the hierarchy. 116835a566e6SPeter Zijlstra * 116935a566e6SPeter Zijlstra * Therefore computing a unique CPU for each group is trivial (the iteration 117035a566e6SPeter Zijlstra * mask is redundant and set all 1s; all CPUs in a group will end up at _that_ 117135a566e6SPeter Zijlstra * group), we can simply pick the first CPU in each group. 117235a566e6SPeter Zijlstra * 117335a566e6SPeter Zijlstra * 117435a566e6SPeter Zijlstra * [*] in other words, the first group of each domain is its child domain. 117535a566e6SPeter Zijlstra */ 117635a566e6SPeter Zijlstra 11770c0e776aSPeter Zijlstra static struct sched_group *get_group(int cpu, struct sd_data *sdd) 1178f2cb1360SIngo Molnar { 1179f2cb1360SIngo Molnar struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 1180f2cb1360SIngo Molnar struct sched_domain *child = sd->child; 11810c0e776aSPeter Zijlstra struct sched_group *sg; 118267d4f6ffSValentin Schneider bool already_visited; 1183f2cb1360SIngo Molnar 1184f2cb1360SIngo Molnar if (child) 1185f2cb1360SIngo Molnar cpu = cpumask_first(sched_domain_span(child)); 1186f2cb1360SIngo Molnar 11870c0e776aSPeter Zijlstra sg = *per_cpu_ptr(sdd->sg, cpu); 11880c0e776aSPeter Zijlstra sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); 1189f2cb1360SIngo Molnar 119067d4f6ffSValentin Schneider /* Increase refcounts for claim_allocations: */ 119167d4f6ffSValentin Schneider already_visited = atomic_inc_return(&sg->ref) > 1; 119267d4f6ffSValentin Schneider /* sgc visits should follow a similar trend as sg */ 119367d4f6ffSValentin Schneider WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); 119467d4f6ffSValentin Schneider 119567d4f6ffSValentin Schneider /* If we have already visited that group, it's already initialized. */ 119667d4f6ffSValentin Schneider if (already_visited) 119767d4f6ffSValentin Schneider return sg; 11980c0e776aSPeter Zijlstra 11990c0e776aSPeter Zijlstra if (child) { 1200ae4df9d6SPeter Zijlstra cpumask_copy(sched_group_span(sg), sched_domain_span(child)); 1201ae4df9d6SPeter Zijlstra cpumask_copy(group_balance_mask(sg), sched_group_span(sg)); 120216d364baSRicardo Neri sg->flags = child->flags; 12030c0e776aSPeter Zijlstra } else { 1204ae4df9d6SPeter Zijlstra cpumask_set_cpu(cpu, sched_group_span(sg)); 1205e5c14b1fSPeter Zijlstra cpumask_set_cpu(cpu, group_balance_mask(sg)); 1206f2cb1360SIngo Molnar } 1207f2cb1360SIngo Molnar 1208ae4df9d6SPeter Zijlstra sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); 12090c0e776aSPeter Zijlstra sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; 1210e3d6d0cbSMorten Rasmussen sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; 12110c0e776aSPeter Zijlstra 12120c0e776aSPeter Zijlstra return sg; 1213f2cb1360SIngo Molnar } 1214f2cb1360SIngo Molnar 1215f2cb1360SIngo Molnar /* 1216f2cb1360SIngo Molnar * build_sched_groups will build a circular linked list of the groups 1217d8743230SValentin Schneider * covered by the given span, will set each group's ->cpumask correctly, 1218d8743230SValentin Schneider * and will initialize their ->sgc. 
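 *
 * For example, for the MC domain of CPU0 in the 8-CPU part sketched further
 * up, this yields one group per child SMT domain, {0-1} and {2-3}, linked
 * into the ring {0-1} -> {2-3} -> {0-1}, with sd->groups pointing at the
 * group that contains CPU0.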
1219f2cb1360SIngo Molnar * 1220f2cb1360SIngo Molnar * Assumes the sched_domain tree is fully constructed 1221f2cb1360SIngo Molnar */ 1222f2cb1360SIngo Molnar static int 1223f2cb1360SIngo Molnar build_sched_groups(struct sched_domain *sd, int cpu) 1224f2cb1360SIngo Molnar { 1225f2cb1360SIngo Molnar struct sched_group *first = NULL, *last = NULL; 1226f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 1227f2cb1360SIngo Molnar const struct cpumask *span = sched_domain_span(sd); 1228f2cb1360SIngo Molnar struct cpumask *covered; 1229f2cb1360SIngo Molnar int i; 1230f2cb1360SIngo Molnar 1231f2cb1360SIngo Molnar lockdep_assert_held(&sched_domains_mutex); 1232f2cb1360SIngo Molnar covered = sched_domains_tmpmask; 1233f2cb1360SIngo Molnar 1234f2cb1360SIngo Molnar cpumask_clear(covered); 1235f2cb1360SIngo Molnar 12360c0e776aSPeter Zijlstra for_each_cpu_wrap(i, span, cpu) { 1237f2cb1360SIngo Molnar struct sched_group *sg; 1238f2cb1360SIngo Molnar 1239f2cb1360SIngo Molnar if (cpumask_test_cpu(i, covered)) 1240f2cb1360SIngo Molnar continue; 1241f2cb1360SIngo Molnar 12420c0e776aSPeter Zijlstra sg = get_group(i, sdd); 1243f2cb1360SIngo Molnar 1244ae4df9d6SPeter Zijlstra cpumask_or(covered, covered, sched_group_span(sg)); 1245f2cb1360SIngo Molnar 1246f2cb1360SIngo Molnar if (!first) 1247f2cb1360SIngo Molnar first = sg; 1248f2cb1360SIngo Molnar if (last) 1249f2cb1360SIngo Molnar last->next = sg; 1250f2cb1360SIngo Molnar last = sg; 1251f2cb1360SIngo Molnar } 1252f2cb1360SIngo Molnar last->next = first; 12530c0e776aSPeter Zijlstra sd->groups = first; 1254f2cb1360SIngo Molnar 1255f2cb1360SIngo Molnar return 0; 1256f2cb1360SIngo Molnar } 1257f2cb1360SIngo Molnar 1258f2cb1360SIngo Molnar /* 1259f2cb1360SIngo Molnar * Initialize sched groups cpu_capacity. 1260f2cb1360SIngo Molnar * 1261f2cb1360SIngo Molnar * cpu_capacity indicates the capacity of a sched group, which is used while 1262f2cb1360SIngo Molnar * distributing the load between different sched groups in a sched domain. 1263f2cb1360SIngo Molnar * Typically cpu_capacity for all the groups in a sched domain will be the same 1264f2cb1360SIngo Molnar * unless there are asymmetries in the topology. If there are asymmetries, 1265f2cb1360SIngo Molnar * a group having more cpu_capacity will pick up more load compared to the 1266f2cb1360SIngo Molnar * group having less cpu_capacity.
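 *
 * For example (illustrative numbers only), on a big.LITTLE style system
 * where arch_scale_cpu_capacity() reports 1024 for big CPUs and 512 for
 * little CPUs, a 4-CPU group of big CPUs ends up with roughly twice the
 * cpu_capacity of a 4-CPU group of little CPUs and is therefore given
 * proportionally more load.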
1267f2cb1360SIngo Molnar */ 1268f2cb1360SIngo Molnar static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 1269f2cb1360SIngo Molnar { 1270f2cb1360SIngo Molnar struct sched_group *sg = sd->groups; 1271f2cb1360SIngo Molnar 1272f2cb1360SIngo Molnar WARN_ON(!sg); 1273f2cb1360SIngo Molnar 1274f2cb1360SIngo Molnar do { 1275f2cb1360SIngo Molnar int cpu, max_cpu = -1; 1276f2cb1360SIngo Molnar 1277ae4df9d6SPeter Zijlstra sg->group_weight = cpumask_weight(sched_group_span(sg)); 1278f2cb1360SIngo Molnar 1279f2cb1360SIngo Molnar if (!(sd->flags & SD_ASYM_PACKING)) 1280f2cb1360SIngo Molnar goto next; 1281f2cb1360SIngo Molnar 1282ae4df9d6SPeter Zijlstra for_each_cpu(cpu, sched_group_span(sg)) { 1283f2cb1360SIngo Molnar if (max_cpu < 0) 1284f2cb1360SIngo Molnar max_cpu = cpu; 1285f2cb1360SIngo Molnar else if (sched_asym_prefer(cpu, max_cpu)) 1286f2cb1360SIngo Molnar max_cpu = cpu; 1287f2cb1360SIngo Molnar } 1288f2cb1360SIngo Molnar sg->asym_prefer_cpu = max_cpu; 1289f2cb1360SIngo Molnar 1290f2cb1360SIngo Molnar next: 1291f2cb1360SIngo Molnar sg = sg->next; 1292f2cb1360SIngo Molnar } while (sg != sd->groups); 1293f2cb1360SIngo Molnar 1294f2cb1360SIngo Molnar if (cpu != group_balance_cpu(sg)) 1295f2cb1360SIngo Molnar return; 1296f2cb1360SIngo Molnar 1297f2cb1360SIngo Molnar update_group_capacity(sd, cpu); 1298f2cb1360SIngo Molnar } 1299f2cb1360SIngo Molnar 1300f2cb1360SIngo Molnar /* 1301c744dc4aSBeata Michalska * Asymmetric CPU capacity bits 1302c744dc4aSBeata Michalska */ 1303c744dc4aSBeata Michalska struct asym_cap_data { 1304c744dc4aSBeata Michalska struct list_head link; 1305c744dc4aSBeata Michalska unsigned long capacity; 1306c744dc4aSBeata Michalska unsigned long cpus[]; 1307c744dc4aSBeata Michalska }; 1308c744dc4aSBeata Michalska 1309c744dc4aSBeata Michalska /* 1310c744dc4aSBeata Michalska * Set of available CPUs grouped by their corresponding capacities 1311c744dc4aSBeata Michalska * Each list entry contains a CPU mask reflecting CPUs that share the same 1312c744dc4aSBeata Michalska * capacity. 1313c744dc4aSBeata Michalska * The lifespan of data is unlimited. 1314c744dc4aSBeata Michalska */ 1315c744dc4aSBeata Michalska static LIST_HEAD(asym_cap_list); 1316c744dc4aSBeata Michalska 1317c744dc4aSBeata Michalska #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus) 1318c744dc4aSBeata Michalska 1319c744dc4aSBeata Michalska /* 1320c744dc4aSBeata Michalska * Verify whether there is any CPU capacity asymmetry in a given sched domain. 1321c744dc4aSBeata Michalska * Provides sd_flags reflecting the asymmetry scope. 1322c744dc4aSBeata Michalska */ 1323c744dc4aSBeata Michalska static inline int 1324c744dc4aSBeata Michalska asym_cpu_capacity_classify(const struct cpumask *sd_span, 1325c744dc4aSBeata Michalska const struct cpumask *cpu_map) 1326c744dc4aSBeata Michalska { 1327c744dc4aSBeata Michalska struct asym_cap_data *entry; 1328c744dc4aSBeata Michalska int count = 0, miss = 0; 1329c744dc4aSBeata Michalska 1330c744dc4aSBeata Michalska /* 1331c744dc4aSBeata Michalska * Count how many unique CPU capacities this domain spans across 1332c744dc4aSBeata Michalska * (compare sched_domain CPUs mask with ones representing available 1333c744dc4aSBeata Michalska * CPUs capacities). Take into account CPUs that might be offline: 1334c744dc4aSBeata Michalska * skip those. 
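 *
 * For instance, with three distinct capacity values present in cpu_map:
 * a domain spanning CPUs of all three values is flagged
 * SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL (count == 3, miss == 0),
 * a domain spanning only two of them gets SD_ASYM_CPUCAPACITY alone
 * (count == 2, miss == 1), and a domain covering a single capacity value
 * gets no flag at all (count == 1).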
1335c744dc4aSBeata Michalska */ 1336c744dc4aSBeata Michalska list_for_each_entry(entry, &asym_cap_list, link) { 1337c744dc4aSBeata Michalska if (cpumask_intersects(sd_span, cpu_capacity_span(entry))) 1338c744dc4aSBeata Michalska ++count; 1339c744dc4aSBeata Michalska else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry))) 1340c744dc4aSBeata Michalska ++miss; 1341c744dc4aSBeata Michalska } 1342c744dc4aSBeata Michalska 1343c744dc4aSBeata Michalska WARN_ON_ONCE(!count && !list_empty(&asym_cap_list)); 1344c744dc4aSBeata Michalska 1345c744dc4aSBeata Michalska /* No asymmetry detected */ 1346c744dc4aSBeata Michalska if (count < 2) 1347c744dc4aSBeata Michalska return 0; 1348c744dc4aSBeata Michalska /* Some of the available CPU capacity values have not been detected */ 1349c744dc4aSBeata Michalska if (miss) 1350c744dc4aSBeata Michalska return SD_ASYM_CPUCAPACITY; 1351c744dc4aSBeata Michalska 1352c744dc4aSBeata Michalska /* Full asymmetry */ 1353c744dc4aSBeata Michalska return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL; 1354c744dc4aSBeata Michalska 1355c744dc4aSBeata Michalska } 1356c744dc4aSBeata Michalska 1357c744dc4aSBeata Michalska static inline void asym_cpu_capacity_update_data(int cpu) 1358c744dc4aSBeata Michalska { 1359c744dc4aSBeata Michalska unsigned long capacity = arch_scale_cpu_capacity(cpu); 1360c744dc4aSBeata Michalska struct asym_cap_data *entry = NULL; 1361c744dc4aSBeata Michalska 1362c744dc4aSBeata Michalska list_for_each_entry(entry, &asym_cap_list, link) { 1363c744dc4aSBeata Michalska if (capacity == entry->capacity) 1364c744dc4aSBeata Michalska goto done; 1365c744dc4aSBeata Michalska } 1366c744dc4aSBeata Michalska 1367c744dc4aSBeata Michalska entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL); 1368c744dc4aSBeata Michalska if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n")) 1369c744dc4aSBeata Michalska return; 1370c744dc4aSBeata Michalska entry->capacity = capacity; 1371c744dc4aSBeata Michalska list_add(&entry->link, &asym_cap_list); 1372c744dc4aSBeata Michalska done: 1373c744dc4aSBeata Michalska __cpumask_set_cpu(cpu, cpu_capacity_span(entry)); 1374c744dc4aSBeata Michalska } 1375c744dc4aSBeata Michalska 1376c744dc4aSBeata Michalska /* 1377c744dc4aSBeata Michalska * Build-up/update list of CPUs grouped by their capacities 1378c744dc4aSBeata Michalska * An update requires explicit request to rebuild sched domains 1379c744dc4aSBeata Michalska * with state indicating CPU topology changes. 
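 * Entries whose CPU mask turns out empty after a rescan are freed, and if
 * only one capacity value remains the whole list is dropped: a symmetric
 * system needs no asymmetry data.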
1380c744dc4aSBeata Michalska */ 1381c744dc4aSBeata Michalska static void asym_cpu_capacity_scan(void) 1382c744dc4aSBeata Michalska { 1383c744dc4aSBeata Michalska struct asym_cap_data *entry, *next; 1384c744dc4aSBeata Michalska int cpu; 1385c744dc4aSBeata Michalska 1386c744dc4aSBeata Michalska list_for_each_entry(entry, &asym_cap_list, link) 1387c744dc4aSBeata Michalska cpumask_clear(cpu_capacity_span(entry)); 1388c744dc4aSBeata Michalska 138904d4e665SFrederic Weisbecker for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) 1390c744dc4aSBeata Michalska asym_cpu_capacity_update_data(cpu); 1391c744dc4aSBeata Michalska 1392c744dc4aSBeata Michalska list_for_each_entry_safe(entry, next, &asym_cap_list, link) { 1393c744dc4aSBeata Michalska if (cpumask_empty(cpu_capacity_span(entry))) { 1394c744dc4aSBeata Michalska list_del(&entry->link); 1395c744dc4aSBeata Michalska kfree(entry); 1396c744dc4aSBeata Michalska } 1397c744dc4aSBeata Michalska } 1398c744dc4aSBeata Michalska 1399c744dc4aSBeata Michalska /* 1400c744dc4aSBeata Michalska * Only one capacity value has been detected i.e. this system is symmetric. 1401c744dc4aSBeata Michalska * No need to keep this data around. 1402c744dc4aSBeata Michalska */ 1403c744dc4aSBeata Michalska if (list_is_singular(&asym_cap_list)) { 1404c744dc4aSBeata Michalska entry = list_first_entry(&asym_cap_list, typeof(*entry), link); 1405c744dc4aSBeata Michalska list_del(&entry->link); 1406c744dc4aSBeata Michalska kfree(entry); 1407c744dc4aSBeata Michalska } 1408c744dc4aSBeata Michalska } 1409c744dc4aSBeata Michalska 1410c744dc4aSBeata Michalska /* 1411f2cb1360SIngo Molnar * Initializers for schedule domains 1412f2cb1360SIngo Molnar * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 1413f2cb1360SIngo Molnar */ 1414f2cb1360SIngo Molnar 1415f2cb1360SIngo Molnar static int default_relax_domain_level = -1; 1416f2cb1360SIngo Molnar int sched_domain_level_max; 1417f2cb1360SIngo Molnar 1418f2cb1360SIngo Molnar static int __init setup_relax_domain_level(char *str) 1419f2cb1360SIngo Molnar { 1420f2cb1360SIngo Molnar if (kstrtoint(str, 0, &default_relax_domain_level)) 1421f2cb1360SIngo Molnar pr_warn("Unable to set relax_domain_level\n"); 1422f2cb1360SIngo Molnar 1423f2cb1360SIngo Molnar return 1; 1424f2cb1360SIngo Molnar } 1425f2cb1360SIngo Molnar __setup("relax_domain_level=", setup_relax_domain_level); 1426f2cb1360SIngo Molnar 1427f2cb1360SIngo Molnar static void set_domain_attribute(struct sched_domain *sd, 1428f2cb1360SIngo Molnar struct sched_domain_attr *attr) 1429f2cb1360SIngo Molnar { 1430f2cb1360SIngo Molnar int request; 1431f2cb1360SIngo Molnar 1432f2cb1360SIngo Molnar if (!attr || attr->relax_domain_level < 0) { 1433f2cb1360SIngo Molnar if (default_relax_domain_level < 0) 1434f2cb1360SIngo Molnar return; 1435f2cb1360SIngo Molnar request = default_relax_domain_level; 1436f2cb1360SIngo Molnar } else 1437f2cb1360SIngo Molnar request = attr->relax_domain_level; 14389ae7ab20SValentin Schneider 14399ae7ab20SValentin Schneider if (sd->level > request) { 1440f2cb1360SIngo Molnar /* Turn off idle balance on this domain: */ 1441f2cb1360SIngo Molnar sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 1442f2cb1360SIngo Molnar } 1443f2cb1360SIngo Molnar } 1444f2cb1360SIngo Molnar 1445f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map); 1446f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map); 1447f2cb1360SIngo Molnar 1448f2cb1360SIngo Molnar static void __free_domain_allocs(struct s_data *d, 
enum s_alloc what, 1449f2cb1360SIngo Molnar const struct cpumask *cpu_map) 1450f2cb1360SIngo Molnar { 1451f2cb1360SIngo Molnar switch (what) { 1452f2cb1360SIngo Molnar case sa_rootdomain: 1453f2cb1360SIngo Molnar if (!atomic_read(&d->rd->refcount)) 1454f2cb1360SIngo Molnar free_rootdomain(&d->rd->rcu); 1455df561f66SGustavo A. R. Silva fallthrough; 1456f2cb1360SIngo Molnar case sa_sd: 1457f2cb1360SIngo Molnar free_percpu(d->sd); 1458df561f66SGustavo A. R. Silva fallthrough; 1459f2cb1360SIngo Molnar case sa_sd_storage: 1460f2cb1360SIngo Molnar __sdt_free(cpu_map); 1461df561f66SGustavo A. R. Silva fallthrough; 1462f2cb1360SIngo Molnar case sa_none: 1463f2cb1360SIngo Molnar break; 1464f2cb1360SIngo Molnar } 1465f2cb1360SIngo Molnar } 1466f2cb1360SIngo Molnar 1467f2cb1360SIngo Molnar static enum s_alloc 1468f2cb1360SIngo Molnar __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) 1469f2cb1360SIngo Molnar { 1470f2cb1360SIngo Molnar memset(d, 0, sizeof(*d)); 1471f2cb1360SIngo Molnar 1472f2cb1360SIngo Molnar if (__sdt_alloc(cpu_map)) 1473f2cb1360SIngo Molnar return sa_sd_storage; 1474f2cb1360SIngo Molnar d->sd = alloc_percpu(struct sched_domain *); 1475f2cb1360SIngo Molnar if (!d->sd) 1476f2cb1360SIngo Molnar return sa_sd_storage; 1477f2cb1360SIngo Molnar d->rd = alloc_rootdomain(); 1478f2cb1360SIngo Molnar if (!d->rd) 1479f2cb1360SIngo Molnar return sa_sd; 148097fb7a0aSIngo Molnar 1481f2cb1360SIngo Molnar return sa_rootdomain; 1482f2cb1360SIngo Molnar } 1483f2cb1360SIngo Molnar 1484f2cb1360SIngo Molnar /* 1485f2cb1360SIngo Molnar * NULL the sd_data elements we've used to build the sched_domain and 1486f2cb1360SIngo Molnar * sched_group structure so that the subsequent __free_domain_allocs() 1487f2cb1360SIngo Molnar * will not free the data we're using. 1488f2cb1360SIngo Molnar */ 1489f2cb1360SIngo Molnar static void claim_allocations(int cpu, struct sched_domain *sd) 1490f2cb1360SIngo Molnar { 1491f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 1492f2cb1360SIngo Molnar 1493f2cb1360SIngo Molnar WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 1494f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, cpu) = NULL; 1495f2cb1360SIngo Molnar 1496f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) 1497f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, cpu) = NULL; 1498f2cb1360SIngo Molnar 1499f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 1500f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, cpu) = NULL; 1501f2cb1360SIngo Molnar 1502f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 1503f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, cpu) = NULL; 1504f2cb1360SIngo Molnar } 1505f2cb1360SIngo Molnar 1506f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1507f2cb1360SIngo Molnar enum numa_topology_type sched_numa_topology_type; 150897fb7a0aSIngo Molnar 150997fb7a0aSIngo Molnar static int sched_domains_numa_levels; 1510f2cb1360SIngo Molnar static int sched_domains_curr_level; 151197fb7a0aSIngo Molnar 151297fb7a0aSIngo Molnar int sched_max_numa_distance; 151397fb7a0aSIngo Molnar static int *sched_domains_numa_distance; 151497fb7a0aSIngo Molnar static struct cpumask ***sched_domains_numa_masks; 1515f2cb1360SIngo Molnar #endif 1516f2cb1360SIngo Molnar 1517f2cb1360SIngo Molnar /* 1518f2cb1360SIngo Molnar * SD_flags allowed in topology descriptions. 1519f2cb1360SIngo Molnar * 1520f2cb1360SIngo Molnar * These flags are purely descriptive of the topology and do not prescribe 1521f2cb1360SIngo Molnar * behaviour. 
Behaviour is artificial and mapped in the below sd_init() 1522f2cb1360SIngo Molnar * function: 1523f2cb1360SIngo Molnar * 1524f2cb1360SIngo Molnar * SD_SHARE_CPUCAPACITY - describes SMT topologies 1525f2cb1360SIngo Molnar * SD_SHARE_PKG_RESOURCES - describes shared caches 1526f2cb1360SIngo Molnar * SD_NUMA - describes NUMA topologies 1527f2cb1360SIngo Molnar * 1528f2cb1360SIngo Molnar * Odd one out, which beside describing the topology has a quirk also 1529f2cb1360SIngo Molnar * prescribes the desired behaviour that goes along with it: 1530f2cb1360SIngo Molnar * 1531f2cb1360SIngo Molnar * SD_ASYM_PACKING - describes SMT quirks 1532f2cb1360SIngo Molnar */ 1533f2cb1360SIngo Molnar #define TOPOLOGY_SD_FLAGS \ 1534f2cb1360SIngo Molnar (SD_SHARE_CPUCAPACITY | \ 1535f2cb1360SIngo Molnar SD_SHARE_PKG_RESOURCES | \ 1536f2cb1360SIngo Molnar SD_NUMA | \ 1537cfe7ddcbSValentin Schneider SD_ASYM_PACKING) 1538f2cb1360SIngo Molnar 1539f2cb1360SIngo Molnar static struct sched_domain * 1540f2cb1360SIngo Molnar sd_init(struct sched_domain_topology_level *tl, 1541f2cb1360SIngo Molnar const struct cpumask *cpu_map, 1542c744dc4aSBeata Michalska struct sched_domain *child, int cpu) 1543f2cb1360SIngo Molnar { 1544f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 1545f2cb1360SIngo Molnar struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 1546f2cb1360SIngo Molnar int sd_id, sd_weight, sd_flags = 0; 1547c744dc4aSBeata Michalska struct cpumask *sd_span; 1548f2cb1360SIngo Molnar 1549f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1550f2cb1360SIngo Molnar /* 1551f2cb1360SIngo Molnar * Ugly hack to pass state to sd_numa_mask()... 1552f2cb1360SIngo Molnar */ 1553f2cb1360SIngo Molnar sched_domains_curr_level = tl->numa_level; 1554f2cb1360SIngo Molnar #endif 1555f2cb1360SIngo Molnar 1556f2cb1360SIngo Molnar sd_weight = cpumask_weight(tl->mask(cpu)); 1557f2cb1360SIngo Molnar 1558f2cb1360SIngo Molnar if (tl->sd_flags) 1559f2cb1360SIngo Molnar sd_flags = (*tl->sd_flags)(); 1560f2cb1360SIngo Molnar if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 1561f2cb1360SIngo Molnar "wrong sd_flags in topology description\n")) 15629b1b234bSPeng Liu sd_flags &= TOPOLOGY_SD_FLAGS; 1563f2cb1360SIngo Molnar 1564f2cb1360SIngo Molnar *sd = (struct sched_domain){ 1565f2cb1360SIngo Molnar .min_interval = sd_weight, 1566f2cb1360SIngo Molnar .max_interval = 2*sd_weight, 15676e749913SVincent Guittot .busy_factor = 16, 15682208cdaaSVincent Guittot .imbalance_pct = 117, 1569f2cb1360SIngo Molnar 1570f2cb1360SIngo Molnar .cache_nice_tries = 0, 1571f2cb1360SIngo Molnar 157236c5bdc4SValentin Schneider .flags = 1*SD_BALANCE_NEWIDLE 1573f2cb1360SIngo Molnar | 1*SD_BALANCE_EXEC 1574f2cb1360SIngo Molnar | 1*SD_BALANCE_FORK 1575f2cb1360SIngo Molnar | 0*SD_BALANCE_WAKE 1576f2cb1360SIngo Molnar | 1*SD_WAKE_AFFINE 1577f2cb1360SIngo Molnar | 0*SD_SHARE_CPUCAPACITY 1578f2cb1360SIngo Molnar | 0*SD_SHARE_PKG_RESOURCES 1579f2cb1360SIngo Molnar | 0*SD_SERIALIZE 15809c63e84dSMorten Rasmussen | 1*SD_PREFER_SIBLING 1581f2cb1360SIngo Molnar | 0*SD_NUMA 1582f2cb1360SIngo Molnar | sd_flags 1583f2cb1360SIngo Molnar , 1584f2cb1360SIngo Molnar 1585f2cb1360SIngo Molnar .last_balance = jiffies, 1586f2cb1360SIngo Molnar .balance_interval = sd_weight, 1587f2cb1360SIngo Molnar .max_newidle_lb_cost = 0, 1588e60b56e4SVincent Guittot .last_decay_max_lb_cost = jiffies, 1589f2cb1360SIngo Molnar .child = child, 1590f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG 1591f2cb1360SIngo Molnar .name = tl->name, 1592f2cb1360SIngo Molnar #endif 1593f2cb1360SIngo Molnar }; 1594f2cb1360SIngo Molnar 
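	/*
	 * Trim the domain span to the CPUs we are actually building domains
	 * for, and derive the asymmetric-capacity flags from that span (see
	 * asym_cpu_capacity_classify() above).
	 */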
1595c744dc4aSBeata Michalska sd_span = sched_domain_span(sd); 1596c744dc4aSBeata Michalska cpumask_and(sd_span, cpu_map, tl->mask(cpu)); 1597c744dc4aSBeata Michalska sd_id = cpumask_first(sd_span); 1598c744dc4aSBeata Michalska 1599c744dc4aSBeata Michalska sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); 1600c744dc4aSBeata Michalska 1601c744dc4aSBeata Michalska WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) == 1602c744dc4aSBeata Michalska (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY), 1603c744dc4aSBeata Michalska "CPU capacity asymmetry not supported on SMT\n"); 1604f2cb1360SIngo Molnar 1605f2cb1360SIngo Molnar /* 1606f2cb1360SIngo Molnar * Convert topological properties into behaviour. 1607f2cb1360SIngo Molnar */ 1608a526d466SMorten Rasmussen /* Don't attempt to spread across CPUs of different capacities. */ 1609a526d466SMorten Rasmussen if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) 16109c63e84dSMorten Rasmussen sd->child->flags &= ~SD_PREFER_SIBLING; 16119c63e84dSMorten Rasmussen 1612f2cb1360SIngo Molnar if (sd->flags & SD_SHARE_CPUCAPACITY) { 1613f2cb1360SIngo Molnar sd->imbalance_pct = 110; 1614f2cb1360SIngo Molnar 1615f2cb1360SIngo Molnar } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { 1616f2cb1360SIngo Molnar sd->imbalance_pct = 117; 1617f2cb1360SIngo Molnar sd->cache_nice_tries = 1; 1618f2cb1360SIngo Molnar 1619f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1620f2cb1360SIngo Molnar } else if (sd->flags & SD_NUMA) { 1621f2cb1360SIngo Molnar sd->cache_nice_tries = 2; 1622f2cb1360SIngo Molnar 16239c63e84dSMorten Rasmussen sd->flags &= ~SD_PREFER_SIBLING; 1624f2cb1360SIngo Molnar sd->flags |= SD_SERIALIZE; 1625a55c7454SMatt Fleming if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { 1626f2cb1360SIngo Molnar sd->flags &= ~(SD_BALANCE_EXEC | 1627f2cb1360SIngo Molnar SD_BALANCE_FORK | 1628f2cb1360SIngo Molnar SD_WAKE_AFFINE); 1629f2cb1360SIngo Molnar } 1630f2cb1360SIngo Molnar 1631f2cb1360SIngo Molnar #endif 1632f2cb1360SIngo Molnar } else { 1633f2cb1360SIngo Molnar sd->cache_nice_tries = 1; 1634f2cb1360SIngo Molnar } 1635f2cb1360SIngo Molnar 1636f2cb1360SIngo Molnar /* 1637f2cb1360SIngo Molnar * For all levels sharing cache; connect a sched_domain_shared 1638f2cb1360SIngo Molnar * instance. 1639f2cb1360SIngo Molnar */ 1640f2cb1360SIngo Molnar if (sd->flags & SD_SHARE_PKG_RESOURCES) { 1641f2cb1360SIngo Molnar sd->shared = *per_cpu_ptr(sdd->sds, sd_id); 1642f2cb1360SIngo Molnar atomic_inc(&sd->shared->ref); 1643f2cb1360SIngo Molnar atomic_set(&sd->shared->nr_busy_cpus, sd_weight); 1644f2cb1360SIngo Molnar } 1645f2cb1360SIngo Molnar 1646f2cb1360SIngo Molnar sd->private = sdd; 1647f2cb1360SIngo Molnar 1648f2cb1360SIngo Molnar return sd; 1649f2cb1360SIngo Molnar } 1650f2cb1360SIngo Molnar 1651f2cb1360SIngo Molnar /* 1652f2cb1360SIngo Molnar * Topology list, bottom-up. 
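 * Each entry supplies, for a given CPU, the cpumask of that CPU's peers at
 * the level plus the topology flags for the level; NUMA levels are appended
 * at runtime by sched_init_numa(). As an illustrative sketch only (the table
 * name below is made up, not upstream code), an architecture could install
 * its own table before SMP bring-up:
 *
 *	static struct sched_domain_topology_level arch_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(arch_topology);
 *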
1653f2cb1360SIngo Molnar */ 1654f2cb1360SIngo Molnar static struct sched_domain_topology_level default_topology[] = { 1655f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_SMT 1656f2cb1360SIngo Molnar { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 1657f2cb1360SIngo Molnar #endif 1658778c558fSBarry Song 1659778c558fSBarry Song #ifdef CONFIG_SCHED_CLUSTER 1660778c558fSBarry Song { cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) }, 1661778c558fSBarry Song #endif 1662778c558fSBarry Song 1663f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_MC 1664f2cb1360SIngo Molnar { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 1665f2cb1360SIngo Molnar #endif 1666f2cb1360SIngo Molnar { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 1667f2cb1360SIngo Molnar { NULL, }, 1668f2cb1360SIngo Molnar }; 1669f2cb1360SIngo Molnar 1670f2cb1360SIngo Molnar static struct sched_domain_topology_level *sched_domain_topology = 1671f2cb1360SIngo Molnar default_topology; 16720fb3978bSHuang Ying static struct sched_domain_topology_level *sched_domain_topology_saved; 1673f2cb1360SIngo Molnar 1674f2cb1360SIngo Molnar #define for_each_sd_topology(tl) \ 1675f2cb1360SIngo Molnar for (tl = sched_domain_topology; tl->mask; tl++) 1676f2cb1360SIngo Molnar 1677f2cb1360SIngo Molnar void set_sched_topology(struct sched_domain_topology_level *tl) 1678f2cb1360SIngo Molnar { 1679f2cb1360SIngo Molnar if (WARN_ON_ONCE(sched_smp_initialized)) 1680f2cb1360SIngo Molnar return; 1681f2cb1360SIngo Molnar 1682f2cb1360SIngo Molnar sched_domain_topology = tl; 16830fb3978bSHuang Ying sched_domain_topology_saved = NULL; 1684f2cb1360SIngo Molnar } 1685f2cb1360SIngo Molnar 1686f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1687f2cb1360SIngo Molnar 1688f2cb1360SIngo Molnar static const struct cpumask *sd_numa_mask(int cpu) 1689f2cb1360SIngo Molnar { 1690f2cb1360SIngo Molnar return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 1691f2cb1360SIngo Molnar } 1692f2cb1360SIngo Molnar 1693f2cb1360SIngo Molnar static void sched_numa_warn(const char *str) 1694f2cb1360SIngo Molnar { 1695f2cb1360SIngo Molnar static int done = false; 1696f2cb1360SIngo Molnar int i,j; 1697f2cb1360SIngo Molnar 1698f2cb1360SIngo Molnar if (done) 1699f2cb1360SIngo Molnar return; 1700f2cb1360SIngo Molnar 1701f2cb1360SIngo Molnar done = true; 1702f2cb1360SIngo Molnar 1703f2cb1360SIngo Molnar printk(KERN_WARNING "ERROR: %s\n\n", str); 1704f2cb1360SIngo Molnar 1705f2cb1360SIngo Molnar for (i = 0; i < nr_node_ids; i++) { 1706f2cb1360SIngo Molnar printk(KERN_WARNING " "); 17070fb3978bSHuang Ying for (j = 0; j < nr_node_ids; j++) { 17080fb3978bSHuang Ying if (!node_state(i, N_CPU) || !node_state(j, N_CPU)) 17090fb3978bSHuang Ying printk(KERN_CONT "(%02d) ", node_distance(i,j)); 17100fb3978bSHuang Ying else 1711f2cb1360SIngo Molnar printk(KERN_CONT " %02d ", node_distance(i,j)); 17120fb3978bSHuang Ying } 1713f2cb1360SIngo Molnar printk(KERN_CONT "\n"); 1714f2cb1360SIngo Molnar } 1715f2cb1360SIngo Molnar printk(KERN_WARNING "\n"); 1716f2cb1360SIngo Molnar } 1717f2cb1360SIngo Molnar 1718f2cb1360SIngo Molnar bool find_numa_distance(int distance) 1719f2cb1360SIngo Molnar { 17200fb3978bSHuang Ying bool found = false; 17210fb3978bSHuang Ying int i, *distances; 1722f2cb1360SIngo Molnar 1723f2cb1360SIngo Molnar if (distance == node_distance(0, 0)) 1724f2cb1360SIngo Molnar return true; 1725f2cb1360SIngo Molnar 17260fb3978bSHuang Ying rcu_read_lock(); 17270fb3978bSHuang Ying distances = rcu_dereference(sched_domains_numa_distance); 17280fb3978bSHuang Ying if (!distances) 17290fb3978bSHuang Ying 
goto unlock; 1730f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 17310fb3978bSHuang Ying if (distances[i] == distance) { 17320fb3978bSHuang Ying found = true; 17330fb3978bSHuang Ying break; 17340fb3978bSHuang Ying } 17350fb3978bSHuang Ying } 17360fb3978bSHuang Ying unlock: 17370fb3978bSHuang Ying rcu_read_unlock(); 17380fb3978bSHuang Ying 17390fb3978bSHuang Ying return found; 1740f2cb1360SIngo Molnar } 1741f2cb1360SIngo Molnar 17420fb3978bSHuang Ying #define for_each_cpu_node_but(n, nbut) \ 17430fb3978bSHuang Ying for_each_node_state(n, N_CPU) \ 17440fb3978bSHuang Ying if (n == nbut) \ 17450fb3978bSHuang Ying continue; \ 17460fb3978bSHuang Ying else 1747f2cb1360SIngo Molnar 1748f2cb1360SIngo Molnar /* 1749f2cb1360SIngo Molnar * A system can have three types of NUMA topology: 1750f2cb1360SIngo Molnar * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 1751f2cb1360SIngo Molnar * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 1752f2cb1360SIngo Molnar * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 1753f2cb1360SIngo Molnar * 1754f2cb1360SIngo Molnar * The difference between a glueless mesh topology and a backplane 1755f2cb1360SIngo Molnar * topology lies in whether communication between not directly 1756f2cb1360SIngo Molnar * connected nodes goes through intermediary nodes (where programs 1757f2cb1360SIngo Molnar * could run), or through backplane controllers. This affects 1758f2cb1360SIngo Molnar * placement of programs. 1759f2cb1360SIngo Molnar * 1760f2cb1360SIngo Molnar * The type of topology can be discerned with the following tests: 1761f2cb1360SIngo Molnar * - If the maximum distance between any nodes is 1 hop, the system 1762f2cb1360SIngo Molnar * is directly connected. 1763f2cb1360SIngo Molnar * - If for two nodes A and B, located N > 1 hops away from each other, 1764f2cb1360SIngo Molnar * there is an intermediary node C, which is < N hops away from both 1765f2cb1360SIngo Molnar * nodes A and B, the system is a glueless mesh. 1766f2cb1360SIngo Molnar */ 17670fb3978bSHuang Ying static void init_numa_topology_type(int offline_node) 1768f2cb1360SIngo Molnar { 1769f2cb1360SIngo Molnar int a, b, c, n; 1770f2cb1360SIngo Molnar 1771f2cb1360SIngo Molnar n = sched_max_numa_distance; 1772f2cb1360SIngo Molnar 1773e5e96fafSSrikar Dronamraju if (sched_domains_numa_levels <= 2) { 1774f2cb1360SIngo Molnar sched_numa_topology_type = NUMA_DIRECT; 1775f2cb1360SIngo Molnar return; 1776f2cb1360SIngo Molnar } 1777f2cb1360SIngo Molnar 17780fb3978bSHuang Ying for_each_cpu_node_but(a, offline_node) { 17790fb3978bSHuang Ying for_each_cpu_node_but(b, offline_node) { 1780f2cb1360SIngo Molnar /* Find two nodes furthest removed from each other. */ 1781f2cb1360SIngo Molnar if (node_distance(a, b) < n) 1782f2cb1360SIngo Molnar continue; 1783f2cb1360SIngo Molnar 1784f2cb1360SIngo Molnar /* Is there an intermediary node between a and b? 
*/ 17850fb3978bSHuang Ying for_each_cpu_node_but(c, offline_node) { 1786f2cb1360SIngo Molnar if (node_distance(a, c) < n && 1787f2cb1360SIngo Molnar node_distance(b, c) < n) { 1788f2cb1360SIngo Molnar sched_numa_topology_type = 1789f2cb1360SIngo Molnar NUMA_GLUELESS_MESH; 1790f2cb1360SIngo Molnar return; 1791f2cb1360SIngo Molnar } 1792f2cb1360SIngo Molnar } 1793f2cb1360SIngo Molnar 1794f2cb1360SIngo Molnar sched_numa_topology_type = NUMA_BACKPLANE; 1795f2cb1360SIngo Molnar return; 1796f2cb1360SIngo Molnar } 1797f2cb1360SIngo Molnar } 17980fb3978bSHuang Ying 17990fb3978bSHuang Ying pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n"); 18000fb3978bSHuang Ying sched_numa_topology_type = NUMA_DIRECT; 1801f2cb1360SIngo Molnar } 1802f2cb1360SIngo Molnar 1803620a6dc4SValentin Schneider 1804620a6dc4SValentin Schneider #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS) 1805620a6dc4SValentin Schneider 18060fb3978bSHuang Ying void sched_init_numa(int offline_node) 1807f2cb1360SIngo Molnar { 1808f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1809620a6dc4SValentin Schneider unsigned long *distance_map; 1810620a6dc4SValentin Schneider int nr_levels = 0; 1811620a6dc4SValentin Schneider int i, j; 18120fb3978bSHuang Ying int *distances; 18130fb3978bSHuang Ying struct cpumask ***masks; 1814051f3ca0SSuravee Suthikulpanit 1815f2cb1360SIngo Molnar /* 1816f2cb1360SIngo Molnar * O(nr_nodes^2) deduplicating selection sort -- in order to find the 1817f2cb1360SIngo Molnar * unique distances in the node_distance() table. 1818f2cb1360SIngo Molnar */ 1819620a6dc4SValentin Schneider distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL); 1820620a6dc4SValentin Schneider if (!distance_map) 1821620a6dc4SValentin Schneider return; 1822620a6dc4SValentin Schneider 1823620a6dc4SValentin Schneider bitmap_zero(distance_map, NR_DISTANCE_VALUES); 18240fb3978bSHuang Ying for_each_cpu_node_but(i, offline_node) { 18250fb3978bSHuang Ying for_each_cpu_node_but(j, offline_node) { 1826620a6dc4SValentin Schneider int distance = node_distance(i, j); 1827f2cb1360SIngo Molnar 1828620a6dc4SValentin Schneider if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) { 1829620a6dc4SValentin Schneider sched_numa_warn("Invalid distance value range"); 18300fb3978bSHuang Ying bitmap_free(distance_map); 1831620a6dc4SValentin Schneider return; 1832620a6dc4SValentin Schneider } 1833f2cb1360SIngo Molnar 1834620a6dc4SValentin Schneider bitmap_set(distance_map, distance, 1); 1835620a6dc4SValentin Schneider } 1836620a6dc4SValentin Schneider } 1837f2cb1360SIngo Molnar /* 1838620a6dc4SValentin Schneider * We can now figure out how many unique distance values there are and 1839620a6dc4SValentin Schneider * allocate memory accordingly. 
1840f2cb1360SIngo Molnar */ 1841620a6dc4SValentin Schneider nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES); 1842f2cb1360SIngo Molnar 18430fb3978bSHuang Ying distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL); 18440fb3978bSHuang Ying if (!distances) { 1845620a6dc4SValentin Schneider bitmap_free(distance_map); 1846620a6dc4SValentin Schneider return; 1847f2cb1360SIngo Molnar } 1848620a6dc4SValentin Schneider 1849620a6dc4SValentin Schneider for (i = 0, j = 0; i < nr_levels; i++, j++) { 1850620a6dc4SValentin Schneider j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j); 18510fb3978bSHuang Ying distances[i] = j; 1852f2cb1360SIngo Molnar } 18530fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_distance, distances); 1854f2cb1360SIngo Molnar 1855620a6dc4SValentin Schneider bitmap_free(distance_map); 1856620a6dc4SValentin Schneider 1857f2cb1360SIngo Molnar /* 1858620a6dc4SValentin Schneider * 'nr_levels' contains the number of unique distances 1859f2cb1360SIngo Molnar * 1860f2cb1360SIngo Molnar * The sched_domains_numa_distance[] array includes the actual distance 1861f2cb1360SIngo Molnar * numbers. 1862f2cb1360SIngo Molnar */ 1863f2cb1360SIngo Molnar 1864f2cb1360SIngo Molnar /* 1865f2cb1360SIngo Molnar * Here, we temporarily reset sched_domains_numa_levels to 0. 1866f2cb1360SIngo Molnar * If allocating memory for the sched_domains_numa_masks[][] array fails, 1867620a6dc4SValentin Schneider * the array will contain fewer than 'nr_levels' members. This could be 1868f2cb1360SIngo Molnar * dangerous when we use it to iterate the sched_domains_numa_masks[][] array 1869f2cb1360SIngo Molnar * in other functions. 1870f2cb1360SIngo Molnar * 1871620a6dc4SValentin Schneider * We reset it to 'nr_levels' at the end of this function. 1872f2cb1360SIngo Molnar */ 1873f2cb1360SIngo Molnar sched_domains_numa_levels = 0; 1874f2cb1360SIngo Molnar 18750fb3978bSHuang Ying masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL); 18760fb3978bSHuang Ying if (!masks) 1877f2cb1360SIngo Molnar return; 1878f2cb1360SIngo Molnar 1879f2cb1360SIngo Molnar /* 1880f2cb1360SIngo Molnar * Now for each level, construct a mask per node which contains all 1881f2cb1360SIngo Molnar * CPUs of nodes that are that many hops away from us.
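 *
 * For example, in the diameter-3 line topology 0 - 1 - 2 - 3 shown earlier
 * (distances 10/20/30/40), the level for distance 20 gives node 1 a mask
 * covering the CPUs of nodes 0, 1 and 2, since those nodes are all within
 * distance 20 of node 1.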
1882f2cb1360SIngo Molnar */ 1883620a6dc4SValentin Schneider for (i = 0; i < nr_levels; i++) { 18840fb3978bSHuang Ying masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 18850fb3978bSHuang Ying if (!masks[i]) 1886f2cb1360SIngo Molnar return; 1887f2cb1360SIngo Molnar 18880fb3978bSHuang Ying for_each_cpu_node_but(j, offline_node) { 1889f2cb1360SIngo Molnar struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 1890620a6dc4SValentin Schneider int k; 1891620a6dc4SValentin Schneider 1892f2cb1360SIngo Molnar if (!mask) 1893f2cb1360SIngo Molnar return; 1894f2cb1360SIngo Molnar 18950fb3978bSHuang Ying masks[i][j] = mask; 1896f2cb1360SIngo Molnar 18970fb3978bSHuang Ying for_each_cpu_node_but(k, offline_node) { 1898620a6dc4SValentin Schneider if (sched_debug() && (node_distance(j, k) != node_distance(k, j))) 1899620a6dc4SValentin Schneider sched_numa_warn("Node-distance not symmetric"); 1900620a6dc4SValentin Schneider 1901f2cb1360SIngo Molnar if (node_distance(j, k) > sched_domains_numa_distance[i]) 1902f2cb1360SIngo Molnar continue; 1903f2cb1360SIngo Molnar 1904f2cb1360SIngo Molnar cpumask_or(mask, mask, cpumask_of_node(k)); 1905f2cb1360SIngo Molnar } 1906f2cb1360SIngo Molnar } 1907f2cb1360SIngo Molnar } 19080fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_masks, masks); 1909f2cb1360SIngo Molnar 1910f2cb1360SIngo Molnar /* Compute default topology size */ 1911f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++); 1912f2cb1360SIngo Molnar 191371e5f664SDietmar Eggemann tl = kzalloc((i + nr_levels + 1) * 1914f2cb1360SIngo Molnar sizeof(struct sched_domain_topology_level), GFP_KERNEL); 1915f2cb1360SIngo Molnar if (!tl) 1916f2cb1360SIngo Molnar return; 1917f2cb1360SIngo Molnar 1918f2cb1360SIngo Molnar /* 1919f2cb1360SIngo Molnar * Copy the default topology bits.. 1920f2cb1360SIngo Molnar */ 1921f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++) 1922f2cb1360SIngo Molnar tl[i] = sched_domain_topology[i]; 1923f2cb1360SIngo Molnar 1924f2cb1360SIngo Molnar /* 1925051f3ca0SSuravee Suthikulpanit * Add the NUMA identity distance, aka single NODE. 1926051f3ca0SSuravee Suthikulpanit */ 1927051f3ca0SSuravee Suthikulpanit tl[i++] = (struct sched_domain_topology_level){ 1928051f3ca0SSuravee Suthikulpanit .mask = sd_numa_mask, 1929051f3ca0SSuravee Suthikulpanit .numa_level = 0, 1930051f3ca0SSuravee Suthikulpanit SD_INIT_NAME(NODE) 1931051f3ca0SSuravee Suthikulpanit }; 1932051f3ca0SSuravee Suthikulpanit 1933051f3ca0SSuravee Suthikulpanit /* 1934f2cb1360SIngo Molnar * .. and append 'j' levels of NUMA goodness. 
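 * Every appended NUMA level uses sd_numa_mask() for its span and is marked
 * SDTL_OVERLAP, since the per-node masks of different nodes may overlap at
 * the same distance level.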
1935f2cb1360SIngo Molnar */ 1936620a6dc4SValentin Schneider for (j = 1; j < nr_levels; i++, j++) { 1937f2cb1360SIngo Molnar tl[i] = (struct sched_domain_topology_level){ 1938f2cb1360SIngo Molnar .mask = sd_numa_mask, 1939f2cb1360SIngo Molnar .sd_flags = cpu_numa_flags, 1940f2cb1360SIngo Molnar .flags = SDTL_OVERLAP, 1941f2cb1360SIngo Molnar .numa_level = j, 1942f2cb1360SIngo Molnar SD_INIT_NAME(NUMA) 1943f2cb1360SIngo Molnar }; 1944f2cb1360SIngo Molnar } 1945f2cb1360SIngo Molnar 19460fb3978bSHuang Ying sched_domain_topology_saved = sched_domain_topology; 1947f2cb1360SIngo Molnar sched_domain_topology = tl; 1948f2cb1360SIngo Molnar 1949620a6dc4SValentin Schneider sched_domains_numa_levels = nr_levels; 19500fb3978bSHuang Ying WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]); 1951f2cb1360SIngo Molnar 19520fb3978bSHuang Ying init_numa_topology_type(offline_node); 19530083242cSValentin Schneider } 19540083242cSValentin Schneider 19550fb3978bSHuang Ying 19560fb3978bSHuang Ying static void sched_reset_numa(void) 19570083242cSValentin Schneider { 19580fb3978bSHuang Ying int nr_levels, *distances; 19590fb3978bSHuang Ying struct cpumask ***masks; 19600fb3978bSHuang Ying 19610fb3978bSHuang Ying nr_levels = sched_domains_numa_levels; 19620fb3978bSHuang Ying sched_domains_numa_levels = 0; 19630fb3978bSHuang Ying sched_max_numa_distance = 0; 19640fb3978bSHuang Ying sched_numa_topology_type = NUMA_DIRECT; 19650fb3978bSHuang Ying distances = sched_domains_numa_distance; 19660fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_distance, NULL); 19670fb3978bSHuang Ying masks = sched_domains_numa_masks; 19680fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_masks, NULL); 19690fb3978bSHuang Ying if (distances || masks) { 19700083242cSValentin Schneider int i, j; 19710083242cSValentin Schneider 19720fb3978bSHuang Ying synchronize_rcu(); 19730fb3978bSHuang Ying kfree(distances); 19740fb3978bSHuang Ying for (i = 0; i < nr_levels && masks; i++) { 19750fb3978bSHuang Ying if (!masks[i]) 19760fb3978bSHuang Ying continue; 19770fb3978bSHuang Ying for_each_node(j) 19780fb3978bSHuang Ying kfree(masks[i][j]); 19790fb3978bSHuang Ying kfree(masks[i]); 19800fb3978bSHuang Ying } 19810fb3978bSHuang Ying kfree(masks); 19820fb3978bSHuang Ying } 19830fb3978bSHuang Ying if (sched_domain_topology_saved) { 19840fb3978bSHuang Ying kfree(sched_domain_topology); 19850fb3978bSHuang Ying sched_domain_topology = sched_domain_topology_saved; 19860fb3978bSHuang Ying sched_domain_topology_saved = NULL; 19870fb3978bSHuang Ying } 19880fb3978bSHuang Ying } 19890fb3978bSHuang Ying 19900083242cSValentin Schneider /* 19910fb3978bSHuang Ying * Call with hotplug lock held 19920083242cSValentin Schneider */ 19930fb3978bSHuang Ying void sched_update_numa(int cpu, bool online) 19940fb3978bSHuang Ying { 19950fb3978bSHuang Ying int node; 19960fb3978bSHuang Ying 19970fb3978bSHuang Ying node = cpu_to_node(cpu); 19980fb3978bSHuang Ying /* 19990fb3978bSHuang Ying * Scheduler NUMA topology is updated when the first CPU of a 20000fb3978bSHuang Ying * node is onlined or the last CPU of a node is offlined. 20010fb3978bSHuang Ying */ 20020fb3978bSHuang Ying if (cpumask_weight(cpumask_of_node(node)) != 1) 20030083242cSValentin Schneider return; 20040083242cSValentin Schneider 20050fb3978bSHuang Ying sched_reset_numa(); 20060fb3978bSHuang Ying sched_init_numa(online ? 
NUMA_NO_NODE : node); 2007f2cb1360SIngo Molnar } 2008f2cb1360SIngo Molnar 2009f2cb1360SIngo Molnar void sched_domains_numa_masks_set(unsigned int cpu) 2010f2cb1360SIngo Molnar { 2011f2cb1360SIngo Molnar int node = cpu_to_node(cpu); 2012f2cb1360SIngo Molnar int i, j; 2013f2cb1360SIngo Molnar 2014f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 2015f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) { 20160fb3978bSHuang Ying if (!node_state(j, N_CPU)) 20170083242cSValentin Schneider continue; 20180083242cSValentin Schneider 20190083242cSValentin Schneider /* Set ourselves in the remote node's masks */ 2020f2cb1360SIngo Molnar if (node_distance(j, node) <= sched_domains_numa_distance[i]) 2021f2cb1360SIngo Molnar cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 2022f2cb1360SIngo Molnar } 2023f2cb1360SIngo Molnar } 2024f2cb1360SIngo Molnar } 2025f2cb1360SIngo Molnar 2026f2cb1360SIngo Molnar void sched_domains_numa_masks_clear(unsigned int cpu) 2027f2cb1360SIngo Molnar { 2028f2cb1360SIngo Molnar int i, j; 2029f2cb1360SIngo Molnar 2030f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 2031f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) { 20320fb3978bSHuang Ying if (sched_domains_numa_masks[i][j]) 2033f2cb1360SIngo Molnar cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 2034f2cb1360SIngo Molnar } 2035f2cb1360SIngo Molnar } 20360fb3978bSHuang Ying } 2037f2cb1360SIngo Molnar 2038e0e8d491SWanpeng Li /* 2039e0e8d491SWanpeng Li * sched_numa_find_closest() - given the NUMA topology, find the cpu 2040e0e8d491SWanpeng Li * closest to @cpu from @cpus. 2041e0e8d491SWanpeng Li * @cpus: cpumask to find a cpu from 2042e0e8d491SWanpeng Li * @cpu: cpu to be close to 2043e0e8d491SWanpeng Li * 2044e0e8d491SWanpeng Li * returns: cpu, or nr_cpu_ids when nothing found.
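 *
 * The lookup walks sched_domains_numa_masks[] outwards from @cpu's node, so
 * the first level that contains a CPU from @cpus yields the closest match.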
2045e0e8d491SWanpeng Li */ 2046e0e8d491SWanpeng Li int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 2047e0e8d491SWanpeng Li { 20480fb3978bSHuang Ying int i, j = cpu_to_node(cpu), found = nr_cpu_ids; 20490fb3978bSHuang Ying struct cpumask ***masks; 2050e0e8d491SWanpeng Li 20510fb3978bSHuang Ying rcu_read_lock(); 20520fb3978bSHuang Ying masks = rcu_dereference(sched_domains_numa_masks); 20530fb3978bSHuang Ying if (!masks) 20540fb3978bSHuang Ying goto unlock; 2055e0e8d491SWanpeng Li for (i = 0; i < sched_domains_numa_levels; i++) { 20560fb3978bSHuang Ying if (!masks[i][j]) 20570fb3978bSHuang Ying break; 20580fb3978bSHuang Ying cpu = cpumask_any_and(cpus, masks[i][j]); 20590fb3978bSHuang Ying if (cpu < nr_cpu_ids) { 20600fb3978bSHuang Ying found = cpu; 20610fb3978bSHuang Ying break; 2062e0e8d491SWanpeng Li } 20630fb3978bSHuang Ying } 20640fb3978bSHuang Ying unlock: 20650fb3978bSHuang Ying rcu_read_unlock(); 20660fb3978bSHuang Ying 20670fb3978bSHuang Ying return found; 2068e0e8d491SWanpeng Li } 2069e0e8d491SWanpeng Li 2070f2cb1360SIngo Molnar #endif /* CONFIG_NUMA */ 2071f2cb1360SIngo Molnar 2072f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map) 2073f2cb1360SIngo Molnar { 2074f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 2075f2cb1360SIngo Molnar int j; 2076f2cb1360SIngo Molnar 2077f2cb1360SIngo Molnar for_each_sd_topology(tl) { 2078f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 2079f2cb1360SIngo Molnar 2080f2cb1360SIngo Molnar sdd->sd = alloc_percpu(struct sched_domain *); 2081f2cb1360SIngo Molnar if (!sdd->sd) 2082f2cb1360SIngo Molnar return -ENOMEM; 2083f2cb1360SIngo Molnar 2084f2cb1360SIngo Molnar sdd->sds = alloc_percpu(struct sched_domain_shared *); 2085f2cb1360SIngo Molnar if (!sdd->sds) 2086f2cb1360SIngo Molnar return -ENOMEM; 2087f2cb1360SIngo Molnar 2088f2cb1360SIngo Molnar sdd->sg = alloc_percpu(struct sched_group *); 2089f2cb1360SIngo Molnar if (!sdd->sg) 2090f2cb1360SIngo Molnar return -ENOMEM; 2091f2cb1360SIngo Molnar 2092f2cb1360SIngo Molnar sdd->sgc = alloc_percpu(struct sched_group_capacity *); 2093f2cb1360SIngo Molnar if (!sdd->sgc) 2094f2cb1360SIngo Molnar return -ENOMEM; 2095f2cb1360SIngo Molnar 2096f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 2097f2cb1360SIngo Molnar struct sched_domain *sd; 2098f2cb1360SIngo Molnar struct sched_domain_shared *sds; 2099f2cb1360SIngo Molnar struct sched_group *sg; 2100f2cb1360SIngo Molnar struct sched_group_capacity *sgc; 2101f2cb1360SIngo Molnar 2102f2cb1360SIngo Molnar sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 2103f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2104f2cb1360SIngo Molnar if (!sd) 2105f2cb1360SIngo Molnar return -ENOMEM; 2106f2cb1360SIngo Molnar 2107f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, j) = sd; 2108f2cb1360SIngo Molnar 2109f2cb1360SIngo Molnar sds = kzalloc_node(sizeof(struct sched_domain_shared), 2110f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2111f2cb1360SIngo Molnar if (!sds) 2112f2cb1360SIngo Molnar return -ENOMEM; 2113f2cb1360SIngo Molnar 2114f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, j) = sds; 2115f2cb1360SIngo Molnar 2116f2cb1360SIngo Molnar sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 2117f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2118f2cb1360SIngo Molnar if (!sg) 2119f2cb1360SIngo Molnar return -ENOMEM; 2120f2cb1360SIngo Molnar 2121f2cb1360SIngo Molnar sg->next = sg; 2122f2cb1360SIngo Molnar 2123f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, j) = sg; 2124f2cb1360SIngo Molnar 2125f2cb1360SIngo 
Molnar sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 2126f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2127f2cb1360SIngo Molnar if (!sgc) 2128f2cb1360SIngo Molnar return -ENOMEM; 2129f2cb1360SIngo Molnar 2130005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 2131005f874dSPeter Zijlstra sgc->id = j; 2132005f874dSPeter Zijlstra #endif 2133005f874dSPeter Zijlstra 2134f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, j) = sgc; 2135f2cb1360SIngo Molnar } 2136f2cb1360SIngo Molnar } 2137f2cb1360SIngo Molnar 2138f2cb1360SIngo Molnar return 0; 2139f2cb1360SIngo Molnar } 2140f2cb1360SIngo Molnar 2141f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map) 2142f2cb1360SIngo Molnar { 2143f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 2144f2cb1360SIngo Molnar int j; 2145f2cb1360SIngo Molnar 2146f2cb1360SIngo Molnar for_each_sd_topology(tl) { 2147f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 2148f2cb1360SIngo Molnar 2149f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 2150f2cb1360SIngo Molnar struct sched_domain *sd; 2151f2cb1360SIngo Molnar 2152f2cb1360SIngo Molnar if (sdd->sd) { 2153f2cb1360SIngo Molnar sd = *per_cpu_ptr(sdd->sd, j); 2154f2cb1360SIngo Molnar if (sd && (sd->flags & SD_OVERLAP)) 2155f2cb1360SIngo Molnar free_sched_groups(sd->groups, 0); 2156f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sd, j)); 2157f2cb1360SIngo Molnar } 2158f2cb1360SIngo Molnar 2159f2cb1360SIngo Molnar if (sdd->sds) 2160f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sds, j)); 2161f2cb1360SIngo Molnar if (sdd->sg) 2162f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sg, j)); 2163f2cb1360SIngo Molnar if (sdd->sgc) 2164f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sgc, j)); 2165f2cb1360SIngo Molnar } 2166f2cb1360SIngo Molnar free_percpu(sdd->sd); 2167f2cb1360SIngo Molnar sdd->sd = NULL; 2168f2cb1360SIngo Molnar free_percpu(sdd->sds); 2169f2cb1360SIngo Molnar sdd->sds = NULL; 2170f2cb1360SIngo Molnar free_percpu(sdd->sg); 2171f2cb1360SIngo Molnar sdd->sg = NULL; 2172f2cb1360SIngo Molnar free_percpu(sdd->sgc); 2173f2cb1360SIngo Molnar sdd->sgc = NULL; 2174f2cb1360SIngo Molnar } 2175f2cb1360SIngo Molnar } 2176f2cb1360SIngo Molnar 2177181a80d1SViresh Kumar static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 2178f2cb1360SIngo Molnar const struct cpumask *cpu_map, struct sched_domain_attr *attr, 2179c744dc4aSBeata Michalska struct sched_domain *child, int cpu) 2180f2cb1360SIngo Molnar { 2181c744dc4aSBeata Michalska struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); 2182f2cb1360SIngo Molnar 2183f2cb1360SIngo Molnar if (child) { 2184f2cb1360SIngo Molnar sd->level = child->level + 1; 2185f2cb1360SIngo Molnar sched_domain_level_max = max(sched_domain_level_max, sd->level); 2186f2cb1360SIngo Molnar child->parent = sd; 2187f2cb1360SIngo Molnar 2188f2cb1360SIngo Molnar if (!cpumask_subset(sched_domain_span(child), 2189f2cb1360SIngo Molnar sched_domain_span(sd))) { 2190f2cb1360SIngo Molnar pr_err("BUG: arch topology borken\n"); 2191f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG 2192f2cb1360SIngo Molnar pr_err(" the %s domain not a subset of the %s domain\n", 2193f2cb1360SIngo Molnar child->name, sd->name); 2194f2cb1360SIngo Molnar #endif 219597fb7a0aSIngo Molnar /* Fixup, ensure @sd has at least @child CPUs. 
*/ 2196f2cb1360SIngo Molnar cpumask_or(sched_domain_span(sd), 2197f2cb1360SIngo Molnar sched_domain_span(sd), 2198f2cb1360SIngo Molnar sched_domain_span(child)); 2199f2cb1360SIngo Molnar } 2200f2cb1360SIngo Molnar 2201f2cb1360SIngo Molnar } 2202f2cb1360SIngo Molnar set_domain_attribute(sd, attr); 2203f2cb1360SIngo Molnar 2204f2cb1360SIngo Molnar return sd; 2205f2cb1360SIngo Molnar } 2206f2cb1360SIngo Molnar 2207f2cb1360SIngo Molnar /* 2208ccf74128SValentin Schneider * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for 2209ccf74128SValentin Schneider * any two given CPUs at this (non-NUMA) topology level. 2210ccf74128SValentin Schneider */ 2211ccf74128SValentin Schneider static bool topology_span_sane(struct sched_domain_topology_level *tl, 2212ccf74128SValentin Schneider const struct cpumask *cpu_map, int cpu) 2213ccf74128SValentin Schneider { 2214ccf74128SValentin Schneider int i; 2215ccf74128SValentin Schneider 2216ccf74128SValentin Schneider /* NUMA levels are allowed to overlap */ 2217ccf74128SValentin Schneider if (tl->flags & SDTL_OVERLAP) 2218ccf74128SValentin Schneider return true; 2219ccf74128SValentin Schneider 2220ccf74128SValentin Schneider /* 2221ccf74128SValentin Schneider * Non-NUMA levels cannot partially overlap - they must be either 2222ccf74128SValentin Schneider * completely equal or completely disjoint. Otherwise we can end up 2223ccf74128SValentin Schneider * breaking the sched_group lists - i.e. a later get_group() pass 2224ccf74128SValentin Schneider * breaks the linking done for an earlier span. 2225ccf74128SValentin Schneider */ 2226ccf74128SValentin Schneider for_each_cpu(i, cpu_map) { 2227ccf74128SValentin Schneider if (i == cpu) 2228ccf74128SValentin Schneider continue; 2229ccf74128SValentin Schneider /* 2230ccf74128SValentin Schneider * We should 'and' all those masks with 'cpu_map' to exactly 2231ccf74128SValentin Schneider * match the topology we're about to build, but that can only 2232ccf74128SValentin Schneider * remove CPUs, which only lessens our ability to detect 2233ccf74128SValentin Schneider * overlaps 2234ccf74128SValentin Schneider */ 2235ccf74128SValentin Schneider if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && 2236ccf74128SValentin Schneider cpumask_intersects(tl->mask(cpu), tl->mask(i))) 2237ccf74128SValentin Schneider return false; 2238ccf74128SValentin Schneider } 2239ccf74128SValentin Schneider 2240ccf74128SValentin Schneider return true; 2241ccf74128SValentin Schneider } 2242ccf74128SValentin Schneider 2243ccf74128SValentin Schneider /* 2244f2cb1360SIngo Molnar * Build sched domains for a given set of CPUs and attach the sched domains 2245f2cb1360SIngo Molnar * to the individual CPUs 2246f2cb1360SIngo Molnar */ 2247f2cb1360SIngo Molnar static int 2248f2cb1360SIngo Molnar build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) 2249f2cb1360SIngo Molnar { 2250cd1cb335SValentin Schneider enum s_alloc alloc_state = sa_none; 2251f2cb1360SIngo Molnar struct sched_domain *sd; 2252f2cb1360SIngo Molnar struct s_data d; 2253f2cb1360SIngo Molnar struct rq *rq = NULL; 2254f2cb1360SIngo Molnar int i, ret = -ENOMEM; 2255df054e84SMorten Rasmussen bool has_asym = false; 2256f2cb1360SIngo Molnar 2257cd1cb335SValentin Schneider if (WARN_ON(cpumask_empty(cpu_map))) 2258cd1cb335SValentin Schneider goto error; 2259cd1cb335SValentin Schneider 2260f2cb1360SIngo Molnar alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 2261f2cb1360SIngo Molnar if (alloc_state != sa_rootdomain) 2262f2cb1360SIngo Molnar 
goto error; 2263f2cb1360SIngo Molnar 2264f2cb1360SIngo Molnar /* Set up domains for CPUs specified by the cpu_map: */ 2265f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) { 2266f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 2267f2cb1360SIngo Molnar 2268f2cb1360SIngo Molnar sd = NULL; 2269f2cb1360SIngo Molnar for_each_sd_topology(tl) { 227005484e09SMorten Rasmussen 2271ccf74128SValentin Schneider if (WARN_ON(!topology_span_sane(tl, cpu_map, i))) 2272ccf74128SValentin Schneider goto error; 2273ccf74128SValentin Schneider 2274c744dc4aSBeata Michalska sd = build_sched_domain(tl, cpu_map, attr, sd, i); 2275c744dc4aSBeata Michalska 2276c744dc4aSBeata Michalska has_asym |= sd->flags & SD_ASYM_CPUCAPACITY; 227705484e09SMorten Rasmussen 2278f2cb1360SIngo Molnar if (tl == sched_domain_topology) 2279f2cb1360SIngo Molnar *per_cpu_ptr(d.sd, i) = sd; 2280af85596cSPeter Zijlstra if (tl->flags & SDTL_OVERLAP) 2281f2cb1360SIngo Molnar sd->flags |= SD_OVERLAP; 2282f2cb1360SIngo Molnar if (cpumask_equal(cpu_map, sched_domain_span(sd))) 2283f2cb1360SIngo Molnar break; 2284f2cb1360SIngo Molnar } 2285f2cb1360SIngo Molnar } 2286f2cb1360SIngo Molnar 2287f2cb1360SIngo Molnar /* Build the groups for the domains */ 2288f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) { 2289f2cb1360SIngo Molnar for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2290f2cb1360SIngo Molnar sd->span_weight = cpumask_weight(sched_domain_span(sd)); 2291f2cb1360SIngo Molnar if (sd->flags & SD_OVERLAP) { 2292f2cb1360SIngo Molnar if (build_overlap_sched_groups(sd, i)) 2293f2cb1360SIngo Molnar goto error; 2294f2cb1360SIngo Molnar } else { 2295f2cb1360SIngo Molnar if (build_sched_groups(sd, i)) 2296f2cb1360SIngo Molnar goto error; 2297f2cb1360SIngo Molnar } 2298f2cb1360SIngo Molnar } 2299f2cb1360SIngo Molnar } 2300f2cb1360SIngo Molnar 2301e496132eSMel Gorman /* 2302e496132eSMel Gorman * Calculate an allowed NUMA imbalance such that LLCs do not get 2303e496132eSMel Gorman * imbalanced. 2304e496132eSMel Gorman */ 2305e496132eSMel Gorman for_each_cpu(i, cpu_map) { 2306e496132eSMel Gorman unsigned int imb = 0; 2307e496132eSMel Gorman unsigned int imb_span = 1; 2308e496132eSMel Gorman 2309e496132eSMel Gorman for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { 2310e496132eSMel Gorman struct sched_domain *child = sd->child; 2311e496132eSMel Gorman 2312e496132eSMel Gorman if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child && 2313e496132eSMel Gorman (child->flags & SD_SHARE_PKG_RESOURCES)) { 23147f434dffSK Prateek Nayak struct sched_domain __rcu *top_p; 2315e496132eSMel Gorman unsigned int nr_llcs; 2316e496132eSMel Gorman 2317e496132eSMel Gorman /* 2318e496132eSMel Gorman * For a single LLC per node, allow an 2319e496132eSMel Gorman * imbalance up to 25% of the node. This is an 2320e496132eSMel Gorman * arbitrary cutoff based on SMT-2 to balance 2321e496132eSMel Gorman * between memory bandwidth and avoiding 2322e496132eSMel Gorman * premature sharing of HT resources and SMT-4 2323e496132eSMel Gorman * or SMT-8 *may* benefit from a different 2324e496132eSMel Gorman * cutoff. 2325e496132eSMel Gorman * 2326e496132eSMel Gorman * For multiple LLCs, allow an imbalance 2327e496132eSMel Gorman * until multiple tasks would share an LLC 2328e496132eSMel Gorman * on one node while LLCs on another node 2329e496132eSMel Gorman * remain idle. 
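 *
 * As a rough worked example: a node-level domain spanning 128 CPUs with
 * a single LLC gets imb = 128 >> 2 = 32, while the same span split into
 * four LLCs of 32 CPUs gets imb = 4, i.e. up to roughly one task per LLC
 * of imbalance is tolerated.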
2351e496132eSMel Gorman
2352f2cb1360SIngo Molnar /* Calculate CPU capacity for physical packages and nodes */
2353f2cb1360SIngo Molnar for (i = nr_cpumask_bits-1; i >= 0; i--) {
2354f2cb1360SIngo Molnar if (!cpumask_test_cpu(i, cpu_map))
2355f2cb1360SIngo Molnar continue;
2356f2cb1360SIngo Molnar
2357f2cb1360SIngo Molnar for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2358f2cb1360SIngo Molnar claim_allocations(i, sd);
2359f2cb1360SIngo Molnar init_sched_groups_capacity(i, sd);
2360f2cb1360SIngo Molnar }
2361f2cb1360SIngo Molnar }
2362f2cb1360SIngo Molnar
2363f2cb1360SIngo Molnar /* Attach the domains */
2364f2cb1360SIngo Molnar rcu_read_lock();
2365f2cb1360SIngo Molnar for_each_cpu(i, cpu_map) {
2366f2cb1360SIngo Molnar rq = cpu_rq(i);
2367f2cb1360SIngo Molnar sd = *per_cpu_ptr(d.sd, i);
2368f2cb1360SIngo Molnar
2369f2cb1360SIngo Molnar /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
2370f2cb1360SIngo Molnar if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
2371f2cb1360SIngo Molnar WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
2372f2cb1360SIngo Molnar
2373f2cb1360SIngo Molnar cpu_attach_domain(sd, d.rd, i);
2374f2cb1360SIngo Molnar }
2375f2cb1360SIngo Molnar rcu_read_unlock();
2376f2cb1360SIngo Molnar
2377df054e84SMorten Rasmussen if (has_asym)
2378e284df70SValentin Schneider static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
2379df054e84SMorten Rasmussen
23809406415fSPeter Zijlstra if (rq && sched_debug_verbose) {
2381bf5015a5SJuri Lelli pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
2382f2cb1360SIngo Molnar cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
2383f2cb1360SIngo Molnar }
2384f2cb1360SIngo Molnar
2385f2cb1360SIngo Molnar ret = 0;
2386f2cb1360SIngo Molnar error:
2387f2cb1360SIngo Molnar __free_domain_allocs(&d, alloc_state, cpu_map);
238897fb7a0aSIngo Molnar
2389f2cb1360SIngo Molnar return ret;
2390f2cb1360SIngo Molnar }
2391f2cb1360SIngo Molnar
2392f2cb1360SIngo Molnar /* Current sched domains: */
2393f2cb1360SIngo Molnar static cpumask_var_t *doms_cur;
2394f2cb1360SIngo Molnar
2395f2cb1360SIngo Molnar /* Number of sched domains in 'doms_cur': */
2396f2cb1360SIngo Molnar static int ndoms_cur;
2397f2cb1360SIngo Molnar
23983b03706fSIngo Molnar /* Attributes of custom domains in 'doms_cur' */
2399f2cb1360SIngo Molnar static struct sched_domain_attr *dattr_cur;
2400f2cb1360SIngo Molnar
2401f2cb1360SIngo Molnar /*
2402f2cb1360SIngo Molnar * Special case: If a kmalloc() of a doms_cur partition (array of
2403f2cb1360SIngo Molnar * cpumask) fails, then fall back to a single sched domain,
2404f2cb1360SIngo Molnar * as determined by the single cpumask fallback_doms.
2405f2cb1360SIngo Molnar */
24068d5dc512SPeter Zijlstra static cpumask_var_t fallback_doms;
2407f2cb1360SIngo Molnar
2408f2cb1360SIngo Molnar /*
2409f2cb1360SIngo Molnar * arch_update_cpu_topology lets virtualized architectures update the
2410f2cb1360SIngo Molnar * CPU core maps. It is supposed to return 1 if the topology changed
2411f2cb1360SIngo Molnar * or 0 if it stayed the same.
2412f2cb1360SIngo Molnar */
2413f2cb1360SIngo Molnar int __weak arch_update_cpu_topology(void)
2414f2cb1360SIngo Molnar {
2415f2cb1360SIngo Molnar return 0;
2416f2cb1360SIngo Molnar }
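/*
 * Editor's note - illustrative sketch only, not upstream code: an
 * architecture or paravirt layer could override the weak hook above to
 * report topology changes. Returning 1 makes the rebuild paths below
 * rerun asym_cpu_capacity_scan() and skip the doms_cur/doms_new matching,
 * so all domains are torn down and rebuilt. The helper
 * vcpu_maps_changed() is hypothetical.
 */
#if 0
int arch_update_cpu_topology(void)
{
	/* Report whether the hypervisor re-mapped our vCPUs since last check. */
	return vcpu_maps_changed() ? 1 : 0;
}
#endif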
2417f2cb1360SIngo Molnar
2418f2cb1360SIngo Molnar cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
2419f2cb1360SIngo Molnar {
2420f2cb1360SIngo Molnar int i;
2421f2cb1360SIngo Molnar cpumask_var_t *doms;
2422f2cb1360SIngo Molnar
24236da2ec56SKees Cook doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
2424f2cb1360SIngo Molnar if (!doms)
2425f2cb1360SIngo Molnar return NULL;
2426f2cb1360SIngo Molnar for (i = 0; i < ndoms; i++) {
2427f2cb1360SIngo Molnar if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
2428f2cb1360SIngo Molnar free_sched_domains(doms, i);
2429f2cb1360SIngo Molnar return NULL;
2430f2cb1360SIngo Molnar }
2431f2cb1360SIngo Molnar }
2432f2cb1360SIngo Molnar return doms;
2433f2cb1360SIngo Molnar }
2434f2cb1360SIngo Molnar
2435f2cb1360SIngo Molnar void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2436f2cb1360SIngo Molnar {
2437f2cb1360SIngo Molnar unsigned int i;
2438f2cb1360SIngo Molnar for (i = 0; i < ndoms; i++)
2439f2cb1360SIngo Molnar free_cpumask_var(doms[i]);
2440f2cb1360SIngo Molnar kfree(doms);
2441f2cb1360SIngo Molnar }
2442f2cb1360SIngo Molnar
2443f2cb1360SIngo Molnar /*
2444cb0c0414SJuri Lelli * Set up scheduler domains and groups. For now this just excludes isolated
2445cb0c0414SJuri Lelli * CPUs, but could be used to exclude other special cases in the future.
2446f2cb1360SIngo Molnar */
24478d5dc512SPeter Zijlstra int sched_init_domains(const struct cpumask *cpu_map)
2448f2cb1360SIngo Molnar {
2449f2cb1360SIngo Molnar int err;
2450f2cb1360SIngo Molnar
24518d5dc512SPeter Zijlstra zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
24521676330eSPeter Zijlstra zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
24538d5dc512SPeter Zijlstra zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
24548d5dc512SPeter Zijlstra
2455f2cb1360SIngo Molnar arch_update_cpu_topology();
2456c744dc4aSBeata Michalska asym_cpu_capacity_scan();
2457f2cb1360SIngo Molnar ndoms_cur = 1;
2458f2cb1360SIngo Molnar doms_cur = alloc_sched_domains(ndoms_cur);
2459f2cb1360SIngo Molnar if (!doms_cur)
2460f2cb1360SIngo Molnar doms_cur = &fallback_doms;
246104d4e665SFrederic Weisbecker cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
2462f2cb1360SIngo Molnar err = build_sched_domains(doms_cur[0], NULL);
2463f2cb1360SIngo Molnar
2464f2cb1360SIngo Molnar return err;
2465f2cb1360SIngo Molnar }
2466f2cb1360SIngo Molnar
2467f2cb1360SIngo Molnar /*
2468f2cb1360SIngo Molnar * Detach sched domains from a group of CPUs specified in cpu_map
2469f2cb1360SIngo Molnar * These CPUs will now be attached to the NULL domain
2470f2cb1360SIngo Molnar */
2471f2cb1360SIngo Molnar static void detach_destroy_domains(const struct cpumask *cpu_map)
2472f2cb1360SIngo Molnar {
2473e284df70SValentin Schneider unsigned int cpu = cpumask_any(cpu_map);
2474f2cb1360SIngo Molnar int i;
2475f2cb1360SIngo Molnar
2476e284df70SValentin Schneider if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
2477e284df70SValentin Schneider static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
2478e284df70SValentin Schneider
2479f2cb1360SIngo Molnar rcu_read_lock();
2480f2cb1360SIngo Molnar for_each_cpu(i, cpu_map)
2481f2cb1360SIngo Molnar cpu_attach_domain(NULL, &def_root_domain, i);
2482f2cb1360SIngo Molnar rcu_read_unlock();
2483f2cb1360SIngo Molnar }
2484f2cb1360SIngo Molnar
2485f2cb1360SIngo Molnar /* handle null as "default" */
2486f2cb1360SIngo Molnar static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
2487f2cb1360SIngo Molnar struct sched_domain_attr *new, int idx_new)
2488f2cb1360SIngo Molnar {
2489f2cb1360SIngo Molnar struct sched_domain_attr tmp;
2490f2cb1360SIngo Molnar
2491f2cb1360SIngo Molnar /* Fast path: */
2492f2cb1360SIngo Molnar if (!new && !cur)
2493f2cb1360SIngo Molnar return 1;
2494f2cb1360SIngo Molnar
2495f2cb1360SIngo Molnar tmp = SD_ATTR_INIT;
249697fb7a0aSIngo Molnar
2497f2cb1360SIngo Molnar return !memcmp(cur ? (cur + idx_cur) : &tmp,
2498f2cb1360SIngo Molnar new ? (new + idx_new) : &tmp,
2499f2cb1360SIngo Molnar sizeof(struct sched_domain_attr));
2500f2cb1360SIngo Molnar }
2501f2cb1360SIngo Molnar
2502f2cb1360SIngo Molnar /*
2503f2cb1360SIngo Molnar * Partition sched domains as specified by the 'ndoms_new'
2504f2cb1360SIngo Molnar * cpumasks in the array doms_new[] of cpumasks. This compares
2505f2cb1360SIngo Molnar * doms_new[] to the current sched domain partitioning, doms_cur[].
2506f2cb1360SIngo Molnar * It destroys each deleted domain and builds each new domain.
2507f2cb1360SIngo Molnar *
2508f2cb1360SIngo Molnar * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
2509f2cb1360SIngo Molnar * The masks don't intersect (don't overlap). We should set up one
2510f2cb1360SIngo Molnar * sched domain for each mask. CPUs not in any of the cpumasks will
2511f2cb1360SIngo Molnar * not be load balanced. If the same cpumask appears both in the
2512f2cb1360SIngo Molnar * current 'doms_cur' domains and in the new 'doms_new', we can leave
2513f2cb1360SIngo Molnar * it as it is.
2514f2cb1360SIngo Molnar *
2515f2cb1360SIngo Molnar * The passed-in 'doms_new' should be allocated using
2516f2cb1360SIngo Molnar * alloc_sched_domains. This routine takes ownership of it and will
2517f2cb1360SIngo Molnar * free_sched_domains it when done with it. If the caller failed the
2518f2cb1360SIngo Molnar * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2519f2cb1360SIngo Molnar * and partition_sched_domains() will fall back to the single partition
2520f2cb1360SIngo Molnar * 'fallback_doms'; this also forces the domains to be rebuilt.
2521f2cb1360SIngo Molnar *
2522f2cb1360SIngo Molnar * If doms_new == NULL it will be replaced with cpu_online_mask.
2523f2cb1360SIngo Molnar * ndoms_new == 0 is a special case for destroying existing domains,
2524f2cb1360SIngo Molnar * and it will not create the default domain.
2525f2cb1360SIngo Molnar *
2526c22645f4SMathieu Poirier * Call with hotplug lock and sched_domains_mutex held
2527f2cb1360SIngo Molnar */
2528c22645f4SMathieu Poirier void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
2529f2cb1360SIngo Molnar struct sched_domain_attr *dattr_new)
2530f2cb1360SIngo Molnar {
25311f74de87SQuentin Perret bool __maybe_unused has_eas = false;
2532f2cb1360SIngo Molnar int i, j, n;
2533f2cb1360SIngo Molnar int new_topology;
2534f2cb1360SIngo Molnar
2535c22645f4SMathieu Poirier lockdep_assert_held(&sched_domains_mutex);
2536f2cb1360SIngo Molnar
2537f2cb1360SIngo Molnar /* Let the architecture update CPU core mappings: */
2538f2cb1360SIngo Molnar new_topology = arch_update_cpu_topology();
2539c744dc4aSBeata Michalska /* Trigger rebuilding CPU capacity asymmetry data */
2540c744dc4aSBeata Michalska if (new_topology)
2541c744dc4aSBeata Michalska asym_cpu_capacity_scan();
2542f2cb1360SIngo Molnar
254309e0dd8eSPeter Zijlstra if (!doms_new) {
254409e0dd8eSPeter Zijlstra WARN_ON_ONCE(dattr_new);
254509e0dd8eSPeter Zijlstra n = 0;
254609e0dd8eSPeter Zijlstra doms_new = alloc_sched_domains(1);
254709e0dd8eSPeter Zijlstra if (doms_new) {
254809e0dd8eSPeter Zijlstra n = 1;
2549edb93821SFrederic Weisbecker cpumask_and(doms_new[0], cpu_active_mask,
255004d4e665SFrederic Weisbecker housekeeping_cpumask(HK_TYPE_DOMAIN));
255109e0dd8eSPeter Zijlstra }
255209e0dd8eSPeter Zijlstra } else {
255309e0dd8eSPeter Zijlstra n = ndoms_new;
255409e0dd8eSPeter Zijlstra }
2555f2cb1360SIngo Molnar
2556f2cb1360SIngo Molnar /* Destroy deleted domains: */
2557f2cb1360SIngo Molnar for (i = 0; i < ndoms_cur; i++) {
2558f2cb1360SIngo Molnar for (j = 0; j < n && !new_topology; j++) {
25596aa140faSQuentin Perret if (cpumask_equal(doms_cur[i], doms_new[j]) &&
2560f9a25f77SMathieu Poirier dattrs_equal(dattr_cur, i, dattr_new, j)) {
2561f9a25f77SMathieu Poirier struct root_domain *rd;
2562f9a25f77SMathieu Poirier
2563f9a25f77SMathieu Poirier /*
2564f9a25f77SMathieu Poirier * This domain won't be destroyed and as such
2565f9a25f77SMathieu Poirier * its dl_bw->total_bw needs to be cleared. It
2566f9a25f77SMathieu Poirier * will be recomputed in function
2567f9a25f77SMathieu Poirier * update_tasks_root_domain().
2568f9a25f77SMathieu Poirier */
2569f9a25f77SMathieu Poirier rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2570f9a25f77SMathieu Poirier dl_clear_root_domain(rd);
2571f2cb1360SIngo Molnar goto match1;
2572f2cb1360SIngo Molnar }
2573f9a25f77SMathieu Poirier }
2574f2cb1360SIngo Molnar /* No match - a current sched domain not in new doms_new[] */
2575f2cb1360SIngo Molnar detach_destroy_domains(doms_cur[i]);
2576f2cb1360SIngo Molnar match1:
2577f2cb1360SIngo Molnar ;
2578f2cb1360SIngo Molnar }
2579f2cb1360SIngo Molnar
2580f2cb1360SIngo Molnar n = ndoms_cur;
258109e0dd8eSPeter Zijlstra if (!doms_new) {
2582f2cb1360SIngo Molnar n = 0;
2583f2cb1360SIngo Molnar doms_new = &fallback_doms;
2584edb93821SFrederic Weisbecker cpumask_and(doms_new[0], cpu_active_mask,
258504d4e665SFrederic Weisbecker housekeeping_cpumask(HK_TYPE_DOMAIN));
2586f2cb1360SIngo Molnar }
2587f2cb1360SIngo Molnar
2588f2cb1360SIngo Molnar /* Build new domains: */
2589f2cb1360SIngo Molnar for (i = 0; i < ndoms_new; i++) {
2590f2cb1360SIngo Molnar for (j = 0; j < n && !new_topology; j++) {
25916aa140faSQuentin Perret if (cpumask_equal(doms_new[i], doms_cur[j]) &&
25926aa140faSQuentin Perret dattrs_equal(dattr_new, i, dattr_cur, j))
2593f2cb1360SIngo Molnar goto match2;
2594f2cb1360SIngo Molnar }
2595f2cb1360SIngo Molnar /* No match - add a new doms_new */
2596f2cb1360SIngo Molnar build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2597f2cb1360SIngo Molnar match2:
2598f2cb1360SIngo Molnar ;
2599f2cb1360SIngo Molnar }
2600f2cb1360SIngo Molnar
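/*
 * Editor's note - illustrative example, not from the source: with
 * doms_cur = {A, B}, doms_new = {B, C} and unchanged attributes, the
 * "Destroy deleted domains" loop detaches and destroys A (no match in
 * doms_new), B is left untouched (it hits match1 above and match2 below),
 * and the "Build new domains" loop builds domains for C only. If
 * new_topology is set, the matching is skipped entirely and everything is
 * torn down and rebuilt.
 */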
2601531b5c9fSQuentin Perret #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
26026aa140faSQuentin Perret /* Build perf. domains: */
26036aa140faSQuentin Perret for (i = 0; i < ndoms_new; i++) {
2604531b5c9fSQuentin Perret for (j = 0; j < n && !sched_energy_update; j++) {
26056aa140faSQuentin Perret if (cpumask_equal(doms_new[i], doms_cur[j]) &&
26061f74de87SQuentin Perret cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
26071f74de87SQuentin Perret has_eas = true;
26086aa140faSQuentin Perret goto match3;
26096aa140faSQuentin Perret }
26101f74de87SQuentin Perret }
26116aa140faSQuentin Perret /* No match - add perf. domains for a new rd */
26121f74de87SQuentin Perret has_eas |= build_perf_domains(doms_new[i]);
26136aa140faSQuentin Perret match3:
26146aa140faSQuentin Perret ;
26156aa140faSQuentin Perret }
26161f74de87SQuentin Perret sched_energy_set(has_eas);
26176aa140faSQuentin Perret #endif
26186aa140faSQuentin Perret
2619f2cb1360SIngo Molnar /* Remember the new sched domains: */
2620f2cb1360SIngo Molnar if (doms_cur != &fallback_doms)
2621f2cb1360SIngo Molnar free_sched_domains(doms_cur, ndoms_cur);
2622f2cb1360SIngo Molnar
2623f2cb1360SIngo Molnar kfree(dattr_cur);
2624f2cb1360SIngo Molnar doms_cur = doms_new;
2625f2cb1360SIngo Molnar dattr_cur = dattr_new;
2626f2cb1360SIngo Molnar ndoms_cur = ndoms_new;
2627f2cb1360SIngo Molnar
26283b87f136SPeter Zijlstra update_sched_domain_debugfs();
2629c22645f4SMathieu Poirier }
2630f2cb1360SIngo Molnar
2631c22645f4SMathieu Poirier /*
2632c22645f4SMathieu Poirier * Call with hotplug lock held
2633c22645f4SMathieu Poirier */
2634c22645f4SMathieu Poirier void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2635c22645f4SMathieu Poirier struct sched_domain_attr *dattr_new)
2636c22645f4SMathieu Poirier {
2637c22645f4SMathieu Poirier mutex_lock(&sched_domains_mutex);
2638c22645f4SMathieu Poirier partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
2639f2cb1360SIngo Molnar mutex_unlock(&sched_domains_mutex);
2640f2cb1360SIngo Molnar }
2641
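/*
 * Editor's note - illustrative usage sketch, not upstream code: how a
 * caller (e.g. the cpuset code) might hand a two-way partitioning to
 * partition_sched_domains(), which takes ownership of the array and frees
 * the previous one. The function name and the 'setA'/'setB' cpumasks are
 * hypothetical; the caller must hold the hotplug lock (cpus_read_lock()).
 */
#if 0
static void example_repartition(const struct cpumask *setA,
				const struct cpumask *setB)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms) {
		/* Allocation failed: fall back to the single default partition. */
		partition_sched_domains(1, NULL, NULL);
		return;
	}

	cpumask_copy(doms[0], setA);
	cpumask_copy(doms[1], setB);

	/* Ownership of 'doms' (and its cpumasks) passes to the scheduler. */
	partition_sched_domains(2, doms, NULL);
}
#endif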