// SPDX-License-Identifier: GPL-2.0
/*
 * Scheduler topology setup/handling methods
 */

#include <linux/bsearch.h>

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2;

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
{
	sched_debug_verbose = true;

	return 0;
}
early_param("sched_verbose", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_verbose;
}

#define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
const struct sd_flag_debug sd_flag_debug[] = {
#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
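
/*
 * Illustrative expansion (not part of the build): if sd_flags.h contains,
 * say, SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD), the include above
 * generates the table entry:
 *
 *   [__SD_BALANCE_NEWIDLE] = { .meta_flags = SDF_SHARED_CHILD,
 *                              .name = "SD_BALANCE_NEWIDLE" },
 *
 * i.e. a table indexed by flag bit number, which the debug code below uses
 * to print flag names and check parent/child flag consistency.
 */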

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	unsigned long flags = sd->flags;
	unsigned int idx;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		unsigned int flag = BIT(idx);
		unsigned int meta_flags = sd_flag_debug[idx].meta_flags;

		if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
		    !(sd->child->flags & flag))
			printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
			       sd_flag_debug[idx].name);

		if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
		    !(sd->parent->flags & flag))
			printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
			       sd_flag_debug[idx].name);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (cpumask_empty(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
		       group->sgc->id,
		       cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
			       cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_verbose)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_verbose 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

/* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
static const unsigned int SD_DEGENERATE_GROUPS_MASK =
#include <linux/sched/sd_flags.h>
0;
#undef SD_FLAG
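
/*
 * Illustrative expansion (not part of the build): each SD_FLAG(name, mflags)
 * entry above becomes a term "(name * !!((mflags) & SDF_NEEDS_GROUPS)) |",
 * so a flag declared with SDF_NEEDS_GROUPS contributes its own value and
 * every other flag contributes 0. OR-ing all terms with the trailing 0
 * yields, at compile time, the mask of flags that are meaningless unless
 * a domain has at least two groups.
 */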

static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
	    (sd->groups != sd->groups->next))
		return 0;

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next)
		pflags &= ~SD_DEGENERATE_GROUPS_MASK;

	if (~cflags & pflags)
		return 0;

	return 1;
}
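
/*
 * Example (illustrative): consider a domain whose span collapses to the same
 * CPUs as its parent, where the parent has only a single group. The parent
 * then offers no additional balancing opportunity: sd_parent_degenerate()
 * reports this, and cpu_attach_domain() below splices the parent out of the
 * domain tree.
 */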

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
DEFINE_STATIC_KEY_FALSE(sched_energy_present);
static unsigned int sysctl_sched_energy_aware = 1;
static DEFINE_MUTEX(sched_energy_mutex);
static bool sched_energy_update;

void rebuild_sched_domains_energy(void)
{
	mutex_lock(&sched_energy_mutex);
	sched_energy_update = true;
	rebuild_sched_domains();
	sched_energy_update = false;
	mutex_unlock(&sched_energy_mutex);
}

#ifdef CONFIG_PROC_SYSCTL
static int sched_energy_aware_handler(struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, state;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		state = static_branch_unlikely(&sched_energy_present);
		if (state != sysctl_sched_energy_aware)
			rebuild_sched_domains_energy();
	}

	return ret;
}

static struct ctl_table sched_energy_aware_sysctls[] = {
	{
		.procname	= "sched_energy_aware",
		.data		= &sysctl_sched_energy_aware,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_energy_aware_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static int __init sched_energy_aware_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_energy_aware_sysctls);
	return 0;
}

late_initcall(sched_energy_aware_sysctl_init);
#endif
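
/*
 * Usage note: the above registers /proc/sys/kernel/sched_energy_aware.
 * Writes are restricted to 0 or 1 (extra1/extra2), require CAP_SYS_ADMIN,
 * and only trigger a sched domain rebuild when the requested value differs
 * from the current state of the sched_energy_present static key.
 */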

static void free_pd(struct perf_domain *pd)
{
	struct perf_domain *tmp;

	while (pd) {
		tmp = pd->next;
		kfree(pd);
		pd = tmp;
	}
}

static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
{
	while (pd) {
		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
			return pd;
		pd = pd->next;
	}

	return NULL;
}

static struct perf_domain *pd_init(int cpu)
{
	struct em_perf_domain *obj = em_cpu_get(cpu);
	struct perf_domain *pd;

	if (!obj) {
		if (sched_debug())
			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
		return NULL;
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return NULL;
	pd->em_pd = obj;

	return pd;
}

static void perf_domain_debug(const struct cpumask *cpu_map,
			      struct perf_domain *pd)
{
	if (!sched_debug() || !pd)
		return;

	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));

	while (pd) {
		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
				cpumask_first(perf_domain_span(pd)),
				cpumask_pr_args(perf_domain_span(pd)),
				em_pd_nr_perf_states(pd->em_pd));
		pd = pd->next;
	}

	printk(KERN_CONT "\n");
}

static void destroy_perf_domain_rcu(struct rcu_head *rp)
{
	struct perf_domain *pd;

	pd = container_of(rp, struct perf_domain, rcu);
	free_pd(pd);
}

static void sched_energy_set(bool has_eas)
{
	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: stopping EAS\n", __func__);
		static_branch_disable_cpuslocked(&sched_energy_present);
	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: starting EAS\n", __func__);
		static_branch_enable_cpuslocked(&sched_energy_present);
	}
}

/*
 * EAS can be used on a root domain if it meets all the following conditions:
 *    1. an Energy Model (EM) is available;
 *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
 *    3. no SMT is detected;
 *    4. the EM complexity is low enough to keep scheduling overheads low;
 *    5. schedutil is driving the frequency of all CPUs of the rd;
 *    6. frequency invariance support is present.
 *
 * The complexity of the Energy Model is defined as:
 *
 *              C = nr_pd * (nr_cpus + nr_ps)
 *
 * with parameters defined as:
 *  - nr_pd:    the number of performance domains
 *  - nr_cpus:  the number of CPUs
 *  - nr_ps:    the sum of the number of performance states of all performance
 *              domains (for example, on a system with 2 performance domains,
 *              with 10 performance states each, nr_ps = 2 * 10 = 20).
 *
 * It is generally not a good idea to use such a model in the wake-up path on
 * very complex platforms because of the associated scheduling overheads. The
 * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
 * with per-CPU DVFS and less than 8 performance states each, for example.
 */
#define EM_MAX_COMPLEXITY 2048
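
/*
 * Worked example (illustrative): 16 CPUs with per-CPU DVFS means 16
 * performance domains of one CPU each. With 8 performance states per domain:
 *
 *   C = 16 * (16 + 16 * 8) = 16 * 144 = 2304 > 2048   (EAS rejected)
 *
 * whereas with 7 performance states per domain:
 *
 *   C = 16 * (16 + 16 * 7) = 16 * 128 = 2048          (EAS allowed)
 *
 * matching the "less than 8 performance states" remark above; the check in
 * build_perf_domains() rejects only C strictly greater than EM_MAX_COMPLEXITY.
 */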

extern struct cpufreq_governor schedutil_gov;
static bool build_perf_domains(const struct cpumask *cpu_map)
{
	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
	struct perf_domain *pd = NULL, *tmp;
	int cpu = cpumask_first(cpu_map);
	struct root_domain *rd = cpu_rq(cpu)->rd;
	struct cpufreq_policy *policy;
	struct cpufreq_governor *gov;

	if (!sysctl_sched_energy_aware)
		goto free;

	/* EAS is enabled for asymmetric CPU capacity topologies. */
	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
		if (sched_debug()) {
			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
					cpumask_pr_args(cpu_map));
		}
		goto free;
	}

	/* EAS definitely does *not* handle SMT */
	if (sched_smt_active()) {
		pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
			cpumask_pr_args(cpu_map));
		goto free;
	}

	if (!arch_scale_freq_invariant()) {
		if (sched_debug()) {
			pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported",
				cpumask_pr_args(cpu_map));
		}
		goto free;
	}

	for_each_cpu(i, cpu_map) {
		/* Skip already covered CPUs. */
		if (find_pd(pd, i))
			continue;

		/* Do not attempt EAS if schedutil is not being used. */
		policy = cpufreq_cpu_get(i);
		if (!policy)
			goto free;
		gov = policy->governor;
		cpufreq_cpu_put(policy);
		if (gov != &schedutil_gov) {
			if (rd->pd)
				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
						cpumask_pr_args(cpu_map));
			goto free;
		}

		/* Create the new pd and add it to the local list. */
		tmp = pd_init(i);
		if (!tmp)
			goto free;
		tmp->next = pd;
		pd = tmp;

		/*
		 * Count performance domains and performance states for the
		 * complexity check.
		 */
		nr_pd++;
		nr_ps += em_pd_nr_perf_states(pd->em_pd);
	}

	/* Bail out if the Energy Model complexity is too high. */
	if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
						cpumask_pr_args(cpu_map));
		goto free;
	}

	perf_domain_debug(cpu_map, pd);

	/* Attach the new list of performance domains to the root domain. */
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, pd);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
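
	/*
	 * Note: the above is the usual RCU publish pattern -- readers holding
	 * rcu_read_lock() see either the old or the new list, and the old
	 * list is only freed after a grace period has elapsed.
	 */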

	return !!pd;

free:
	free_pd(pd);
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, NULL);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return false;
}
#else
static void free_pd(struct perf_domain *pd) { }
#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	free_pd(rd->pd);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	rq_unlock_irqrestore(rq, &rf);

	if (old_rd)
		call_rcu(&old_rd->rcu, free_rootdomain);
}

void sched_get_rd(struct root_domain *rd)
{
	atomic_inc(&rd->refcount);
}

void sched_put_rd(struct root_domain *rd)
{
	if (!atomic_dec_and_test(&rd->refcount))
		return;

	call_rcu(&rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
#endif

	rd->visit_gen = 0;
	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void __init init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		if (atomic_dec_and_test(&sg->ref))
			kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * A normal sched domain may have multiple group references; an
	 * overlapping domain, having private groups, only one. Iterate,
	 * dropping group/capacity references, freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this allows us to
 * avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two CPUs are in the same cache domain, see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}
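
/*
 * Illustration (assumed helper shape, see cpus_share_cache() in core.c):
 * with sd_llc_id cached per CPU, the LLC-sharing test can reduce to a pair
 * of per-CPU loads and a compare, roughly:
 *
 *   return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 *
 * instead of walking the sched_domain tree on every wakeup.
 */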

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;

			if (parent->parent) {
				parent->parent->child = tmp;
				parent->parent->groups->flags = tmp->flags;
			}

			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd) {
			struct sched_group *sg = sd->groups;

			/*
			 * sched groups hold the flags of the child sched
			 * domain for convenience. Clear such flags since
			 * the child is being destroyed.
			 */
			do {
				sg->flags = 0;
			} while (sg != sd->groups);

			sd->child = NULL;
		}
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

struct s_data {
	struct sched_domain * __percpu *sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask contains all those CPUs that could actually end up at
 * this group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}


/*
 * NUMA topology (first read the regular topology blurb below)
 *
 * Given a node-distance table, for example:
 *
 *   node   0   1   2   3
 *     0:  10  20  30  20
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  20  30  20  10
 *
 * which represents a 4 node ring topology like:
 *
 *   0 ----- 1
 *   |       |
 *   |       |
 *   |       |
 *   3 ----- 2
 *
 * We want to construct domains and groups to represent this. The way we go
 * about doing this is to build the domains on 'hops'. For each NUMA level we
 * construct the mask of all nodes reachable in @level hops.
 *
 * For the above NUMA topology that gives 3 levels:
 *
 * NUMA-2	0-3		0-3		0-3		0-3
 *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
 *
 * NUMA-1	0-1,3		0-2		1-3		0,2-3
 *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
 *
 * NUMA-0	0		1		2		3
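 *
 * Worked derivation for node 0 (from the table above): its distance row is
 * {10, 20, 30, 20}, so the nodes reachable within distance 20 (one hop) are
 * {0, 1, 3} -- the NUMA-1 span "0-1,3" -- and within distance 30 (two hops)
 * all of {0-3}, which is the NUMA-2 span.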
 *
 *
 * As can be seen, things don't nicely line up as with the regular topology.
 * When we iterate a domain in child domain chunks some nodes can be
 * represented multiple times -- hence the "overlap" naming for this part of
 * the topology.
 *
 * In order to minimize this overlap, we only build enough groups to cover the
 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
 *
 * Because:
 *
 *  - the first group of each domain is its child domain; this
 *    gets us the first 0-1,3
 *  - the only uncovered node is 2, whose child domain is 1-3.
 *
 * However, because of the overlap, computing a unique CPU for each group is
 * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
 * end up at those groups (they would end up in group: 0-1,3).
 *
 * To correct this we have to introduce the group balance mask. This mask
 * will contain those CPUs in the group that can reach this group given the
 * (child) domain tree.
 *
 * With this we can once again compute balance_cpu and sched_group_capacity
 * relations.
 *
 * XXX include words on how balance_cpu is unique and therefore can be
 * used for sched_group_capacity links.
 *
 *
 * Another 'interesting' topology is:
 *
 *   node   0   1   2   3
 *     0:  10  20  20  30
 *     1:  20  10  20  20
 *     2:  20  20  10  20
 *     3:  30  20  20  10
 *
 * Which looks a little like:
 *
 *   0 ----- 1
 *   |     / |
 *   |   /   |
 *   | /     |
 *   2 ----- 3
 *
 * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
 * are not.
 *
 * This leads to a few particularly weird cases where the sched_domain's are
 * not of the same number for each CPU. Consider:
 *
 * NUMA-2	0-3						0-3
 *  groups:	{0-2},{1-3}					{1-3},{0-2}
 *
 * NUMA-1	0-2		0-3		0-3		1-3
 *
 * NUMA-0	0		1		2		3
 *
 */


/*
 * Build the balance mask; it contains only those CPUs that can arrive at this
 * group and should be considered to continue balancing.
 *
 * We do this during the group creation pass, therefore the group information
 * isn't complete yet, however since each group represents a (child) domain we
 * can fully construct this using the sched_domain bits (which are already
 * complete).
 */
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
	const struct cpumask *sg_span = sched_group_span(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(mask);

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, mask);
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(mask));
}

/*
 * XXX: This creates per-node group entries; since the load-balancer will
 * immediately access remote memory to construct this group's load-balance
 * statistics having the groups node local is of dubious benefit.
 */
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_span(sg);
	if (sd->child) {
		cpumask_copy(sg_span, sched_domain_span(sd->child));
		sg->flags = sd->child->flags;
	} else {
		cpumask_copy(sg_span, sched_domain_span(sd));
	}

	atomic_inc(&sg->ref);
	return sg;
}

static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg)
{
	struct cpumask *mask = sched_domains_tmpmask2;
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;
	int cpu;

	build_balance_mask(sd, sg, mask);
	cpu = cpumask_first(mask);

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		cpumask_copy(group_balance_mask(sg), mask);
	else
		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
	sg_span = sched_group_span(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}

static struct sched_domain *
find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
{
	/*
	 * The proper descendant would be the one whose child won't span out
	 * of sd
	 */
	while (sibling->child &&
	       !cpumask_subset(sched_domain_span(sibling->child),
			       sched_domain_span(sd)))
		sibling = sibling->child;

	/*
	 * As we are referencing sgc across different topology level, we need
	 * to go down to skip those sched_domains which don't contribute to
	 * scheduling because they will be degenerated in cpu_attach_domain
	 */
	while (sibling->child &&
	       cpumask_equal(sched_domain_span(sibling->child),
			     sched_domain_span(sibling)))
		sibling = sibling->child;

	return sibling;
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		/*
		 * Usually we build sched_group by sibling's child sched_domain
		 * But for machines whose NUMA diameter are 3 or above, we move
		 * to build sched_group by sibling's proper descendant's child
		 * domain because sibling's child sched_domain will span out of
		 * the sched_domain being built as below.
		 *
		 * Smallest diameter=3 topology is:
		 *
		 *   node	0	1	2	3
		 *     0:	10	20	30	40
		 *     1:	20	10	20	30
		 *     2:	30	20	10	20
		 *     3:	40	30	20	10
		 *
		 *   0 --- 1 --- 2 --- 3
		 *
		 * NUMA-3	0-3		N/A		N/A		0-3
		 *  groups:	{0-2},{1-3}					{1-3},{0-2}
		 *
		 * NUMA-2	0-2		0-3		0-3		1-3
		 *  groups:	{0-1},{1-3}	{0-2},{2-3}	{1-3},{0-1}	{2-3},{0-2}
		 *
		 * NUMA-1	0-1		0-2		1-3		2-3
		 *  groups:	{0},{1}		{1},{2},{0}	{2},{3},{1}	{3},{2}
		 *
		 * NUMA-0	0		1		2		3
		 *
		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
		 * group span isn't a subset of the domain span.
		 */
		if (sibling->child &&
		    !cpumask_subset(sched_domain_span(sibling->child), span))
			sibling = find_descended_sibling(sd, sibling);

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sibling, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}


/*
 * Package topology (also see the load-balance blurb in fair.c)
 *
 * The scheduler builds a tree structure to represent a number of important
 * topology features. By default (default_topology[]) these include:
 *
 *  - Simultaneous multithreading (SMT)
 *  - Multi-Core Cache (MC)
 *  - Package (DIE)
 *
 * Where the last one more or less denotes everything up to a NUMA node.
 *
 * The tree consists of 3 primary data structures:
 *
 *	sched_domain -> sched_group -> sched_group_capacity
 *	    ^ ^             ^ ^
 *          `-'             `-'
 *
 * The sched_domains are per-CPU and have a two way link (parent & child) and
 * denote the ever growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's, each
 * denoting the domains of the level below (or individual CPUs in case of the
 * first domain level). The sched_group linked by a sched_domain includes the
 * CPU of that sched_domain [*].
 *
 * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * DIE  [                             ]
 * MC   [             ] [             ]
 * SMT  [     ] [     ] [     ] [     ]
 *
 *  - or -
 *
 * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
 * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
 * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * One way to think about it is: sched_domain moves you up and down among these
 * topology levels, while sched_group moves you sideways through it, at child
 * domain granularity.
 *
 * sched_group_capacity ensures each unique sched_group has shared storage.
 *
 * There are two related construction problems, both of which require a CPU
 * that uniquely identifies each group (for a given domain):
 *
 *  - The first is the balance_cpu (see should_we_balance() and the
 *    load-balance blurb in fair.c); for each group we only want 1 CPU to
 *    continue balancing at a higher domain.
 *
 *  - The second is the sched_group_capacity; we want all identical groups
 *    to share a single sched_group_capacity.
 *
 * Since these topologies are exclusive by construction, i.e. it is
 * impossible for an SMT thread to belong to multiple cores, and for cores
 * to be part of multiple caches,
117335a566e6SPeter Zijlstra  * for each CPU in the hierarchy.
117435a566e6SPeter Zijlstra  *
117535a566e6SPeter Zijlstra  * Therefore computing a unique CPU for each group is trivial (the iteration
117635a566e6SPeter Zijlstra  * mask is redundant and set to all 1s; all CPUs in a group will end up at
117735a566e6SPeter Zijlstra  * _that_ group), so we can simply pick the first CPU in each group.
117835a566e6SPeter Zijlstra  *
117935a566e6SPeter Zijlstra  *
118035a566e6SPeter Zijlstra  * [*] in other words, the first group of each domain is its child domain.
118135a566e6SPeter Zijlstra  */
118235a566e6SPeter Zijlstra 
11830c0e776aSPeter Zijlstra static struct sched_group *get_group(int cpu, struct sd_data *sdd)
1184f2cb1360SIngo Molnar {
1185f2cb1360SIngo Molnar 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1186f2cb1360SIngo Molnar 	struct sched_domain *child = sd->child;
11870c0e776aSPeter Zijlstra 	struct sched_group *sg;
118867d4f6ffSValentin Schneider 	bool already_visited;
1189f2cb1360SIngo Molnar 
1190f2cb1360SIngo Molnar 	if (child)
1191f2cb1360SIngo Molnar 		cpu = cpumask_first(sched_domain_span(child));
1192f2cb1360SIngo Molnar 
11930c0e776aSPeter Zijlstra 	sg = *per_cpu_ptr(sdd->sg, cpu);
11940c0e776aSPeter Zijlstra 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1195f2cb1360SIngo Molnar 
119667d4f6ffSValentin Schneider 	/* Increase refcounts for claim_allocations: */
119767d4f6ffSValentin Schneider 	already_visited = atomic_inc_return(&sg->ref) > 1;
119867d4f6ffSValentin Schneider 	/* sgc visits should follow a similar trend as sg */
119967d4f6ffSValentin Schneider 	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
120067d4f6ffSValentin Schneider 
120167d4f6ffSValentin Schneider 	/* If we have already visited that group, it's already initialized. */
120267d4f6ffSValentin Schneider 	if (already_visited)
120367d4f6ffSValentin Schneider 		return sg;
12040c0e776aSPeter Zijlstra 
12050c0e776aSPeter Zijlstra 	if (child) {
1206ae4df9d6SPeter Zijlstra 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
1207ae4df9d6SPeter Zijlstra 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
120816d364baSRicardo Neri 		sg->flags = child->flags;
12090c0e776aSPeter Zijlstra 	} else {
1210ae4df9d6SPeter Zijlstra 		cpumask_set_cpu(cpu, sched_group_span(sg));
1211e5c14b1fSPeter Zijlstra 		cpumask_set_cpu(cpu, group_balance_mask(sg));
1212f2cb1360SIngo Molnar 	}
1213f2cb1360SIngo Molnar 
1214ae4df9d6SPeter Zijlstra 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
12150c0e776aSPeter Zijlstra 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1216e3d6d0cbSMorten Rasmussen 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
12170c0e776aSPeter Zijlstra 
12180c0e776aSPeter Zijlstra 	return sg;
1219f2cb1360SIngo Molnar }
1220f2cb1360SIngo Molnar 
1221f2cb1360SIngo Molnar /*
1222f2cb1360SIngo Molnar  * build_sched_groups will build a circular linked list of the groups
1223d8743230SValentin Schneider  * covered by the given span, will set each group's ->cpumask correctly,
1224d8743230SValentin Schneider  * and will initialize their ->sgc.
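 *
 * For the 8-CPU example further above, e.g., the MC domain spanning 0-3
 * ends up with two groups, one per SMT sibling pair, linked circularly:
 *
 *   MC[0-3]: {0,1} -> {2,3} -> {0,1} (via sched_group::next)
 *
 * get_group() keys each group by the first CPU of the child domain's span,
 * so CPUs 0 and 1 resolve to the same sched_group/sched_group_capacity pair.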
1225f2cb1360SIngo Molnar  *
1226f2cb1360SIngo Molnar  * Assumes the sched_domain tree is fully constructed
1227f2cb1360SIngo Molnar  */
1228f2cb1360SIngo Molnar static int
1229f2cb1360SIngo Molnar build_sched_groups(struct sched_domain *sd, int cpu)
1230f2cb1360SIngo Molnar {
1231f2cb1360SIngo Molnar 	struct sched_group *first = NULL, *last = NULL;
1232f2cb1360SIngo Molnar 	struct sd_data *sdd = sd->private;
1233f2cb1360SIngo Molnar 	const struct cpumask *span = sched_domain_span(sd);
1234f2cb1360SIngo Molnar 	struct cpumask *covered;
1235f2cb1360SIngo Molnar 	int i;
1236f2cb1360SIngo Molnar 
1237f2cb1360SIngo Molnar 	lockdep_assert_held(&sched_domains_mutex);
1238f2cb1360SIngo Molnar 	covered = sched_domains_tmpmask;
1239f2cb1360SIngo Molnar 
1240f2cb1360SIngo Molnar 	cpumask_clear(covered);
1241f2cb1360SIngo Molnar 
12420c0e776aSPeter Zijlstra 	for_each_cpu_wrap(i, span, cpu) {
1243f2cb1360SIngo Molnar 		struct sched_group *sg;
1244f2cb1360SIngo Molnar 
1245f2cb1360SIngo Molnar 		if (cpumask_test_cpu(i, covered))
1246f2cb1360SIngo Molnar 			continue;
1247f2cb1360SIngo Molnar 
12480c0e776aSPeter Zijlstra 		sg = get_group(i, sdd);
1249f2cb1360SIngo Molnar 
1250ae4df9d6SPeter Zijlstra 		cpumask_or(covered, covered, sched_group_span(sg));
1251f2cb1360SIngo Molnar 
1252f2cb1360SIngo Molnar 		if (!first)
1253f2cb1360SIngo Molnar 			first = sg;
1254f2cb1360SIngo Molnar 		if (last)
1255f2cb1360SIngo Molnar 			last->next = sg;
1256f2cb1360SIngo Molnar 		last = sg;
1257f2cb1360SIngo Molnar 	}
1258f2cb1360SIngo Molnar 	last->next = first;
12590c0e776aSPeter Zijlstra 	sd->groups = first;
1260f2cb1360SIngo Molnar 
1261f2cb1360SIngo Molnar 	return 0;
1262f2cb1360SIngo Molnar }
1263f2cb1360SIngo Molnar 
1264f2cb1360SIngo Molnar /*
1265f2cb1360SIngo Molnar  * Initialize sched groups cpu_capacity.
1266f2cb1360SIngo Molnar  *
1267f2cb1360SIngo Molnar  * cpu_capacity indicates the capacity of sched group, which is used while
1268f2cb1360SIngo Molnar  * distributing the load between different sched groups in a sched domain.
1269f2cb1360SIngo Molnar  * Typically cpu_capacity for all the groups in a sched domain will be the
1270f2cb1360SIngo Molnar  * same unless there are asymmetries in the topology. If there are asymmetries,
1271f2cb1360SIngo Molnar  * the group having more cpu_capacity will pick up more load compared to the
1272f2cb1360SIngo Molnar  * group having less cpu_capacity.
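 *
 * As a rough sketch: get_group() seeds a freshly built group of four
 * symmetric CPUs with
 *
 *   sg->sgc->capacity = 4 * SCHED_CAPACITY_SCALE = 4096
 *
 * (with the default scale of 1024); update_group_capacity() below then
 * refines that with the actual per-CPU capacities.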
1273f2cb1360SIngo Molnar */ 1274f2cb1360SIngo Molnar static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) 1275f2cb1360SIngo Molnar { 1276f2cb1360SIngo Molnar struct sched_group *sg = sd->groups; 1277d24cb0d9STim C Chen struct cpumask *mask = sched_domains_tmpmask2; 1278f2cb1360SIngo Molnar 1279f2cb1360SIngo Molnar WARN_ON(!sg); 1280f2cb1360SIngo Molnar 1281f2cb1360SIngo Molnar do { 1282d24cb0d9STim C Chen int cpu, cores = 0, max_cpu = -1; 1283f2cb1360SIngo Molnar 1284ae4df9d6SPeter Zijlstra sg->group_weight = cpumask_weight(sched_group_span(sg)); 1285f2cb1360SIngo Molnar 1286d24cb0d9STim C Chen cpumask_copy(mask, sched_group_span(sg)); 1287d24cb0d9STim C Chen for_each_cpu(cpu, mask) { 1288d24cb0d9STim C Chen cores++; 1289d24cb0d9STim C Chen #ifdef CONFIG_SCHED_SMT 1290d24cb0d9STim C Chen cpumask_andnot(mask, mask, cpu_smt_mask(cpu)); 1291d24cb0d9STim C Chen #endif 1292d24cb0d9STim C Chen } 1293d24cb0d9STim C Chen sg->cores = cores; 1294d24cb0d9STim C Chen 1295f2cb1360SIngo Molnar if (!(sd->flags & SD_ASYM_PACKING)) 1296f2cb1360SIngo Molnar goto next; 1297f2cb1360SIngo Molnar 1298ae4df9d6SPeter Zijlstra for_each_cpu(cpu, sched_group_span(sg)) { 1299f2cb1360SIngo Molnar if (max_cpu < 0) 1300f2cb1360SIngo Molnar max_cpu = cpu; 1301f2cb1360SIngo Molnar else if (sched_asym_prefer(cpu, max_cpu)) 1302f2cb1360SIngo Molnar max_cpu = cpu; 1303f2cb1360SIngo Molnar } 1304f2cb1360SIngo Molnar sg->asym_prefer_cpu = max_cpu; 1305f2cb1360SIngo Molnar 1306f2cb1360SIngo Molnar next: 1307f2cb1360SIngo Molnar sg = sg->next; 1308f2cb1360SIngo Molnar } while (sg != sd->groups); 1309f2cb1360SIngo Molnar 1310f2cb1360SIngo Molnar if (cpu != group_balance_cpu(sg)) 1311f2cb1360SIngo Molnar return; 1312f2cb1360SIngo Molnar 1313f2cb1360SIngo Molnar update_group_capacity(sd, cpu); 1314f2cb1360SIngo Molnar } 1315f2cb1360SIngo Molnar 1316f2cb1360SIngo Molnar /* 1317c744dc4aSBeata Michalska * Asymmetric CPU capacity bits 1318c744dc4aSBeata Michalska */ 1319c744dc4aSBeata Michalska struct asym_cap_data { 1320c744dc4aSBeata Michalska struct list_head link; 1321c744dc4aSBeata Michalska unsigned long capacity; 1322c744dc4aSBeata Michalska unsigned long cpus[]; 1323c744dc4aSBeata Michalska }; 1324c744dc4aSBeata Michalska 1325c744dc4aSBeata Michalska /* 1326c744dc4aSBeata Michalska * Set of available CPUs grouped by their corresponding capacities 1327c744dc4aSBeata Michalska * Each list entry contains a CPU mask reflecting CPUs that share the same 1328c744dc4aSBeata Michalska * capacity. 1329c744dc4aSBeata Michalska * The lifespan of data is unlimited. 1330c744dc4aSBeata Michalska */ 1331c744dc4aSBeata Michalska static LIST_HEAD(asym_cap_list); 1332c744dc4aSBeata Michalska 1333c744dc4aSBeata Michalska #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus) 1334c744dc4aSBeata Michalska 1335c744dc4aSBeata Michalska /* 1336c744dc4aSBeata Michalska * Verify whether there is any CPU capacity asymmetry in a given sched domain. 1337c744dc4aSBeata Michalska * Provides sd_flags reflecting the asymmetry scope. 
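 *
 * For instance, on a hypothetical three-gear system with CPU capacities
 * 512/768/1024:
 *
 *  - a domain spanning all three capacity values:
 *    count == 3, miss == 0 -> SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL
 *  - a domain spanning only the 512/768 CPUs while the 1024 CPUs sit
 *    elsewhere in cpu_map: count == 2, miss == 1 -> SD_ASYM_CPUCAPACITY
 *  - a domain spanning a single capacity value: count < 2 -> 0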
1338c744dc4aSBeata Michalska */ 1339c744dc4aSBeata Michalska static inline int 1340c744dc4aSBeata Michalska asym_cpu_capacity_classify(const struct cpumask *sd_span, 1341c744dc4aSBeata Michalska const struct cpumask *cpu_map) 1342c744dc4aSBeata Michalska { 1343c744dc4aSBeata Michalska struct asym_cap_data *entry; 1344c744dc4aSBeata Michalska int count = 0, miss = 0; 1345c744dc4aSBeata Michalska 1346c744dc4aSBeata Michalska /* 1347c744dc4aSBeata Michalska * Count how many unique CPU capacities this domain spans across 1348c744dc4aSBeata Michalska * (compare sched_domain CPUs mask with ones representing available 1349c744dc4aSBeata Michalska * CPUs capacities). Take into account CPUs that might be offline: 1350c744dc4aSBeata Michalska * skip those. 1351c744dc4aSBeata Michalska */ 1352c744dc4aSBeata Michalska list_for_each_entry(entry, &asym_cap_list, link) { 1353c744dc4aSBeata Michalska if (cpumask_intersects(sd_span, cpu_capacity_span(entry))) 1354c744dc4aSBeata Michalska ++count; 1355c744dc4aSBeata Michalska else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry))) 1356c744dc4aSBeata Michalska ++miss; 1357c744dc4aSBeata Michalska } 1358c744dc4aSBeata Michalska 1359c744dc4aSBeata Michalska WARN_ON_ONCE(!count && !list_empty(&asym_cap_list)); 1360c744dc4aSBeata Michalska 1361c744dc4aSBeata Michalska /* No asymmetry detected */ 1362c744dc4aSBeata Michalska if (count < 2) 1363c744dc4aSBeata Michalska return 0; 1364c744dc4aSBeata Michalska /* Some of the available CPU capacity values have not been detected */ 1365c744dc4aSBeata Michalska if (miss) 1366c744dc4aSBeata Michalska return SD_ASYM_CPUCAPACITY; 1367c744dc4aSBeata Michalska 1368c744dc4aSBeata Michalska /* Full asymmetry */ 1369c744dc4aSBeata Michalska return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL; 1370c744dc4aSBeata Michalska 1371c744dc4aSBeata Michalska } 1372c744dc4aSBeata Michalska 1373c744dc4aSBeata Michalska static inline void asym_cpu_capacity_update_data(int cpu) 1374c744dc4aSBeata Michalska { 1375c744dc4aSBeata Michalska unsigned long capacity = arch_scale_cpu_capacity(cpu); 1376c744dc4aSBeata Michalska struct asym_cap_data *entry = NULL; 1377c744dc4aSBeata Michalska 1378c744dc4aSBeata Michalska list_for_each_entry(entry, &asym_cap_list, link) { 1379c744dc4aSBeata Michalska if (capacity == entry->capacity) 1380c744dc4aSBeata Michalska goto done; 1381c744dc4aSBeata Michalska } 1382c744dc4aSBeata Michalska 1383c744dc4aSBeata Michalska entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL); 1384c744dc4aSBeata Michalska if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n")) 1385c744dc4aSBeata Michalska return; 1386c744dc4aSBeata Michalska entry->capacity = capacity; 1387c744dc4aSBeata Michalska list_add(&entry->link, &asym_cap_list); 1388c744dc4aSBeata Michalska done: 1389c744dc4aSBeata Michalska __cpumask_set_cpu(cpu, cpu_capacity_span(entry)); 1390c744dc4aSBeata Michalska } 1391c744dc4aSBeata Michalska 1392c744dc4aSBeata Michalska /* 1393c744dc4aSBeata Michalska * Build-up/update list of CPUs grouped by their capacities 1394c744dc4aSBeata Michalska * An update requires explicit request to rebuild sched domains 1395c744dc4aSBeata Michalska * with state indicating CPU topology changes. 
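 *
 * E.g. offlining the last CPU of a capacity class leaves that class's
 * mask empty, so its entry is freed below; if only one class then
 * remains, the whole list is dropped since a symmetric system needs no
 * asymmetry data.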
1396c744dc4aSBeata Michalska */ 1397c744dc4aSBeata Michalska static void asym_cpu_capacity_scan(void) 1398c744dc4aSBeata Michalska { 1399c744dc4aSBeata Michalska struct asym_cap_data *entry, *next; 1400c744dc4aSBeata Michalska int cpu; 1401c744dc4aSBeata Michalska 1402c744dc4aSBeata Michalska list_for_each_entry(entry, &asym_cap_list, link) 1403c744dc4aSBeata Michalska cpumask_clear(cpu_capacity_span(entry)); 1404c744dc4aSBeata Michalska 140504d4e665SFrederic Weisbecker for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) 1406c744dc4aSBeata Michalska asym_cpu_capacity_update_data(cpu); 1407c744dc4aSBeata Michalska 1408c744dc4aSBeata Michalska list_for_each_entry_safe(entry, next, &asym_cap_list, link) { 1409c744dc4aSBeata Michalska if (cpumask_empty(cpu_capacity_span(entry))) { 1410c744dc4aSBeata Michalska list_del(&entry->link); 1411c744dc4aSBeata Michalska kfree(entry); 1412c744dc4aSBeata Michalska } 1413c744dc4aSBeata Michalska } 1414c744dc4aSBeata Michalska 1415c744dc4aSBeata Michalska /* 1416c744dc4aSBeata Michalska * Only one capacity value has been detected i.e. this system is symmetric. 1417c744dc4aSBeata Michalska * No need to keep this data around. 1418c744dc4aSBeata Michalska */ 1419c744dc4aSBeata Michalska if (list_is_singular(&asym_cap_list)) { 1420c744dc4aSBeata Michalska entry = list_first_entry(&asym_cap_list, typeof(*entry), link); 1421c744dc4aSBeata Michalska list_del(&entry->link); 1422c744dc4aSBeata Michalska kfree(entry); 1423c744dc4aSBeata Michalska } 1424c744dc4aSBeata Michalska } 1425c744dc4aSBeata Michalska 1426c744dc4aSBeata Michalska /* 1427f2cb1360SIngo Molnar * Initializers for schedule domains 1428f2cb1360SIngo Molnar * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 1429f2cb1360SIngo Molnar */ 1430f2cb1360SIngo Molnar 1431f2cb1360SIngo Molnar static int default_relax_domain_level = -1; 1432f2cb1360SIngo Molnar int sched_domain_level_max; 1433f2cb1360SIngo Molnar 1434f2cb1360SIngo Molnar static int __init setup_relax_domain_level(char *str) 1435f2cb1360SIngo Molnar { 1436f2cb1360SIngo Molnar if (kstrtoint(str, 0, &default_relax_domain_level)) 1437f2cb1360SIngo Molnar pr_warn("Unable to set relax_domain_level\n"); 1438f2cb1360SIngo Molnar 1439f2cb1360SIngo Molnar return 1; 1440f2cb1360SIngo Molnar } 1441f2cb1360SIngo Molnar __setup("relax_domain_level=", setup_relax_domain_level); 1442f2cb1360SIngo Molnar 1443f2cb1360SIngo Molnar static void set_domain_attribute(struct sched_domain *sd, 1444f2cb1360SIngo Molnar struct sched_domain_attr *attr) 1445f2cb1360SIngo Molnar { 1446f2cb1360SIngo Molnar int request; 1447f2cb1360SIngo Molnar 1448f2cb1360SIngo Molnar if (!attr || attr->relax_domain_level < 0) { 1449f2cb1360SIngo Molnar if (default_relax_domain_level < 0) 1450f2cb1360SIngo Molnar return; 1451f2cb1360SIngo Molnar request = default_relax_domain_level; 1452f2cb1360SIngo Molnar } else 1453f2cb1360SIngo Molnar request = attr->relax_domain_level; 14549ae7ab20SValentin Schneider 14559ae7ab20SValentin Schneider if (sd->level > request) { 1456f2cb1360SIngo Molnar /* Turn off idle balance on this domain: */ 1457f2cb1360SIngo Molnar sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); 1458f2cb1360SIngo Molnar } 1459f2cb1360SIngo Molnar } 1460f2cb1360SIngo Molnar 1461f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map); 1462f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map); 1463f2cb1360SIngo Molnar 1464f2cb1360SIngo Molnar static void __free_domain_allocs(struct s_data *d, 
enum s_alloc what, 1465f2cb1360SIngo Molnar const struct cpumask *cpu_map) 1466f2cb1360SIngo Molnar { 1467f2cb1360SIngo Molnar switch (what) { 1468f2cb1360SIngo Molnar case sa_rootdomain: 1469f2cb1360SIngo Molnar if (!atomic_read(&d->rd->refcount)) 1470f2cb1360SIngo Molnar free_rootdomain(&d->rd->rcu); 1471df561f66SGustavo A. R. Silva fallthrough; 1472f2cb1360SIngo Molnar case sa_sd: 1473f2cb1360SIngo Molnar free_percpu(d->sd); 1474df561f66SGustavo A. R. Silva fallthrough; 1475f2cb1360SIngo Molnar case sa_sd_storage: 1476f2cb1360SIngo Molnar __sdt_free(cpu_map); 1477df561f66SGustavo A. R. Silva fallthrough; 1478f2cb1360SIngo Molnar case sa_none: 1479f2cb1360SIngo Molnar break; 1480f2cb1360SIngo Molnar } 1481f2cb1360SIngo Molnar } 1482f2cb1360SIngo Molnar 1483f2cb1360SIngo Molnar static enum s_alloc 1484f2cb1360SIngo Molnar __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) 1485f2cb1360SIngo Molnar { 1486f2cb1360SIngo Molnar memset(d, 0, sizeof(*d)); 1487f2cb1360SIngo Molnar 1488f2cb1360SIngo Molnar if (__sdt_alloc(cpu_map)) 1489f2cb1360SIngo Molnar return sa_sd_storage; 1490f2cb1360SIngo Molnar d->sd = alloc_percpu(struct sched_domain *); 1491f2cb1360SIngo Molnar if (!d->sd) 1492f2cb1360SIngo Molnar return sa_sd_storage; 1493f2cb1360SIngo Molnar d->rd = alloc_rootdomain(); 1494f2cb1360SIngo Molnar if (!d->rd) 1495f2cb1360SIngo Molnar return sa_sd; 149697fb7a0aSIngo Molnar 1497f2cb1360SIngo Molnar return sa_rootdomain; 1498f2cb1360SIngo Molnar } 1499f2cb1360SIngo Molnar 1500f2cb1360SIngo Molnar /* 1501f2cb1360SIngo Molnar * NULL the sd_data elements we've used to build the sched_domain and 1502f2cb1360SIngo Molnar * sched_group structure so that the subsequent __free_domain_allocs() 1503f2cb1360SIngo Molnar * will not free the data we're using. 1504f2cb1360SIngo Molnar */ 1505f2cb1360SIngo Molnar static void claim_allocations(int cpu, struct sched_domain *sd) 1506f2cb1360SIngo Molnar { 1507f2cb1360SIngo Molnar struct sd_data *sdd = sd->private; 1508f2cb1360SIngo Molnar 1509f2cb1360SIngo Molnar WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); 1510f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, cpu) = NULL; 1511f2cb1360SIngo Molnar 1512f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) 1513f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, cpu) = NULL; 1514f2cb1360SIngo Molnar 1515f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) 1516f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, cpu) = NULL; 1517f2cb1360SIngo Molnar 1518f2cb1360SIngo Molnar if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) 1519f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, cpu) = NULL; 1520f2cb1360SIngo Molnar } 1521f2cb1360SIngo Molnar 1522f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1523f2cb1360SIngo Molnar enum numa_topology_type sched_numa_topology_type; 152497fb7a0aSIngo Molnar 152597fb7a0aSIngo Molnar static int sched_domains_numa_levels; 1526f2cb1360SIngo Molnar static int sched_domains_curr_level; 152797fb7a0aSIngo Molnar 152897fb7a0aSIngo Molnar int sched_max_numa_distance; 152997fb7a0aSIngo Molnar static int *sched_domains_numa_distance; 153097fb7a0aSIngo Molnar static struct cpumask ***sched_domains_numa_masks; 1531f2cb1360SIngo Molnar #endif 1532f2cb1360SIngo Molnar 1533f2cb1360SIngo Molnar /* 1534f2cb1360SIngo Molnar * SD_flags allowed in topology descriptions. 1535f2cb1360SIngo Molnar * 1536f2cb1360SIngo Molnar * These flags are purely descriptive of the topology and do not prescribe 1537f2cb1360SIngo Molnar * behaviour. 
Behaviour is artificial and mapped in the below sd_init() 1538f2cb1360SIngo Molnar * function: 1539f2cb1360SIngo Molnar * 1540f2cb1360SIngo Molnar * SD_SHARE_CPUCAPACITY - describes SMT topologies 1541f2cb1360SIngo Molnar * SD_SHARE_PKG_RESOURCES - describes shared caches 1542f2cb1360SIngo Molnar * SD_NUMA - describes NUMA topologies 1543f2cb1360SIngo Molnar * 1544f2cb1360SIngo Molnar * Odd one out, which beside describing the topology has a quirk also 1545f2cb1360SIngo Molnar * prescribes the desired behaviour that goes along with it: 1546f2cb1360SIngo Molnar * 1547f2cb1360SIngo Molnar * SD_ASYM_PACKING - describes SMT quirks 1548f2cb1360SIngo Molnar */ 1549f2cb1360SIngo Molnar #define TOPOLOGY_SD_FLAGS \ 1550f2cb1360SIngo Molnar (SD_SHARE_CPUCAPACITY | \ 1551f2cb1360SIngo Molnar SD_SHARE_PKG_RESOURCES | \ 1552f2cb1360SIngo Molnar SD_NUMA | \ 1553cfe7ddcbSValentin Schneider SD_ASYM_PACKING) 1554f2cb1360SIngo Molnar 1555f2cb1360SIngo Molnar static struct sched_domain * 1556f2cb1360SIngo Molnar sd_init(struct sched_domain_topology_level *tl, 1557f2cb1360SIngo Molnar const struct cpumask *cpu_map, 1558c744dc4aSBeata Michalska struct sched_domain *child, int cpu) 1559f2cb1360SIngo Molnar { 1560f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 1561f2cb1360SIngo Molnar struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); 1562f2cb1360SIngo Molnar int sd_id, sd_weight, sd_flags = 0; 1563c744dc4aSBeata Michalska struct cpumask *sd_span; 1564f2cb1360SIngo Molnar 1565f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1566f2cb1360SIngo Molnar /* 1567f2cb1360SIngo Molnar * Ugly hack to pass state to sd_numa_mask()... 1568f2cb1360SIngo Molnar */ 1569f2cb1360SIngo Molnar sched_domains_curr_level = tl->numa_level; 1570f2cb1360SIngo Molnar #endif 1571f2cb1360SIngo Molnar 1572f2cb1360SIngo Molnar sd_weight = cpumask_weight(tl->mask(cpu)); 1573f2cb1360SIngo Molnar 1574f2cb1360SIngo Molnar if (tl->sd_flags) 1575f2cb1360SIngo Molnar sd_flags = (*tl->sd_flags)(); 1576f2cb1360SIngo Molnar if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, 1577f2cb1360SIngo Molnar "wrong sd_flags in topology description\n")) 15789b1b234bSPeng Liu sd_flags &= TOPOLOGY_SD_FLAGS; 1579f2cb1360SIngo Molnar 1580f2cb1360SIngo Molnar *sd = (struct sched_domain){ 1581f2cb1360SIngo Molnar .min_interval = sd_weight, 1582f2cb1360SIngo Molnar .max_interval = 2*sd_weight, 15836e749913SVincent Guittot .busy_factor = 16, 15842208cdaaSVincent Guittot .imbalance_pct = 117, 1585f2cb1360SIngo Molnar 1586f2cb1360SIngo Molnar .cache_nice_tries = 0, 1587f2cb1360SIngo Molnar 158836c5bdc4SValentin Schneider .flags = 1*SD_BALANCE_NEWIDLE 1589f2cb1360SIngo Molnar | 1*SD_BALANCE_EXEC 1590f2cb1360SIngo Molnar | 1*SD_BALANCE_FORK 1591f2cb1360SIngo Molnar | 0*SD_BALANCE_WAKE 1592f2cb1360SIngo Molnar | 1*SD_WAKE_AFFINE 1593f2cb1360SIngo Molnar | 0*SD_SHARE_CPUCAPACITY 1594f2cb1360SIngo Molnar | 0*SD_SHARE_PKG_RESOURCES 1595f2cb1360SIngo Molnar | 0*SD_SERIALIZE 15969c63e84dSMorten Rasmussen | 1*SD_PREFER_SIBLING 1597f2cb1360SIngo Molnar | 0*SD_NUMA 1598f2cb1360SIngo Molnar | sd_flags 1599f2cb1360SIngo Molnar , 1600f2cb1360SIngo Molnar 1601f2cb1360SIngo Molnar .last_balance = jiffies, 1602f2cb1360SIngo Molnar .balance_interval = sd_weight, 1603f2cb1360SIngo Molnar .max_newidle_lb_cost = 0, 1604e60b56e4SVincent Guittot .last_decay_max_lb_cost = jiffies, 1605f2cb1360SIngo Molnar .child = child, 1606f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG 1607f2cb1360SIngo Molnar .name = tl->name, 1608f2cb1360SIngo Molnar #endif 1609f2cb1360SIngo Molnar }; 1610f2cb1360SIngo Molnar 
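	/*
	 * Note: the 0*SD_xxx terms in the .flags table above contribute
	 * nothing; they merely keep every available flag visible so that
	 * flipping a default on or off is a one-character change.
	 */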
1611c744dc4aSBeata Michalska sd_span = sched_domain_span(sd); 1612c744dc4aSBeata Michalska cpumask_and(sd_span, cpu_map, tl->mask(cpu)); 1613c744dc4aSBeata Michalska sd_id = cpumask_first(sd_span); 1614c744dc4aSBeata Michalska 1615c744dc4aSBeata Michalska sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map); 1616c744dc4aSBeata Michalska 1617c744dc4aSBeata Michalska WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) == 1618c744dc4aSBeata Michalska (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY), 1619c744dc4aSBeata Michalska "CPU capacity asymmetry not supported on SMT\n"); 1620f2cb1360SIngo Molnar 1621f2cb1360SIngo Molnar /* 1622f2cb1360SIngo Molnar * Convert topological properties into behaviour. 1623f2cb1360SIngo Molnar */ 1624a526d466SMorten Rasmussen /* Don't attempt to spread across CPUs of different capacities. */ 1625a526d466SMorten Rasmussen if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child) 16269c63e84dSMorten Rasmussen sd->child->flags &= ~SD_PREFER_SIBLING; 16279c63e84dSMorten Rasmussen 1628f2cb1360SIngo Molnar if (sd->flags & SD_SHARE_CPUCAPACITY) { 1629f2cb1360SIngo Molnar sd->imbalance_pct = 110; 1630f2cb1360SIngo Molnar 1631f2cb1360SIngo Molnar } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { 1632f2cb1360SIngo Molnar sd->imbalance_pct = 117; 1633f2cb1360SIngo Molnar sd->cache_nice_tries = 1; 1634f2cb1360SIngo Molnar 1635f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1636f2cb1360SIngo Molnar } else if (sd->flags & SD_NUMA) { 1637f2cb1360SIngo Molnar sd->cache_nice_tries = 2; 1638f2cb1360SIngo Molnar 16399c63e84dSMorten Rasmussen sd->flags &= ~SD_PREFER_SIBLING; 1640f2cb1360SIngo Molnar sd->flags |= SD_SERIALIZE; 1641a55c7454SMatt Fleming if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { 1642f2cb1360SIngo Molnar sd->flags &= ~(SD_BALANCE_EXEC | 1643f2cb1360SIngo Molnar SD_BALANCE_FORK | 1644f2cb1360SIngo Molnar SD_WAKE_AFFINE); 1645f2cb1360SIngo Molnar } 1646f2cb1360SIngo Molnar 1647f2cb1360SIngo Molnar #endif 1648f2cb1360SIngo Molnar } else { 1649f2cb1360SIngo Molnar sd->cache_nice_tries = 1; 1650f2cb1360SIngo Molnar } 1651f2cb1360SIngo Molnar 1652f2cb1360SIngo Molnar /* 1653f2cb1360SIngo Molnar * For all levels sharing cache; connect a sched_domain_shared 1654f2cb1360SIngo Molnar * instance. 1655f2cb1360SIngo Molnar */ 1656f2cb1360SIngo Molnar if (sd->flags & SD_SHARE_PKG_RESOURCES) { 1657f2cb1360SIngo Molnar sd->shared = *per_cpu_ptr(sdd->sds, sd_id); 1658f2cb1360SIngo Molnar atomic_inc(&sd->shared->ref); 1659f2cb1360SIngo Molnar atomic_set(&sd->shared->nr_busy_cpus, sd_weight); 1660f2cb1360SIngo Molnar } 1661f2cb1360SIngo Molnar 1662f2cb1360SIngo Molnar sd->private = sdd; 1663f2cb1360SIngo Molnar 1664f2cb1360SIngo Molnar return sd; 1665f2cb1360SIngo Molnar } 1666f2cb1360SIngo Molnar 1667f2cb1360SIngo Molnar /* 1668f2cb1360SIngo Molnar * Topology list, bottom-up. 
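 *
 * Each level's mask is expected to contain the previous level's mask;
 * build_sched_domain() warns ("arch topology borken") and fixes up the
 * span when that nesting is violated.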
1669f2cb1360SIngo Molnar */ 1670f2cb1360SIngo Molnar static struct sched_domain_topology_level default_topology[] = { 1671f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_SMT 1672f2cb1360SIngo Molnar { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, 1673f2cb1360SIngo Molnar #endif 1674778c558fSBarry Song 1675778c558fSBarry Song #ifdef CONFIG_SCHED_CLUSTER 1676778c558fSBarry Song { cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) }, 1677778c558fSBarry Song #endif 1678778c558fSBarry Song 1679f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_MC 1680f2cb1360SIngo Molnar { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, 1681f2cb1360SIngo Molnar #endif 1682f2cb1360SIngo Molnar { cpu_cpu_mask, SD_INIT_NAME(DIE) }, 1683f2cb1360SIngo Molnar { NULL, }, 1684f2cb1360SIngo Molnar }; 1685f2cb1360SIngo Molnar 1686f2cb1360SIngo Molnar static struct sched_domain_topology_level *sched_domain_topology = 1687f2cb1360SIngo Molnar default_topology; 16880fb3978bSHuang Ying static struct sched_domain_topology_level *sched_domain_topology_saved; 1689f2cb1360SIngo Molnar 1690f2cb1360SIngo Molnar #define for_each_sd_topology(tl) \ 1691f2cb1360SIngo Molnar for (tl = sched_domain_topology; tl->mask; tl++) 1692f2cb1360SIngo Molnar 16930cce0fdeSMiaohe Lin void __init set_sched_topology(struct sched_domain_topology_level *tl) 1694f2cb1360SIngo Molnar { 1695f2cb1360SIngo Molnar if (WARN_ON_ONCE(sched_smp_initialized)) 1696f2cb1360SIngo Molnar return; 1697f2cb1360SIngo Molnar 1698f2cb1360SIngo Molnar sched_domain_topology = tl; 16990fb3978bSHuang Ying sched_domain_topology_saved = NULL; 1700f2cb1360SIngo Molnar } 1701f2cb1360SIngo Molnar 1702f2cb1360SIngo Molnar #ifdef CONFIG_NUMA 1703f2cb1360SIngo Molnar 1704f2cb1360SIngo Molnar static const struct cpumask *sd_numa_mask(int cpu) 1705f2cb1360SIngo Molnar { 1706f2cb1360SIngo Molnar return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 1707f2cb1360SIngo Molnar } 1708f2cb1360SIngo Molnar 1709f2cb1360SIngo Molnar static void sched_numa_warn(const char *str) 1710f2cb1360SIngo Molnar { 1711f2cb1360SIngo Molnar static int done = false; 1712f2cb1360SIngo Molnar int i,j; 1713f2cb1360SIngo Molnar 1714f2cb1360SIngo Molnar if (done) 1715f2cb1360SIngo Molnar return; 1716f2cb1360SIngo Molnar 1717f2cb1360SIngo Molnar done = true; 1718f2cb1360SIngo Molnar 1719f2cb1360SIngo Molnar printk(KERN_WARNING "ERROR: %s\n\n", str); 1720f2cb1360SIngo Molnar 1721f2cb1360SIngo Molnar for (i = 0; i < nr_node_ids; i++) { 1722f2cb1360SIngo Molnar printk(KERN_WARNING " "); 17230fb3978bSHuang Ying for (j = 0; j < nr_node_ids; j++) { 17240fb3978bSHuang Ying if (!node_state(i, N_CPU) || !node_state(j, N_CPU)) 17250fb3978bSHuang Ying printk(KERN_CONT "(%02d) ", node_distance(i,j)); 17260fb3978bSHuang Ying else 1727f2cb1360SIngo Molnar printk(KERN_CONT " %02d ", node_distance(i,j)); 17280fb3978bSHuang Ying } 1729f2cb1360SIngo Molnar printk(KERN_CONT "\n"); 1730f2cb1360SIngo Molnar } 1731f2cb1360SIngo Molnar printk(KERN_WARNING "\n"); 1732f2cb1360SIngo Molnar } 1733f2cb1360SIngo Molnar 1734f2cb1360SIngo Molnar bool find_numa_distance(int distance) 1735f2cb1360SIngo Molnar { 17360fb3978bSHuang Ying bool found = false; 17370fb3978bSHuang Ying int i, *distances; 1738f2cb1360SIngo Molnar 1739f2cb1360SIngo Molnar if (distance == node_distance(0, 0)) 1740f2cb1360SIngo Molnar return true; 1741f2cb1360SIngo Molnar 17420fb3978bSHuang Ying rcu_read_lock(); 17430fb3978bSHuang Ying distances = rcu_dereference(sched_domains_numa_distance); 17440fb3978bSHuang Ying if (!distances) 17450fb3978bSHuang 
Ying goto unlock; 1746f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 17470fb3978bSHuang Ying if (distances[i] == distance) { 17480fb3978bSHuang Ying found = true; 17490fb3978bSHuang Ying break; 17500fb3978bSHuang Ying } 17510fb3978bSHuang Ying } 17520fb3978bSHuang Ying unlock: 17530fb3978bSHuang Ying rcu_read_unlock(); 17540fb3978bSHuang Ying 17550fb3978bSHuang Ying return found; 1756f2cb1360SIngo Molnar } 1757f2cb1360SIngo Molnar 17580fb3978bSHuang Ying #define for_each_cpu_node_but(n, nbut) \ 17590fb3978bSHuang Ying for_each_node_state(n, N_CPU) \ 17600fb3978bSHuang Ying if (n == nbut) \ 17610fb3978bSHuang Ying continue; \ 17620fb3978bSHuang Ying else 1763f2cb1360SIngo Molnar 1764f2cb1360SIngo Molnar /* 1765f2cb1360SIngo Molnar * A system can have three types of NUMA topology: 1766f2cb1360SIngo Molnar * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system 1767f2cb1360SIngo Molnar * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes 1768f2cb1360SIngo Molnar * NUMA_BACKPLANE: nodes can reach other nodes through a backplane 1769f2cb1360SIngo Molnar * 1770f2cb1360SIngo Molnar * The difference between a glueless mesh topology and a backplane 1771f2cb1360SIngo Molnar * topology lies in whether communication between not directly 1772f2cb1360SIngo Molnar * connected nodes goes through intermediary nodes (where programs 1773f2cb1360SIngo Molnar * could run), or through backplane controllers. This affects 1774f2cb1360SIngo Molnar * placement of programs. 1775f2cb1360SIngo Molnar * 1776f2cb1360SIngo Molnar * The type of topology can be discerned with the following tests: 1777f2cb1360SIngo Molnar * - If the maximum distance between any nodes is 1 hop, the system 1778f2cb1360SIngo Molnar * is directly connected. 1779f2cb1360SIngo Molnar * - If for two nodes A and B, located N > 1 hops away from each other, 1780f2cb1360SIngo Molnar * there is an intermediary node C, which is < N hops away from both 1781f2cb1360SIngo Molnar * nodes A and B, the system is a glueless mesh. 1782f2cb1360SIngo Molnar */ 17830fb3978bSHuang Ying static void init_numa_topology_type(int offline_node) 1784f2cb1360SIngo Molnar { 1785f2cb1360SIngo Molnar int a, b, c, n; 1786f2cb1360SIngo Molnar 1787f2cb1360SIngo Molnar n = sched_max_numa_distance; 1788f2cb1360SIngo Molnar 1789e5e96fafSSrikar Dronamraju if (sched_domains_numa_levels <= 2) { 1790f2cb1360SIngo Molnar sched_numa_topology_type = NUMA_DIRECT; 1791f2cb1360SIngo Molnar return; 1792f2cb1360SIngo Molnar } 1793f2cb1360SIngo Molnar 17940fb3978bSHuang Ying for_each_cpu_node_but(a, offline_node) { 17950fb3978bSHuang Ying for_each_cpu_node_but(b, offline_node) { 1796f2cb1360SIngo Molnar /* Find two nodes furthest removed from each other. */ 1797f2cb1360SIngo Molnar if (node_distance(a, b) < n) 1798f2cb1360SIngo Molnar continue; 1799f2cb1360SIngo Molnar 1800f2cb1360SIngo Molnar /* Is there an intermediary node between a and b? 
*/ 18010fb3978bSHuang Ying for_each_cpu_node_but(c, offline_node) { 1802f2cb1360SIngo Molnar if (node_distance(a, c) < n && 1803f2cb1360SIngo Molnar node_distance(b, c) < n) { 1804f2cb1360SIngo Molnar sched_numa_topology_type = 1805f2cb1360SIngo Molnar NUMA_GLUELESS_MESH; 1806f2cb1360SIngo Molnar return; 1807f2cb1360SIngo Molnar } 1808f2cb1360SIngo Molnar } 1809f2cb1360SIngo Molnar 1810f2cb1360SIngo Molnar sched_numa_topology_type = NUMA_BACKPLANE; 1811f2cb1360SIngo Molnar return; 1812f2cb1360SIngo Molnar } 1813f2cb1360SIngo Molnar } 18140fb3978bSHuang Ying 18150fb3978bSHuang Ying pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n"); 18160fb3978bSHuang Ying sched_numa_topology_type = NUMA_DIRECT; 1817f2cb1360SIngo Molnar } 1818f2cb1360SIngo Molnar 1819620a6dc4SValentin Schneider 1820620a6dc4SValentin Schneider #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS) 1821620a6dc4SValentin Schneider 18220fb3978bSHuang Ying void sched_init_numa(int offline_node) 1823f2cb1360SIngo Molnar { 1824f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 1825620a6dc4SValentin Schneider unsigned long *distance_map; 1826620a6dc4SValentin Schneider int nr_levels = 0; 1827620a6dc4SValentin Schneider int i, j; 18280fb3978bSHuang Ying int *distances; 18290fb3978bSHuang Ying struct cpumask ***masks; 1830051f3ca0SSuravee Suthikulpanit 1831f2cb1360SIngo Molnar /* 1832f2cb1360SIngo Molnar * O(nr_nodes^2) deduplicating selection sort -- in order to find the 1833f2cb1360SIngo Molnar * unique distances in the node_distance() table. 1834f2cb1360SIngo Molnar */ 1835620a6dc4SValentin Schneider distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL); 1836620a6dc4SValentin Schneider if (!distance_map) 1837620a6dc4SValentin Schneider return; 1838620a6dc4SValentin Schneider 1839620a6dc4SValentin Schneider bitmap_zero(distance_map, NR_DISTANCE_VALUES); 18400fb3978bSHuang Ying for_each_cpu_node_but(i, offline_node) { 18410fb3978bSHuang Ying for_each_cpu_node_but(j, offline_node) { 1842620a6dc4SValentin Schneider int distance = node_distance(i, j); 1843f2cb1360SIngo Molnar 1844620a6dc4SValentin Schneider if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) { 1845620a6dc4SValentin Schneider sched_numa_warn("Invalid distance value range"); 18460fb3978bSHuang Ying bitmap_free(distance_map); 1847620a6dc4SValentin Schneider return; 1848620a6dc4SValentin Schneider } 1849f2cb1360SIngo Molnar 1850620a6dc4SValentin Schneider bitmap_set(distance_map, distance, 1); 1851620a6dc4SValentin Schneider } 1852620a6dc4SValentin Schneider } 1853f2cb1360SIngo Molnar /* 1854620a6dc4SValentin Schneider * We can now figure out how many unique distance values there are and 1855620a6dc4SValentin Schneider * allocate memory accordingly. 
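	 *
	 * E.g. with the diameter-3 example table further above (distances
	 * 10/20/30/40), four bits end up set in distance_map and
	 * nr_levels == 4.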
1856f2cb1360SIngo Molnar 	 */
1857620a6dc4SValentin Schneider 	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
1858f2cb1360SIngo Molnar 
18590fb3978bSHuang Ying 	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
18600fb3978bSHuang Ying 	if (!distances) {
1861620a6dc4SValentin Schneider 		bitmap_free(distance_map);
1862620a6dc4SValentin Schneider 		return;
1863f2cb1360SIngo Molnar 	}
1864620a6dc4SValentin Schneider 
1865620a6dc4SValentin Schneider 	for (i = 0, j = 0; i < nr_levels; i++, j++) {
1866620a6dc4SValentin Schneider 		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
18670fb3978bSHuang Ying 		distances[i] = j;
1868f2cb1360SIngo Molnar 	}
18690fb3978bSHuang Ying 	rcu_assign_pointer(sched_domains_numa_distance, distances);
1870f2cb1360SIngo Molnar 
1871620a6dc4SValentin Schneider 	bitmap_free(distance_map);
1872620a6dc4SValentin Schneider 
1873f2cb1360SIngo Molnar 	/*
1874620a6dc4SValentin Schneider 	 * 'nr_levels' contains the number of unique distances
1875f2cb1360SIngo Molnar 	 *
1876f2cb1360SIngo Molnar 	 * The sched_domains_numa_distance[] array includes the actual distance
1877f2cb1360SIngo Molnar 	 * numbers.
1878f2cb1360SIngo Molnar 	 */
1879f2cb1360SIngo Molnar 
1880f2cb1360SIngo Molnar 	/*
1881f2cb1360SIngo Molnar 	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
1882f2cb1360SIngo Molnar 	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
1883620a6dc4SValentin Schneider 	 * the array will contain less than 'nr_levels' members. This could be
1884f2cb1360SIngo Molnar 	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
1885f2cb1360SIngo Molnar 	 * in other functions.
1886f2cb1360SIngo Molnar 	 *
1887620a6dc4SValentin Schneider 	 * We reset it to 'nr_levels' at the end of this function.
1888f2cb1360SIngo Molnar 	 */
1889f2cb1360SIngo Molnar 	sched_domains_numa_levels = 0;
1890f2cb1360SIngo Molnar 
18910fb3978bSHuang Ying 	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
18920fb3978bSHuang Ying 	if (!masks)
1893f2cb1360SIngo Molnar 		return;
1894f2cb1360SIngo Molnar 
1895f2cb1360SIngo Molnar 	/*
1896f2cb1360SIngo Molnar 	 * Now for each level, construct a mask per node which contains all
1897f2cb1360SIngo Molnar 	 * CPUs of nodes that are that many hops away from us.
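	 *
	 * E.g. for the 0 --- 1 --- 2 --- 3 line topology above, node 0 gets:
	 *
	 *   masks[0][0]: CPUs of node 0	(distance <= 10)
	 *   masks[1][0]: CPUs of nodes 0-1	(distance <= 20)
	 *   masks[2][0]: CPUs of nodes 0-2	(distance <= 30)
	 *   masks[3][0]: CPUs of nodes 0-3	(distance <= 40)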
1898f2cb1360SIngo Molnar */ 1899620a6dc4SValentin Schneider for (i = 0; i < nr_levels; i++) { 19000fb3978bSHuang Ying masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); 19010fb3978bSHuang Ying if (!masks[i]) 1902f2cb1360SIngo Molnar return; 1903f2cb1360SIngo Molnar 19040fb3978bSHuang Ying for_each_cpu_node_but(j, offline_node) { 1905f2cb1360SIngo Molnar struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); 1906620a6dc4SValentin Schneider int k; 1907620a6dc4SValentin Schneider 1908f2cb1360SIngo Molnar if (!mask) 1909f2cb1360SIngo Molnar return; 1910f2cb1360SIngo Molnar 19110fb3978bSHuang Ying masks[i][j] = mask; 1912f2cb1360SIngo Molnar 19130fb3978bSHuang Ying for_each_cpu_node_but(k, offline_node) { 1914620a6dc4SValentin Schneider if (sched_debug() && (node_distance(j, k) != node_distance(k, j))) 1915620a6dc4SValentin Schneider sched_numa_warn("Node-distance not symmetric"); 1916620a6dc4SValentin Schneider 1917f2cb1360SIngo Molnar if (node_distance(j, k) > sched_domains_numa_distance[i]) 1918f2cb1360SIngo Molnar continue; 1919f2cb1360SIngo Molnar 1920f2cb1360SIngo Molnar cpumask_or(mask, mask, cpumask_of_node(k)); 1921f2cb1360SIngo Molnar } 1922f2cb1360SIngo Molnar } 1923f2cb1360SIngo Molnar } 19240fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_masks, masks); 1925f2cb1360SIngo Molnar 1926f2cb1360SIngo Molnar /* Compute default topology size */ 1927f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++); 1928f2cb1360SIngo Molnar 192971e5f664SDietmar Eggemann tl = kzalloc((i + nr_levels + 1) * 1930f2cb1360SIngo Molnar sizeof(struct sched_domain_topology_level), GFP_KERNEL); 1931f2cb1360SIngo Molnar if (!tl) 1932f2cb1360SIngo Molnar return; 1933f2cb1360SIngo Molnar 1934f2cb1360SIngo Molnar /* 1935f2cb1360SIngo Molnar * Copy the default topology bits.. 1936f2cb1360SIngo Molnar */ 1937f2cb1360SIngo Molnar for (i = 0; sched_domain_topology[i].mask; i++) 1938f2cb1360SIngo Molnar tl[i] = sched_domain_topology[i]; 1939f2cb1360SIngo Molnar 1940f2cb1360SIngo Molnar /* 1941051f3ca0SSuravee Suthikulpanit * Add the NUMA identity distance, aka single NODE. 1942051f3ca0SSuravee Suthikulpanit */ 1943051f3ca0SSuravee Suthikulpanit tl[i++] = (struct sched_domain_topology_level){ 1944051f3ca0SSuravee Suthikulpanit .mask = sd_numa_mask, 1945051f3ca0SSuravee Suthikulpanit .numa_level = 0, 1946051f3ca0SSuravee Suthikulpanit SD_INIT_NAME(NODE) 1947051f3ca0SSuravee Suthikulpanit }; 1948051f3ca0SSuravee Suthikulpanit 1949051f3ca0SSuravee Suthikulpanit /* 1950f2cb1360SIngo Molnar * .. and append 'j' levels of NUMA goodness. 
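	 *
	 * For the same example this yields, bottom-up: the copied defaults
	 * (e.g. SMT/MC/DIE), then NODE, then one NUMA level each for
	 * distances 20, 30 and 40.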
1951f2cb1360SIngo Molnar */ 1952620a6dc4SValentin Schneider for (j = 1; j < nr_levels; i++, j++) { 1953f2cb1360SIngo Molnar tl[i] = (struct sched_domain_topology_level){ 1954f2cb1360SIngo Molnar .mask = sd_numa_mask, 1955f2cb1360SIngo Molnar .sd_flags = cpu_numa_flags, 1956f2cb1360SIngo Molnar .flags = SDTL_OVERLAP, 1957f2cb1360SIngo Molnar .numa_level = j, 1958f2cb1360SIngo Molnar SD_INIT_NAME(NUMA) 1959f2cb1360SIngo Molnar }; 1960f2cb1360SIngo Molnar } 1961f2cb1360SIngo Molnar 19620fb3978bSHuang Ying sched_domain_topology_saved = sched_domain_topology; 1963f2cb1360SIngo Molnar sched_domain_topology = tl; 1964f2cb1360SIngo Molnar 1965620a6dc4SValentin Schneider sched_domains_numa_levels = nr_levels; 19660fb3978bSHuang Ying WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]); 1967f2cb1360SIngo Molnar 19680fb3978bSHuang Ying init_numa_topology_type(offline_node); 19690083242cSValentin Schneider } 19700083242cSValentin Schneider 19710fb3978bSHuang Ying 19720fb3978bSHuang Ying static void sched_reset_numa(void) 19730083242cSValentin Schneider { 19740fb3978bSHuang Ying int nr_levels, *distances; 19750fb3978bSHuang Ying struct cpumask ***masks; 19760fb3978bSHuang Ying 19770fb3978bSHuang Ying nr_levels = sched_domains_numa_levels; 19780fb3978bSHuang Ying sched_domains_numa_levels = 0; 19790fb3978bSHuang Ying sched_max_numa_distance = 0; 19800fb3978bSHuang Ying sched_numa_topology_type = NUMA_DIRECT; 19810fb3978bSHuang Ying distances = sched_domains_numa_distance; 19820fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_distance, NULL); 19830fb3978bSHuang Ying masks = sched_domains_numa_masks; 19840fb3978bSHuang Ying rcu_assign_pointer(sched_domains_numa_masks, NULL); 19850fb3978bSHuang Ying if (distances || masks) { 19860083242cSValentin Schneider int i, j; 19870083242cSValentin Schneider 19880fb3978bSHuang Ying synchronize_rcu(); 19890fb3978bSHuang Ying kfree(distances); 19900fb3978bSHuang Ying for (i = 0; i < nr_levels && masks; i++) { 19910fb3978bSHuang Ying if (!masks[i]) 19920fb3978bSHuang Ying continue; 19930fb3978bSHuang Ying for_each_node(j) 19940fb3978bSHuang Ying kfree(masks[i][j]); 19950fb3978bSHuang Ying kfree(masks[i]); 19960fb3978bSHuang Ying } 19970fb3978bSHuang Ying kfree(masks); 19980fb3978bSHuang Ying } 19990fb3978bSHuang Ying if (sched_domain_topology_saved) { 20000fb3978bSHuang Ying kfree(sched_domain_topology); 20010fb3978bSHuang Ying sched_domain_topology = sched_domain_topology_saved; 20020fb3978bSHuang Ying sched_domain_topology_saved = NULL; 20030fb3978bSHuang Ying } 20040fb3978bSHuang Ying } 20050fb3978bSHuang Ying 20060083242cSValentin Schneider /* 20070fb3978bSHuang Ying * Call with hotplug lock held 20080083242cSValentin Schneider */ 20090fb3978bSHuang Ying void sched_update_numa(int cpu, bool online) 20100fb3978bSHuang Ying { 20110fb3978bSHuang Ying int node; 20120fb3978bSHuang Ying 20130fb3978bSHuang Ying node = cpu_to_node(cpu); 20140fb3978bSHuang Ying /* 20150fb3978bSHuang Ying * Scheduler NUMA topology is updated when the first CPU of a 20160fb3978bSHuang Ying * node is onlined or the last CPU of a node is offlined. 20170fb3978bSHuang Ying */ 20180fb3978bSHuang Ying if (cpumask_weight(cpumask_of_node(node)) != 1) 20190083242cSValentin Schneider return; 20200083242cSValentin Schneider 20210fb3978bSHuang Ying sched_reset_numa(); 20220fb3978bSHuang Ying sched_init_numa(online ? 
NUMA_NO_NODE : node); 2023f2cb1360SIngo Molnar } 2024f2cb1360SIngo Molnar 2025f2cb1360SIngo Molnar void sched_domains_numa_masks_set(unsigned int cpu) 2026f2cb1360SIngo Molnar { 2027f2cb1360SIngo Molnar int node = cpu_to_node(cpu); 2028f2cb1360SIngo Molnar int i, j; 2029f2cb1360SIngo Molnar 2030f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 2031f2cb1360SIngo Molnar for (j = 0; j < nr_node_ids; j++) { 20320fb3978bSHuang Ying if (!node_state(j, N_CPU)) 20330083242cSValentin Schneider continue; 20340083242cSValentin Schneider 20350083242cSValentin Schneider /* Set ourselves in the remote node's masks */ 2036f2cb1360SIngo Molnar if (node_distance(j, node) <= sched_domains_numa_distance[i]) 2037f2cb1360SIngo Molnar cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); 2038f2cb1360SIngo Molnar } 2039f2cb1360SIngo Molnar } 2040f2cb1360SIngo Molnar } 2041f2cb1360SIngo Molnar 2042f2cb1360SIngo Molnar void sched_domains_numa_masks_clear(unsigned int cpu) 2043f2cb1360SIngo Molnar { 2044f2cb1360SIngo Molnar int i, j; 2045f2cb1360SIngo Molnar 2046f2cb1360SIngo Molnar for (i = 0; i < sched_domains_numa_levels; i++) { 20470fb3978bSHuang Ying for (j = 0; j < nr_node_ids; j++) { 20480fb3978bSHuang Ying if (sched_domains_numa_masks[i][j]) 2049f2cb1360SIngo Molnar cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); 2050f2cb1360SIngo Molnar } 2051f2cb1360SIngo Molnar } 20520fb3978bSHuang Ying } 2053f2cb1360SIngo Molnar 2054e0e8d491SWanpeng Li /* 2055e0e8d491SWanpeng Li * sched_numa_find_closest() - given the NUMA topology, find the cpu 2056e0e8d491SWanpeng Li * closest to @cpu from @cpumask. 2057e0e8d491SWanpeng Li * cpumask: cpumask to find a cpu from 2058e0e8d491SWanpeng Li * cpu: cpu to be close to 2059e0e8d491SWanpeng Li * 2060e0e8d491SWanpeng Li * returns: cpu, or nr_cpu_ids when nothing found. 
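 *
 * A minimal usage sketch (candidate_mask and prev_cpu are hypothetical):
 *
 *	cpu = sched_numa_find_closest(candidate_mask, prev_cpu);
 *	if (cpu >= nr_cpu_ids)
 *		cpu = cpumask_any(candidate_mask);
 *
 * i.e. fall back to any candidate when no NUMA mask data is available.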
2061e0e8d491SWanpeng Li */ 2062e0e8d491SWanpeng Li int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 2063e0e8d491SWanpeng Li { 20640fb3978bSHuang Ying int i, j = cpu_to_node(cpu), found = nr_cpu_ids; 20650fb3978bSHuang Ying struct cpumask ***masks; 2066e0e8d491SWanpeng Li 20670fb3978bSHuang Ying rcu_read_lock(); 20680fb3978bSHuang Ying masks = rcu_dereference(sched_domains_numa_masks); 20690fb3978bSHuang Ying if (!masks) 20700fb3978bSHuang Ying goto unlock; 2071e0e8d491SWanpeng Li for (i = 0; i < sched_domains_numa_levels; i++) { 20720fb3978bSHuang Ying if (!masks[i][j]) 20730fb3978bSHuang Ying break; 20740fb3978bSHuang Ying cpu = cpumask_any_and(cpus, masks[i][j]); 20750fb3978bSHuang Ying if (cpu < nr_cpu_ids) { 20760fb3978bSHuang Ying found = cpu; 20770fb3978bSHuang Ying break; 2078e0e8d491SWanpeng Li } 20790fb3978bSHuang Ying } 20800fb3978bSHuang Ying unlock: 20810fb3978bSHuang Ying rcu_read_unlock(); 20820fb3978bSHuang Ying 20830fb3978bSHuang Ying return found; 2084e0e8d491SWanpeng Li } 2085e0e8d491SWanpeng Li 2086cd7f5535SYury Norov struct __cmp_key { 2087cd7f5535SYury Norov const struct cpumask *cpus; 2088cd7f5535SYury Norov struct cpumask ***masks; 2089cd7f5535SYury Norov int node; 2090cd7f5535SYury Norov int cpu; 2091cd7f5535SYury Norov int w; 2092cd7f5535SYury Norov }; 2093cd7f5535SYury Norov 2094cd7f5535SYury Norov static int hop_cmp(const void *a, const void *b) 2095cd7f5535SYury Norov { 209601bb11adSYury Norov struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b; 2097cd7f5535SYury Norov struct __cmp_key *k = (struct __cmp_key *)a; 2098cd7f5535SYury Norov 2099cd7f5535SYury Norov if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu) 2100cd7f5535SYury Norov return 1; 2101cd7f5535SYury Norov 210201bb11adSYury Norov if (b == k->masks) { 210301bb11adSYury Norov k->w = 0; 210401bb11adSYury Norov return 0; 210501bb11adSYury Norov } 210601bb11adSYury Norov 210701bb11adSYury Norov prev_hop = *((struct cpumask ***)b - 1); 210801bb11adSYury Norov k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]); 2109cd7f5535SYury Norov if (k->w <= k->cpu) 2110cd7f5535SYury Norov return 0; 2111cd7f5535SYury Norov 2112cd7f5535SYury Norov return -1; 2113cd7f5535SYury Norov } 2114cd7f5535SYury Norov 2115cd7f5535SYury Norov /* 2116cd7f5535SYury Norov * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth next cpu 2117cd7f5535SYury Norov * closest to @cpu from @cpumask. 2118cd7f5535SYury Norov * cpumask: cpumask to find a cpu from 2119cd7f5535SYury Norov * cpu: Nth cpu to find 2120cd7f5535SYury Norov * 2121cd7f5535SYury Norov * returns: cpu, or nr_cpu_ids when nothing found. 
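 *
 * A usage sketch, in the spirit of what cpumask_local_spread() does to
 * spread e.g. IRQ affinities by NUMA distance (i is a hypothetical index):
 *
 *	cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);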
2122cd7f5535SYury Norov */ 2123cd7f5535SYury Norov int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node) 2124cd7f5535SYury Norov { 2125*b633c051SYury Norov struct __cmp_key k = { .cpus = cpus, .cpu = cpu }; 2126cd7f5535SYury Norov struct cpumask ***hop_masks; 2127cd7f5535SYury Norov int hop, ret = nr_cpu_ids; 2128cd7f5535SYury Norov 2129cd7f5535SYury Norov rcu_read_lock(); 2130cd7f5535SYury Norov 2131*b633c051SYury Norov /* CPU-less node entries are uninitialized in sched_domains_numa_masks */ 2132*b633c051SYury Norov node = numa_nearest_node(node, N_CPU); 2133*b633c051SYury Norov k.node = node; 2134*b633c051SYury Norov 2135cd7f5535SYury Norov k.masks = rcu_dereference(sched_domains_numa_masks); 2136cd7f5535SYury Norov if (!k.masks) 2137cd7f5535SYury Norov goto unlock; 2138cd7f5535SYury Norov 2139cd7f5535SYury Norov hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp); 2140cd7f5535SYury Norov hop = hop_masks - k.masks; 2141cd7f5535SYury Norov 2142cd7f5535SYury Norov ret = hop ? 2143cd7f5535SYury Norov cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) : 2144cd7f5535SYury Norov cpumask_nth_and(cpu, cpus, k.masks[0][node]); 2145cd7f5535SYury Norov unlock: 2146cd7f5535SYury Norov rcu_read_unlock(); 2147cd7f5535SYury Norov return ret; 2148cd7f5535SYury Norov } 2149cd7f5535SYury Norov EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu); 21509feae658SValentin Schneider 21519feae658SValentin Schneider /** 21529feae658SValentin Schneider * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from 21539feae658SValentin Schneider * @node 21549feae658SValentin Schneider * @node: The node to count hops from. 21559feae658SValentin Schneider * @hops: Include CPUs up to that many hops away. 0 means local node. 21569feae658SValentin Schneider * 21579feae658SValentin Schneider * Return: On success, a pointer to a cpumask of CPUs at most @hops away from 21589feae658SValentin Schneider * @node, an error value otherwise. 21599feae658SValentin Schneider * 21609feae658SValentin Schneider * Requires rcu_lock to be held. Returned cpumask is only valid within that 21619feae658SValentin Schneider * read-side section, copy it if required beyond that. 21629feae658SValentin Schneider * 21639feae658SValentin Schneider * Note that not all hops are equal in distance; see sched_init_numa() for how 21649feae658SValentin Schneider * distances and masks are handled. 21659feae658SValentin Schneider * Also note that this is a reflection of sched_domains_numa_masks, which may change 21669feae658SValentin Schneider * during the lifetime of the system (offline nodes are taken out of the masks). 
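 *
 * A usage sketch (the for_each_numa_hop_mask() iterator in
 * <linux/topology.h> wraps this pattern):
 *
 *	rcu_read_lock();
 *	for (hops = 0; ; hops++) {
 *		const struct cpumask *m = sched_numa_hop_mask(node, hops);
 *
 *		if (IS_ERR(m))
 *			break;
 *		... CPUs in m are at most hops away from node ...
 *	}
 *	rcu_read_unlock();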
21679feae658SValentin Schneider */ 21689feae658SValentin Schneider const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops) 21699feae658SValentin Schneider { 21709feae658SValentin Schneider struct cpumask ***masks; 21719feae658SValentin Schneider 21729feae658SValentin Schneider if (node >= nr_node_ids || hops >= sched_domains_numa_levels) 21739feae658SValentin Schneider return ERR_PTR(-EINVAL); 21749feae658SValentin Schneider 21759feae658SValentin Schneider masks = rcu_dereference(sched_domains_numa_masks); 21769feae658SValentin Schneider if (!masks) 21779feae658SValentin Schneider return ERR_PTR(-EBUSY); 21789feae658SValentin Schneider 21799feae658SValentin Schneider return masks[hops][node]; 21809feae658SValentin Schneider } 21819feae658SValentin Schneider EXPORT_SYMBOL_GPL(sched_numa_hop_mask); 21829feae658SValentin Schneider 2183f2cb1360SIngo Molnar #endif /* CONFIG_NUMA */ 2184f2cb1360SIngo Molnar 2185f2cb1360SIngo Molnar static int __sdt_alloc(const struct cpumask *cpu_map) 2186f2cb1360SIngo Molnar { 2187f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 2188f2cb1360SIngo Molnar int j; 2189f2cb1360SIngo Molnar 2190f2cb1360SIngo Molnar for_each_sd_topology(tl) { 2191f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 2192f2cb1360SIngo Molnar 2193f2cb1360SIngo Molnar sdd->sd = alloc_percpu(struct sched_domain *); 2194f2cb1360SIngo Molnar if (!sdd->sd) 2195f2cb1360SIngo Molnar return -ENOMEM; 2196f2cb1360SIngo Molnar 2197f2cb1360SIngo Molnar sdd->sds = alloc_percpu(struct sched_domain_shared *); 2198f2cb1360SIngo Molnar if (!sdd->sds) 2199f2cb1360SIngo Molnar return -ENOMEM; 2200f2cb1360SIngo Molnar 2201f2cb1360SIngo Molnar sdd->sg = alloc_percpu(struct sched_group *); 2202f2cb1360SIngo Molnar if (!sdd->sg) 2203f2cb1360SIngo Molnar return -ENOMEM; 2204f2cb1360SIngo Molnar 2205f2cb1360SIngo Molnar sdd->sgc = alloc_percpu(struct sched_group_capacity *); 2206f2cb1360SIngo Molnar if (!sdd->sgc) 2207f2cb1360SIngo Molnar return -ENOMEM; 2208f2cb1360SIngo Molnar 2209f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 2210f2cb1360SIngo Molnar struct sched_domain *sd; 2211f2cb1360SIngo Molnar struct sched_domain_shared *sds; 2212f2cb1360SIngo Molnar struct sched_group *sg; 2213f2cb1360SIngo Molnar struct sched_group_capacity *sgc; 2214f2cb1360SIngo Molnar 2215f2cb1360SIngo Molnar sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), 2216f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2217f2cb1360SIngo Molnar if (!sd) 2218f2cb1360SIngo Molnar return -ENOMEM; 2219f2cb1360SIngo Molnar 2220f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sd, j) = sd; 2221f2cb1360SIngo Molnar 2222f2cb1360SIngo Molnar sds = kzalloc_node(sizeof(struct sched_domain_shared), 2223f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2224f2cb1360SIngo Molnar if (!sds) 2225f2cb1360SIngo Molnar return -ENOMEM; 2226f2cb1360SIngo Molnar 2227f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sds, j) = sds; 2228f2cb1360SIngo Molnar 2229f2cb1360SIngo Molnar sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 2230f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2231f2cb1360SIngo Molnar if (!sg) 2232f2cb1360SIngo Molnar return -ENOMEM; 2233f2cb1360SIngo Molnar 2234f2cb1360SIngo Molnar sg->next = sg; 2235f2cb1360SIngo Molnar 2236f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sg, j) = sg; 2237f2cb1360SIngo Molnar 2238f2cb1360SIngo Molnar sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), 2239f2cb1360SIngo Molnar GFP_KERNEL, cpu_to_node(j)); 2240f2cb1360SIngo Molnar if (!sgc) 
2241f2cb1360SIngo Molnar return -ENOMEM; 2242f2cb1360SIngo Molnar 2243005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 2244005f874dSPeter Zijlstra sgc->id = j; 2245005f874dSPeter Zijlstra #endif 2246005f874dSPeter Zijlstra 2247f2cb1360SIngo Molnar *per_cpu_ptr(sdd->sgc, j) = sgc; 2248f2cb1360SIngo Molnar } 2249f2cb1360SIngo Molnar } 2250f2cb1360SIngo Molnar 2251f2cb1360SIngo Molnar return 0; 2252f2cb1360SIngo Molnar } 2253f2cb1360SIngo Molnar 2254f2cb1360SIngo Molnar static void __sdt_free(const struct cpumask *cpu_map) 2255f2cb1360SIngo Molnar { 2256f2cb1360SIngo Molnar struct sched_domain_topology_level *tl; 2257f2cb1360SIngo Molnar int j; 2258f2cb1360SIngo Molnar 2259f2cb1360SIngo Molnar for_each_sd_topology(tl) { 2260f2cb1360SIngo Molnar struct sd_data *sdd = &tl->data; 2261f2cb1360SIngo Molnar 2262f2cb1360SIngo Molnar for_each_cpu(j, cpu_map) { 2263f2cb1360SIngo Molnar struct sched_domain *sd; 2264f2cb1360SIngo Molnar 2265f2cb1360SIngo Molnar if (sdd->sd) { 2266f2cb1360SIngo Molnar sd = *per_cpu_ptr(sdd->sd, j); 2267f2cb1360SIngo Molnar if (sd && (sd->flags & SD_OVERLAP)) 2268f2cb1360SIngo Molnar free_sched_groups(sd->groups, 0); 2269f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sd, j)); 2270f2cb1360SIngo Molnar } 2271f2cb1360SIngo Molnar 2272f2cb1360SIngo Molnar if (sdd->sds) 2273f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sds, j)); 2274f2cb1360SIngo Molnar if (sdd->sg) 2275f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sg, j)); 2276f2cb1360SIngo Molnar if (sdd->sgc) 2277f2cb1360SIngo Molnar kfree(*per_cpu_ptr(sdd->sgc, j)); 2278f2cb1360SIngo Molnar } 2279f2cb1360SIngo Molnar free_percpu(sdd->sd); 2280f2cb1360SIngo Molnar sdd->sd = NULL; 2281f2cb1360SIngo Molnar free_percpu(sdd->sds); 2282f2cb1360SIngo Molnar sdd->sds = NULL; 2283f2cb1360SIngo Molnar free_percpu(sdd->sg); 2284f2cb1360SIngo Molnar sdd->sg = NULL; 2285f2cb1360SIngo Molnar free_percpu(sdd->sgc); 2286f2cb1360SIngo Molnar sdd->sgc = NULL; 2287f2cb1360SIngo Molnar } 2288f2cb1360SIngo Molnar } 2289f2cb1360SIngo Molnar 2290181a80d1SViresh Kumar static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, 2291f2cb1360SIngo Molnar const struct cpumask *cpu_map, struct sched_domain_attr *attr, 2292c744dc4aSBeata Michalska struct sched_domain *child, int cpu) 2293f2cb1360SIngo Molnar { 2294c744dc4aSBeata Michalska struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); 2295f2cb1360SIngo Molnar 2296f2cb1360SIngo Molnar if (child) { 2297f2cb1360SIngo Molnar sd->level = child->level + 1; 2298f2cb1360SIngo Molnar sched_domain_level_max = max(sched_domain_level_max, sd->level); 2299f2cb1360SIngo Molnar child->parent = sd; 2300f2cb1360SIngo Molnar 2301f2cb1360SIngo Molnar if (!cpumask_subset(sched_domain_span(child), 2302f2cb1360SIngo Molnar sched_domain_span(sd))) { 2303f2cb1360SIngo Molnar pr_err("BUG: arch topology borken\n"); 2304f2cb1360SIngo Molnar #ifdef CONFIG_SCHED_DEBUG 2305f2cb1360SIngo Molnar pr_err(" the %s domain not a subset of the %s domain\n", 2306f2cb1360SIngo Molnar child->name, sd->name); 2307f2cb1360SIngo Molnar #endif 230897fb7a0aSIngo Molnar /* Fixup, ensure @sd has at least @child CPUs. 
*/ 2309f2cb1360SIngo Molnar cpumask_or(sched_domain_span(sd), 2310f2cb1360SIngo Molnar sched_domain_span(sd), 2311f2cb1360SIngo Molnar sched_domain_span(child)); 2312f2cb1360SIngo Molnar } 2313f2cb1360SIngo Molnar 2314f2cb1360SIngo Molnar } 2315f2cb1360SIngo Molnar set_domain_attribute(sd, attr); 2316f2cb1360SIngo Molnar 2317f2cb1360SIngo Molnar return sd; 2318f2cb1360SIngo Molnar } 2319f2cb1360SIngo Molnar 2320f2cb1360SIngo Molnar /* 2321ccf74128SValentin Schneider * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for 2322ccf74128SValentin Schneider * any two given CPUs at this (non-NUMA) topology level. 2323ccf74128SValentin Schneider */ 2324ccf74128SValentin Schneider static bool topology_span_sane(struct sched_domain_topology_level *tl, 2325ccf74128SValentin Schneider const struct cpumask *cpu_map, int cpu) 2326ccf74128SValentin Schneider { 2327ccf74128SValentin Schneider int i; 2328ccf74128SValentin Schneider 2329ccf74128SValentin Schneider /* NUMA levels are allowed to overlap */ 2330ccf74128SValentin Schneider if (tl->flags & SDTL_OVERLAP) 2331ccf74128SValentin Schneider return true; 2332ccf74128SValentin Schneider 2333ccf74128SValentin Schneider /* 2334ccf74128SValentin Schneider * Non-NUMA levels cannot partially overlap - they must be either 2335ccf74128SValentin Schneider * completely equal or completely disjoint. Otherwise we can end up 2336ccf74128SValentin Schneider * breaking the sched_group lists - i.e. a later get_group() pass 2337ccf74128SValentin Schneider * breaks the linking done for an earlier span. 2338ccf74128SValentin Schneider */ 2339ccf74128SValentin Schneider for_each_cpu(i, cpu_map) { 2340ccf74128SValentin Schneider if (i == cpu) 2341ccf74128SValentin Schneider continue; 2342ccf74128SValentin Schneider /* 2343ccf74128SValentin Schneider * We should 'and' all those masks with 'cpu_map' to exactly 2344ccf74128SValentin Schneider * match the topology we're about to build, but that can only 2345ccf74128SValentin Schneider * remove CPUs, which only lessens our ability to detect 2346ccf74128SValentin Schneider * overlaps 2347ccf74128SValentin Schneider */ 2348ccf74128SValentin Schneider if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && 2349ccf74128SValentin Schneider cpumask_intersects(tl->mask(cpu), tl->mask(i))) 2350ccf74128SValentin Schneider return false; 2351ccf74128SValentin Schneider } 2352ccf74128SValentin Schneider 2353ccf74128SValentin Schneider return true; 2354ccf74128SValentin Schneider } 2355ccf74128SValentin Schneider 2356ccf74128SValentin Schneider /* 2357f2cb1360SIngo Molnar * Build sched domains for a given set of CPUs and attach the sched domains 2358f2cb1360SIngo Molnar * to the individual CPUs 2359f2cb1360SIngo Molnar */ 2360f2cb1360SIngo Molnar static int 2361f2cb1360SIngo Molnar build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr) 2362f2cb1360SIngo Molnar { 2363cd1cb335SValentin Schneider enum s_alloc alloc_state = sa_none; 2364f2cb1360SIngo Molnar struct sched_domain *sd; 2365f2cb1360SIngo Molnar struct s_data d; 2366f2cb1360SIngo Molnar struct rq *rq = NULL; 2367f2cb1360SIngo Molnar int i, ret = -ENOMEM; 2368df054e84SMorten Rasmussen bool has_asym = false; 2369f2cb1360SIngo Molnar 2370cd1cb335SValentin Schneider if (WARN_ON(cpumask_empty(cpu_map))) 2371cd1cb335SValentin Schneider goto error; 2372cd1cb335SValentin Schneider 2373f2cb1360SIngo Molnar alloc_state = __visit_domain_allocation_hell(&d, cpu_map); 2374f2cb1360SIngo Molnar if (alloc_state != sa_rootdomain) 2375f2cb1360SIngo Molnar 
2356ccf74128SValentin Schneider /*
2357f2cb1360SIngo Molnar  * Build sched domains for a given set of CPUs and attach the sched domains
2358f2cb1360SIngo Molnar  * to the individual CPUs
2359f2cb1360SIngo Molnar  */
2360f2cb1360SIngo Molnar static int
2361f2cb1360SIngo Molnar build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
2362f2cb1360SIngo Molnar {
2363cd1cb335SValentin Schneider 	enum s_alloc alloc_state = sa_none;
2364f2cb1360SIngo Molnar 	struct sched_domain *sd;
2365f2cb1360SIngo Molnar 	struct s_data d;
2366f2cb1360SIngo Molnar 	struct rq *rq = NULL;
2367f2cb1360SIngo Molnar 	int i, ret = -ENOMEM;
2368df054e84SMorten Rasmussen 	bool has_asym = false;
2369f2cb1360SIngo Molnar 
2370cd1cb335SValentin Schneider 	if (WARN_ON(cpumask_empty(cpu_map)))
2371cd1cb335SValentin Schneider 		goto error;
2372cd1cb335SValentin Schneider 
2373f2cb1360SIngo Molnar 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
2374f2cb1360SIngo Molnar 	if (alloc_state != sa_rootdomain)
2375f2cb1360SIngo Molnar 		goto error;
2376f2cb1360SIngo Molnar 
2377f2cb1360SIngo Molnar 	/* Set up domains for CPUs specified by the cpu_map: */
2378f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
2379f2cb1360SIngo Molnar 		struct sched_domain_topology_level *tl;
2380f2cb1360SIngo Molnar 
2381f2cb1360SIngo Molnar 		sd = NULL;
2382f2cb1360SIngo Molnar 		for_each_sd_topology(tl) {
238305484e09SMorten Rasmussen 
2384ccf74128SValentin Schneider 			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
2385ccf74128SValentin Schneider 				goto error;
2386ccf74128SValentin Schneider 
2387c744dc4aSBeata Michalska 			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
2388c744dc4aSBeata Michalska 
2389c744dc4aSBeata Michalska 			has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
239005484e09SMorten Rasmussen 
2391f2cb1360SIngo Molnar 			if (tl == sched_domain_topology)
2392f2cb1360SIngo Molnar 				*per_cpu_ptr(d.sd, i) = sd;
2393af85596cSPeter Zijlstra 			if (tl->flags & SDTL_OVERLAP)
2394f2cb1360SIngo Molnar 				sd->flags |= SD_OVERLAP;
2395f2cb1360SIngo Molnar 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
2396f2cb1360SIngo Molnar 				break;
2397f2cb1360SIngo Molnar 		}
2398f2cb1360SIngo Molnar 	}
2399f2cb1360SIngo Molnar 
2400f2cb1360SIngo Molnar 	/* Build the groups for the domains */
2401f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
2402f2cb1360SIngo Molnar 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2403f2cb1360SIngo Molnar 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
2404f2cb1360SIngo Molnar 			if (sd->flags & SD_OVERLAP) {
2405f2cb1360SIngo Molnar 				if (build_overlap_sched_groups(sd, i))
2406f2cb1360SIngo Molnar 					goto error;
2407f2cb1360SIngo Molnar 			} else {
2408f2cb1360SIngo Molnar 				if (build_sched_groups(sd, i))
2409f2cb1360SIngo Molnar 					goto error;
2410f2cb1360SIngo Molnar 			}
2411f2cb1360SIngo Molnar 		}
2412f2cb1360SIngo Molnar 	}
2413f2cb1360SIngo Molnar 
2414e496132eSMel Gorman 	/*
2415e496132eSMel Gorman 	 * Calculate an allowed NUMA imbalance such that LLCs do not get
2416e496132eSMel Gorman 	 * imbalanced.
2417e496132eSMel Gorman 	 */
2418e496132eSMel Gorman 	for_each_cpu(i, cpu_map) {
2419e496132eSMel Gorman 		unsigned int imb = 0;
2420e496132eSMel Gorman 		unsigned int imb_span = 1;
2421e496132eSMel Gorman 
2422e496132eSMel Gorman 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2423e496132eSMel Gorman 			struct sched_domain *child = sd->child;
2424e496132eSMel Gorman 
2425e496132eSMel Gorman 			if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child &&
2426e496132eSMel Gorman 			    (child->flags & SD_SHARE_PKG_RESOURCES)) {
24277f434dffSK Prateek Nayak 				struct sched_domain __rcu *top_p;
2428e496132eSMel Gorman 				unsigned int nr_llcs;
2429e496132eSMel Gorman 
2430e496132eSMel Gorman 				/*
2431e496132eSMel Gorman 				 * For a single LLC per node, allow an
2432026b98a9SMel Gorman 				 * imbalance up to 12.5% of the node. This is
2433026b98a9SMel Gorman 				 * an arbitrary cutoff based on two factors -- SMT and
2434026b98a9SMel Gorman 				 * memory channels. For SMT-2, the intent is to
2435026b98a9SMel Gorman 				 * avoid premature sharing of HT resources but
2436026b98a9SMel Gorman 				 * SMT-4 or SMT-8 *may* benefit from a different
2437026b98a9SMel Gorman 				 * cutoff. For memory channels, this is a very
2438026b98a9SMel Gorman 				 * rough estimate of how many channels may be
2439026b98a9SMel Gorman 				 * active and is based on recent CPUs with
2440026b98a9SMel Gorman 				 * many cores.
2441e496132eSMel Gorman 				 *
2442e496132eSMel Gorman 				 * For multiple LLCs, allow an imbalance
2443e496132eSMel Gorman 				 * until multiple tasks would share an LLC
2444e496132eSMel Gorman 				 * on one node while LLCs on another node
2445026b98a9SMel Gorman 				 * remain idle. This assumes that there are
2446026b98a9SMel Gorman 				 * enough logical CPUs per LLC to avoid SMT
2447026b98a9SMel Gorman 				 * factors and that there is a correlation
2448026b98a9SMel Gorman 				 * between LLCs and memory channels.
2449e496132eSMel Gorman 				 */
2450e496132eSMel Gorman 				nr_llcs = sd->span_weight / child->span_weight;
2451e496132eSMel Gorman 				if (nr_llcs == 1)
2452026b98a9SMel Gorman 					imb = sd->span_weight >> 3;
2453e496132eSMel Gorman 				else
2454e496132eSMel Gorman 					imb = nr_llcs;
2455026b98a9SMel Gorman 				imb = max(1U, imb);
2456e496132eSMel Gorman 				sd->imb_numa_nr = imb;
2457e496132eSMel Gorman 
2458e496132eSMel Gorman 				/* Set span based on the first NUMA domain. */
24597f434dffSK Prateek Nayak 				top_p = sd->parent;
2460e496132eSMel Gorman 				while (top_p && !(top_p->flags & SD_NUMA)) {
24617f434dffSK Prateek Nayak 					top_p = top_p->parent;
2462e496132eSMel Gorman 				}
2463e496132eSMel Gorman 				imb_span = top_p ? top_p->span_weight : sd->span_weight;
2464e496132eSMel Gorman 			} else {
2465e496132eSMel Gorman 				int factor = max(1U, (sd->span_weight / imb_span));
2466e496132eSMel Gorman 
2467e496132eSMel Gorman 				sd->imb_numa_nr = imb * factor;
2468e496132eSMel Gorman 			}
2469e496132eSMel Gorman 		}
2470e496132eSMel Gorman 	}
2471e496132eSMel Gorman 
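	/*
	 * Editorial note (worked example on a hypothetical machine, not
	 * from the original source): on a 2-node system with 64 CPUs per
	 * node and 4 LLCs of 16 CPUs each, the node-level domain sees
	 * nr_llcs = 64 / 16 = 4, so it gets imb_numa_nr = 4; its first
	 * NUMA parent spans 128 CPUs, so imb_span = 128 and that NUMA
	 * domain gets factor = max(1, 128 / 128) = 1, hence
	 * imb_numa_nr = 4 as well. With a single 64-CPU LLC per node
	 * instead, nr_llcs = 1 and imb = 64 >> 3 = 8, i.e. 12.5% of the
	 * node.
	 */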
2472f2cb1360SIngo Molnar 	/* Calculate CPU capacity for physical packages and nodes */
2473f2cb1360SIngo Molnar 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
2474f2cb1360SIngo Molnar 		if (!cpumask_test_cpu(i, cpu_map))
2475f2cb1360SIngo Molnar 			continue;
2476f2cb1360SIngo Molnar 
2477f2cb1360SIngo Molnar 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2478f2cb1360SIngo Molnar 			claim_allocations(i, sd);
2479f2cb1360SIngo Molnar 			init_sched_groups_capacity(i, sd);
2480f2cb1360SIngo Molnar 		}
2481f2cb1360SIngo Molnar 	}
2482f2cb1360SIngo Molnar 
2483f2cb1360SIngo Molnar 	/* Attach the domains */
2484f2cb1360SIngo Molnar 	rcu_read_lock();
2485f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map) {
2486f2cb1360SIngo Molnar 		rq = cpu_rq(i);
2487f2cb1360SIngo Molnar 		sd = *per_cpu_ptr(d.sd, i);
2488f2cb1360SIngo Molnar 
2489f2cb1360SIngo Molnar 		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
2490f2cb1360SIngo Molnar 		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
2491f2cb1360SIngo Molnar 			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
2492f2cb1360SIngo Molnar 
2493f2cb1360SIngo Molnar 		cpu_attach_domain(sd, d.rd, i);
2494f2cb1360SIngo Molnar 	}
2495f2cb1360SIngo Molnar 	rcu_read_unlock();
2496f2cb1360SIngo Molnar 
2497df054e84SMorten Rasmussen 	if (has_asym)
2498e284df70SValentin Schneider 		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
2499df054e84SMorten Rasmussen 
25009406415fSPeter Zijlstra 	if (rq && sched_debug_verbose) {
2501bf5015a5SJuri Lelli 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
2502f2cb1360SIngo Molnar 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
2503f2cb1360SIngo Molnar 	}
2504f2cb1360SIngo Molnar 
2505f2cb1360SIngo Molnar 	ret = 0;
2506f2cb1360SIngo Molnar error:
2507f2cb1360SIngo Molnar 	__free_domain_allocs(&d, alloc_state, cpu_map);
250897fb7a0aSIngo Molnar 
2509f2cb1360SIngo Molnar 	return ret;
2510f2cb1360SIngo Molnar }
2511f2cb1360SIngo Molnar 
2512f2cb1360SIngo Molnar /* Current sched domains: */
2513f2cb1360SIngo Molnar static cpumask_var_t *doms_cur;
2514f2cb1360SIngo Molnar 
2515f2cb1360SIngo Molnar /* Number of sched domains in 'doms_cur': */
2516f2cb1360SIngo Molnar static int ndoms_cur;
2517f2cb1360SIngo Molnar 
25183b03706fSIngo Molnar /* Attributes of custom domains in 'doms_cur' */
2519f2cb1360SIngo Molnar static struct sched_domain_attr *dattr_cur;
2520f2cb1360SIngo Molnar 
2521f2cb1360SIngo Molnar /*
2522f2cb1360SIngo Molnar  * Special case: If a kmalloc() of a doms_cur partition (array of
2523f2cb1360SIngo Molnar  * cpumask) fails, then fall back to a single sched domain,
2524f2cb1360SIngo Molnar  * as determined by the single cpumask fallback_doms.
2525f2cb1360SIngo Molnar  */
25268d5dc512SPeter Zijlstra static cpumask_var_t fallback_doms;
2527f2cb1360SIngo Molnar 
2528f2cb1360SIngo Molnar /*
2529f2cb1360SIngo Molnar  * arch_update_cpu_topology lets virtualized architectures update the
2530f2cb1360SIngo Molnar  * CPU core maps. It is supposed to return 1 if the topology changed
2531f2cb1360SIngo Molnar  * or 0 if it stayed the same.
2532f2cb1360SIngo Molnar  */
2533f2cb1360SIngo Molnar int __weak arch_update_cpu_topology(void)
2534f2cb1360SIngo Molnar {
2535f2cb1360SIngo Molnar 	return 0;
2536f2cb1360SIngo Molnar }
2537f2cb1360SIngo Molnar 
2538f2cb1360SIngo Molnar cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
2539f2cb1360SIngo Molnar {
2540f2cb1360SIngo Molnar 	int i;
2541f2cb1360SIngo Molnar 	cpumask_var_t *doms;
2542f2cb1360SIngo Molnar 
25436da2ec56SKees Cook 	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
2544f2cb1360SIngo Molnar 	if (!doms)
2545f2cb1360SIngo Molnar 		return NULL;
2546f2cb1360SIngo Molnar 	for (i = 0; i < ndoms; i++) {
2547f2cb1360SIngo Molnar 		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
2548f2cb1360SIngo Molnar 			free_sched_domains(doms, i);
2549f2cb1360SIngo Molnar 			return NULL;
2550f2cb1360SIngo Molnar 		}
2551f2cb1360SIngo Molnar 	}
2552f2cb1360SIngo Molnar 	return doms;
2553f2cb1360SIngo Molnar }
2554f2cb1360SIngo Molnar 
2555f2cb1360SIngo Molnar void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2556f2cb1360SIngo Molnar {
2557f2cb1360SIngo Molnar 	unsigned int i;
2558f2cb1360SIngo Molnar 	for (i = 0; i < ndoms; i++)
2559f2cb1360SIngo Molnar 		free_cpumask_var(doms[i]);
2560f2cb1360SIngo Molnar 	kfree(doms);
2561f2cb1360SIngo Molnar }
2562f2cb1360SIngo Molnar 
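/*
 * Editorial note (illustrative usage sketch, not from the original
 * source): a caller that wants two load-balancing partitions would pair
 * these helpers roughly as follows; error handling is elided and the
 * two disjoint masks are hypothetical.
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], some_mask_a);
 *		cpumask_copy(doms[1], some_mask_b);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 *
 * partition_sched_domains() takes ownership of the array and will
 * free_sched_domains() it when the partitioning is next replaced, so
 * the caller must not free it itself.
 */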
2563f2cb1360SIngo Molnar /*
2564cb0c0414SJuri Lelli  * Set up scheduler domains and groups. For now this just excludes isolated
2565cb0c0414SJuri Lelli  * CPUs, but could be used to exclude other special cases in the future.
2566f2cb1360SIngo Molnar  */
2567ef90cf22SBing Huang int __init sched_init_domains(const struct cpumask *cpu_map)
2568f2cb1360SIngo Molnar {
2569f2cb1360SIngo Molnar 	int err;
2570f2cb1360SIngo Molnar 
25718d5dc512SPeter Zijlstra 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
25721676330eSPeter Zijlstra 	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
25738d5dc512SPeter Zijlstra 	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
25748d5dc512SPeter Zijlstra 
2575f2cb1360SIngo Molnar 	arch_update_cpu_topology();
2576c744dc4aSBeata Michalska 	asym_cpu_capacity_scan();
2577f2cb1360SIngo Molnar 	ndoms_cur = 1;
2578f2cb1360SIngo Molnar 	doms_cur = alloc_sched_domains(ndoms_cur);
2579f2cb1360SIngo Molnar 	if (!doms_cur)
2580f2cb1360SIngo Molnar 		doms_cur = &fallback_doms;
258104d4e665SFrederic Weisbecker 	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
2582f2cb1360SIngo Molnar 	err = build_sched_domains(doms_cur[0], NULL);
2583f2cb1360SIngo Molnar 
2584f2cb1360SIngo Molnar 	return err;
2585f2cb1360SIngo Molnar }
2586f2cb1360SIngo Molnar 
2587f2cb1360SIngo Molnar /*
2588f2cb1360SIngo Molnar  * Detach sched domains from a group of CPUs specified in cpu_map.
2589f2cb1360SIngo Molnar  * These CPUs will now be attached to the NULL domain.
2590f2cb1360SIngo Molnar  */
2591f2cb1360SIngo Molnar static void detach_destroy_domains(const struct cpumask *cpu_map)
2592f2cb1360SIngo Molnar {
2593e284df70SValentin Schneider 	unsigned int cpu = cpumask_any(cpu_map);
2594f2cb1360SIngo Molnar 	int i;
2595f2cb1360SIngo Molnar 
2596e284df70SValentin Schneider 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
2597e284df70SValentin Schneider 		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
2598e284df70SValentin Schneider 
2599f2cb1360SIngo Molnar 	rcu_read_lock();
2600f2cb1360SIngo Molnar 	for_each_cpu(i, cpu_map)
2601f2cb1360SIngo Molnar 		cpu_attach_domain(NULL, &def_root_domain, i);
2602f2cb1360SIngo Molnar 	rcu_read_unlock();
2603f2cb1360SIngo Molnar }
2604f2cb1360SIngo Molnar 
2605f2cb1360SIngo Molnar /* Handle null as "default" */
2606f2cb1360SIngo Molnar static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
2607f2cb1360SIngo Molnar 			struct sched_domain_attr *new, int idx_new)
2608f2cb1360SIngo Molnar {
2609f2cb1360SIngo Molnar 	struct sched_domain_attr tmp;
2610f2cb1360SIngo Molnar 
2611f2cb1360SIngo Molnar 	/* Fast path: */
2612f2cb1360SIngo Molnar 	if (!new && !cur)
2613f2cb1360SIngo Molnar 		return 1;
2614f2cb1360SIngo Molnar 
2615f2cb1360SIngo Molnar 	tmp = SD_ATTR_INIT;
261697fb7a0aSIngo Molnar 
2617f2cb1360SIngo Molnar 	return !memcmp(cur ? (cur + idx_cur) : &tmp,
2618f2cb1360SIngo Molnar 		       new ? (new + idx_new) : &tmp,
2619f2cb1360SIngo Molnar 		       sizeof(struct sched_domain_attr));
2620f2cb1360SIngo Molnar }
2621f2cb1360SIngo Molnar 
2622f2cb1360SIngo Molnar /*
2623f2cb1360SIngo Molnar  * Partition sched domains as specified by the 'ndoms_new'
2624f2cb1360SIngo Molnar  * cpumasks in the array doms_new[] of cpumasks. This compares
2625f2cb1360SIngo Molnar  * doms_new[] to the current sched domain partitioning, doms_cur[].
2626f2cb1360SIngo Molnar  * It destroys each deleted domain and builds each new domain.
2627f2cb1360SIngo Molnar  *
2628f2cb1360SIngo Molnar  * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
2629f2cb1360SIngo Molnar  * The masks don't intersect (don't overlap). We should set up one
2630f2cb1360SIngo Molnar  * sched domain for each mask. CPUs not in any of the cpumasks will
2631f2cb1360SIngo Molnar  * not be load balanced. If the same cpumask appears both in the
2632f2cb1360SIngo Molnar  * current 'doms_cur' domains and in the new 'doms_new', we can leave
2633f2cb1360SIngo Molnar  * it as it is.
2634f2cb1360SIngo Molnar  *
2635f2cb1360SIngo Molnar  * The passed in 'doms_new' should be allocated using
2636f2cb1360SIngo Molnar  * alloc_sched_domains. This routine takes ownership of it and will
2637f2cb1360SIngo Molnar  * free_sched_domains it when done with it. If the caller failed the
2638f2cb1360SIngo Molnar  * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2639f2cb1360SIngo Molnar  * and partition_sched_domains() will fall back to the single partition
2640f2cb1360SIngo Molnar  * 'fallback_doms'; this also forces the domains to be rebuilt.
2641f2cb1360SIngo Molnar  *
2642f2cb1360SIngo Molnar  * If doms_new == NULL it will be replaced with cpu_online_mask.
2643f2cb1360SIngo Molnar  * ndoms_new == 0 is a special case for destroying existing domains,
2644f2cb1360SIngo Molnar  * and it will not create the default domain.
2645f2cb1360SIngo Molnar  *
2646c22645f4SMathieu Poirier  * Call with hotplug lock and sched_domains_mutex held
2647f2cb1360SIngo Molnar  */
2648c22645f4SMathieu Poirier void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
2649f2cb1360SIngo Molnar 				    struct sched_domain_attr *dattr_new)
2650f2cb1360SIngo Molnar {
26511f74de87SQuentin Perret 	bool __maybe_unused has_eas = false;
2652f2cb1360SIngo Molnar 	int i, j, n;
2653f2cb1360SIngo Molnar 	int new_topology;
2654f2cb1360SIngo Molnar 
2655c22645f4SMathieu Poirier 	lockdep_assert_held(&sched_domains_mutex);
2656f2cb1360SIngo Molnar 
2657f2cb1360SIngo Molnar 	/* Let the architecture update CPU core mappings: */
2658f2cb1360SIngo Molnar 	new_topology = arch_update_cpu_topology();
2659c744dc4aSBeata Michalska 	/* Trigger rebuilding CPU capacity asymmetry data */
2660c744dc4aSBeata Michalska 	if (new_topology)
2661c744dc4aSBeata Michalska 		asym_cpu_capacity_scan();
2662f2cb1360SIngo Molnar 
266309e0dd8eSPeter Zijlstra 	if (!doms_new) {
266409e0dd8eSPeter Zijlstra 		WARN_ON_ONCE(dattr_new);
266509e0dd8eSPeter Zijlstra 		n = 0;
266609e0dd8eSPeter Zijlstra 		doms_new = alloc_sched_domains(1);
266709e0dd8eSPeter Zijlstra 		if (doms_new) {
266809e0dd8eSPeter Zijlstra 			n = 1;
2669edb93821SFrederic Weisbecker 			cpumask_and(doms_new[0], cpu_active_mask,
267004d4e665SFrederic Weisbecker 				    housekeeping_cpumask(HK_TYPE_DOMAIN));
267109e0dd8eSPeter Zijlstra 		}
267209e0dd8eSPeter Zijlstra 	} else {
267309e0dd8eSPeter Zijlstra 		n = ndoms_new;
267409e0dd8eSPeter Zijlstra 	}
2675f2cb1360SIngo Molnar 
2676f2cb1360SIngo Molnar 	/* Destroy deleted domains: */
2677f2cb1360SIngo Molnar 	for (i = 0; i < ndoms_cur; i++) {
2678f2cb1360SIngo Molnar 		for (j = 0; j < n && !new_topology; j++) {
26796aa140faSQuentin Perret 			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
2680f9a25f77SMathieu Poirier 			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
2681f9a25f77SMathieu Poirier 				struct root_domain *rd;
2682f9a25f77SMathieu Poirier 
2683f9a25f77SMathieu Poirier 				/*
2684f9a25f77SMathieu Poirier 				 * This domain won't be destroyed and as such
2685f9a25f77SMathieu Poirier 				 * its dl_bw->total_bw needs to be cleared. It
2686f9a25f77SMathieu Poirier 				 * will be recomputed in function
2687f9a25f77SMathieu Poirier 				 * update_tasks_root_domain().
2688f9a25f77SMathieu Poirier 				 */
2689f9a25f77SMathieu Poirier 				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2690f9a25f77SMathieu Poirier 				dl_clear_root_domain(rd);
2691f2cb1360SIngo Molnar 				goto match1;
2692f2cb1360SIngo Molnar 			}
2693f9a25f77SMathieu Poirier 		}
2694f2cb1360SIngo Molnar 		/* No match - a current sched domain not in new doms_new[] */
2695f2cb1360SIngo Molnar 		detach_destroy_domains(doms_cur[i]);
2696f2cb1360SIngo Molnar match1:
2697f2cb1360SIngo Molnar 		;
2698f2cb1360SIngo Molnar 	}
2699f2cb1360SIngo Molnar 
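	/*
	 * Editorial note (illustrative example, not from the original
	 * source): assuming the topology itself did not change, with
	 * doms_cur = {A, B} and doms_new = {B, C} the pass above destroys
	 * A, keeps B via the match1 path (only clearing its root domain's
	 * DEADLINE bandwidth), and the pass below then builds C from
	 * scratch.
	 */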
2700f2cb1360SIngo Molnar 	n = ndoms_cur;
270109e0dd8eSPeter Zijlstra 	if (!doms_new) {
2702f2cb1360SIngo Molnar 		n = 0;
2703f2cb1360SIngo Molnar 		doms_new = &fallback_doms;
2704edb93821SFrederic Weisbecker 		cpumask_and(doms_new[0], cpu_active_mask,
270504d4e665SFrederic Weisbecker 			    housekeeping_cpumask(HK_TYPE_DOMAIN));
2706f2cb1360SIngo Molnar 	}
2707f2cb1360SIngo Molnar 
2708f2cb1360SIngo Molnar 	/* Build new domains: */
2709f2cb1360SIngo Molnar 	for (i = 0; i < ndoms_new; i++) {
2710f2cb1360SIngo Molnar 		for (j = 0; j < n && !new_topology; j++) {
27116aa140faSQuentin Perret 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
27126aa140faSQuentin Perret 			    dattrs_equal(dattr_new, i, dattr_cur, j))
2713f2cb1360SIngo Molnar 				goto match2;
2714f2cb1360SIngo Molnar 		}
2715f2cb1360SIngo Molnar 		/* No match - add a new doms_new */
2716f2cb1360SIngo Molnar 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2717f2cb1360SIngo Molnar match2:
2718f2cb1360SIngo Molnar 		;
2719f2cb1360SIngo Molnar 	}
2720f2cb1360SIngo Molnar 
2721531b5c9fSQuentin Perret #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
27226aa140faSQuentin Perret 	/* Build perf. domains: */
27236aa140faSQuentin Perret 	for (i = 0; i < ndoms_new; i++) {
2724531b5c9fSQuentin Perret 		for (j = 0; j < n && !sched_energy_update; j++) {
27256aa140faSQuentin Perret 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
27261f74de87SQuentin Perret 			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
27271f74de87SQuentin Perret 				has_eas = true;
27286aa140faSQuentin Perret 				goto match3;
27296aa140faSQuentin Perret 			}
27301f74de87SQuentin Perret 		}
27316aa140faSQuentin Perret 		/* No match - add perf. domains for a new rd */
27321f74de87SQuentin Perret 		has_eas |= build_perf_domains(doms_new[i]);
27336aa140faSQuentin Perret match3:
27346aa140faSQuentin Perret 		;
27356aa140faSQuentin Perret 	}
27361f74de87SQuentin Perret 	sched_energy_set(has_eas);
27376aa140faSQuentin Perret #endif
27386aa140faSQuentin Perret 
2739f2cb1360SIngo Molnar 	/* Remember the new sched domains: */
2740f2cb1360SIngo Molnar 	if (doms_cur != &fallback_doms)
2741f2cb1360SIngo Molnar 		free_sched_domains(doms_cur, ndoms_cur);
2742f2cb1360SIngo Molnar 
2743f2cb1360SIngo Molnar 	kfree(dattr_cur);
2744f2cb1360SIngo Molnar 	doms_cur = doms_new;
2745f2cb1360SIngo Molnar 	dattr_cur = dattr_new;
2746f2cb1360SIngo Molnar 	ndoms_cur = ndoms_new;
2747f2cb1360SIngo Molnar 
27483b87f136SPeter Zijlstra 	update_sched_domain_debugfs();
2749c22645f4SMathieu Poirier }
2750f2cb1360SIngo Molnar 
2751c22645f4SMathieu Poirier /*
2752c22645f4SMathieu Poirier  * Call with hotplug lock held
2753c22645f4SMathieu Poirier  */
2754c22645f4SMathieu Poirier void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2755c22645f4SMathieu Poirier 			     struct sched_domain_attr *dattr_new)
2756c22645f4SMathieu Poirier {
2757c22645f4SMathieu Poirier 	mutex_lock(&sched_domains_mutex);
2758c22645f4SMathieu Poirier 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
2759f2cb1360SIngo Molnar 	mutex_unlock(&sched_domains_mutex);
2760f2cb1360SIngo Molnar }
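/*
 * Editorial note (illustrative usage sketch, not from the original
 * source): a hotplug-context caller rebuilding the partitioning would
 * do so roughly as follows; 'ndoms', 'doms' and 'attrs' are assumed to
 * come from a hypothetical generate_sched_domains()-style helper.
 *
 *	cpus_read_lock();
 *	partition_sched_domains(ndoms, doms, attrs);
 *	cpus_read_unlock();
 *
 * Callers already holding sched_domains_mutex must use
 * partition_sched_domains_locked() instead, since the unlocked variant
 * takes the mutex itself.
 */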