--- topology.c (ffb1e76f4f32d2b8ea4189df0484980370476395)
+++ topology.c (16d364ba6ef2aa59b409df70682770f3ed23f7c0)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Scheduler topology setup/handling methods
  */
 #include "sched.h"
 
 DEFINE_MUTEX(sched_domains_mutex);
 

--- 702 unchanged lines hidden ---
@@ -711,18 +711,30 @@
 		} else
 			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
 		tmp = sd;
 		sd = sd->parent;
 		destroy_sched_domain(tmp);
-		if (sd)
+		if (sd) {
+			struct sched_group *sg = sd->groups;
+
+			/*
+			 * sched groups hold the flags of the child sched
+			 * domain for convenience. Clear such flags since
+			 * the child is being destroyed.
+			 */
+			do {
+				sg->flags = 0;
+			} while (sg != sd->groups);
+
 			sd->child = NULL;
+		}
 	}
 
 	for (tmp = sd; tmp; tmp = tmp->parent)
 		numa_distance += !!(tmp->flags & SD_NUMA);
 
 	sched_domain_debug(sd, cpu);
 
 	rq_attach_root(rq, rd);
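Note: the hunk above clears the flags that the surviving parent domain's groups inherited from the now-destroyed child domain. A minimal stand-alone sketch of that pattern follows; struct domain/struct group and their fields are simplified stand-ins for the kernel's sched_domain/sched_group (whose groups form a circular list linked by ->next), not the kernel's definitions.

#include <stdio.h>

struct group {
	struct group *next;	/* groups of one domain form a circular list */
	unsigned int flags;	/* mirrors the flags of the child domain */
};

struct domain {
	struct group *groups;	/* head of the circular group list */
};

/* Clear the inherited flags on every group in @sd's ring. */
static void clear_group_flags(struct domain *sd)
{
	struct group *sg = sd->groups;

	do {
		sg->flags = 0;
		sg = sg->next;
	} while (sg != sd->groups);
}

int main(void)
{
	struct group a = { .flags = 0x1 }, b = { .flags = 0x1 };
	struct domain parent = { .groups = &a };

	a.next = &b;
	b.next = &a;		/* close the ring */

	clear_group_flags(&parent);
	printf("a.flags=%u b.flags=%u\n", a.flags, b.flags);	/* both 0 */
	return 0;
}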

--- 182 unchanged lines hidden ---

@@ -911,20 +923,22 @@
 
 	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
 			GFP_KERNEL, cpu_to_node(cpu));
 
 	if (!sg)
 		return NULL;
 
 	sg_span = sched_group_span(sg);
-	if (sd->child)
+	if (sd->child) {
 		cpumask_copy(sg_span, sched_domain_span(sd->child));
-	else
+		sg->flags = sd->child->flags;
+	} else {
 		cpumask_copy(sg_span, sched_domain_span(sd));
+	}
 
 	atomic_inc(&sg->ref);
 	return sg;
 }
 
 static void init_overlap_sched_group(struct sched_domain *sd,
 				     struct sched_group *sg)
 {
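Note: here the group built for a domain copies its span from the child domain when one exists, and with this change it also records the child's flags (a group at the lowest level, which has no child, is left with flags == 0). A stand-alone sketch of that flow, using a plain bitmask in place of cpumasks and hypothetical type names, under those simplifying assumptions:

#include <stdio.h>
#include <stdlib.h>

struct sdomain {
	struct sdomain *child;
	unsigned long span;	/* stand-in for the domain's cpumask */
	unsigned int flags;
};

struct sgroup {
	unsigned long span;	/* stand-in for sched_group_span() */
	unsigned int flags;	/* child domain's flags, or 0 at the lowest level */
	int ref;
};

static struct sgroup *build_group(const struct sdomain *sd)
{
	struct sgroup *sg = calloc(1, sizeof(*sg));

	if (!sg)
		return NULL;

	if (sd->child) {
		sg->span = sd->child->span;
		sg->flags = sd->child->flags;	/* group mirrors the child's flags */
	} else {
		sg->span = sd->span;		/* no child: cover the domain itself */
	}

	sg->ref++;
	return sg;
}

int main(void)
{
	struct sdomain smt = { .span = 0x3, .flags = 0x1 };	/* hypothetical child level */
	struct sdomain mc  = { .child = &smt, .span = 0xf };

	struct sgroup *sg = build_group(&mc);
	if (sg)
		printf("span=%#lx flags=%#x\n", sg->span, sg->flags);
	free(sg);
	return 0;
}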

--- 233 unchanged lines hidden ---

@@ -1164,16 +1178,17 @@
 
 	/* If we have already visited that group, it's already initialized. */
 	if (already_visited)
 		return sg;
 
 	if (child) {
 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
+		sg->flags = child->flags;
 	} else {
 		cpumask_set_cpu(cpu, sched_group_span(sg));
 		cpumask_set_cpu(cpu, group_balance_mask(sg));
 	}
 
 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
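Note: the same mirroring is applied to the non-overlapping groups built here. The in-diff comment in the first hunk says groups hold the child domain's flags "for convenience": once a group carries those flags, code that inspects groups can test topology properties of the group (for example, whether it spans SMT siblings) without reaching into the child domain. A hedged stand-alone sketch of such a query; SD_SHARE_CPUCAPACITY is a real scheduler-domain flag name, but its value and the struct used here are simplified for the example.

#include <stdio.h>
#include <stdbool.h>

#define SD_SHARE_CPUCAPACITY	0x0001	/* placeholder value for this sketch */

struct sgroup {
	unsigned int flags;	/* flags of the child domain, or 0 */
};

/* Does this group span the SMT siblings of one core? */
static bool group_is_smt(const struct sgroup *sg)
{
	return sg->flags & SD_SHARE_CPUCAPACITY;
}

int main(void)
{
	struct sgroup smt_group  = { .flags = SD_SHARE_CPUCAPACITY };
	struct sgroup core_group = { .flags = 0 };

	printf("smt_group  spans SMT siblings: %d\n", group_is_smt(&smt_group));
	printf("core_group spans SMT siblings: %d\n", group_is_smt(&core_group));
	return 0;
}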

--- 1339 unchanged lines hidden ---