Lines matching refs:sgs — cross-reference hits for the sgs argument/local (struct sg_lb_stats) in the scheduler's load-balancing and wakeup-placement code, kernel/sched/fair.c. Each hit shows the source line number, the line itself, and the enclosing function.

9686 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs) in group_has_capacity() argument
9688 if (sgs->sum_nr_running < sgs->group_weight) in group_has_capacity()
9691 if ((sgs->group_capacity * imbalance_pct) < in group_has_capacity()
9692 (sgs->group_runnable * 100)) in group_has_capacity()
9695 if ((sgs->group_capacity * 100) > in group_has_capacity()
9696 (sgs->group_util * imbalance_pct)) in group_has_capacity()
9711 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs) in group_is_overloaded() argument
9713 if (sgs->sum_nr_running <= sgs->group_weight) in group_is_overloaded()
9716 if ((sgs->group_capacity * 100) < in group_is_overloaded()
9717 (sgs->group_util * imbalance_pct)) in group_is_overloaded()
9720 if ((sgs->group_capacity * imbalance_pct) < in group_is_overloaded()
9721 (sgs->group_runnable * 100)) in group_is_overloaded()
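
The hits at 9686-9696 and 9711-9721 above contain essentially the whole comparison logic of group_has_capacity() and group_is_overloaded(). Below is a minimal user-space sketch of that logic: struct sg_lb_stats is cut down to just the fields these hits touch and the surrounding kernel context is left out, so read it as an illustration of the thresholds rather than the real implementation.

    #include <stdbool.h>

    /* Trimmed stand-in for the scheduler's struct sg_lb_stats: only the
     * fields referenced by the hits above. */
    struct sg_lb_stats {
        unsigned long group_capacity;   /* total compute capacity of the group */
        unsigned long group_util;       /* sum of per-CPU utilization */
        unsigned long group_runnable;   /* sum of per-CPU runnable averages */
        unsigned int sum_nr_running;    /* runnable tasks in the group */
        unsigned int group_weight;      /* number of CPUs in the group */
    };

    /* A group still has capacity if it runs fewer tasks than it has CPUs,
     * or if neither runnable pressure nor utilization has crossed the
     * capacity scaled by imbalance_pct (a percentage above 100, so it
     * leaves some headroom). */
    static bool group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
    {
        if (sgs->sum_nr_running < sgs->group_weight)
            return true;

        if ((sgs->group_capacity * imbalance_pct) <
            (sgs->group_runnable * 100))
            return false;

        if ((sgs->group_capacity * 100) >
            (sgs->group_util * imbalance_pct))
            return true;

        return false;
    }

    /* The counterpart: overloaded only if there are more tasks than CPUs
     * and either utilization or runnable pressure exceeds the scaled
     * capacity. With exactly one task per CPU a group can be neither,
     * i.e. fully busy. */
    static bool group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
    {
        if (sgs->sum_nr_running <= sgs->group_weight)
            return false;

        if ((sgs->group_capacity * 100) <
            (sgs->group_util * imbalance_pct))
            return true;

        if ((sgs->group_capacity * imbalance_pct) <
            (sgs->group_runnable * 100))
            return true;

        return false;
    }

Both results feed group_classify(), the next block of hits, which turns them into the group_overloaded / group_fully_busy / group_has_spare classification.
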
9730 struct sg_lb_stats *sgs) in group_classify() argument
9732 if (group_is_overloaded(imbalance_pct, sgs)) in group_classify()
9738 if (sgs->group_asym_packing) in group_classify()
9741 if (sgs->group_smt_balance) in group_classify()
9744 if (sgs->group_misfit_task_load) in group_classify()
9747 if (!group_has_capacity(imbalance_pct, sgs)) in group_classify()
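
The group_classify() hits at 9730-9747 give the priority order in which a group's type is decided. The sketch below reproduces only the checks visible in those hits; the real function also takes the struct sched_group and tests state that never dereferences sgs (for example the group-imbalance flag), which is why those checks do not appear in this reference list. The enum layout is the one used by recent kernels and is stated from memory, not from the hits.

    /* Group types from least to most loaded; the relative order matters,
     * because update_sd_pick_busiest() prefers the higher value. */
    enum group_type {
        group_has_spare = 0,
        group_fully_busy,
        group_misfit_task,
        group_smt_balance,
        group_asym_packing,
        group_imbalanced,
        group_overloaded,
    };

    /* Extra sg_lb_stats fields assumed on top of the trimmed struct in the
     * previous sketch:
     *     unsigned int group_asym_packing;
     *     unsigned int group_smt_balance;
     *     unsigned long group_misfit_task_load;
     */
    static enum group_type group_classify(unsigned int imbalance_pct,
                                          struct sg_lb_stats *sgs)
    {
        if (group_is_overloaded(imbalance_pct, sgs))
            return group_overloaded;

        if (sgs->group_asym_packing)
            return group_asym_packing;

        if (sgs->group_smt_balance)
            return group_smt_balance;

        if (sgs->group_misfit_task_load)
            return group_misfit_task;

        if (!group_has_capacity(imbalance_pct, sgs))
            return group_fully_busy;

        return group_has_spare;
    }
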
9794 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, in sched_asym() argument
9806 if (sgs->group_weight - sgs->idle_cpus != 1) in sched_asym()
9824 static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs, in smt_balance() argument
9837 sgs->sum_h_nr_running > 1) in smt_balance()
9902 struct sg_lb_stats *sgs, in update_sg_lb_stats() argument
9907 memset(sgs, 0, sizeof(*sgs)); in update_sg_lb_stats()
9915 sgs->group_load += load; in update_sg_lb_stats()
9916 sgs->group_util += cpu_util_cfs(i); in update_sg_lb_stats()
9917 sgs->group_runnable += cpu_runnable(rq); in update_sg_lb_stats()
9918 sgs->sum_h_nr_running += rq->cfs.h_nr_running; in update_sg_lb_stats()
9921 sgs->sum_nr_running += nr_running; in update_sg_lb_stats()
9930 sgs->nr_numa_running += rq->nr_numa_running; in update_sg_lb_stats()
9931 sgs->nr_preferred_running += rq->nr_preferred_running; in update_sg_lb_stats()
9937 sgs->idle_cpus++; in update_sg_lb_stats()
9947 if (sgs->group_misfit_task_load < rq->misfit_task_load) { in update_sg_lb_stats()
9948 sgs->group_misfit_task_load = rq->misfit_task_load; in update_sg_lb_stats()
9954 if (sgs->group_misfit_task_load < load) in update_sg_lb_stats()
9955 sgs->group_misfit_task_load = load; in update_sg_lb_stats()
9959 sgs->group_capacity = group->sgc->capacity; in update_sg_lb_stats()
9961 sgs->group_weight = group->group_weight; in update_sg_lb_stats()
9965 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && in update_sg_lb_stats()
9966 sched_asym(env, sds, sgs, group)) { in update_sg_lb_stats()
9967 sgs->group_asym_packing = 1; in update_sg_lb_stats()
9971 if (!local_group && smt_balance(env, sgs, group)) in update_sg_lb_stats()
9972 sgs->group_smt_balance = 1; in update_sg_lb_stats()
9974 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); in update_sg_lb_stats()
9977 if (sgs->group_type == group_overloaded) in update_sg_lb_stats()
9978 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / in update_sg_lb_stats()
9979 sgs->group_capacity; in update_sg_lb_stats()
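
Two things stand out in the update_sg_lb_stats() hits: the per-CPU accumulation loop (9915-9937) that sums load, utilization, runnable pressure, task counts and idle CPUs across the group, and the final avg_load computation (9977-9979), which the kernel only performs for overloaded groups (update_sg_wakeup_stats() later does the same for fully-busy groups as well). avg_load is load normalized by group capacity, so groups of different sizes or CPU capacities compare on one SCHED_CAPACITY_SCALE-based scale. A tiny self-contained example of that normalization:

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    /* avg_load as computed at the end of update_sg_lb_stats(): group load
     * scaled by SCHED_CAPACITY_SCALE and divided by total group capacity. */
    static unsigned long sg_avg_load(unsigned long group_load,
                                     unsigned long group_capacity)
    {
        return (group_load * SCHED_CAPACITY_SCALE) / group_capacity;
    }

    int main(void)
    {
        /* The same absolute load looks twice as heavy on a group with half
         * the capacity, which is what makes the comparison fair between
         * big and little groups. */
        unsigned long load = 3 * SCHED_CAPACITY_SCALE;

        printf("4-CPU group: avg_load = %lu\n",
               sg_avg_load(load, 4 * SCHED_CAPACITY_SCALE));  /* 768 */
        printf("2-CPU group: avg_load = %lu\n",
               sg_avg_load(load, 2 * SCHED_CAPACITY_SCALE));  /* 1536 */
        return 0;
    }
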
9998 struct sg_lb_stats *sgs) in update_sd_pick_busiest() argument
10003 if (!sgs->sum_h_nr_running) in update_sd_pick_busiest()
10013 (sgs->group_type == group_misfit_task) && in update_sd_pick_busiest()
10018 if (sgs->group_type > busiest->group_type) in update_sd_pick_busiest()
10021 if (sgs->group_type < busiest->group_type) in update_sd_pick_busiest()
10029 switch (sgs->group_type) { in update_sd_pick_busiest()
10032 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
10054 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) in update_sd_pick_busiest()
10063 if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) in update_sd_pick_busiest()
10081 if (sgs->avg_load < busiest->avg_load) in update_sd_pick_busiest()
10084 if (sgs->avg_load == busiest->avg_load) { in update_sd_pick_busiest()
10102 if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1) in update_sd_pick_busiest()
10116 if (sgs->idle_cpus > busiest->idle_cpus) in update_sd_pick_busiest()
10118 else if ((sgs->idle_cpus == busiest->idle_cpus) && in update_sd_pick_busiest()
10119 (sgs->sum_nr_running <= busiest->sum_nr_running)) in update_sd_pick_busiest()
10132 (sgs->group_type <= group_fully_busy) && in update_sd_pick_busiest()
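
The update_sd_pick_busiest() hits at 9998-10132 sketch a two-stage decision: a group with a higher group_type always beats the current busiest, and only within the same type do the per-type tie-breaks apply. The comparison below is a simplified reconstruction of that order; it reuses enum group_type from the group_classify() sketch, keeps only the tie-breaks visible in the hits, and drops the asym-packing, SMT and misfit-capacity special cases, so it illustrates the ordering rather than the kernel's exact policy.

    #include <stdbool.h>

    /* Subset of sg_lb_stats fields read by the pick/aggregate sketches below. */
    struct sg_stats {
        enum group_type group_type;
        unsigned long group_load;
        unsigned long group_capacity;
        unsigned long group_util;
        unsigned long avg_load;
        unsigned long group_misfit_task_load;
        unsigned int sum_h_nr_running;
        unsigned int sum_nr_running;
        unsigned int idle_cpus;
    };

    /* Should candidate cand replace the currently selected busiest group? */
    static bool pick_busier(const struct sg_stats *cand, const struct sg_stats *busiest)
    {
        /* A group with no CFS tasks has nothing to pull. */
        if (!cand->sum_h_nr_running)
            return false;

        /* A more loaded classification always wins. */
        if (cand->group_type != busiest->group_type)
            return cand->group_type > busiest->group_type;

        switch (cand->group_type) {
        case group_fully_busy:
        case group_overloaded:
            /* Highest capacity-normalized load wins (the SMT tie-break the
             * kernel applies on equal fully-busy loads is omitted here). */
            return cand->avg_load > busiest->avg_load;
        case group_misfit_task:
            /* Biggest misfit task load wins. */
            return cand->group_misfit_task_load >= busiest->group_misfit_task_load;
        case group_has_spare:
            /* Fewest idle CPUs, then most running tasks, wins. */
            if (cand->idle_cpus != busiest->idle_cpus)
                return cand->idle_cpus < busiest->idle_cpus;
            return cand->sum_nr_running > busiest->sum_nr_running;
        default:
            /* Imbalanced, asym-packing and SMT tie-breaks omitted. */
            return false;
        }
    }
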
10140 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) in fbq_classify_group() argument
10142 if (sgs->sum_h_nr_running > sgs->nr_numa_running) in fbq_classify_group()
10144 if (sgs->sum_h_nr_running > sgs->nr_preferred_running) in fbq_classify_group()
10158 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) in fbq_classify_group() argument
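
fbq_classify_group() (hits at 10142-10144, with the stub at 10158 for builds without NUMA balancing) buckets a group by how well its runnable tasks sit on their preferred NUMA node. The reconstruction below keeps the two comparisons from the hits; the enum values and their meaning are recalled from fair.c rather than visible here, so treat the labels as an assumption.

    /* NUMA classification used by the load balancer's busiest-queue search. */
    enum fbq_type { regular, remote, all };

    static enum fbq_type fbq_classify(unsigned int sum_h_nr_running,
                                      unsigned int nr_numa_running,
                                      unsigned int nr_preferred_running)
    {
        /* Some runnable tasks have no NUMA preference at all. */
        if (sum_h_nr_running > nr_numa_running)
            return regular;
        /* Only NUMA tasks, but some run away from their preferred node. */
        if (sum_h_nr_running > nr_preferred_running)
            return remote;
        /* Every task already runs on its preferred node. */
        return all;
    }
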
10225 struct sg_lb_stats *sgs, in update_sg_wakeup_stats() argument
10230 memset(sgs, 0, sizeof(*sgs)); in update_sg_wakeup_stats()
10234 sgs->group_misfit_task_load = 1; in update_sg_wakeup_stats()
10240 sgs->group_load += cpu_load_without(rq, p); in update_sg_wakeup_stats()
10241 sgs->group_util += cpu_util_without(i, p); in update_sg_wakeup_stats()
10242 sgs->group_runnable += cpu_runnable_without(rq, p); in update_sg_wakeup_stats()
10244 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; in update_sg_wakeup_stats()
10247 sgs->sum_nr_running += nr_running; in update_sg_wakeup_stats()
10253 sgs->idle_cpus++; in update_sg_wakeup_stats()
10257 sgs->group_misfit_task_load && in update_sg_wakeup_stats()
10259 sgs->group_misfit_task_load = 0; in update_sg_wakeup_stats()
10263 sgs->group_capacity = group->sgc->capacity; in update_sg_wakeup_stats()
10265 sgs->group_weight = group->group_weight; in update_sg_wakeup_stats()
10267 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); in update_sg_wakeup_stats()
10273 if (sgs->group_type == group_fully_busy || in update_sg_wakeup_stats()
10274 sgs->group_type == group_overloaded) in update_sg_wakeup_stats()
10275 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / in update_sg_wakeup_stats()
10276 sgs->group_capacity; in update_sg_wakeup_stats()
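
update_sg_wakeup_stats() (hits 10225-10276) is the wakeup-path twin of update_sg_lb_stats(): it builds the same group statistics but with the waking task p factored out, which is what the *_without() helpers and the "- local" term at 10244 are doing. A toy sketch of that exclusion pattern, with per-CPU state reduced to plain values (the real helpers also guard against tasks that have no accumulated PELT contribution yet, which is skipped here):

    /* Toy per-CPU state standing in for what cpu_load()/h_nr_running report. */
    struct cpu_state {
        unsigned long load;           /* runqueue load average */
        unsigned int h_nr_running;    /* CFS tasks on the runqueue */
    };

    /* Sum group statistics while discounting task p, which is still
     * accounted on p_cpu: its load and its presence in h_nr_running must
     * not make the group currently holding it look busier than it will be
     * once p has been placed. */
    static void group_stats_without(const struct cpu_state *cpus, int nr_cpus,
                                    int p_cpu, unsigned long p_load,
                                    unsigned long *group_load,
                                    unsigned int *sum_h_nr_running)
    {
        *group_load = 0;
        *sum_h_nr_running = 0;

        for (int i = 0; i < nr_cpus; i++) {
            /* Mirrors the "rq->cfs.h_nr_running - local" hit above:
             * local is 1 only on the CPU where p is queued. */
            unsigned int local = (i == p_cpu) ? 1 : 0;

            *group_load += cpus[i].load - (local ? p_load : 0);
            *sum_h_nr_running += cpus[i].h_nr_running - local;
        }
    }
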
10282 struct sg_lb_stats *sgs) in update_pick_idlest() argument
10284 if (sgs->group_type < idlest_sgs->group_type) in update_pick_idlest()
10287 if (sgs->group_type > idlest_sgs->group_type) in update_pick_idlest()
10295 switch (sgs->group_type) { in update_pick_idlest()
10299 if (idlest_sgs->avg_load <= sgs->avg_load) in update_pick_idlest()
10317 if (idlest_sgs->idle_cpus > sgs->idle_cpus) in update_pick_idlest()
10321 if (idlest_sgs->idle_cpus == sgs->idle_cpus && in update_pick_idlest()
10322 idlest_sgs->group_util <= sgs->group_util) in update_pick_idlest()
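
update_pick_idlest() (hits 10282-10322) is the mirror image of update_sd_pick_busiest() for the slow wakeup path: the lower group_type wins outright, and within a type the group with the lower avg_load, or with more idle CPUs and less utilization, is kept. A simplified reconstruction, reusing struct sg_stats and enum group_type from the sketches above and leaving out the cases these hits do not cover (misfit capacity, and the types the wakeup path never produces):

    /* Should candidate cand replace the currently selected idlest group? */
    static bool pick_idler(const struct sg_stats *cand, const struct sg_stats *idlest)
    {
        /* A less loaded classification always wins. */
        if (cand->group_type != idlest->group_type)
            return cand->group_type < idlest->group_type;

        switch (cand->group_type) {
        case group_fully_busy:
        case group_overloaded:
            /* Lowest capacity-normalized load wins. */
            return cand->avg_load < idlest->avg_load;
        case group_has_spare:
            /* Most idle CPUs, then lowest utilization, wins. */
            if (cand->idle_cpus != idlest->idle_cpus)
                return cand->idle_cpus > idlest->idle_cpus;
            return cand->group_util < idlest->group_util;
        default:
            return false;
        }
    }
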
10342 struct sg_lb_stats *sgs; in find_idlest_group() local
10365 sgs = &local_sgs; in find_idlest_group()
10368 sgs = &tmp_sgs; in find_idlest_group()
10371 update_sg_wakeup_stats(sd, group, sgs, p); in find_idlest_group()
10373 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { in find_idlest_group()
10375 idlest_sgs = *sgs; in find_idlest_group()
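
The find_idlest_group() hits (10342-10375) show the shape of the walk: the group containing the waking CPU gets its statistics stored in local_sgs, every other group is measured into a scratch tmp_sgs, and whenever update_pick_idlest() prefers the current group its statistics are copied into idlest_sgs (the function then weighs local against idlest, which falls outside these hits). A compact model of that loop, with groups reduced to precomputed stats and reusing pick_idler() from the previous sketch:

    struct group_desc {
        int is_local;               /* group contains the waking CPU */
        struct sg_stats stats;      /* as update_sg_wakeup_stats() would fill them */
    };

    /* Returns the index of the idlest non-local group, or -1 if there is
     * none; local_sgs and idlest_sgs are filled for the caller's
     * local-vs-idlest decision, mirroring the copies seen in the hits. */
    static int find_idlest_group_sketch(const struct group_desc *groups, int nr,
                                        struct sg_stats *local_sgs,
                                        struct sg_stats *idlest_sgs)
    {
        int idlest = -1;

        for (int i = 0; i < nr; i++) {
            const struct sg_stats *sgs = &groups[i].stats;

            if (groups[i].is_local) {
                *local_sgs = *sgs;
                continue;
            }

            if (idlest < 0 || pick_idler(sgs, idlest_sgs)) {
                idlest = i;
                *idlest_sgs = *sgs;
            }
        }

        return idlest;
    }
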
10589 struct sg_lb_stats *sgs = &tmp_sgs; in update_sd_lb_stats() local
10595 sgs = local; in update_sd_lb_stats()
10602 update_sg_lb_stats(env, sds, sg, sgs, &sg_status); in update_sd_lb_stats()
10608 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
10610 sds->busiest_stat = *sgs; in update_sd_lb_stats()
10615 sds->total_load += sgs->group_load; in update_sd_lb_stats()
10616 sds->total_capacity += sgs->group_capacity; in update_sd_lb_stats()
10618 sum_util += sgs->group_util; in update_sd_lb_stats()
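
update_sd_lb_stats() (hits 10589-10618) does the periodic-balance version of the same walk: every group's statistics are computed with update_sg_lb_stats(), the non-local group that update_sd_pick_busiest() prefers is remembered together with a copy of its statistics, and domain-wide totals (load, capacity, utilization) are accumulated for the later imbalance calculation. A model of that aggregation, again over precomputed per-group stats and reusing pick_busier() and struct group_desc from the sketches above:

    #include <limits.h>

    /* Domain-wide totals gathered alongside the busiest-group selection. */
    struct sd_totals {
        unsigned long total_load;
        unsigned long total_capacity;
        unsigned long sum_util;
    };

    /* Returns the index of the busiest non-local group, or -1 if none
     * qualifies; busiest_stat receives a copy of its statistics, mirroring
     * "sds->busiest_stat = *sgs" in the hits above. */
    static int update_sd_lb_stats_sketch(const struct group_desc *groups, int nr,
                                         struct sg_stats *busiest_stat,
                                         struct sd_totals *sds)
    {
        int busiest = -1;

        sds->total_load = 0;
        sds->total_capacity = 0;
        sds->sum_util = 0;

        /* Start from a sentinel any real group beats: an all-idle group
         * with spare capacity (the kernel seeds busiest_stat similarly). */
        *busiest_stat = (struct sg_stats){
            .group_type = group_has_spare,
            .idle_cpus = UINT_MAX,
        };

        for (int i = 0; i < nr; i++) {
            const struct sg_stats *sgs = &groups[i].stats;

            if (!groups[i].is_local && pick_busier(sgs, busiest_stat)) {
                busiest = i;
                *busiest_stat = *sgs;
            }

            sds->total_load += sgs->group_load;
            sds->total_capacity += sgs->group_capacity;
            sds->sum_util += sgs->group_util;
        }

        return busiest;
    }
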