Lines Matching +full:se +full:- +full:pos
1 // SPDX-License-Identifier: GPL-2.0
43 #include <linux/memory-tiers.h>
61 * The initial- and re-scaling of tunables is configurable
65 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
66 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
67 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
74 * Minimal preemption granularity for CPU-bound tasks:
108 return -cpu;
129 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
188 lw->weight += inc;
189 lw->inv_weight = 0;
194 lw->weight -= dec;
195 lw->inv_weight = 0;
200 lw->weight = w;
201 lw->inv_weight = 0;
208 * so pick a second-best guess by going with the log2 of the
256 if (likely(lw->inv_weight))
259 w = scale_load_down(lw->weight);
262 lw->inv_weight = 1;
264 lw->inv_weight = WMULT_CONST;
266 lw->inv_weight = WMULT_CONST / w;
272 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
292 shift -= fs;
296 fact = mul_u32_u32(fact, lw->inv_weight);
301 shift -= fs;
311 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
313 if (unlikely(se->load.weight != NICE_0_LOAD))
314 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
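
The __calc_delta() comment above describes replacing a division by the load weight with a multiplication by a pre-computed inverse weight. Below is a minimal userspace sketch of that trick, not the kernel implementation (which scales the factor down and splits the multiply to avoid overflow, and caches lw->inv_weight):

#include <stdint.h>
#include <stdio.h>

#define TOY_WMULT_SHIFT	32

/*
 * delta * weight / lw_weight, computed as
 * (delta * weight * inv_weight) >> TOY_WMULT_SHIFT with
 * inv_weight = 2^32 / lw_weight precomputed, so the hot path needs
 * no division. Assumes the intermediate product fits in 64 bits.
 */
static uint64_t toy_calc_delta(uint64_t delta, uint32_t weight, uint32_t lw_weight)
{
	uint64_t inv_weight = (1ULL << TOY_WMULT_SHIFT) / lw_weight;

	return (delta * weight * inv_weight) >> TOY_WMULT_SHIFT;
}

int main(void)
{
	/* A weight-2048 entity accrues vruntime at half the nice-0 (1024) rate. */
	printf("%llu\n", (unsigned long long)toy_calc_delta(1000000, 1024, 2048));
	return 0;
}
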
328 #define for_each_sched_entity(se) \
329 for (; se; se = se->parent)
336 if (cfs_rq->on_list)
337 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
339 cfs_rq->on_list = 1;
344 * enqueued. The fact that we always enqueue bottom-up
350 if (cfs_rq->tg->parent &&
351 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
358 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
359 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
365 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
369 if (!cfs_rq->tg->parent) {
374 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
375 &rq->leaf_cfs_rq_list);
380 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
390 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
395 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
401 if (cfs_rq->on_list) {
408 * to the prev element but it will point to rq->leaf_cfs_rq_list
411 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
412 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
414 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
415 cfs_rq->on_list = 0;
421 SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
425 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
426 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
431 is_same_group(struct sched_entity *se, struct sched_entity *pse)
433 if (se->cfs_rq == pse->cfs_rq)
434 return se->cfs_rq;
439 static inline struct sched_entity *parent_entity(const struct sched_entity *se)
441 return se->parent;
445 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
457 se_depth = (*se)->depth;
458 pse_depth = (*pse)->depth;
461 se_depth--;
462 *se = parent_entity(*se);
466 pse_depth--;
470 while (!is_same_group(*se, *pse)) {
471 *se = parent_entity(*se);
478 return tg->idle > 0;
483 return cfs_rq->idle > 0;
486 static int se_is_idle(struct sched_entity *se)
488 if (entity_is_task(se))
489 return task_has_idle_policy(task_of(se));
490 return cfs_rq_is_idle(group_cfs_rq(se));
495 #define for_each_sched_entity(se) \
496 for (; se; se = NULL)
511 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
512 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
514 static inline struct sched_entity *parent_entity(struct sched_entity *se)
520 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
534 static int se_is_idle(struct sched_entity *se)
536 return task_has_idle_policy(task_of(se));
550 s64 delta = (s64)(vruntime - max_vruntime);
559 s64 delta = (s64)(vruntime - min_vruntime);
569 return (s64)(a->vruntime - b->vruntime) < 0;
572 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
574 return (s64)(se->vruntime - cfs_rq->min_vruntime);
581 * Compute virtual time from the per-task service numbers:
589 * lag_i = S - s_i = w_i * (V - v_i)
595 * \Sum w_i * (V - v_i) = 0
596 * \Sum w_i * V - w_i * v_i = 0
599 * se->vruntime):
602 * V = (\Sum v_i * w_i) / (\Sum w_i) = (\Sum v_i * w_i) / W
609 * virtual time has non-contiguous motion equivalent to:
611 * V +-= lag_i / W
618 * Substitute: v_i == (v_i - v0) + v0
620 * V = (\Sum ((v_i - v0) + v0) * w_i) / (\Sum w_i)
621 * = (\Sum (v_i - v0) * w_i) / (\Sum w_i) + v0
626 * v0 := cfs_rq->min_vruntime
627 * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
628 * \Sum w_i := cfs_rq->avg_load
631 * the per-task service, these deltas: (v_i - v), will be in the order of the
639 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
641 unsigned long weight = scale_load_down(se->load.weight);
642 s64 key = entity_key(cfs_rq, se);
644 cfs_rq->avg_vruntime += key * weight;
645 cfs_rq->avg_load += weight;
649 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
651 unsigned long weight = scale_load_down(se->load.weight);
652 s64 key = entity_key(cfs_rq, se);
654 cfs_rq->avg_vruntime -= key * weight;
655 cfs_rq->avg_load -= weight;
662 * v' = v + d ==> avg_vruntime' = avg_vruntime - d*avg_load
664 cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
673 struct sched_entity *curr = cfs_rq->curr;
674 s64 avg = cfs_rq->avg_vruntime;
675 long load = cfs_rq->avg_load;
677 if (curr && curr->on_rq) {
678 unsigned long weight = scale_load_down(curr->load.weight);
687 avg -= (load - 1);
691 return cfs_rq->min_vruntime + avg;
695 * lag_i = S - s_i = w_i * (V - v_i)
698 * is possible -- by addition/removal/reweight to the tree -- to move V around
706 * -r_max < lag < max(r_max, q)
710 static s64 entity_lag(u64 avruntime, struct sched_entity *se)
714 vlag = avruntime - se->vruntime;
715 limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
717 return clamp(vlag, -limit, limit);
720 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
722 SCHED_WARN_ON(!se->on_rq);
724 se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
731 * lag_i = S - s_i = w_i*(V - v_i)
733 * lag_i >= 0 -> V >= v_i
735 * V = (\Sum (v_i - v)*w_i) / (\Sum w_i) + v
739 * lag_i >= 0 -> \Sum (v_i - v)*w_i >= (v_i - v)*(\Sum w_i)
741 * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
744 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
746 struct sched_entity *curr = cfs_rq->curr;
747 s64 avg = cfs_rq->avg_vruntime;
748 long load = cfs_rq->avg_load;
750 if (curr && curr->on_rq) {
751 unsigned long weight = scale_load_down(curr->load.weight);
757 return avg >= entity_key(cfs_rq, se) * load;
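
A standalone sketch (simplified types, not the kernel code) of the bookkeeping described in the two comment blocks above: keep \Sum (v_i - v0)*w_i and \Sum w_i relative to v0 = min_vruntime, recover the weighted average V on demand, and test eligibility by cross-multiplying so no division is needed:

#include <stdint.h>
#include <stdio.h>

struct toy_rq {
	int64_t  avg_vruntime;	/* \Sum (v_i - v0) * w_i */
	int64_t  avg_load;	/* \Sum w_i */
	uint64_t min_vruntime;	/* v0 */
};

static void toy_enqueue(struct toy_rq *rq, uint64_t v, int64_t w)
{
	rq->avg_vruntime += (int64_t)(v - rq->min_vruntime) * w;
	rq->avg_load += w;
}

static uint64_t toy_avg_vruntime(const struct toy_rq *rq)
{
	/* V = v0 + \Sum (v_i - v0)*w_i / \Sum w_i */
	return rq->min_vruntime + rq->avg_vruntime / rq->avg_load;
}

static int toy_eligible(const struct toy_rq *rq, uint64_t v)
{
	/* v_i <= V  <=>  (v_i - v0) * \Sum w_i <= \Sum (v_i - v0)*w_i */
	return rq->avg_vruntime >= (int64_t)(v - rq->min_vruntime) * rq->avg_load;
}

int main(void)
{
	struct toy_rq rq = { .min_vruntime = 1000 };

	toy_enqueue(&rq, 1000, 1024);	/* at v0 */
	toy_enqueue(&rq, 4000, 1024);	/* 3000 ahead of v0 */

	printf("V = %llu\n", (unsigned long long)toy_avg_vruntime(&rq));	/* 2500 */
	printf("eligible(1000) = %d\n", toy_eligible(&rq, 1000));		/* 1 */
	printf("eligible(4000) = %d\n", toy_eligible(&rq, 4000));		/* 0 */
	return 0;
}
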
762 u64 min_vruntime = cfs_rq->min_vruntime;
766 s64 delta = (s64)(vruntime - min_vruntime);
776 struct sched_entity *se = __pick_first_entity(cfs_rq);
777 struct sched_entity *curr = cfs_rq->curr;
779 u64 vruntime = cfs_rq->min_vruntime;
782 if (curr->on_rq)
783 vruntime = curr->vruntime;
788 if (se) {
790 vruntime = se->vruntime;
792 vruntime = min_vruntime(vruntime, se->vruntime);
796 u64_u32_store(cfs_rq->min_vruntime,
805 #define deadline_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
807 static inline void __update_min_deadline(struct sched_entity *se, struct rb_node *node)
811 if (deadline_gt(min_deadline, se, rse))
812 se->min_deadline = rse->min_deadline;
817 * se->min_deadline = min(se->deadline, left->min_deadline, right->min_deadline)
819 static inline bool min_deadline_update(struct sched_entity *se, bool exit)
821 u64 old_min_deadline = se->min_deadline;
822 struct rb_node *node = &se->run_node;
824 se->min_deadline = se->deadline;
825 __update_min_deadline(se, node->rb_right);
826 __update_min_deadline(se, node->rb_left);
828 return se->min_deadline == old_min_deadline;
835 * Enqueue an entity into the rb-tree:
837 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
839 avg_vruntime_add(cfs_rq, se);
840 se->min_deadline = se->deadline;
841 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
845 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
847 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
849 avg_vruntime_sub(cfs_rq, se);
854 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
873 * We can do this in O(log n) time due to an augmented RB-tree. The
877 * se->min_deadline = min(se->deadline, se->{left,right}->min_deadline)
883 struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
884 struct sched_entity *curr = cfs_rq->curr;
888 if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
893 * Once selected, run a task until it either becomes non-eligible or
896 if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
900 struct sched_entity *se = __node_2_se(node);
905 if (!entity_eligible(cfs_rq, se)) {
906 node = node->rb_left;
913 if (!best || deadline_gt(deadline, best, se))
914 best = se;
917 * Every se in a left branch is eligible, keep track of the
920 if (node->rb_left) {
921 struct sched_entity *left = __node_2_se(node->rb_left);
931 if (left->min_deadline == se->min_deadline)
936 if (se->deadline == se->min_deadline)
940 node = node->rb_right;
947 if (!best_left || (s64)(best_left->min_deadline - best->deadline) > 0)
954 node = &best_left->run_node;
956 struct sched_entity *se = __node_2_se(node);
959 if (se->deadline == se->min_deadline)
960 return se;
963 if (node->rb_left &&
964 __node_2_se(node->rb_left)->min_deadline == se->min_deadline) {
965 node = node->rb_left;
970 node = node->rb_right;
977 struct sched_entity *se = __pick_eevdf(cfs_rq);
979 if (!se) {
987 return se;
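
For reference, a brute-force O(n) illustration (not the kernel algorithm) of the rule that __pick_eevdf() evaluates in O(log n) via the min_deadline-augmented rbtree: among eligible entities, i.e. those whose vruntime is not past the weighted average V, pick the earliest virtual deadline.

#include <stdint.h>
#include <stdio.h>

struct toy_se {
	int64_t vruntime;
	int64_t deadline;
};

static const struct toy_se *toy_pick_eevdf(const struct toy_se *se, int nr, int64_t V)
{
	const struct toy_se *best = NULL;

	for (int i = 0; i < nr; i++) {
		if (se[i].vruntime > V)		/* lag < 0: not eligible */
			continue;
		if (!best || se[i].deadline < best->deadline)
			best = &se[i];
	}
	return best;	/* NULL only if nothing is eligible */
}

int main(void)
{
	struct toy_se se[] = {
		{ .vruntime = 100, .deadline = 500 },
		{ .vruntime = 200, .deadline = 300 },	/* eligible, earliest deadline */
		{ .vruntime = 900, .deadline = 150 },	/* earliest deadline, not eligible */
	};
	const struct toy_se *pick = toy_pick_eevdf(se, 3, 400);

	printf("picked deadline %lld\n", (long long)(pick ? pick->deadline : -1));
	return 0;
}
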
993 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
1019 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
1025 static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
1027 if ((s64)(se->vruntime - se->deadline) < 0)
1035 se->slice = sysctl_sched_base_slice;
1040 se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
1045 if (cfs_rq->nr_running > 1) {
1047 clear_buddies(cfs_rq, se);
1059 void init_entity_runnable_average(struct sched_entity *se)
1061 struct sched_avg *sa = &se->avg;
1071 if (entity_is_task(se))
1072 sa->load_avg = scale_load_down(se->load.weight);
1081 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
1090 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
1105 struct sched_entity *se = &p->se;
1106 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1107 struct sched_avg *sa = &se->avg;
1109 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
1111 if (p->sched_class != &fair_sched_class) {
1116 attach_entity_load_avg(cfs_rq, se);
1122 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
1127 if (cfs_rq->avg.util_avg != 0) {
1128 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
1129 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
1131 if (sa->util_avg > cap)
1132 sa->util_avg = cap;
1134 sa->util_avg = cap;
1138 sa->runnable_avg = sa->util_avg;
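
A hedged sketch of the util_avg initialisation quoted above; the helper name and flat integer parameters are invented for illustration. A new entity receives a slice of the runqueue's current utilization proportional to its weight, capped at half of the capacity the runqueue is not already using:

#include <stdio.h>

static unsigned long toy_init_util_avg(unsigned long cfs_util_avg,
				       unsigned long cfs_load_avg,
				       unsigned long se_weight,
				       unsigned long cpu_scale)
{
	/* cap = (cpu_scale - cfs_rq->avg.util_avg) / 2 */
	long cap = (long)(cpu_scale - cfs_util_avg) / 2;
	unsigned long util;

	if (cap <= 0)
		return 0;

	if (!cfs_util_avg)
		return (unsigned long)cap;

	/* util_avg = cfs_rq->util_avg * se.load.weight / (cfs_rq->load_avg + 1) */
	util = cfs_util_avg * se_weight / (cfs_load_avg + 1);

	return util > (unsigned long)cap ? (unsigned long)cap : util;
}

int main(void)
{
	/* Half-loaded CPU (util 512/1024) with one nice-0 task already there. */
	printf("%lu\n", toy_init_util_avg(512, 1024, 1024, 1024));
	return 0;
}
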
1142 void init_entity_runnable_average(struct sched_entity *se)
1158 delta_exec = now - curr->exec_start;
1162 curr->exec_start = now;
1163 curr->sum_exec_runtime += delta_exec;
1169 __schedstat_set(stats->exec_max,
1170 max(delta_exec, stats->exec_max));
1188 struct task_struct *curr = rq->curr;
1191 delta_exec = update_curr_se(rq, &curr->se);
1203 struct sched_entity *curr = cfs_rq->curr;
1213 curr->vruntime += calc_delta_fair(delta_exec, curr);
1225 update_curr(cfs_rq_of(&rq->curr->se));
1229 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1237 stats = __schedstats_from_se(se);
1239 if (entity_is_task(se))
1240 p = task_of(se);
1246 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1254 stats = __schedstats_from_se(se);
1257 * When sched_schedstat changes from 0 to 1, some sched se
1258 * may already be in the runqueue; the se->statistics.wait_start
1262 if (unlikely(!schedstat_val(stats->wait_start)))
1265 if (entity_is_task(se))
1266 p = task_of(se);
1272 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1280 stats = __schedstats_from_se(se);
1282 if (entity_is_task(se))
1283 tsk = task_of(se);
1289 * Task is being enqueued - update stats:
1292 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1301 if (se != cfs_rq->curr)
1302 update_stats_wait_start_fair(cfs_rq, se);
1305 update_stats_enqueue_sleeper_fair(cfs_rq, se);
1309 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1319 if (se != cfs_rq->curr)
1320 update_stats_wait_end_fair(cfs_rq, se);
1322 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1323 struct task_struct *tsk = task_of(se);
1327 state = READ_ONCE(tsk->__state);
1329 __schedstat_set(tsk->stats.sleep_start,
1332 __schedstat_set(tsk->stats.block_start,
1338 * We are picking a new current task - update its stats:
1341 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1346 se->exec_start = rq_clock_task(rq_of(cfs_rq));
1438 * ->numa_group (see struct task_struct for locking rules).
1442 return rcu_dereference_check(p->numa_group, p == current ||
1443 (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
1448 return rcu_dereference_protected(p->numa_group, p == current);
1460 * Calculations based on RSS as non-present and empty pages are skipped
1464 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1465 rss = get_mm_rss(p->mm);
1498 ng = rcu_dereference(p->numa_group);
1503 period *= refcount_read(&ng->refcount);
1528 period *= refcount_read(&ng->refcount);
1540 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
1541 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1546 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
1547 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1565 ng = rcu_dereference(p->numa_group);
1567 gid = ng->gid;
1586 if (!p->numa_faults)
1589 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1590 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1600 return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1601 ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1606 return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] +
1607 group->faults[task_faults_idx(NUMA_CPU, nid, 1)];
1616 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
1628 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
1636 * considered part of a numa group's pseudo-interleaving set. Migrations
1643 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1702 faults *= (max_dist - dist);
1703 faults /= (max_dist - LOCAL_DISTANCE);
1723 if (!p->numa_faults)
1726 total_faults = p->total_numa_faults;
1746 total_faults = ng->total_faults;
1782 pgdat->node_present_pages >> 4);
1783 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1784 struct zone *zone = pgdat->node_zones + z;
1804 * hint page fault latency = hint page fault time - scan time
1816 return (time - last_time) & PAGE_ACCESS_TIME_MASK;
1833 start = pgdat->nbp_rl_start;
1834 if (now - start > MSEC_PER_SEC &&
1835 cmpxchg(&pgdat->nbp_rl_start, start, now) == start)
1836 pgdat->nbp_rl_nr_cand = nr_cand;
1837 if (nr_cand - pgdat->nbp_rl_nr_cand >= rate_limit)
1853 start = pgdat->nbp_th_start;
1854 if (now - start > th_period &&
1855 cmpxchg(&pgdat->nbp_th_start, start, now) == start) {
1859 diff_cand = nr_cand - pgdat->nbp_th_nr_cand;
1861 th = pgdat->nbp_threshold ? : ref_th;
1863 th = max(th - unit_th, unit_th);
1866 pgdat->nbp_th_nr_cand = nr_cand;
1867 pgdat->nbp_threshold = th;
1891 pgdat->nbp_threshold = 0;
1897 (20 - PAGE_SHIFT);
1900 th = pgdat->nbp_threshold ? : def_th;
1909 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1919 * two full passes of the "multi-stage node selection" test that is
1922 if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
1927 * Multi-stage node selection is used in conjunction with a periodic
1928 * migration fault to build a temporal task<->page relation. By using
1929 * a two-stage filter we remove short/unlikely relations.
1933 * page (n_t) (in a given time-span) to a probability.
1941 * act on an unlikely task<->page relation.
1951 /* A shared fault, but p->numa_group has not been set up yet. */
1968 * faults_cpu(dst) / faults_mem(dst) * 3/4 > faults_cpu(src) / faults_mem(src)
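
That 3/4 hysteresis can be evaluated without divisions by cross-multiplying the two ratios; a small sketch with invented parameter names:

#include <stdio.h>

static int toy_prefer_dst(unsigned long cpu_dst, unsigned long mem_dst,
			  unsigned long cpu_src, unsigned long mem_src)
{
	/*
	 * cpu_dst/mem_dst * 3/4 > cpu_src/mem_src, cross-multiplied so the
	 * check needs no divisions.
	 */
	return cpu_dst * mem_src * 3 > cpu_src * mem_dst * 4;
}

int main(void)
{
	printf("%d %d\n",
	       toy_prefer_dst(100, 50, 40, 50),	/* 1: dst ratio clearly better */
	       toy_prefer_dst(55, 50, 50, 50));	/* 0: 10% better is within the hysteresis */
	return 0;
}
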
2030 if ((ns->nr_running > ns->weight) &&
2031 (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
2032 ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
2035 if ((ns->nr_running < ns->weight) ||
2036 (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
2037 ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
2078 int cpu, idle_core = -1;
2081 ns->idle_cpu = -1;
2087 ns->load += cpu_load(rq);
2088 ns->runnable += cpu_runnable(rq);
2089 ns->util += cpu_util_cfs(cpu);
2090 ns->nr_running += rq->cfs.h_nr_running;
2091 ns->compute_capacity += capacity_of(cpu);
2093 if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
2094 if (READ_ONCE(rq->numa_migrate_on) ||
2095 !cpumask_test_cpu(cpu, env->p->cpus_ptr))
2098 if (ns->idle_cpu == -1)
2099 ns->idle_cpu = cpu;
2106 ns->weight = cpumask_weight(cpumask_of_node(nid));
2108 ns->node_type = numa_classify(env->imbalance_pct, ns);
2111 ns->idle_cpu = idle_core;
2117 struct rq *rq = cpu_rq(env->dst_cpu);
2119 /* Check if run-queue part of active NUMA balance. */
2120 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
2122 int start = env->dst_cpu;
2125 for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start + 1) {
2126 if (cpu == env->best_cpu || !idle_cpu(cpu) ||
2127 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
2131 env->dst_cpu = cpu;
2132 rq = cpu_rq(env->dst_cpu);
2133 if (!xchg(&rq->numa_migrate_on, 1))
2143 * Clear previous best_cpu/rq numa-migrate flag, since task now
2146 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
2147 rq = cpu_rq(env->best_cpu);
2148 WRITE_ONCE(rq->numa_migrate_on, 0);
2151 if (env->best_task)
2152 put_task_struct(env->best_task);
2156 env->best_task = p;
2157 env->best_imp = imp;
2158 env->best_cpu = env->dst_cpu;
2172 * src_load / src_capacity vs dst_load / dst_capacity
2175 src_capacity = env->src_stats.compute_capacity;
2176 dst_capacity = env->dst_stats.compute_capacity;
2178 imb = abs(dst_load * src_capacity - src_load * dst_capacity);
2180 orig_src_load = env->src_stats.load;
2181 orig_dst_load = env->dst_stats.load;
2183 old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
2205 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
2206 struct rq *dst_rq = cpu_rq(env->dst_cpu);
2210 int dist = env->dist;
2215 if (READ_ONCE(dst_rq->numa_migrate_on))
2219 cur = rcu_dereference(dst_rq->curr);
2220 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
2225 * end try selecting ourselves (current == env->p) as a swap candidate.
2227 if (cur == env->p) {
2233 if (maymove && moveimp >= env->best_imp)
2240 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
2247 if (env->best_task &&
2248 env->best_task->numa_preferred_nid == env->src_nid &&
2249 cur->numa_preferred_nid != env->src_nid) {
2263 cur_ng = rcu_dereference(cur->numa_group);
2271 if (env->dst_stats.node_type == node_has_spare)
2274 imp = taskimp + task_weight(cur, env->src_nid, dist) -
2275 task_weight(cur, env->dst_nid, dist);
2281 imp -= imp / 16;
2288 imp += group_weight(cur, env->src_nid, dist) -
2289 group_weight(cur, env->dst_nid, dist);
2291 imp += task_weight(cur, env->src_nid, dist) -
2292 task_weight(cur, env->dst_nid, dist);
2296 if (cur->numa_preferred_nid == env->dst_nid)
2297 imp -= imp / 16;
2305 if (cur->numa_preferred_nid == env->src_nid)
2308 if (maymove && moveimp > imp && moveimp > env->best_imp) {
2318 if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
2319 env->best_task->numa_preferred_nid != env->src_nid) {
2329 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
2335 load = task_h_load(env->p) - task_h_load(cur);
2339 dst_load = env->dst_stats.load + load;
2340 src_load = env->src_stats.load - load;
2348 int cpu = env->dst_stats.idle_cpu;
2352 cpu = env->dst_cpu;
2358 if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
2359 idle_cpu(env->best_cpu)) {
2360 cpu = env->best_cpu;
2363 env->dst_cpu = cpu;
2373 if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
2380 if (!maymove && env->best_task &&
2381 env->best_task->numa_preferred_nid == env->src_nid) {
2400 if (env->dst_stats.node_type == node_has_spare) {
2410 src_running = env->src_stats.nr_running - 1;
2411 dst_running = env->dst_stats.nr_running + 1;
2412 imbalance = max(0, dst_running - src_running);
2414 env->imb_numa_nr);
2419 if (env->dst_stats.idle_cpu >= 0) {
2420 env->dst_cpu = env->dst_stats.idle_cpu;
2428 * If the improvement from just moving env->p direction is better
2431 load = task_h_load(env->p);
2432 dst_load = env->dst_stats.load + load;
2433 src_load = env->src_stats.load - load;
2437 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
2439 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
2442 env->dst_cpu = cpu;
2460 .best_cpu = -1,
2474 * random movement of tasks -- counter the numa conditions we're trying
2480 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
2481 env.imb_numa_nr = sd->imb_numa_nr;
2493 return -EINVAL;
2496 env.dst_nid = p->numa_preferred_nid;
2501 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
2502 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
2510 * - there is no space available on the preferred_nid
2511 * - the task is part of a numa_group that is interleaved across
2516 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
2518 if (nid == env.src_nid || nid == p->numa_preferred_nid)
2529 taskimp = task_weight(p, nid, dist) - taskweight;
2530 groupimp = group_weight(p, nid, dist) - groupweight;
2550 if (env.best_cpu == -1)
2555 if (nid != p->numa_preferred_nid)
2560 if (env.best_cpu == -1) {
2561 trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
2562 return -EAGAIN;
2568 WRITE_ONCE(best_rq->numa_migrate_on, 0);
2575 WRITE_ONCE(best_rq->numa_migrate_on, 0);
2589 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
2593 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
2594 p->numa_migrate_retry = jiffies + interval;
2597 if (task_node(p) == p->numa_preferred_nid)
2627 numa_group->max_faults_cpu = max_faults;
2628 numa_group->active_nodes = active_nodes;
2654 unsigned long remote = p->numa_faults_locality[0];
2655 unsigned long local = p->numa_faults_locality[1];
2664 if (local + shared == 0 || p->numa_faults_locality[2]) {
2665 p->numa_scan_period = min(p->numa_scan_period_max,
2666 p->numa_scan_period << 1);
2668 p->mm->numa_next_scan = jiffies +
2669 msecs_to_jiffies(p->numa_scan_period);
2680 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
2689 int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
2699 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
2705 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
2710 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
2713 p->numa_scan_period = clamp(p->numa_scan_period + diff,
2715 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2722 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
2729 now = p->se.exec_start;
2730 runtime = p->se.sum_exec_runtime;
2732 if (p->last_task_numa_placement) {
2733 delta = runtime - p->last_sum_exec_runtime;
2734 *period = now - p->last_task_numa_placement;
2740 delta = p->se.avg.load_sum;
2744 p->last_sum_exec_runtime = runtime;
2745 p->last_task_numa_placement = now;
2795 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2849 * The p->mm->numa_scan_seq field gets updated without
2853 seq = READ_ONCE(p->mm->numa_scan_seq);
2854 if (p->numa_scan_seq == seq)
2856 p->numa_scan_seq = seq;
2857 p->numa_scan_period_max = task_scan_max(p);
2859 total_faults = p->numa_faults_locality[0] +
2860 p->numa_faults_locality[1];
2866 group_lock = &ng->lock;
2886 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2887 fault_types[priv] += p->numa_faults[membuf_idx];
2888 p->numa_faults[membuf_idx] = 0;
2894 * little over-all impact on throughput, and thus their
2898 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2900 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2901 p->numa_faults[cpubuf_idx] = 0;
2903 p->numa_faults[mem_idx] += diff;
2904 p->numa_faults[cpu_idx] += f_diff;
2905 faults += p->numa_faults[mem_idx];
2906 p->total_numa_faults += diff;
2915 ng->faults[mem_idx] += diff;
2916 ng->faults[cpu_idx] += f_diff;
2917 ng->total_faults += diff;
2918 group_faults += ng->faults[mem_idx];
2933 /* Cannot migrate task to CPU-less node */
2956 if (max_nid != p->numa_preferred_nid)
2965 return refcount_inc_not_zero(&grp->refcount);
2970 if (refcount_dec_and_test(&grp->refcount))
2992 refcount_set(&grp->refcount, 1);
2993 grp->active_nodes = 1;
2994 grp->max_faults_cpu = 0;
2995 spin_lock_init(&grp->lock);
2996 grp->gid = p->pid;
2999 grp->faults[i] = p->numa_faults[i];
3001 grp->total_faults = p->total_numa_faults;
3003 grp->nr_tasks++;
3004 rcu_assign_pointer(p->numa_group, grp);
3008 tsk = READ_ONCE(cpu_rq(cpu)->curr);
3013 grp = rcu_dereference(tsk->numa_group);
3025 if (my_grp->nr_tasks > grp->nr_tasks)
3029 * Tie-break on the grp address.
3031 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
3035 if (tsk->mm == current->mm)
3054 double_lock_irq(&my_grp->lock, &grp->lock);
3057 my_grp->faults[i] -= p->numa_faults[i];
3058 grp->faults[i] += p->numa_faults[i];
3060 my_grp->total_faults -= p->total_numa_faults;
3061 grp->total_faults += p->total_numa_faults;
3063 my_grp->nr_tasks--;
3064 grp->nr_tasks++;
3066 spin_unlock(&my_grp->lock);
3067 spin_unlock_irq(&grp->lock);
3069 rcu_assign_pointer(p->numa_group, grp);
3084 * reset the data back to default state without freeing ->numa_faults.
3089 struct numa_group *grp = rcu_dereference_raw(p->numa_group);
3090 unsigned long *numa_faults = p->numa_faults;
3098 spin_lock_irqsave(&grp->lock, flags);
3100 grp->faults[i] -= p->numa_faults[i];
3101 grp->total_faults -= p->total_numa_faults;
3103 grp->nr_tasks--;
3104 spin_unlock_irqrestore(&grp->lock, flags);
3105 RCU_INIT_POINTER(p->numa_group, NULL);
3110 p->numa_faults = NULL;
3113 p->total_numa_faults = 0;
3135 if (!p->mm)
3147 /* Allocate buffer to track faults on a per-node basis */
3148 if (unlikely(!p->numa_faults)) {
3149 int size = sizeof(*p->numa_faults) *
3152 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
3153 if (!p->numa_faults)
3156 p->total_numa_faults = 0;
3157 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
3164 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
3179 if (!priv && !local && ng && ng->active_nodes > 1 &&
3188 if (time_after(jiffies, p->numa_migrate_retry)) {
3194 p->numa_pages_migrated += pages;
3196 p->numa_faults_locality[2] += pages;
3198 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
3199 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
3200 p->numa_faults_locality[local] += pages;
3207 * p->mm->numa_scan_seq is written to without exclusive access
3213 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
3214 p->mm->numa_scan_offset = 0;
3226 if ((READ_ONCE(current->mm->numa_scan_seq) - vma->numab_state->start_scan_seq) < 2)
3229 pids = vma->numab_state->pids_active[0] | vma->numab_state->pids_active[1];
3230 if (test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids))
3235 * some VMAs may never be scanned in multi-threaded applications:
3237 if (mm->numa_scan_offset > vma->vm_start) {
3247 if (READ_ONCE(mm->numa_scan_seq) >
3248 (vma->numab_state->prev_scan_seq + get_nr_threads(current)))
3264 struct mm_struct *mm = p->mm;
3265 u64 runtime = p->se.sum_exec_runtime;
3276 work->next = work;
3280 * NOTE: make sure not to dereference p->mm before this check,
3282 * without p->mm even though we still had it when we enqueued this
3285 if (p->flags & PF_EXITING)
3288 if (!mm->numa_next_scan) {
3289 mm->numa_next_scan = now +
3296 migrate = mm->numa_next_scan;
3300 if (p->numa_scan_period == 0) {
3301 p->numa_scan_period_max = task_scan_max(p);
3302 p->numa_scan_period = task_scan_start(p);
3305 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
3306 if (!try_cmpxchg(&mm->numa_next_scan, &migrate, next_scan))
3313 p->node_stamp += 2 * TICK_NSEC;
3316 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
3333 start = mm->numa_scan_offset;
3345 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
3353 * hinting faults in read-only file-backed mappings or the vdso
3356 if (!vma->vm_mm ||
3357 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) {
3371 /* Initialise new per-VMA NUMAB state. */
3372 if (!vma->numab_state) {
3379 if (cmpxchg(&vma->numab_state, NULL, ptr)) {
3384 vma->numab_state->start_scan_seq = mm->numa_scan_seq;
3386 vma->numab_state->next_scan = now +
3390 vma->numab_state->pids_active_reset = vma->numab_state->next_scan +
3398 vma->numab_state->prev_scan_seq = mm->numa_scan_seq - 1;
3405 if (mm->numa_scan_seq && time_before(jiffies,
3406 vma->numab_state->next_scan)) {
3412 if (mm->numa_scan_seq &&
3413 time_after(jiffies, vma->numab_state->pids_active_reset)) {
3414 vma->numab_state->pids_active_reset = vma->numab_state->pids_active_reset +
3416 vma->numab_state->pids_active[0] = READ_ONCE(vma->numab_state->pids_active[1]);
3417 vma->numab_state->pids_active[1] = 0;
3421 if (vma->numab_state->prev_scan_seq == mm->numa_scan_seq) {
3422 mm->numa_scan_offset = vma->vm_end;
3438 start = max(start, vma->vm_start);
3440 end = min(end, vma->vm_end);
3446 * is not already pte-numa. If the VMA contains
3452 pages -= (end - start) >> PAGE_SHIFT;
3453 virtpages -= (end - start) >> PAGE_SHIFT;
3460 } while (end != vma->vm_end);
3463 vma->numab_state->prev_scan_seq = mm->numa_scan_seq;
3491 mm->numa_scan_offset = start;
3502 if (unlikely(p->se.sum_exec_runtime != runtime)) {
3503 u64 diff = p->se.sum_exec_runtime - runtime;
3504 p->node_stamp += 32 * diff;
3511 struct mm_struct *mm = p->mm;
3514 mm_users = atomic_read(&mm->mm_users);
3516 mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
3517 mm->numa_scan_seq = 0;
3520 p->node_stamp = 0;
3521 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0;
3522 p->numa_scan_period = sysctl_numa_balancing_scan_delay;
3523 p->numa_migrate_retry = 0;
3525 p->numa_work.next = &p->numa_work;
3526 p->numa_faults = NULL;
3527 p->numa_pages_migrated = 0;
3528 p->total_numa_faults = 0;
3529 RCU_INIT_POINTER(p->numa_group, NULL);
3530 p->last_task_numa_placement = 0;
3531 p->last_sum_exec_runtime = 0;
3533 init_task_work(&p->numa_work, task_numa_work);
3537 p->numa_preferred_nid = NUMA_NO_NODE;
3549 current->numa_scan_period * mm_users * NSEC_PER_MSEC);
3551 p->node_stamp = delay;
3560 struct callback_head *work = &curr->numa_work;
3566 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
3575 now = curr->se.sum_exec_runtime;
3576 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
3578 if (now > curr->node_stamp + period) {
3579 if (!curr->node_stamp)
3580 curr->numa_scan_period = task_scan_start(curr);
3581 curr->node_stamp += period;
3583 if (!time_before(jiffies, curr->mm->numa_next_scan))
3596 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
3605 * is pulled cross-node due to wakeups or load balancing.
3607 if (p->numa_scan_seq) {
3613 if (dst_nid == p->numa_preferred_nid ||
3614 (p->numa_preferred_nid != NUMA_NO_NODE &&
3615 src_nid != p->numa_preferred_nid))
3619 p->numa_scan_period = task_scan_start(p);
3642 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3644 update_load_add(&cfs_rq->load, se->load.weight);
3646 if (entity_is_task(se)) {
3649 account_numa_enqueue(rq, task_of(se));
3650 list_add(&se->group_node, &rq->cfs_tasks);
3653 cfs_rq->nr_running++;
3654 if (se_is_idle(se))
3655 cfs_rq->idle_nr_running++;
3659 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3661 update_load_sub(&cfs_rq->load, se->load.weight);
3663 if (entity_is_task(se)) {
3664 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3665 list_del_init(&se->group_node);
3668 cfs_rq->nr_running--;
3669 if (se_is_idle(se))
3670 cfs_rq->idle_nr_running--;
3676 * Explicitly do a load-store to ensure the intermediate value never hits
3696 * Explicitly do a load-store to ensure the intermediate value never hits
3704 res = var - val; \
3713 * A variant of sub_positive(), which does not use explicit load-store
3718 *ptr -= min_t(typeof(*ptr), *ptr, _val); \
3723 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3725 cfs_rq->avg.load_avg += se->avg.load_avg;
3726 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3730 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3732 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3733 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3735 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
3736 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3740 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3742 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3745 static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
3748 unsigned long old_weight = se->load.weight;
3756 * adjusted if re-weight at !0-lag point.
3759 * re-weight without changing vruntime at !0-lag point.
3761 * Weight VRuntime Avg-VRuntime
3765 * Since lag needs to be preserved through re-weight:
3767 * lag = (V - v)*w = (V'- v')*w', where v = v'
3768 * ==> V' = (V - v)*w/w' + v (1)
3773 * V' = (WV + w'v - wv) / (W + w' - w) (2)
3777 * (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v
3778 * ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v
3779 * ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v
3780 * ==> (V - v)*W/(W + w' - w) = (V - v)*w/w' (3)
3782 * Since we are doing this at a !0-lag point, which means V != v, we
3785 * ==> W / (W + w' - w) = w / w'
3786 * ==> Ww' = Ww + ww' - ww
3787 * ==> W * (w' - w) = w * (w' - w)
3788 * ==> W = w (re-weight indicates w' != w)
3792 * average vruntime @V, which means we will always re-weight
3793 * at the 0-lag point, thus breaching the assumption. Proof completed.
3796 * COROLLARY #2: Re-weight does NOT affect weighted average
3801 * (V - v)*w = (V' - v')*w'
3802 * ==> v' = V' - (V - v)*w/w' (4)
3806 * V' = (WV - wv + w'v') / (W - w + w')
3807 * = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w')
3808 * = (WV - wv + w'V' - Vw + wv) / (W - w + w')
3809 * = (WV + w'V' - Vw) / (W - w + w')
3811 * ==> V'*(W - w + w') = WV + w'V' - Vw
3812 * ==> V' * (W - w) = (W - w) * V (5)
3815 * always occurs at 0-lag point, so V won't change. Or else
3820 * So according to corollary #1 & #2, the effect of re-weight
3823 * v' = V' - (V - v) * w / w' (4)
3824 * = V - (V - v) * w / w'
3825 * = V - vl * w / w'
3826 * = V - vl'
3828 if (avruntime != se->vruntime) {
3829 vlag = entity_lag(avruntime, se);
3831 se->vruntime = avruntime - vlag;
3841 * d' = v' + (d - v)*w/w'
3842 * = V' - (V - v)*w/w' + (d - v)*w/w'
3843 * = V - (V - v)*w/w' + (d - v)*w/w'
3844 * = V + (d - V)*w/w'
3846 vslice = (s64)(se->deadline - avruntime);
3848 se->deadline = avruntime + vslice;
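
A compact restatement (a sketch assuming exact integer arithmetic, not the kernel code) of the two re-weight rules derived above: both the lag and the remaining virtual slice scale by w/w' around the unchanged average V.

#include <stdio.h>

static void toy_reweight(long long V, long long *v, long long *d,
			 long long w_old, long long w_new)
{
	long long vlag   = V - *v;	/* current virtual lag     */
	long long vslice = *d - V;	/* remaining virtual slice */

	*v = V - vlag   * w_old / w_new;	/* v' = V - (V - v) * w/w' */
	*d = V + vslice * w_old / w_new;	/* d' = V + (d - V) * w/w' */
}

int main(void)
{
	long long v = 80, d = 140, V = 100;

	/* Doubling the weight halves both the virtual lag and the slice. */
	toy_reweight(V, &v, &d, 1024, 2048);
	printf("v'=%lld d'=%lld\n", v, d);	/* v'=90 d'=120 */
	return 0;
}
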
3851 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
3854 bool curr = cfs_rq->curr == se;
3857 if (se->on_rq) {
3862 __dequeue_entity(cfs_rq, se);
3863 update_load_sub(&cfs_rq->load, se->load.weight);
3865 dequeue_load_avg(cfs_rq, se);
3867 if (se->on_rq) {
3868 reweight_eevdf(se, avruntime, weight);
3871 * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
3872 * we need to scale se->vlag when w_i changes.
3874 se->vlag = div_s64(se->vlag * se->load.weight, weight);
3877 update_load_set(&se->load, weight);
3881 u32 divider = get_pelt_divider(&se->avg);
3883 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3887 enqueue_load_avg(cfs_rq, se);
3888 if (se->on_rq) {
3889 update_load_add(&cfs_rq->load, se->load.weight);
3891 __enqueue_entity(cfs_rq, se);
3895 * whether the rq-wide min_vruntime needs to be updated too. Since
3897 * than up-to-date one, we do the update at the end of the
3906 struct sched_entity *se = &p->se;
3907 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3908 struct load_weight *load = &se->load;
3910 reweight_entity(cfs_rq, se, lw->weight);
3911 load->inv_weight = lw->inv_weight;
3925 * tg->weight * grq->load.weight
3926 * ge->load.weight = ----------------------------- (1)
3927 * \Sum grq->load.weight
3935 * grq->load.weight -> grq->avg.load_avg (2)
3939 * tg->weight * grq->avg.load_avg
3940 * ge->load.weight = ------------------------------ (3)
3941 * tg->load_avg
3943 * Where: tg->load_avg ~= \Sum grq->avg.load_avg
3947 * The problem with it is that because the average is slow -- it was designed
3948 * to be exactly that of course -- this leads to transients in boundary
3950 * one task. It takes time for our CPU's grq->avg.load_avg to build up,
3955 * tg->weight * grq->load.weight
3956 * ge->load.weight = ----------------------------- = tg->weight (4)
3957 * grq->load.weight
3964 * ge->load.weight =
3966 * tg->weight * grq->load.weight
3967 * --------------------------------------------------- (5)
3968 * tg->load_avg - grq->avg.load_avg + grq->load.weight
3970 * But because grq->load.weight can drop to 0, resulting in a divide by zero,
3971 * we need to use grq->avg.load_avg as its lower bound, which then gives:
3974 * ge->load.weight = tg->weight * grq->load.weight / tg_load_avg' (6)
3980 * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
3981 * max(grq->load.weight, grq->avg.load_avg)
3985 * overestimates the ge->load.weight and therefore:
3987 * \Sum ge->load.weight >= tg->weight
3994 struct task_group *tg = cfs_rq->tg;
3996 tg_shares = READ_ONCE(tg->shares);
3998 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
4000 tg_weight = atomic_long_read(&tg->load_avg);
4003 tg_weight -= cfs_rq->tg_load_avg_contrib;
4011 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
4012 * of a group with small tg->shares value. It is a floor value which is
4016 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
4017 * on an 8-core system with 8 tasks each runnable on one CPU shares has
4030 static void update_cfs_group(struct sched_entity *se)
4032 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
4042 shares = READ_ONCE(gcfs_rq->tg->shares);
4046 if (unlikely(se->load.weight != shares))
4047 reweight_entity(cfs_rq_of(se), se, shares);
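
A hedged, standalone rendering of approximation (6) above, mirroring the calc_group_shares() fragments visible here; the real function additionally clamps the result between MIN_SHARES and tg->shares:

#include <stdio.h>

static unsigned long toy_group_shares(unsigned long tg_shares,	  /* tg->shares */
				      unsigned long tg_load_avg,  /* tg->load_avg */
				      unsigned long grq_contrib,  /* grq's tg_load_avg_contrib */
				      unsigned long grq_weight,	  /* grq->load.weight */
				      unsigned long grq_load_avg) /* grq->avg.load_avg */
{
	unsigned long load = grq_weight > grq_load_avg ? grq_weight : grq_load_avg;
	unsigned long tg_weight;

	/* tg_load_avg' = tg->load_avg - contrib + max(weight, load_avg) */
	tg_weight = tg_load_avg - grq_contrib + load;

	return tg_weight ? tg_shares * load / tg_weight : tg_shares;
}

int main(void)
{
	/*
	 * A task group with 1024 shares whose only runnable work just woke on
	 * this CPU: the group entity ramps straight to the full share,
	 * matching boundary case (4) above.
	 */
	printf("%lu\n", toy_group_shares(1024, 0, 0, 1024, 0));
	return 0;
}
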
4051 static inline void update_cfs_group(struct sched_entity *se)
4060 if (&rq->cfs == cfs_rq) {
4070 * As is, the util number is not freq-invariant (we'd have to
4082 if (sa->load_sum)
4085 if (sa->util_sum)
4088 if (sa->runnable_sum)
4096 SCHED_WARN_ON(sa->load_avg ||
4097 sa->util_avg ||
4098 sa->runnable_avg);
4105 return u64_u32_load_copy(cfs_rq->avg.last_update_time,
4106 cfs_rq->last_update_time_copy);
4112 * bottom-up, we only have to test whether the cfs_rq before us on the list
4123 if (cfs_rq->on_list) {
4124 prev = cfs_rq->leaf_cfs_rq_list.prev;
4126 prev = rq->tmp_alone_branch;
4129 if (prev == &rq->leaf_cfs_rq_list)
4134 return (prev_cfs_rq->tg->parent == cfs_rq->tg);
4139 if (cfs_rq->load.weight)
4142 if (!load_avg_is_decayed(&cfs_rq->avg))
4152 * update_tg_load_avg - update the tg's load avg
4155 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
4156 * However, because tg->load_avg is a global value there are performance
4167 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
4172 if (cfs_rq->tg == &root_task_group)
4175 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
4176 atomic_long_add(delta, &cfs_rq->tg->load_avg);
4177 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
4183 * caller only guarantees p->pi_lock is held; no other assumptions,
4184 * including the state of rq->lock, should be made.
4186 void set_task_rq_fair(struct sched_entity *se,
4198 * getting what current time is, so simply throw away the out-of-date
4202 if (!(se->avg.last_update_time && prev))
4208 __update_load_avg_blocked_se(p_last_update_time, se);
4209 se->avg.last_update_time = n_last_update_time;
4217 * ge->avg == grq->avg (1)
4228 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
4233 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
4237 * ge->avg.runnable_avg == grq->avg.runnable_avg
4241 * ge->load.weight * grq->avg.load_avg
4242 * ge->avg.load_avg = ----------------------------------- (4)
4243 * grq->load.weight
4256 * Another reason this doesn't work is that runnable isn't a 0-sum entity.
4267 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
4274 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
4280 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4282 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
4290 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4293 divider = get_pelt_divider(&cfs_rq->avg);
4297 se->avg.util_avg = gcfs_rq->avg.util_avg;
4298 new_sum = se->avg.util_avg * divider;
4299 delta_sum = (long)new_sum - (long)se->avg.util_sum;
4300 se->avg.util_sum = new_sum;
4303 add_positive(&cfs_rq->avg.util_avg, delta_avg);
4304 add_positive(&cfs_rq->avg.util_sum, delta_sum);
4307 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
4308 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
4312 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4314 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
4322 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4325 divider = get_pelt_divider(&cfs_rq->avg);
4328 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
4329 new_sum = se->avg.runnable_avg * divider;
4330 delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
4331 se->avg.runnable_sum = new_sum;
4334 add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
4335 add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
4337 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
4338 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
4342 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4344 long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
4353 gcfs_rq->prop_runnable_sum = 0;
4356 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4359 divider = get_pelt_divider(&cfs_rq->avg);
4366 runnable_sum += se->avg.load_sum;
4373 if (scale_load_down(gcfs_rq->load.weight)) {
4374 load_sum = div_u64(gcfs_rq->avg.load_sum,
4375 scale_load_down(gcfs_rq->load.weight));
4378 /* But make sure to not inflate se's runnable */
4379 runnable_sum = min(se->avg.load_sum, load_sum);
4388 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
4391 load_sum = se_weight(se) * runnable_sum;
4394 delta_avg = load_avg - se->avg.load_avg;
4398 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
4400 se->avg.load_sum = runnable_sum;
4401 se->avg.load_avg = load_avg;
4402 add_positive(&cfs_rq->avg.load_avg, delta_avg);
4403 add_positive(&cfs_rq->avg.load_sum, delta_sum);
4405 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
4406 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
4411 cfs_rq->propagate = 1;
4412 cfs_rq->prop_runnable_sum += runnable_sum;
4416 static inline int propagate_entity_load_avg(struct sched_entity *se)
4420 if (entity_is_task(se))
4423 gcfs_rq = group_cfs_rq(se);
4424 if (!gcfs_rq->propagate)
4427 gcfs_rq->propagate = 0;
4429 cfs_rq = cfs_rq_of(se);
4431 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
4433 update_tg_cfs_util(cfs_rq, se, gcfs_rq);
4434 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
4435 update_tg_cfs_load(cfs_rq, se, gcfs_rq);
4438 trace_pelt_se_tp(se);
4447 static inline bool skip_blocked_update(struct sched_entity *se)
4449 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
4455 if (se->avg.load_avg || se->avg.util_avg)
4462 if (gcfs_rq->propagate)
4477 static inline int propagate_entity_load_avg(struct sched_entity *se)
4487 static inline void migrate_se_pelt_lag(struct sched_entity *se)
4494 if (load_avg_is_decayed(&se->avg))
4497 cfs_rq = cfs_rq_of(se);
4501 is_idle = is_idle_task(rcu_dereference(rq->curr));
4518 * - cfs->throttled_clock_pelt_time@cfs_rq_idle
4521 * = rq_clock_pelt()@rq_idle - rq_clock_pelt()@cfs_rq_idle
4524 * = sched_clock_cpu() - rq_clock()@rq_idle
4528 * now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time +
4529 * sched_clock_cpu() - rq_clock()@rq_idle
4531 * rq_clock_pelt()@rq_idle is rq->clock_pelt_idle
4532 * rq_clock()@rq_idle is rq->clock_idle
4533 * cfs->throttled_clock_pelt_time@cfs_rq_idle
4534 * is cfs_rq->throttled_pelt_idle
4538 throttled = u64_u32_load(cfs_rq->throttled_pelt_idle);
4543 now = u64_u32_load(rq->clock_pelt_idle);
4553 now -= throttled;
4556 * cfs_rq->avg.last_update_time is more recent than our
4561 now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);
4563 __update_load_avg_blocked_se(now, se);
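
The estimate assembled above boils down to a little arithmetic; a sketch with invented parameter names that ignores the split 64/32-bit load helpers:

#include <stdint.h>
#include <stdio.h>

/*
 * PELT time an idle CPU "would" show now, reconstructed without its rq lock:
 *   now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time
 *         + (sched_clock_cpu() - rq_clock()@rq_idle)
 */
static uint64_t toy_estimate_pelt_now(uint64_t clock_pelt_idle,
				      uint64_t throttled_pelt_idle,
				      uint64_t clock_idle,
				      uint64_t sched_clock_now)
{
	uint64_t now = clock_pelt_idle - throttled_pelt_idle;

	return now + (sched_clock_now - clock_idle);
}

int main(void)
{
	/* Idle snapshot at pelt=1000, wall=5000, 300 of it throttled; wall now 5200. */
	printf("%llu\n", (unsigned long long)
	       toy_estimate_pelt_now(1000, 300, 5000, 5200));	/* 900 */
	return 0;
}
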
4566 static void migrate_se_pelt_lag(struct sched_entity *se) {}
4570 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
4577 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
4581 * Since both these conditions indicate a changed cfs_rq->avg.load we should
4588 struct sched_avg *sa = &cfs_rq->avg;
4591 if (cfs_rq->removed.nr) {
4593 u32 divider = get_pelt_divider(&cfs_rq->avg);
4595 raw_spin_lock(&cfs_rq->removed.lock);
4596 swap(cfs_rq->removed.util_avg, removed_util);
4597 swap(cfs_rq->removed.load_avg, removed_load);
4598 swap(cfs_rq->removed.runnable_avg, removed_runnable);
4599 cfs_rq->removed.nr = 0;
4600 raw_spin_unlock(&cfs_rq->removed.lock);
4603 sub_positive(&sa->load_avg, r);
4604 sub_positive(&sa->load_sum, r * divider);
4605 /* See sa->util_sum below */
4606 sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
4609 sub_positive(&sa->util_avg, r);
4610 sub_positive(&sa->util_sum, r * divider);
4612 * Because of rounding, se->util_sum might end up being +1 more than
4613 * cfs->util_sum. Although this is not a problem by itself, detaching
4615 * util_avg (~1ms) can make cfs->util_sum becoming null whereas
4622 sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
4625 sub_positive(&sa->runnable_avg, r);
4626 sub_positive(&sa->runnable_sum, r * divider);
4627 /* See sa->util_sum above */
4628 sa->runnable_sum = max_t(u32, sa->runnable_sum,
4629 sa->runnable_avg * PELT_MIN_DIVIDER);
4636 -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
4642 u64_u32_store_copy(sa->last_update_time,
4643 cfs_rq->last_update_time_copy,
4644 sa->last_update_time);
4649 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
4651 * @se: sched_entity to attach
4654 * cfs_rq->avg.last_update_time being current.
4656 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4659 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4662 u32 divider = get_pelt_divider(&cfs_rq->avg);
4665 * When we attach the @se to the @cfs_rq, we must align the decay
4671 se->avg.last_update_time = cfs_rq->avg.last_update_time;
4672 se->avg.period_contrib = cfs_rq->avg.period_contrib;
4680 se->avg.util_sum = se->avg.util_avg * divider;
4682 se->avg.runnable_sum = se->avg.runnable_avg * divider;
4684 se->avg.load_sum = se->avg.load_avg * divider;
4685 if (se_weight(se) < se->avg.load_sum)
4686 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
4688 se->avg.load_sum = 1;
4690 enqueue_load_avg(cfs_rq, se);
4691 cfs_rq->avg.util_avg += se->avg.util_avg;
4692 cfs_rq->avg.util_sum += se->avg.util_sum;
4693 cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
4694 cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
4696 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
4704 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
4706 * @se: sched_entity to detach
4709 * cfs_rq->avg.last_update_time being current.
4711 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4713 dequeue_load_avg(cfs_rq, se);
4714 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4715 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
4717 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
4718 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
4720 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
4721 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
4723 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
4724 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
4726 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
4742 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4751 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
4752 __update_load_avg_se(now, cfs_rq, se);
4755 decayed |= propagate_entity_load_avg(se);
4757 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
4766 attach_entity_load_avg(cfs_rq, se);
4774 detach_entity_load_avg(cfs_rq, se);
4788 static void sync_entity_load_avg(struct sched_entity *se)
4790 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4794 __update_load_avg_blocked_se(last_update_time, se);
4801 static void remove_entity_load_avg(struct sched_entity *se)
4803 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4807 * tasks cannot exit without having gone through wake_up_new_task() ->
4812 sync_entity_load_avg(se);
4814 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
4815 ++cfs_rq->removed.nr;
4816 cfs_rq->removed.util_avg += se->avg.util_avg;
4817 cfs_rq->removed.load_avg += se->avg.load_avg;
4818 cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
4819 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
4824 return cfs_rq->avg.runnable_avg;
4829 return cfs_rq->avg.load_avg;
4836 return READ_ONCE(p->se.avg.util_avg);
4841 struct util_est ue = READ_ONCE(p->se.avg.util_est);
4860 enqueued = cfs_rq->avg.util_est.enqueued;
4862 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4876 enqueued = cfs_rq->avg.util_est.enqueued;
4877 enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
4878 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
4889 * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
4895 return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
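
That identity folds both sides of the abs() test into a single unsigned comparison; a quick standalone check:

#include <stdio.h>

static int toy_within_margin(int value, unsigned int margin)
{
	/* abs(value) < margin, without a branch on the sign of value */
	return (unsigned int)(value + margin - 1) < (2 * margin - 1);
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_within_margin(0, 4),		/* 1: |0| < 4   */
	       toy_within_margin(-3, 4),	/* 1: |-3| < 4  */
	       toy_within_margin(5, 4));	/* 0: |5| >= 4  */
	return 0;
}
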
4919 ue = p->se.avg.util_est;
4941 last_ewma_diff = ue.enqueued - ue.ewma;
4942 last_enqueued_diff -= ue.enqueued;
4965 * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1)
4966 * = w * task_util(p) + ewma(t-1) - w * ewma(t-1)
4967 * = w * (task_util(p) - ewma(t-1)) + ewma(t-1)
4968 * = w * ( last_ewma_diff ) + ewma(t-1)
4969 * = w * (last_ewma_diff + ewma(t-1) / w)
4979 WRITE_ONCE(p->se.avg.util_est, ue);
4981 trace_sched_util_est_se_tp(&p->se);
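
A sketch of the shift-based filter the equations above describe, assuming a weight of w = 1/4; TOY_EWMA_SHIFT stands in for the kernel's weight shift (UTIL_EST_WEIGHT_SHIFT) and is not a kernel symbol:

#include <stdio.h>

#define TOY_EWMA_SHIFT	2	/* w = 1/4 */

static unsigned int toy_util_est_ewma(unsigned int ewma, unsigned int task_util)
{
	int diff = (int)task_util - (int)ewma;

	/* ewma(t) = (ewma(t-1) * 4 + (task_util - ewma(t-1))) / 4 */
	return (unsigned int)(((int)(ewma << TOY_EWMA_SHIFT) + diff) >> TOY_EWMA_SHIFT);
}

int main(void)
{
	unsigned int ewma = 0;

	/* Converges geometrically towards a task that settles around util 400. */
	for (int i = 0; i < 6; i++) {
		ewma = toy_util_est_ewma(ewma, 400);
		printf("%u\n", ewma);
	}
	return 0;
}
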
5022 capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
5098 return -1;
5120 if (!p || p->nr_cpus_allowed == 1) {
5121 rq->misfit_task_load = 0;
5126 rq->misfit_task_load = 0;
5134 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
5141 return !cfs_rq->nr_running;
5149 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
5154 static inline void remove_entity_load_avg(struct sched_entity *se) {}
5157 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5159 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5180 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5185 se->slice = sysctl_sched_base_slice;
5186 vslice = calc_delta_fair(se->slice, se);
5196 if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
5197 struct sched_entity *curr = cfs_rq->curr;
5200 lag = se->vlag;
5210 * lag_i = S - s_i = w_i * (V - v_i)
5215 * vl_i = V - v_i <=> v_i = V - vl_i
5227 * = (W*V + w_i*(V - vl_i)) / (W + w_i)
5228 * = (W*V + w_i*V - w_i*vl_i) / (W + w_i)
5229 * = (V*(W + w_i) - w_i*l) / (W + w_i)
5230 * = V - w_i*vl_i / (W + w_i)
5234 * vl'_i = V' - v_i
5235 * = V - w_i*vl_i / (W + w_i) - (V - vl_i)
5236 * = vl_i - w_i*vl_i / (W + w_i)
5246 * vl'_i = vl_i - w_i*vl_i / (W + w_i)
5247 * = ((W + w_i)*vl_i - w_i*vl_i) / (W + w_i)
5249 * (W + w_i)*vl'_i = (W + w_i)*vl_i - w_i*vl_i
5254 load = cfs_rq->avg_load;
5255 if (curr && curr->on_rq)
5256 load += scale_load_down(curr->load.weight);
5258 lag *= load + scale_load_down(se->load.weight);
5264 se->vruntime = vruntime - lag;
5277 se->deadline = se->vruntime + vslice;
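
The compensation this derivation arrives at fits in one expression; a sketch with invented names, where W is the pre-insertion weight sum and w_i the incoming entity's weight:

#include <stdio.h>

/*
 * To be left with lag vl_i once the entity is inserted (insertion itself
 * drags the average V towards v_i), place it with the inflated lag
 *   vl'_i = vl_i * (W + w_i) / W
 */
static long long toy_place_lag(long long vlag, unsigned long W, unsigned long w_i)
{
	if (!W)
		return vlag;

	return vlag * (long long)(W + w_i) / (long long)W;
}

int main(void)
{
	/*
	 * Re-joining with 6 units of positive lag against an equally-weighted
	 * runqueue of three tasks: placed with 8 so that 6 remain afterwards.
	 */
	printf("%lld\n", toy_place_lag(6, 3 * 1024, 1024));
	return 0;
}
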
5286 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5288 bool curr = cfs_rq->curr == se;
5295 place_entity(cfs_rq, se, flags);
5301 * - Update loads to have both entity and cfs_rq synced with now.
5302 * - For group_entity, update its runnable_weight to reflect the new
5304 * - For group_entity, update its weight to reflect the new share of
5306 * - Add its new weight to cfs_rq->load.weight
5308 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
5309 se_update_runnable(se);
5312 * but update_cfs_group() here will re-adjust the weight and have to
5315 update_cfs_group(se);
5318 * XXX now that the entity has been re-weighted, and its lag adjusted,
5322 place_entity(cfs_rq, se, flags);
5324 account_entity_enqueue(cfs_rq, se);
5328 se->exec_start = 0;
5331 update_stats_enqueue_fair(cfs_rq, se, flags);
5333 __enqueue_entity(cfs_rq, se);
5334 se->on_rq = 1;
5336 if (cfs_rq->nr_running == 1) {
5344 if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
5345 cfs_rq->throttled_clock = rq_clock(rq);
5346 if (!cfs_rq->throttled_clock_self)
5347 cfs_rq->throttled_clock_self = rq_clock(rq);
5353 static void __clear_buddies_next(struct sched_entity *se)
5355 for_each_sched_entity(se) {
5356 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5357 if (cfs_rq->next != se)
5360 cfs_rq->next = NULL;
5364 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
5366 if (cfs_rq->next == se)
5367 __clear_buddies_next(se);
5373 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5377 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
5381 * Update run-time statistics of the 'current'.
5387 * - Update loads to have both entity and cfs_rq synced with now.
5388 * - For group_entity, update its runnable_weight to reflect the new
5390 * - Subtract its previous weight from cfs_rq->load.weight.
5391 * - For group entity, update its weight to reflect the new share
5394 update_load_avg(cfs_rq, se, action);
5395 se_update_runnable(se);
5397 update_stats_dequeue_fair(cfs_rq, se, flags);
5399 clear_buddies(cfs_rq, se);
5401 update_entity_lag(cfs_rq, se);
5402 if (se != cfs_rq->curr)
5403 __dequeue_entity(cfs_rq, se);
5404 se->on_rq = 0;
5405 account_entity_dequeue(cfs_rq, se);
5410 update_cfs_group(se);
5413 * Now advance min_vruntime if @se was the entity holding it back,
5416 * further than we started -- i.e. we'll be penalized.
5421 if (cfs_rq->nr_running == 0)
5426 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
5428 clear_buddies(cfs_rq, se);
5431 if (se->on_rq) {
5437 update_stats_wait_end_fair(cfs_rq, se);
5438 __dequeue_entity(cfs_rq, se);
5439 update_load_avg(cfs_rq, se, UPDATE_TG);
5444 se->vlag = se->deadline;
5447 update_stats_curr_start(cfs_rq, se);
5448 cfs_rq->curr = se;
5453 * when there are only lesser-weight tasks around):
5456 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
5459 stats = __schedstats_from_se(se);
5460 __schedstat_set(stats->slice_max,
5461 max((u64)stats->slice_max,
5462 se->sum_exec_runtime - se->prev_sum_exec_runtime));
5465 se->prev_sum_exec_runtime = se->sum_exec_runtime;
5482 cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
5483 return cfs_rq->next;
5496 if (prev->on_rq)
5502 if (prev->on_rq) {
5509 cfs_rq->curr = NULL;
5516 * Update run-time statistics of the 'current'.
5539 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
5594 * directly instead of rq->clock to avoid adding additional synchronization
5595 * around rq->lock.
5597 * requires cfs_b->lock
5603 if (unlikely(cfs_b->quota == RUNTIME_INF))
5606 cfs_b->runtime += cfs_b->quota;
5607 runtime = cfs_b->runtime_snap - cfs_b->runtime;
5609 cfs_b->burst_time += runtime;
5610 cfs_b->nr_burst++;
5613 cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
5614 cfs_b->runtime_snap = cfs_b->runtime;
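
In effect the refill above is a token bucket whose depth is quota + burst; a minimal sketch with simplified fields (the burst-time statistics are omitted):

#include <stdio.h>

struct toy_cfs_b {
	unsigned long long quota;	/* added every period */
	unsigned long long burst;	/* extra headroom that may accumulate */
	unsigned long long runtime;	/* tokens currently available */
};

static void toy_refill(struct toy_cfs_b *b)
{
	b->runtime += b->quota;
	if (b->runtime > b->quota + b->burst)
		b->runtime = b->quota + b->burst;
}

int main(void)
{
	struct toy_cfs_b b = { .quota = 100, .burst = 50, .runtime = 80 };

	toy_refill(&b);			/* 80 + 100, clamped to 150 */
	printf("%llu\n", b.runtime);
	return 0;
}
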
5619 return &tg->cfs_bandwidth;
5628 lockdep_assert_held(&cfs_b->lock);
5631 min_amount = target_runtime - cfs_rq->runtime_remaining;
5633 if (cfs_b->quota == RUNTIME_INF)
5638 if (cfs_b->runtime > 0) {
5639 amount = min(cfs_b->runtime, min_amount);
5640 cfs_b->runtime -= amount;
5641 cfs_b->idle = 0;
5645 cfs_rq->runtime_remaining += amount;
5647 return cfs_rq->runtime_remaining > 0;
5653 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5656 raw_spin_lock(&cfs_b->lock);
5658 raw_spin_unlock(&cfs_b->lock);
5666 cfs_rq->runtime_remaining -= delta_exec;
5668 if (likely(cfs_rq->runtime_remaining > 0))
5671 if (cfs_rq->throttled)
5677 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
5684 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
5692 return cfs_bandwidth_used() && cfs_rq->throttled;
5698 return cfs_bandwidth_used() && cfs_rq->throttle_count;
5704 * load-balance operations.
5711 src_cfs_rq = tg->cfs_rq[src_cpu];
5712 dest_cfs_rq = tg->cfs_rq[dest_cpu];
5721 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5723 cfs_rq->throttle_count--;
5724 if (!cfs_rq->throttle_count) {
5725 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
5726 cfs_rq->throttled_clock_pelt;
5732 if (cfs_rq->throttled_clock_self) {
5733 u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
5735 cfs_rq->throttled_clock_self = 0;
5740 cfs_rq->throttled_clock_self_time += delta;
5750 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5753 if (!cfs_rq->throttle_count) {
5754 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
5757 SCHED_WARN_ON(cfs_rq->throttled_clock_self);
5758 if (cfs_rq->nr_running)
5759 cfs_rq->throttled_clock_self = rq_clock(rq);
5761 cfs_rq->throttle_count++;
5769 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5770 struct sched_entity *se;
5773 raw_spin_lock(&cfs_b->lock);
5786 list_add_tail_rcu(&cfs_rq->throttled_list,
5787 &cfs_b->throttled_cfs_rq);
5789 raw_spin_unlock(&cfs_b->lock);
5794 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5798 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
5801 task_delta = cfs_rq->h_nr_running;
5802 idle_task_delta = cfs_rq->idle_h_nr_running;
5803 for_each_sched_entity(se) {
5804 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5805 /* throttled entity or throttle-on-deactivate */
5806 if (!se->on_rq)
5809 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
5811 if (cfs_rq_is_idle(group_cfs_rq(se)))
5812 idle_task_delta = cfs_rq->h_nr_running;
5814 qcfs_rq->h_nr_running -= task_delta;
5815 qcfs_rq->idle_h_nr_running -= idle_task_delta;
5817 if (qcfs_rq->load.weight) {
5818 /* Avoid re-evaluating load for this entity: */
5819 se = parent_entity(se);
5824 for_each_sched_entity(se) {
5825 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5826 /* throttled entity or throttle-on-deactivate */
5827 if (!se->on_rq)
5830 update_load_avg(qcfs_rq, se, 0);
5831 se_update_runnable(se);
5833 if (cfs_rq_is_idle(group_cfs_rq(se)))
5834 idle_task_delta = cfs_rq->h_nr_running;
5836 qcfs_rq->h_nr_running -= task_delta;
5837 qcfs_rq->idle_h_nr_running -= idle_task_delta;
5840 /* At this point se is NULL and we are at root level */
5846 * throttled-list. rq->lock protects completion.
5848 cfs_rq->throttled = 1;
5849 SCHED_WARN_ON(cfs_rq->throttled_clock);
5850 if (cfs_rq->nr_running)
5851 cfs_rq->throttled_clock = rq_clock(rq);
5858 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5859 struct sched_entity *se;
5862 se = cfs_rq->tg->se[cpu_of(rq)];
5864 cfs_rq->throttled = 0;
5868 raw_spin_lock(&cfs_b->lock);
5869 if (cfs_rq->throttled_clock) {
5870 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5871 cfs_rq->throttled_clock = 0;
5873 list_del_rcu(&cfs_rq->throttled_list);
5874 raw_spin_unlock(&cfs_b->lock);
5877 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
5879 if (!cfs_rq->load.weight) {
5880 if (!cfs_rq->on_list)
5886 for_each_sched_entity(se) {
5887 if (list_add_leaf_cfs_rq(cfs_rq_of(se)))
5893 task_delta = cfs_rq->h_nr_running;
5894 idle_task_delta = cfs_rq->idle_h_nr_running;
5895 for_each_sched_entity(se) {
5896 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5898 if (se->on_rq)
5900 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
5902 if (cfs_rq_is_idle(group_cfs_rq(se)))
5903 idle_task_delta = cfs_rq->h_nr_running;
5905 qcfs_rq->h_nr_running += task_delta;
5906 qcfs_rq->idle_h_nr_running += idle_task_delta;
5913 for_each_sched_entity(se) {
5914 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5916 update_load_avg(qcfs_rq, se, UPDATE_TG);
5917 se_update_runnable(se);
5919 if (cfs_rq_is_idle(group_cfs_rq(se)))
5920 idle_task_delta = cfs_rq->h_nr_running;
5922 qcfs_rq->h_nr_running += task_delta;
5923 qcfs_rq->idle_h_nr_running += idle_task_delta;
5930 /* At this point se is NULL and we are at root level */
5937 if (rq->curr == rq->idle && rq->cfs.nr_running)
5967 list_for_each_entry_safe(cursor, tmp, &rq->cfsb_csd_list,
5969 list_del_init(&cursor->throttled_csd_list);
5992 if (SCHED_WARN_ON(!list_empty(&cfs_rq->throttled_csd_list)))
5995 first = list_empty(&rq->cfsb_csd_list);
5996 list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
5998 smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
6012 cfs_rq->runtime_remaining <= 0))
6029 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
6044 if (!list_empty(&cfs_rq->throttled_csd_list))
6049 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
6051 raw_spin_lock(&cfs_b->lock);
6052 runtime = -cfs_rq->runtime_remaining + 1;
6053 if (runtime > cfs_b->runtime)
6054 runtime = cfs_b->runtime;
6055 cfs_b->runtime -= runtime;
6056 remaining = cfs_b->runtime;
6057 raw_spin_unlock(&cfs_b->lock);
6059 cfs_rq->runtime_remaining += runtime;
6062 if (cfs_rq->runtime_remaining > 0) {
6091 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
6099 if (cfs_b->quota == RUNTIME_INF)
6102 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
6103 cfs_b->nr_periods += overrun;
6105 /* Refill extra burst quota even if cfs_b->idle */
6112 if (cfs_b->idle && !throttled)
6117 cfs_b->idle = 1;
6122 cfs_b->nr_throttled += overrun;
6125 * This check is repeated as we release cfs_b->lock while we unthrottle.
6127 while (throttled && cfs_b->runtime > 0) {
6128 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6129 /* we can't nest cfs_b->lock while distributing bandwidth */
6131 raw_spin_lock_irqsave(&cfs_b->lock, flags);
6140 cfs_b->idle = 0;
6158 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
6164 struct hrtimer *refresh_timer = &cfs_b->period_timer;
6167 /* if the call-back is running, a quota refresh is already occurring */
6188 if (cfs_b->slack_started)
6190 cfs_b->slack_started = true;
6192 hrtimer_start(&cfs_b->slack_timer,
6200 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
6201 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
6206 raw_spin_lock(&cfs_b->lock);
6207 if (cfs_b->quota != RUNTIME_INF) {
6208 cfs_b->runtime += slack_runtime;
6210 /* we are under rq->lock, defer unthrottling using a timer */
6211 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
6212 !list_empty(&cfs_b->throttled_cfs_rq))
6215 raw_spin_unlock(&cfs_b->lock);
6218 cfs_rq->runtime_remaining -= slack_runtime;
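A sketch of returning surplus local runtime to the global pool when a cfs_rq empties, as in the slack path above. The minimum kept locally and the distribution slice are assumed example values.

#include <stdint.h>
#include <stdbool.h>

#define RUNTIME_INF  (~0ULL)
#define MIN_LOCAL_NS 1000000ULL   /* keep ~1ms locally (assumed value) */
#define SLICE_NS     5000000ULL   /* distribution slice (assumed value) */

struct global_pool2 { uint64_t quota; uint64_t runtime; bool have_throttled; };

/* Returns true when enough runtime piled up to be worth kicking the
 * slack timer so throttled cfs_rqs can be unthrottled. */
static bool return_slack(struct global_pool2 *g, int64_t *local_remaining)
{
    int64_t slack = *local_remaining - (int64_t)MIN_LOCAL_NS;
    bool kick = false;

    if (slack <= 0)
        return false;

    if (g->quota != RUNTIME_INF) {
        g->runtime += (uint64_t)slack;
        if (g->runtime > SLICE_NS && g->have_throttled)
            kick = true;
    }
    *local_remaining -= slack;
    return kick;
}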
6226 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
6234 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
6242 raw_spin_lock_irqsave(&cfs_b->lock, flags);
6243 cfs_b->slack_started = false;
6246 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6250 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
6251 runtime = cfs_b->runtime;
6253 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6264 * runtime as update_curr() throttling cannot trigger until it's on-rq.
6271 /* an active group must be handled by the update_curr()->put() path */
6272 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
6281 if (cfs_rq->runtime_remaining <= 0)
6292 if (!tg->parent)
6295 cfs_rq = tg->cfs_rq[cpu];
6296 pcfs_rq = tg->parent->cfs_rq[cpu];
6298 cfs_rq->throttle_count = pcfs_rq->throttle_count;
6299 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
6308 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
6342 raw_spin_lock_irqsave(&cfs_b->lock, flags);
6344 overrun = hrtimer_forward_now(timer, cfs_b->period);
6351 u64 new, old = ktime_to_ns(cfs_b->period);
6360 cfs_b->period = ns_to_ktime(new);
6361 cfs_b->quota *= 2;
6362 cfs_b->burst *= 2;
6368 div_u64(cfs_b->quota, NSEC_PER_USEC));
6374 div_u64(cfs_b->quota, NSEC_PER_USEC));
6382 cfs_b->period_active = 0;
6383 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6390 raw_spin_lock_init(&cfs_b->lock);
6391 cfs_b->runtime = 0;
6392 cfs_b->quota = RUNTIME_INF;
6393 cfs_b->period = ns_to_ktime(default_cfs_period());
6394 cfs_b->burst = 0;
6395 cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF;
6397 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
6398 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
6399 cfs_b->period_timer.function = sched_cfs_period_timer;
6402 hrtimer_set_expires(&cfs_b->period_timer,
6403 get_random_u32_below(cfs_b->period));
6404 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6405 cfs_b->slack_timer.function = sched_cfs_slack_timer;
6406 cfs_b->slack_started = false;
6411 cfs_rq->runtime_enabled = 0;
6412 INIT_LIST_HEAD(&cfs_rq->throttled_list);
6414 INIT_LIST_HEAD(&cfs_rq->throttled_csd_list);
6420 lockdep_assert_held(&cfs_b->lock);
6422 if (cfs_b->period_active)
6425 cfs_b->period_active = 1;
6426 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
6427 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
6435 if (!cfs_b->throttled_cfs_rq.next)
6438 hrtimer_cancel(&cfs_b->period_timer);
6439 hrtimer_cancel(&cfs_b->slack_timer);
6456 if (list_empty(&rq->cfsb_csd_list))
6482 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
6483 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6485 raw_spin_lock(&cfs_b->lock);
6486 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
6487 raw_spin_unlock(&cfs_b->lock);
6508 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6510 if (!cfs_rq->runtime_enabled)
6517 cfs_rq->runtime_remaining = 1;
6522 cfs_rq->runtime_enabled = 0;
6539 if (cfs_rq->runtime_enabled ||
6540 tg_cfs_bandwidth(cfs_rq->tg)->hierarchical_quota != RUNTIME_INF)
6558 if (rq->nr_running != 1)
6632 struct sched_entity *se = &p->se;
6636 if (rq->cfs.h_nr_running > 1) {
6637 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
6638 u64 slice = se->slice;
6639 s64 delta = slice - ran;
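A sketch of the slice bookkeeping above: how much of the current slice remains, which is the interval the HR tick would be armed for; a non-positive result means the task already overran its slice.

#include <stdint.h>

static int64_t slice_remaining(uint64_t sum_exec, uint64_t prev_sum_exec,
                               uint64_t slice)
{
    uint64_t ran = sum_exec - prev_sum_exec;   /* runtime consumed in this slice */

    return (int64_t)slice - (int64_t)ran;      /* e.g. 6ms slice, 4.5ms ran -> 1.5ms */
}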
6657 struct task_struct *curr = rq->curr;
6659 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
6696 WRITE_ONCE(rd->overutilized, status);
6709 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
6710 set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
6719 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
6720 rq->nr_running);
6739 struct sched_entity *se = &p->se;
6749 util_est_enqueue(&rq->cfs, p);
6756 if (p->in_iowait)
6759 for_each_sched_entity(se) {
6760 if (se->on_rq)
6762 cfs_rq = cfs_rq_of(se);
6763 enqueue_entity(cfs_rq, se, flags);
6765 cfs_rq->h_nr_running++;
6766 cfs_rq->idle_h_nr_running += idle_h_nr_running;
6778 for_each_sched_entity(se) {
6779 cfs_rq = cfs_rq_of(se);
6781 update_load_avg(cfs_rq, se, UPDATE_TG);
6782 se_update_runnable(se);
6783 update_cfs_group(se);
6785 cfs_rq->h_nr_running++;
6786 cfs_rq->idle_h_nr_running += idle_h_nr_running;
6796 /* At this point se is NULL and we are at root level */
6822 static void set_next_buddy(struct sched_entity *se);
6832 struct sched_entity *se = &p->se;
6837 util_est_dequeue(&rq->cfs, p);
6839 for_each_sched_entity(se) {
6840 cfs_rq = cfs_rq_of(se);
6841 dequeue_entity(cfs_rq, se, flags);
6843 cfs_rq->h_nr_running--;
6844 cfs_rq->idle_h_nr_running -= idle_h_nr_running;
6854 if (cfs_rq->load.weight) {
6855 /* Avoid re-evaluating load for this entity: */
6856 se = parent_entity(se);
6861 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
6862 set_next_buddy(se);
6868 for_each_sched_entity(se) {
6869 cfs_rq = cfs_rq_of(se);
6871 update_load_avg(cfs_rq, se, UPDATE_TG);
6872 se_update_runnable(se);
6873 update_cfs_group(se);
6875 cfs_rq->h_nr_running--;
6876 cfs_rq->idle_h_nr_running -= idle_h_nr_running;
6887 /* At this point se is NULL and we are at root level */
6892 rq->next_balance = jiffies;
6895 util_est_update(&rq->cfs, p, task_sleep);
6921 return cfs_rq_load_avg(&rq->cfs);
6925 * cpu_load_without - compute CPU load without any contributions from *p
6943 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6946 cfs_rq = &rq->cfs;
6947 load = READ_ONCE(cfs_rq->avg.load_avg);
6957 return cfs_rq_runnable_avg(&rq->cfs);
6966 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6969 cfs_rq = &rq->cfs;
6970 runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
6973 lsub_positive(&runnable, p->se.avg.runnable_avg);
6980 return cpu_rq(cpu)->cpu_capacity;
6989 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
6990 current->wakee_flips >>= 1;
6991 current->wakee_flip_decay_ts = jiffies;
6994 if (current->last_wakee != p) {
6995 current->last_wakee = p;
6996 current->wakee_flips++;
7001 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
7011 * non-monogamous, with partner count exceeding socket size.
7019 unsigned int master = current->wakee_flips;
7020 unsigned int slave = p->wakee_flips;
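A sketch of the switching-frequency decision built on the flip counters above: both partners must be flipping often, scaled by a factor, before the wakeup is treated as "wide" (skip the affine fast path). Using the LLC size as that factor is an assumption of this sketch.

#include <stdbool.h>

static bool wake_is_wide(unsigned int waker_flips, unsigned int wakee_flips,
                         unsigned int llc_size)
{
    unsigned int master = waker_flips, slave = wakee_flips;

    if (master < slave) {                 /* order so master >= slave */
        unsigned int tmp = master;
        master = slave;
        slave = tmp;
    }

    /* monogamous or mildly flipping pairs stay on the affine path */
    if (slave < llc_size || master < slave * llc_size)
        return false;
    return true;
}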
7035 * wake_affine_idle() - only considers 'now', it checks if the waking CPU is
7036 * cache-affine and is (or will be) idle.
7038 * wake_affine_weight() - considers the weight to reflect the average
7060 if (sync && cpu_rq(this_cpu)->nr_running == 1)
7084 this_eff_load -= current_load;
7095 prev_eff_load -= task_load;
7097 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
7123 schedstat_inc(p->stats.nr_wakeups_affine_attempts);
7127 schedstat_inc(sd->ttwu_move_affine);
7128 schedstat_inc(p->stats.nr_wakeups_affine);
7136 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
7145 int shallowest_idle_cpu = -1;
7149 if (group->group_weight == 1)
7153 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
7164 if (idle && idle->exit_latency < min_exit_latency) {
7170 min_exit_latency = idle->exit_latency;
7171 latest_idle_timestamp = rq->idle_stamp;
7173 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
7174 rq->idle_stamp > latest_idle_timestamp) {
7180 latest_idle_timestamp = rq->idle_stamp;
7183 } else if (shallowest_idle_cpu == -1) {
7192 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
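A compact sketch of the selection order above, with per-CPU data modeled as a plain array: prefer the idle CPU in the shallowest exit-latency state, break ties by most recent idle timestamp, and fall back to the least-loaded CPU only when nothing is idle.

#include <stdint.h>

struct cpu_snap {
    int           is_idle;
    unsigned int  exit_latency;   /* cost of leaving the current idle state */
    uint64_t      idle_stamp;     /* when the CPU last went idle */
    unsigned long load;
};

static int pick_idlest_cpu(const struct cpu_snap *cs, int nr)
{
    unsigned int min_exit = ~0u;
    uint64_t latest_idle = 0;
    unsigned long least_load = ~0ul;
    int shallowest = -1, least_loaded = -1, i;

    for (i = 0; i < nr; i++) {
        if (cs[i].is_idle && cs[i].exit_latency < min_exit) {
            min_exit = cs[i].exit_latency;
            latest_idle = cs[i].idle_stamp;
            shallowest = i;
        } else if (cs[i].is_idle && cs[i].exit_latency == min_exit &&
                   cs[i].idle_stamp > latest_idle) {
            latest_idle = cs[i].idle_stamp;   /* equally shallow: most recent wins */
            shallowest = i;
        } else if (shallowest == -1 && cs[i].load < least_load) {
            least_load = cs[i].load;          /* only matters if nothing is idle */
            least_loaded = i;
        }
    }
    return shallowest != -1 ? shallowest : least_loaded;
}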
7200 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
7208 sync_entity_load_avg(&p->se);
7215 if (!(sd->flags & sd_flag)) {
7216 sd = sd->child;
7222 sd = sd->child;
7229 sd = sd->child;
7235 weight = sd->span_weight;
7238 if (weight <= tmp->span_weight)
7240 if (tmp->flags & sd_flag)
7254 return -1;
7267 WRITE_ONCE(sds->has_idle_cores, val);
7276 return READ_ONCE(sds->has_idle_cores);
7283 * information in sd_llc_shared->has_idle_cores.
7313 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
7323 if (*idle_cpu == -1) {
7332 if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus))
7340 return -1;
7350 for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
7363 return -1;
7384 return -1;
7391 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
7392 * average idle time for this rq (as found in rq->avg_idle).
7397 int i, cpu, idle_cpu = -1, nr = INT_MAX;
7404 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
7412 return -1;
7419 if (unlikely(this_rq->wake_stamp < now)) {
7420 while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
7421 this_rq->wake_stamp++;
7422 this_rq->wake_avg_idle >>= 1;
7426 avg_idle = this_rq->wake_avg_idle;
7427 avg_cost = this_sd->avg_scan_cost + 1;
7429 span_avg = sd->span_weight * avg_idle;
7441 /* because !--nr is the condition to stop the scan */
7442 nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
7445 return -1;
7456 if (!--nr)
7457 return -1;
7468 time = cpu_clock(this) - time;
7474 this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
7476 update_avg(&this_sd->avg_scan_cost, time);
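A sketch of the proportional scan-depth idea above: scan a number of CPUs proportional to the expected idle time divided by the average cost of probing one CPU. The floor of four CPUs is an assumption of this sketch.

#include <stdint.h>

static unsigned int scan_depth(uint64_t avg_idle_ns, uint64_t avg_scan_cost_ns,
                               unsigned int span_weight)
{
    uint64_t avg_cost = avg_scan_cost_ns + 1;              /* avoid div by zero */
    uint64_t span_avg = (uint64_t)span_weight * avg_idle_ns;

    if (span_avg > 4 * avg_cost)
        return (unsigned int)(span_avg / avg_cost);
    return 4;                                              /* minimal effort */
}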
7492 int cpu, best_cpu = -1;
7496 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
7518 cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
7521 * First, select CPU which fits better (-1 being better than 0).
7565 sync_entity_load_avg(&p->se);
7572 * per-cpu select_rq_mask usage
7589 * Allow a per-cpu kthread to stack with the wakee if the
7592 * per-cpu kthread that is now complete and the wakeup is
7599 this_rq()->nr_running <= 1 &&
7605 recent_used_cpu = p->recent_used_cpu;
7606 p->recent_used_cpu = prev;
7611 cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) &&
7658 * cpu_util() - Estimates the amount of CPU capacity used by CFS tasks.
7661 * @dst_cpu: CPU @p migrates to, -1 if @p moves from @cpu or @p == NULL
7668 * recent utilization of currently non-runnable tasks on that CPU.
7676 * previously-executed tasks, which helps better deduce how busy a CPU will
7677 * be when a long-sleeping task wakes up. The contribution to CPU utilization
7691 * could be seen as over-utilized even though CPU1 has 20% of spare CPU
7694 * after task migrations (scheduler-driven DVFS).
7701 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
7702 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
7706 runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
7711 * If @dst_cpu is -1 or @p migrates from @cpu to @dst_cpu remove its
7724 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
7727 * During wake-up @p isn't enqueued yet and doesn't contribute
7728 * to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
7732 * During exec (@dst_cpu = -1) @p is enqueued and does
7733 * contribute to cpu_rq(cpu)->cfs.util_est.enqueued.
7742 * p->on_rq = TASK_ON_RQ_MIGRATING;
7743 * -------------------------------- A
7747 * -------------------------------- B
7765 return cpu_util(cpu, NULL, -1, 0);
7770 return cpu_util(cpu, NULL, -1, 1);
7789 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
7792 return cpu_util(cpu, p, -1, 0);
7796 * energy_env - Utilization landscape for energy estimation.
7802 * @pd_cap: Entire perf domain capacity. (pd->nr_cpus * cpu_cap).
7828 eenv->task_busy_time = busy_time;
7842 * - A stable PD utilization, no matter which CPU of that PD we want to place
7845 * - A fair comparison between CPUs as the task contribution (task_util())
7850 * exceed @eenv->pd_cap.
7860 unsigned long util = cpu_util(cpu, p, -1, 0);
7865 eenv->pd_busy_time = min(eenv->pd_cap, busy_time);
7872 * Returns the maximum utilization among @eenv->cpus. This utilization can't
7873 * exceed @eenv->cpu_cap.
7898 return min(max_util, eenv->cpu_cap);
7911 unsigned long busy_time = eenv->pd_busy_time;
7914 busy_time = min(eenv->pd_cap, busy_time + eenv->task_busy_time);
7916 return em_cpu_energy(pd->em_pd, max_util, busy_time, eenv->cpu_cap);
7920 * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
7924 * out which of the CPU candidates is the most energy-efficient.
7941 * cluster-packing, and spreading inside a cluster. That should at least be
7948 * NOTE: Forkees are not accepted in the energy-aware wake-up path because
7952 * to be energy-inefficient in some use-cases. The alternative would be to
7955 * other use-cases too. So, until someone finds a better way to solve this,
7956 * let's keep things simple by re-using the existing slow path.
7964 struct root_domain *rd = this_rq()->rd;
7965 int cpu, best_energy_cpu, target = -1;
7966 int prev_fits = -1, best_fits = -1;
7974 pd = rcu_dereference(rd->pd);
7975 if (!pd || READ_ONCE(rd->overutilized))
7979 * Energy-aware wake-up happens on the lowest sched_domain starting
7984 sd = sd->parent;
7990 sync_entity_load_avg(&p->se);
7996 for (; pd; pd = pd->next) {
7999 long prev_spare_cap = -1, max_spare_cap = -1;
8002 int max_spare_cap_cpu = -1;
8003 int fits, max_fits = -1;
8013 cpu_thermal_cap -= arch_scale_thermal_pressure(cpu);
8026 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
8045 * max-aggregated uclamp_{min, max}.
8082 base_energy = compute_energy(&eenv, pd, cpus, p, -1);
8085 if (prev_spare_cap > -1) {
8091 prev_delta -= base_energy;
8115 cur_delta -= base_energy;
8159 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
8168 * required for stable ->cpus_allowed
8170 lockdep_assert_held(&p->pi_lock);
8175 cpumask_test_cpu(cpu, p->cpus_ptr))
8185 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
8194 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
8208 if (tmp->flags & sd_flag)
8229 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
8233 struct sched_entity *se = &p->se;
8236 remove_entity_load_avg(se);
8242 * leading to an inflation after wake-up on the new rq.
8248 migrate_se_pelt_lag(se);
8252 se->avg.last_update_time = 0;
8259 remove_entity_load_avg(&p->se);
8265 if (rq->nr_running)
8272 static void set_next_buddy(struct sched_entity *se)
8274 for_each_sched_entity(se) {
8275 if (SCHED_WARN_ON(!se->on_rq))
8277 if (se_is_idle(se))
8279 cfs_rq_of(se)->next = se;
8288 struct task_struct *curr = rq->curr;
8289 struct sched_entity *se = &curr->se, *pse = &p->se;
8294 if (unlikely(se == pse))
8301 * next-buddy nomination below.
8315 * Note: this also catches the edge-case of curr being in a throttled
8327 find_matching_se(&se, &pse);
8330 cse_is_idle = se_is_idle(se);
8334 * Preempt an idle entity in favor of a non-idle entity (and don't preempt
8345 if (unlikely(p->policy != SCHED_NORMAL))
8348 cfs_rq = cfs_rq_of(se);
8351 * XXX pick_eevdf(cfs_rq) != se ?
8365 struct sched_entity *se;
8369 cfs_rq = &rq->cfs;
8370 if (!cfs_rq->nr_running)
8374 struct sched_entity *curr = cfs_rq->curr;
8378 if (curr->on_rq)
8387 se = pick_next_entity(cfs_rq, curr);
8388 cfs_rq = group_cfs_rq(se);
8391 return task_of(se);
8398 struct cfs_rq *cfs_rq = &rq->cfs;
8399 struct sched_entity *se;
8408 if (!prev || prev->sched_class != &fair_sched_class)
8420 struct sched_entity *curr = cfs_rq->curr;
8424 * have to consider cfs_rq->curr. If it is still a runnable
8429 if (curr->on_rq)
8441 cfs_rq = &rq->cfs;
8443 if (!cfs_rq->nr_running)
8450 se = pick_next_entity(cfs_rq, curr);
8451 cfs_rq = group_cfs_rq(se);
8454 p = task_of(se);
8462 struct sched_entity *pse = &prev->se;
8464 while (!(cfs_rq = is_same_group(se, pse))) {
8465 int se_depth = se->depth;
8466 int pse_depth = pse->depth;
8473 set_next_entity(cfs_rq_of(se), se);
8474 se = parent_entity(se);
8479 set_next_entity(cfs_rq, se);
8489 se = pick_next_entity(cfs_rq, NULL);
8490 set_next_entity(cfs_rq, se);
8491 cfs_rq = group_cfs_rq(se);
8494 p = task_of(se);
8503 list_move(&p->se.group_node, &rq->cfs_tasks);
8521 * Because newidle_balance() releases (and re-acquires) rq->lock, it is
8523 * must re-start the pick_next_entity() loop.
8550 struct sched_entity *se = &prev->se;
8553 for_each_sched_entity(se) {
8554 cfs_rq = cfs_rq_of(se);
8555 put_prev_entity(cfs_rq, se);
8564 struct task_struct *curr = rq->curr;
8566 struct sched_entity *se = &curr->se;
8571 if (unlikely(rq->nr_running == 1))
8574 clear_buddies(cfs_rq, se);
8578 * Update run-time statistics of the 'current'.
8588 se->deadline += calc_delta_fair(se->slice, se);
8593 struct sched_entity *se = &p->se;
8596 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
8600 set_next_buddy(se);
8609 * Fair scheduling class load-balancing methods.
8613 * The purpose of load-balancing is to achieve the same basic fairness the
8614 * per-CPU scheduler provides, namely provide a proportional amount of compute
8619 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
8624 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
8630 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
8639 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
8646 * - infeasible weights;
8647 * - local vs global optima in the discrete case. ]
8657 * of load-balance at each level inv. proportional to the number of CPUs in
8663 *   \Sum_{i=0}^{log_2 n} (1/2^i) * (n/2^i) * 2^i = O(n)          (5)
8665 *       where, at each level i of the hierarchy:
8666 *         1/2^i  - balance frequency,  n/2^i - number of CPUs doing load-balance,
8667 *         2^i    - size of each group,
8668 *       and the sum runs over all levels.
8710 * W_i,0 = \Sum_j \Prod_k w_k * (s_k,i / S_k)                      (9)
8717 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
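A worked sketch of equation (3): one step of the exponential moving average of the CPU weight, where a power-of-two window 2^n lets the blend reduce to shifts.

#include <stdint.h>
#include <stdio.h>

static uint64_t ema_step(uint64_t w_avg, uint64_t w_now, unsigned int n)
{
    /* ((2^n - 1) * W_i,n + W_i,0) / 2^n */
    return ((((uint64_t)1 << n) - 1) * w_avg + w_now) >> n;
}

int main(void)
{
    /* with n = 3 the instantaneous weight contributes 1/8 of the result */
    printf("%llu\n", (unsigned long long)ema_step(1024, 2048, 3)); /* prints 1152 */
    return 0;
}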
8799 /* The set of CPUs under consideration for load-balancing */
8814 * Is this task likely cache-hot:
8820 lockdep_assert_rq_held(env->src_rq);
8822 if (p->sched_class != &fair_sched_class)
8829 if (env->sd->flags & SD_SHARE_CPUCAPACITY)
8835 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
8836 (&p->se == cfs_rq_of(&p->se)->next))
8839 if (sysctl_sched_migration_cost == -1)
8846 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
8852 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
8861 * Returns -1, if task migration is not affected by locality.
8865 struct numa_group *numa_group = rcu_dereference(p->numa_group);
8870 return -1;
8872 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
8873 return -1;
8875 src_nid = cpu_to_node(env->src_cpu);
8876 dst_nid = cpu_to_node(env->dst_cpu);
8879 return -1;
8882 if (src_nid == p->numa_preferred_nid) {
8883 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
8886 return -1;
8890 if (dst_nid == p->numa_preferred_nid)
8894 if (env->idle == CPU_IDLE)
8895 return -1;
8913 return -1;
8918 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
8925 lockdep_assert_rq_held(env->src_rq);
8926 if (p->sched_task_hot)
8927 p->sched_task_hot = 0;
8934 * 4) are cache-hot on their current CPU.
8936 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
8943 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
8946 schedstat_inc(p->stats.nr_failed_migrations_affine);
8948 env->flags |= LBF_SOME_PINNED;
8956 * - for NEWLY_IDLE
8957 * - if we have already computed one in current iteration
8958 * - if it's an active balance
8960 if (env->idle == CPU_NEWLY_IDLE ||
8961 env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
8964 /* Prevent to re-select dst_cpu via env's CPUs: */
8965 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
8966 if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
8967 env->flags |= LBF_DST_PINNED;
8968 env->new_dst_cpu = cpu;
8977 env->flags &= ~LBF_ALL_PINNED;
8979 if (task_on_cpu(env->src_rq, p)) {
8980 schedstat_inc(p->stats.nr_failed_migrations_running);
8991 if (env->flags & LBF_ACTIVE_LB)
8995 if (tsk_cache_hot == -1)
8999 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
9001 p->sched_task_hot = 1;
9005 schedstat_inc(p->stats.nr_failed_migrations_hot);
9010 * detach_task() -- detach the task for the migration specified in env
9014 lockdep_assert_rq_held(env->src_rq);
9016 if (p->sched_task_hot) {
9017 p->sched_task_hot = 0;
9018 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
9019 schedstat_inc(p->stats.nr_forced_migrations);
9022 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
9023 set_task_cpu(p, env->dst_cpu);
9027 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
9036 lockdep_assert_rq_held(env->src_rq);
9039 &env->src_rq->cfs_tasks, se.group_node) {
9047 * lb_gained[env->idle] is updated (other is detach_tasks)
9051 schedstat_inc(env->sd->lb_gained[env->idle]);
9058 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
9065 struct list_head *tasks = &env->src_rq->cfs_tasks;
9070 lockdep_assert_rq_held(env->src_rq);
9076 if (env->src_rq->nr_running <= 1) {
9077 env->flags &= ~LBF_ALL_PINNED;
9081 if (env->imbalance <= 0)
9089 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
9092 env->loop++;
9094 if (env->loop > env->loop_max)
9098 if (env->loop > env->loop_break) {
9099 env->loop_break += SCHED_NR_MIGRATE_BREAK;
9100 env->flags |= LBF_NEED_BREAK;
9104 p = list_last_entry(tasks, struct task_struct, se.group_node);
9109 switch (env->migration_type) {
9114 * value. Make sure that env->imbalance decreases
9121 load < 16 && !env->sd->nr_balance_failed)
9130 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
9133 env->imbalance -= load;
9139 if (util > env->imbalance)
9142 env->imbalance -= util;
9146 env->imbalance--;
9151 if (task_fits_cpu(p, env->src_cpu))
9154 env->imbalance = 0;
9159 list_add(&p->se.group_node, &env->tasks);
9169 if (env->idle == CPU_NEWLY_IDLE)
9177 if (env->imbalance <= 0)
9182 if (p->sched_task_hot)
9183 schedstat_inc(p->stats.nr_failed_migrations_hot);
9185 list_move(&p->se.group_node, tasks);
9193 schedstat_add(env->sd->lb_gained[env->idle], detached);
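A simplified sketch of how each detached task is charged against the remaining imbalance, depending on what the balancer is moving; the extra damping applied after repeated balance failures is omitted and the names are stand-ins.

#include <stdint.h>
#include <stdbool.h>

enum migration_type { MIGRATE_LOAD, MIGRATE_UTIL, MIGRATE_TASK, MIGRATE_MISFIT };

/* Returns false when the task should be skipped because detaching it
 * would overshoot the remaining imbalance. */
static bool charge_detached_task(enum migration_type type, int64_t *imbalance,
                                 uint64_t task_load, uint64_t task_util,
                                 bool fits_src_cpu)
{
    switch (type) {
    case MIGRATE_LOAD:
        if ((int64_t)task_load > *imbalance)
            return false;            /* too much weight for what is left */
        *imbalance -= (int64_t)task_load;
        break;
    case MIGRATE_UTIL:
        if ((int64_t)task_util > *imbalance)
            return false;
        *imbalance -= (int64_t)task_util;
        break;
    case MIGRATE_TASK:
        (*imbalance)--;              /* counting tasks, not weight */
        break;
    case MIGRATE_MISFIT:
        if (fits_src_cpu)
            return false;            /* not actually a misfit on the source */
        *imbalance = 0;              /* one misfit task satisfies the pull */
        break;
    }
    return true;
}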
9199 * attach_task() -- attach the task detached by detach_task() to its new rq.
9211 * attach_one_task() -- attaches the task returned from detach_one_task() to
9225 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
9230 struct list_head *tasks = &env->tasks;
9234 rq_lock(env->dst_rq, &rf);
9235 update_rq_clock(env->dst_rq);
9238 p = list_first_entry(tasks, struct task_struct, se.group_node);
9239 list_del_init(&p->se.group_node);
9241 attach_task(env->dst_rq, p);
9244 rq_unlock(env->dst_rq, &rf);
9250 if (cfs_rq->avg.load_avg)
9253 if (cfs_rq->avg.util_avg)
9261 if (READ_ONCE(rq->avg_rt.util_avg))
9264 if (READ_ONCE(rq->avg_dl.util_avg))
9271 if (READ_ONCE(rq->avg_irq.util_avg))
9280 WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
9286 rq->has_blocked_load = 0;
9306 curr_class = rq->curr->sched_class;
9325 struct cfs_rq *cfs_rq, *pos;
9333 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
9334 struct sched_entity *se;
9339 if (cfs_rq->nr_running == 0)
9342 if (cfs_rq == &rq->cfs)
9347 se = cfs_rq->tg->se[cpu];
9348 if (se && !skip_blocked_update(se))
9349 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
9368 * This needs to be done in a top-down fashion because the load of a child
9374 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
9378 if (cfs_rq->last_h_load_update == now)
9381 WRITE_ONCE(cfs_rq->h_load_next, NULL);
9382 for_each_sched_entity(se) {
9383 cfs_rq = cfs_rq_of(se);
9384 WRITE_ONCE(cfs_rq->h_load_next, se);
9385 if (cfs_rq->last_h_load_update == now)
9389 if (!se) {
9390 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
9391 cfs_rq->last_h_load_update = now;
9394 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
9395 load = cfs_rq->h_load;
9396 load = div64_ul(load * se->avg.load_avg,
9398 cfs_rq = group_cfs_rq(se);
9399 cfs_rq->h_load = load;
9400 cfs_rq->last_h_load_update = now;
9409 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
9415 struct cfs_rq *cfs_rq = &rq->cfs;
9427 return p->se.avg.load_avg;
9453 * sg_lb_stats - stats of a sched_group required for load_balancing
9476 * sd_lb_stats - Structure to store the statistics of a sched_domain
9530 used = READ_ONCE(rq->avg_rt.util_avg);
9531 used += READ_ONCE(rq->avg_dl.util_avg);
9537 free = max - used;
9545 struct sched_group *sdg = sd->groups;
9547 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
9552 cpu_rq(cpu)->cpu_capacity = capacity;
9555 sdg->sgc->capacity = capacity;
9556 sdg->sgc->min_capacity = capacity;
9557 sdg->sgc->max_capacity = capacity;
9562 struct sched_domain *child = sd->child;
9563 struct sched_group *group, *sdg = sd->groups;
9567 interval = msecs_to_jiffies(sd->balance_interval);
9569 sdg->sgc->next_update = jiffies + interval;
9580 if (child->flags & SD_OVERLAP) {
9599 group = child->groups;
9601 struct sched_group_capacity *sgc = group->sgc;
9603 capacity += sgc->capacity;
9604 min_capacity = min(sgc->min_capacity, min_capacity);
9605 max_capacity = max(sgc->max_capacity, max_capacity);
9606 group = group->next;
9607 } while (group != child->groups);
9610 sdg->sgc->capacity = capacity;
9611 sdg->sgc->min_capacity = min_capacity;
9612 sdg->sgc->max_capacity = max_capacity;
9623 return ((rq->cpu_capacity * sd->imbalance_pct) <
9624 (rq->cpu_capacity_orig * 100));
9634 return rq->misfit_task_load &&
9635 (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
9641 * groups is inadequate due to ->cpus_ptr constraints.
9650 * If we were to balance group-wise we'd place two tasks in the first group and
9670 return group->sgc->imbalance;
9688 if (sgs->sum_nr_running < sgs->group_weight)
9691 if ((sgs->group_capacity * imbalance_pct) <
9692 (sgs->group_runnable * 100))
9695 if ((sgs->group_capacity * 100) >
9696 (sgs->group_util * imbalance_pct))
9713 if (sgs->sum_nr_running <= sgs->group_weight)
9716 if ((sgs->group_capacity * 100) <
9717 (sgs->group_util * imbalance_pct))
9720 if ((sgs->group_capacity * imbalance_pct) <
9721 (sgs->group_runnable * 100))
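A sketch of the two capacity checks above, written out with simplified field names; imbalance_pct is a percentage margin (117 means roughly 17% headroom is required before a group counts as overloaded).

#include <stdbool.h>

struct grp_stats {
    unsigned long sum_nr_running, group_weight;
    unsigned long group_capacity, group_util, group_runnable;
};

static bool grp_has_capacity(const struct grp_stats *s, unsigned int imb_pct)
{
    if (s->sum_nr_running < s->group_weight)
        return true;                                      /* a CPU is still free */
    if (s->group_capacity * imb_pct < s->group_runnable * 100)
        return false;                                     /* runnable pressure too high */
    if (s->group_capacity * 100 > s->group_util * imb_pct)
        return true;                                      /* utilization headroom left */
    return false;
}

static bool grp_is_overloaded(const struct grp_stats *s, unsigned int imb_pct)
{
    if (s->sum_nr_running <= s->group_weight)
        return false;
    if (s->group_capacity * 100 < s->group_util * imb_pct)
        return true;
    if (s->group_capacity * imb_pct < s->group_runnable * 100)
        return true;
    return false;
}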
9738 if (sgs->group_asym_packing)
9741 if (sgs->group_smt_balance)
9744 if (sgs->group_misfit_task_load)
9754 * sched_use_asym_prio - Check whether asym_packing priority must be used
9769 return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
9773 * sched_asym - Check if the destination CPU can do asym_packing load balance
9775 * @sds: Load-balancing data with statistics of the local group
9776 * @sgs: Load-balancing statistics of the candidate busiest group
9798 if (!sched_use_asym_prio(env->sd, env->dst_cpu))
9805 if (group->flags & SD_SHARE_CPUCAPACITY) {
9806 if (sgs->group_weight - sgs->idle_cpus != 1)
9810 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
9820 return (sg1->flags & SD_SHARE_CPUCAPACITY) !=
9821 (sg2->flags & SD_SHARE_CPUCAPACITY);
9827 if (env->idle == CPU_NOT_IDLE)
9836 if (group->flags & SD_SHARE_CPUCAPACITY &&
9837 sgs->sum_h_nr_running > 1)
9851 if (env->idle == CPU_NOT_IDLE || !busiest->sum_nr_running)
9854 ncores_busiest = sds->busiest->cores;
9855 ncores_local = sds->local->cores;
9858 imbalance = busiest->sum_nr_running;
9859 lsub_positive(&imbalance, local->sum_nr_running);
9864 imbalance = ncores_local * busiest->sum_nr_running;
9865 lsub_positive(&imbalance, ncores_busiest * local->sum_nr_running);
9871 if (imbalance <= 1 && local->sum_nr_running == 0 &&
9872 busiest->sum_nr_running > 1)
9885 if (rq->cfs.h_nr_running != 1)
9892 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
9894 * @sds: Load-balancing data with statistics of the local group.
9909 local_group = group == sds->local;
9911 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
9915 sgs->group_load += load;
9916 sgs->group_util += cpu_util_cfs(i);
9917 sgs->group_runnable += cpu_runnable(rq);
9918 sgs->sum_h_nr_running += rq->cfs.h_nr_running;
9920 nr_running = rq->nr_running;
9921 sgs->sum_nr_running += nr_running;
9930 sgs->nr_numa_running += rq->nr_numa_running;
9931 sgs->nr_preferred_running += rq->nr_preferred_running;
9937 sgs->idle_cpus++;
9945 if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
9947 if (sgs->group_misfit_task_load < rq->misfit_task_load) {
9948 sgs->group_misfit_task_load = rq->misfit_task_load;
9951 } else if ((env->idle != CPU_NOT_IDLE) &&
9952 sched_reduced_capacity(rq, env->sd)) {
9954 if (sgs->group_misfit_task_load < load)
9955 sgs->group_misfit_task_load = load;
9959 sgs->group_capacity = group->sgc->capacity;
9961 sgs->group_weight = group->group_weight;
9964 if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
9965 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
9967 sgs->group_asym_packing = 1;
9972 sgs->group_smt_balance = 1;
9974 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
9977 if (sgs->group_type == group_overloaded)
9978 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
9979 sgs->group_capacity;
9983 * update_sd_pick_busiest - return 1 on busiest group
10000 struct sg_lb_stats *busiest = &sds->busiest_stat;
10003 if (!sgs->sum_h_nr_running)
10012 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
10013 (sgs->group_type == group_misfit_task) &&
10014 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
10015 sds->local_stat.group_type != group_has_spare))
10018 if (sgs->group_type > busiest->group_type)
10021 if (sgs->group_type < busiest->group_type)
10029 switch (sgs->group_type) {
10032 if (sgs->avg_load <= busiest->avg_load)
10045 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
10054 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
10063 if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
10081 if (sgs->avg_load < busiest->avg_load)
10084 if (sgs->avg_load == busiest->avg_load) {
10086 * SMT sched groups need more help than non-SMT groups.
10089 if (sds->busiest->flags & SD_SHARE_CPUCAPACITY)
10101 if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
10102 if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
10116 if (sgs->idle_cpus > busiest->idle_cpus)
10118 else if ((sgs->idle_cpus == busiest->idle_cpus) &&
10119 (sgs->sum_nr_running <= busiest->sum_nr_running))
10127 * per-CPU capacity. Migrating tasks to less capable CPUs may harm
10131 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
10132 (sgs->group_type <= group_fully_busy) &&
10133 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
10142 if (sgs->sum_h_nr_running > sgs->nr_numa_running)
10144 if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
10151 if (rq->nr_running > rq->nr_numa_running)
10153 if (rq->nr_running > rq->nr_preferred_running)
10173 * task_running_on_cpu - return 1 if @p is running on @cpu.
10179 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
10189 * idle_cpu_without - would a given CPU be idle without p ?
10199 if (rq->curr != rq->idle && rq->curr != p)
10203 * rq->nr_running can't be used but an updated version without the
10209 if (rq->ttwu_pending)
10217 * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
10233 if (sd->flags & SD_ASYM_CPUCAPACITY)
10234 sgs->group_misfit_task_load = 1;
10240 sgs->group_load += cpu_load_without(rq, p);
10241 sgs->group_util += cpu_util_without(i, p);
10242 sgs->group_runnable += cpu_runnable_without(rq, p);
10244 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
10246 nr_running = rq->nr_running - local;
10247 sgs->sum_nr_running += nr_running;
10253 sgs->idle_cpus++;
10256 if (sd->flags & SD_ASYM_CPUCAPACITY &&
10257 sgs->group_misfit_task_load &&
10259 sgs->group_misfit_task_load = 0;
10263 sgs->group_capacity = group->sgc->capacity;
10265 sgs->group_weight = group->group_weight;
10267 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
10273 if (sgs->group_type == group_fully_busy ||
10274 sgs->group_type == group_overloaded)
10275 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
10276 sgs->group_capacity;
10284 if (sgs->group_type < idlest_sgs->group_type)
10287 if (sgs->group_type > idlest_sgs->group_type)
10295 switch (sgs->group_type) {
10299 if (idlest_sgs->avg_load <= sgs->avg_load)
10311 if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
10317 if (idlest_sgs->idle_cpus > sgs->idle_cpus)
10321 if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
10322 idlest_sgs->group_util <= sgs->group_util)
10340 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
10354 p->cpus_ptr))
10378 } while (group = group->next, group != sd->groups);
10409 (sd->imbalance_pct-100) / 100;
10416 * cross-domain, add imbalance to the load on the remote node
10420 if ((sd->flags & SD_NUMA) &&
10431 if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
10443 if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
10449 if (sd->flags & SD_NUMA) {
10450 int imb_numa_nr = sd->imb_numa_nr;
10457 if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
10461 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
10473 if (p->nr_cpus_allowed != NR_CPUS) {
10476 cpumask_and(cpus, sched_group_span(local), p->cpus_ptr);
10477 imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr);
10480 imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);
10517 if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
10520 llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
10521 if (env->sd->span_weight != llc_weight)
10524 sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
10534 * let y = SCHED_CAPACITY_SCALE - p * x^2 [1]
10552 * y = SCHED_CAPACITY_SCALE - x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE)  [4]
10561 pct = env->sd->imbalance_pct;
10565 y = SCHED_CAPACITY_SCALE - tmp;
10570 if ((int)y != sd_share->nr_idle_scan)
10571 WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
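A sketch of the SIS_UTIL calculation: the number of CPUs worth scanning in an LLC falls off quadratically with how busy the LLC already is, following equations [2]-[4] in the comment above; names and the helper are stand-ins.

#include <stdint.h>

#define CAP_SCALE 1024ULL   /* SCHED_CAPACITY_SCALE */

static int nr_idle_scan_hint(uint64_t sum_util, unsigned int llc_weight,
                             unsigned int imbalance_pct)
{
    uint64_t x = sum_util / llc_weight;                       /* x' per [3] */
    uint64_t tmp = x * x * imbalance_pct * imbalance_pct /
                   (10000 * CAP_SCALE);                       /* p * x^2 per [4] */
    uint64_t y;

    if (tmp > CAP_SCALE)
        tmp = CAP_SCALE;                 /* LLC is saturated: scan nothing */
    y = CAP_SCALE - tmp;

    return (int)(y * llc_weight / CAP_SCALE);   /* nr_scan = llc_weight * y', per [2] */
}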
10575 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
10582 struct sched_group *sg = env->sd->groups;
10583 struct sg_lb_stats *local = &sds->local_stat;
10592 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
10594 sds->local = sg;
10597 if (env->idle != CPU_NEWLY_IDLE ||
10598 time_after_eq(jiffies, sg->sgc->next_update))
10599 update_group_capacity(env->sd, env->dst_cpu);
10609 sds->busiest = sg;
10610 sds->busiest_stat = *sgs;
10615 sds->total_load += sgs->group_load;
10616 sds->total_capacity += sgs->group_capacity;
10618 sum_util += sgs->group_util;
10619 sg = sg->next;
10620 } while (sg != env->sd->groups);
10627 if (sds->busiest)
10628 sds->prefer_sibling = !!(sds->busiest->flags & SD_PREFER_SIBLING);
10631 if (env->sd->flags & SD_NUMA)
10632 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
10634 if (!env->sd->parent) {
10636 WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
10638 /* Update over-utilization (tipping point, U >= 0) indicator */
10639 set_rd_overutilized_status(env->dst_rq->rd,
10642 set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
10649 * calculate_imbalance - Calculate the amount of imbalance present within the
10658 local = &sds->local_stat;
10659 busiest = &sds->busiest_stat;
10661 if (busiest->group_type == group_misfit_task) {
10662 if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
10664 env->migration_type = migrate_misfit;
10665 env->imbalance = 1;
10671 env->migration_type = migrate_load;
10672 env->imbalance = busiest->group_misfit_task_load;
10677 if (busiest->group_type == group_asym_packing) {
10682 env->migration_type = migrate_task;
10683 env->imbalance = busiest->sum_h_nr_running;
10687 if (busiest->group_type == group_smt_balance) {
10689 env->migration_type = migrate_task;
10690 env->imbalance = 1;
10694 if (busiest->group_type == group_imbalanced) {
10696 * In the group_imb case we cannot rely on group-wide averages
10697 * to ensure CPU-load equilibrium, try to move any task to fix
10701 env->migration_type = migrate_task;
10702 env->imbalance = 1;
10710 if (local->group_type == group_has_spare) {
10711 if ((busiest->group_type > group_fully_busy) &&
10712 !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
10721 env->migration_type = migrate_util;
10722 env->imbalance = max(local->group_capacity, local->group_util) -
10723 local->group_util;
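The migrate_util imbalance above is simply the local group's spare capacity; a one-line sketch, e.g. spare_capacity(1024, 700) == 324 while a fully busy group yields 0.

static unsigned long spare_capacity(unsigned long group_capacity,
                                    unsigned long group_util)
{
    unsigned long base = group_capacity > group_util ? group_capacity : group_util;

    return base - group_util;   /* room left for additional utilization */
}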
10732 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
10733 env->migration_type = migrate_task;
10734 env->imbalance = 1;
10740 if (busiest->group_weight == 1 || sds->prefer_sibling) {
10745 env->migration_type = migrate_task;
10746 env->imbalance = sibling_imbalance(env, sds, busiest, local);
10753 env->migration_type = migrate_task;
10754 env->imbalance = max_t(long, 0,
10755 (local->idle_cpus - busiest->idle_cpus));
10760 if (env->sd->flags & SD_NUMA) {
10761 env->imbalance = adjust_numa_imbalance(env->imbalance,
10762 local->sum_nr_running + 1,
10763 env->sd->imb_numa_nr);
10768 env->imbalance >>= 1;
10777 if (local->group_type < group_overloaded) {
10783 local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
10784 local->group_capacity;
10790 if (local->avg_load >= busiest->avg_load) {
10791 env->imbalance = 0;
10795 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
10796 sds->total_capacity;
10802 if (local->avg_load >= sds->avg_load) {
10803 env->imbalance = 0;
10817 env->migration_type = migrate_load;
10818 env->imbalance = min(
10819 (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
10820 (sds->avg_load - local->avg_load) * local->group_capacity
10847 * find_busiest_group - Returns the busiest group within the sched_domain
10854 * Return: - The busiest group if imbalance exists.
10876 if (busiest->group_type == group_misfit_task)
10880 struct root_domain *rd = env->dst_rq->rd;
10882 if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
10887 if (busiest->group_type == group_asym_packing)
10895 if (busiest->group_type == group_imbalanced)
10903 if (local->group_type > busiest->group_type)
10910 if (local->group_type == group_overloaded) {
10915 if (local->avg_load >= busiest->avg_load)
10926 if (local->avg_load >= sds.avg_load)
10933 if (100 * busiest->avg_load <=
10934 env->sd->imbalance_pct * local->avg_load)
10942 if (sds.prefer_sibling && local->group_type == group_has_spare &&
10946 if (busiest->group_type != group_overloaded) {
10947 if (env->idle == CPU_NOT_IDLE) {
10956 if (busiest->group_type == group_smt_balance &&
10962 if (busiest->group_weight > 1 &&
10963 local->idle_cpus <= (busiest->idle_cpus + 1)) {
10976 if (busiest->sum_h_nr_running == 1) {
10987 return env->imbalance ? sds.busiest : NULL;
10990 env->imbalance = 0;
10995 * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
11005 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
11015 * - regular: there are !numa tasks
11016 * - remote: there are numa tasks that run on the 'wrong' node
11017 * - all: there is no distinction
11032 if (rt > env->fbq_type)
11035 nr_running = rq->cfs.h_nr_running;
11043 * eventually lead to active balancing from high- to low-capacity CPUs.
11044 * Higher per-CPU capacity is considered better than balancing
11047 if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
11048 !capacity_greater(capacity_of(env->dst_cpu), capacity) &&
11059 if ((env->sd->flags & SD_ASYM_PACKING) &&
11060 sched_use_asym_prio(env->sd, i) &&
11061 sched_asym_prefer(i, env->dst_cpu) &&
11065 switch (env->migration_type) {
11073 if (nr_running == 1 && load > env->imbalance &&
11074 !check_cpu_capacity(rq, env->sd))
11126 if (rq->misfit_task_load > busiest_load) {
11127 busiest_load = rq->misfit_task_load;
11158 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
11159 sched_use_asym_prio(env->sd, env->dst_cpu) &&
11160 (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
11161 !sched_use_asym_prio(env->sd, env->src_cpu));
11167 struct sched_domain *sd = env->sd;
11174 if ((env->migration_type == migrate_task) &&
11175 (sd->nr_balance_failed > sd->cache_nice_tries+2))
11183 struct sched_domain *sd = env->sd;
11197 if ((env->idle != CPU_NOT_IDLE) &&
11198 (env->src_rq->cfs.h_nr_running == 1)) {
11199 if ((check_cpu_capacity(env->src_rq, sd)) &&
11200 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
11204 if (env->migration_type == migrate_misfit)
11215 struct sched_group *sg = env->sd->groups;
11216 int cpu, idle_smt = -1;
11222 if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
11232 if (env->idle == CPU_NEWLY_IDLE) {
11233 if (env->dst_rq->nr_running > 0 || env->dst_rq->ttwu_pending)
11240 for_each_cpu_and(cpu, swb_cpus, env->cpus) {
11249 if (!(env->sd->flags & SD_SHARE_CPUCAPACITY) && !is_core_idle(cpu)) {
11250 if (idle_smt == -1)
11264 * Are we the first idle core in a non-SMT domain or higher,
11267 return cpu == env->dst_cpu;
11271 if (idle_smt != -1)
11272 return idle_smt == env->dst_cpu;
11275 return group_balance_cpu(sg) == env->dst_cpu;
11287 struct sched_domain *sd_parent = sd->parent;
11296 .dst_grpmask = group_balance_mask(sd->groups),
11306 schedstat_inc(sd->lb_count[idle]);
11316 schedstat_inc(sd->lb_nobusyg[idle]);
11322 schedstat_inc(sd->lb_nobusyq[idle]);
11328 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
11330 env.src_cpu = busiest->cpu;
11336 if (busiest->nr_running > 1) {
11339 * an imbalance but busiest->nr_running <= 1, the group is
11343 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
11350 * cur_ld_moved - load moved in current iteration
11351 * ld_moved - cumulative load moved across iterations
11358 * unlock busiest->lock, and we are able to be sure
11387 * nohz-idle), we now have balance_cpu in a position to move
11398 /* Prevent to re-select dst_cpu via env's CPUs */
11418 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
11445 schedstat_inc(sd->lb_failed[idle]);
11453 sd->nr_balance_failed++;
11465 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
11474 * ->active_balance synchronizes accesses to
11475 * ->active_balance_work. Once set, it's cleared
11478 if (!busiest->active_balance) {
11479 busiest->active_balance = 1;
11480 busiest->push_cpu = this_cpu;
11489 &busiest->active_balance_work);
11494 sd->nr_balance_failed = 0;
11499 sd->balance_interval = sd->min_interval;
11511 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
11523 schedstat_inc(sd->lb_balanced[idle]);
11525 sd->nr_balance_failed = 0;
11541 sd->balance_interval < MAX_PINNED_INTERVAL) ||
11542 sd->balance_interval < sd->max_interval)
11543 sd->balance_interval *= 2;
11551 unsigned long interval = sd->balance_interval;
11554 interval *= sd->busy_factor;
11565 interval -= 1;
11579 next = sd->last_balance + interval;
11595 int target_cpu = busiest_rq->push_cpu;
11603 * Between queueing the stop-work and running it is a hole in which
11612 !busiest_rq->active_balance))
11616 if (busiest_rq->nr_running <= 1)
11622 * Bjorn Helgaas on a 128-CPU setup.
11638 .src_cpu = busiest_rq->cpu,
11644 schedstat_inc(sd->alb_count);
11649 schedstat_inc(sd->alb_pushed);
11651 sd->nr_balance_failed = 0;
11653 schedstat_inc(sd->alb_failed);
11658 busiest_rq->active_balance = 0;
11673 * This trades load-balance latency on larger machines for less cross talk.
11682 if (cost > sd->max_newidle_lb_cost) {
11687 sd->max_newidle_lb_cost = cost;
11688 sd->last_decay_max_lb_cost = jiffies;
11689 } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) {
11695 sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256;
11696 sd->last_decay_max_lb_cost = jiffies;
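A sketch of the bookkeeping above: remember the worst newidle-balance cost seen, and let it decay by 253/256 (a bit over 1%) roughly once a second so a single expensive balance does not suppress newidle balancing indefinitely.

#include <stdint.h>

struct newidle_cost { uint64_t max_cost; uint64_t last_decay_sec; };

static void update_newidle_cost_sketch(struct newidle_cost *c, uint64_t cost,
                                       uint64_t now_sec)
{
    if (cost > c->max_cost) {
        c->max_cost = cost;                      /* new worst case observed */
        c->last_decay_sec = now_sec;
    } else if (now_sec > c->last_decay_sec) {
        c->max_cost = c->max_cost * 253 / 256;   /* gentle periodic decay */
        c->last_decay_sec = now_sec;
    }
}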
11713 int cpu = rq->cpu;
11730 max_cost += sd->max_newidle_lb_cost;
11745 need_serialize = sd->flags & SD_SERIALIZE;
11751 if (time_after_eq(jiffies, sd->last_balance + interval)) {
11755 * env->dst_cpu, so we can't know our idle
11761 sd->last_balance = jiffies;
11767 if (time_after(next_balance, sd->last_balance + interval)) {
11768 next_balance = sd->last_balance + interval;
11774 * Ensure the rq-wide value also decays but keep it at a
11775 * reasonable floor to avoid funnies with rq->avg_idle.
11777 rq->max_idle_balance_cost =
11788 rq->next_balance = next_balance;
11794 return unlikely(!rcu_dereference_sched(rq->sd));
11800 * - When one of the busy CPUs notice that there may be an idle rebalancing
11803 * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED not set
11859 smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
11871 int nr_busy, i, cpu = rq->cpu;
11874 if (unlikely(rq->idle_balance))
11897 if (rq->nr_running >= 2) {
11904 sd = rcu_dereference(rq->sd);
11911 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
11961 * increase the overall cache use), we need some less-loaded LLC
11965 * the others are - so just get a nohz balance going if it looks
11968 nr_busy = atomic_read(&sds->nr_busy_cpus);
11991 if (!sd || !sd->nohz_idle)
11993 sd->nohz_idle = 0;
11995 atomic_inc(&sd->shared->nr_busy_cpus);
12004 if (likely(!rq->nohz_tick_stopped))
12007 rq->nohz_tick_stopped = 0;
12008 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
12011 set_cpu_sd_state_busy(rq->cpu);
12021 if (!sd || sd->nohz_idle)
12023 sd->nohz_idle = 1;
12025 atomic_dec(&sd->shared->nr_busy_cpus);
12049 * Can be set safely without rq->lock held
12051 * rq->lock is held during the check and the clear
12053 rq->has_blocked_load = 1;
12061 if (rq->nohz_tick_stopped)
12068 rq->nohz_tick_stopped = 1;
12093 unsigned int cpu = rq->cpu;
12095 if (!rq->has_blocked_load)
12101 if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
12106 return rq->has_blocked_load;
12121 int this_cpu = this_rq->cpu;
12178 if (time_after_eq(jiffies, rq->next_balance)) {
12189 if (time_after(next_balance, rq->next_balance)) {
12190 next_balance = rq->next_balance;
12219 unsigned int flags = this_rq->nohz_idle_balance;
12224 this_rq->nohz_idle_balance = 0;
12254 int this_cpu = this_rq->cpu;
12264 if (this_rq->avg_idle < sysctl_sched_migration_cost)
12295 * < 0 - we released the lock and there are !fair tasks present
12296 * 0 - failed, no new tasks
12297 * > 0 - success, new (fair) tasks present
12302 int this_cpu = this_rq->cpu;
12313 if (this_rq->ttwu_pending)
12320 this_rq->idle_stamp = rq_clock(this_rq);
12330 * for load-balance and preemption/IRQs are still disabled avoiding
12332 * re-start the picking loop.
12337 sd = rcu_dereference_check_sched_domain(this_rq->sd);
12339 if (!READ_ONCE(this_rq->rd->overload) ||
12340 (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) {
12362 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
12365 if (sd->flags & SD_BALANCE_NEWIDLE) {
12372 domain_cost = t1 - t0;
12383 if (pulled_task || this_rq->nr_running > 0 ||
12384 this_rq->ttwu_pending)
12391 if (curr_cost > this_rq->max_idle_balance_cost)
12392 this_rq->max_idle_balance_cost = curr_cost;
12399 if (this_rq->cfs.h_nr_running && !pulled_task)
12403 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
12404 pulled_task = -1;
12408 if (time_after(this_rq->next_balance, next_balance))
12409 this_rq->next_balance = next_balance;
12412 this_rq->idle_stamp = 0;
12428 enum cpu_idle_type idle = this_rq->idle_balance ?
12443 update_blocked_averages(this_rq->cpu);
12459 if (time_after_eq(jiffies, rq->next_balance))
12484 __entity_slice_used(struct sched_entity *se, int min_nr_tasks)
12486 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
12487 u64 slice = se->slice;
12509 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
12512 if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
12513 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
12518 * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
12520 static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
12523 for_each_sched_entity(se) {
12524 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12527 if (cfs_rq->forceidle_seq == fi_seq)
12529 cfs_rq->forceidle_seq = fi_seq;
12532 cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
12538 struct sched_entity *se = &p->se;
12540 if (p->sched_class != &fair_sched_class)
12543 se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
12550 const struct sched_entity *sea = &a->se;
12551 const struct sched_entity *seb = &b->se;
12556 SCHED_WARN_ON(task_rq(b)->core != rq->core);
12560 * Find an se in the hierarchy for tasks a and b, such that the se's
12563 while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
12564 int sea_depth = sea->depth;
12565 int seb_depth = seb->depth;
12573 se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
12574 se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
12576 cfs_rqa = sea->cfs_rq;
12577 cfs_rqb = seb->cfs_rq;
12579 cfs_rqa = &task_rq(a)->cfs;
12580 cfs_rqb = &task_rq(b)->cfs;
12584 * Find delta after normalizing se's vruntime with its cfs_rq's
12588 delta = (s64)(sea->vruntime - seb->vruntime) +
12589 (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);
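A sketch of the comparison above: each vruntime is normalized against its own cfs_rq's forced-idle snapshot so the two become comparable across runqueues; a positive delta means task A has less claim to run than task B.

#include <stdint.h>
#include <stdbool.h>

static bool vruntime_prio_less(uint64_t vruntime_a, uint64_t min_fi_a,
                               uint64_t vruntime_b, uint64_t min_fi_b)
{
    int64_t delta = (int64_t)(vruntime_a - vruntime_b) +
                    (int64_t)(min_fi_b - min_fi_a);

    /* equivalent to (vruntime_a - min_fi_a) > (vruntime_b - min_fi_b) */
    return delta > 0;
}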
12599 cfs_rq = task_group(p)->cfs_rq[cpu];
12601 cfs_rq = &cpu_rq(cpu)->cfs;
12620 struct sched_entity *se = &curr->se;
12622 for_each_sched_entity(se) {
12623 cfs_rq = cfs_rq_of(se);
12624 entity_tick(cfs_rq, se, queued);
12638 * - child not yet on the tasklist
12639 * - preemption disabled
12643 struct sched_entity *se = &p->se, *curr;
12652 curr = cfs_rq->curr;
12655 place_entity(cfs_rq, se, ENQUEUE_INITIAL);
12669 if (rq->cfs.nr_running == 1)
12678 if (p->prio > oldprio)
12689 static void propagate_entity_cfs_rq(struct sched_entity *se)
12691 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12700 se = se->parent;
12702 for_each_sched_entity(se) {
12703 cfs_rq = cfs_rq_of(se);
12705 update_load_avg(cfs_rq, se, UPDATE_TG);
12715 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
12718 static void detach_entity_cfs_rq(struct sched_entity *se)
12720 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12725 * - A forked task which hasn't been woken up by wake_up_new_task().
12726 * - A task which has been woken up by try_to_wake_up() but is
12729 if (!se->avg.last_update_time)
12734 update_load_avg(cfs_rq, se, 0);
12735 detach_entity_load_avg(cfs_rq, se);
12737 propagate_entity_cfs_rq(se);
12740 static void attach_entity_cfs_rq(struct sched_entity *se)
12742 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12745 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
12746 attach_entity_load_avg(cfs_rq, se);
12748 propagate_entity_cfs_rq(se);
12753 struct sched_entity *se = &p->se;
12755 detach_entity_cfs_rq(se);
12760 struct sched_entity *se = &p->se;
12762 attach_entity_cfs_rq(se);
12789 * This routine is mostly called to set cfs_rq->curr field when a task
12794 struct sched_entity *se = &p->se;
12802 list_move(&se->group_node, &rq->cfs_tasks);
12806 for_each_sched_entity(se) {
12807 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12809 set_next_entity(cfs_rq, se);
12817 cfs_rq->tasks_timeline = RB_ROOT_CACHED;
12818 u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
12820 raw_spin_lock_init(&cfs_rq->removed.lock);
12831 if (READ_ONCE(p->__state) == TASK_NEW)
12837 /* Flag that se's cfs_rq has changed -- the task has migrated */
12838 p->se.avg.last_update_time = 0;
12849 if (tg->cfs_rq)
12850 kfree(tg->cfs_rq[i]);
12851 if (tg->se)
12852 kfree(tg->se[i]);
12855 kfree(tg->cfs_rq);
12856 kfree(tg->se);
12861 struct sched_entity *se;
12865 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
12866 if (!tg->cfs_rq)
12868 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
12869 if (!tg->se)
12872 tg->shares = NICE_0_LOAD;
12882 se = kzalloc_node(sizeof(struct sched_entity_stats),
12884 if (!se)
12888 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
12889 init_entity_runnable_average(se);
12902 struct sched_entity *se;
12909 se = tg->se[i];
12912 attach_entity_cfs_rq(se);
12927 if (tg->se[cpu])
12928 remove_entity_load_avg(tg->se[cpu]);
12932 * check on_list without danger of it being re-added.
12934 if (!tg->cfs_rq[cpu]->on_list)
12940 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
12946 struct sched_entity *se, int cpu,
12951 cfs_rq->tg = tg;
12952 cfs_rq->rq = rq;
12955 tg->cfs_rq[cpu] = cfs_rq;
12956 tg->se[cpu] = se;
12958 /* se could be NULL for root_task_group */
12959 if (!se)
12963 se->cfs_rq = &rq->cfs;
12964 se->depth = 0;
12966 se->cfs_rq = parent->my_q;
12967 se->depth = parent->depth + 1;
12970 se->my_q = cfs_rq;
12972 update_load_set(&se->load, NICE_0_LOAD);
12973 se->parent = parent;
12987 if (!tg->se[0])
12988 return -EINVAL;
12992 if (tg->shares == shares)
12995 tg->shares = shares;
12998 struct sched_entity *se = tg->se[i];
13004 for_each_sched_entity(se) {
13005 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
13006 update_cfs_group(se);
13020 ret = -EINVAL;
13033 return -EINVAL;
13036 return -EINVAL;
13040 if (tg->idle == idle) {
13045 tg->idle = idle;
13049 struct sched_entity *se = tg->se[i];
13050 struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
13057 grp_cfs_rq->idle = idle;
13061 if (se->on_rq) {
13062 parent_cfs_rq = cfs_rq_of(se);
13064 parent_cfs_rq->idle_nr_running++;
13066 parent_cfs_rq->idle_nr_running--;
13069 idle_task_delta = grp_cfs_rq->h_nr_running -
13070 grp_cfs_rq->idle_h_nr_running;
13072 idle_task_delta *= -1;
13074 for_each_sched_entity(se) {
13075 struct cfs_rq *cfs_rq = cfs_rq_of(se);
13077 if (!se->on_rq)
13080 cfs_rq->idle_h_nr_running += idle_task_delta;
13119 struct sched_entity *se = &task->se;
13126 if (rq->cfs.load.weight)
13127 rr_interval = NS_TO_JIFFIES(se->slice);
13188 struct cfs_rq *cfs_rq, *pos;
13191 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
13204 ng = rcu_dereference(p->numa_group);
13206 if (p->numa_faults) {
13207 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
13208 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
13211 gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
13212 gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
13233 INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
13234 INIT_LIST_HEAD(&cpu_rq(i)->cfsb_csd_list);