Lines Matching full:se
311 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
313 if (unlikely(se->load.weight != NICE_0_LOAD))
314 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
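The matches at 311-314 show calc_delta_fair(), which converts a wall-clock runtime delta into vruntime by scaling it with NICE_0_LOAD / weight, so heavier (lower-nice) entities accrue vruntime more slowly. Below is a minimal user-space sketch of that arithmetic; NICE_0_LOAD is taken as 1024 for readability, and the fixed-point machinery of the kernel's __calc_delta() is left out.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model of calc_delta_fair(): vruntime delta =
     * wall-clock delta * NICE_0_LOAD / weight.  NICE_0_LOAD is taken as
     * 1024 here; the kernel scales weights and uses pre-computed inverse
     * weights inside __calc_delta(). */
    #define NICE_0_LOAD_MODEL 1024ULL

    static uint64_t calc_delta_fair_model(uint64_t delta, uint64_t weight)
    {
        if (weight != NICE_0_LOAD_MODEL)
            delta = delta * NICE_0_LOAD_MODEL / weight;
        return delta;
    }

    int main(void)
    {
        /* 1ms of runtime: a nice-0 entity is charged 1ms of vruntime,
         * an entity with twice the weight only 0.5ms. */
        printf("%llu\n", (unsigned long long)calc_delta_fair_model(1000000, 1024));
        printf("%llu\n", (unsigned long long)calc_delta_fair_model(1000000, 2048));
        return 0;
    }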
328 #define for_each_sched_entity(se) \
329 for (; se; se = se->parent)
431 is_same_group(struct sched_entity *se, struct sched_entity *pse)
433 if (se->cfs_rq == pse->cfs_rq)
434 return se->cfs_rq;
439 static inline struct sched_entity *parent_entity(const struct sched_entity *se)
441 return se->parent;
445 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
457 se_depth = (*se)->depth;
462 *se = parent_entity(*se);
470 while (!is_same_group(*se, *pse)) {
471 *se = parent_entity(*se);
486 static int se_is_idle(struct sched_entity *se)
488 if (entity_is_task(se))
489 return task_has_idle_policy(task_of(se));
490 return cfs_rq_is_idle(group_cfs_rq(se));
495 #define for_each_sched_entity(se) \
496 for (; se; se = NULL)
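The two definitions of for_each_sched_entity() matched above differ only in how far the walk goes: with CONFIG_FAIR_GROUP_SCHED (lines 328-329) it follows se->parent through every ancestor task group's entity until it hits NULL, while without group scheduling (lines 495-496) it visits the task's own entity once. A small sketch under assumed, local type names:

    #include <stdio.h>

    /* Local model of the two flavours; struct entity and the macro names are
     * specific to this sketch.  The root task group has no entity of its own,
     * so the grouped walk naturally ends at NULL. */
    struct entity { const char *name; struct entity *parent; };

    #define for_each_entity_grouped(se)  for (; (se); (se) = (se)->parent)
    #define for_each_entity_flat(se)     for (; (se); (se) = NULL)

    int main(void)
    {
        struct entity group_b = { "se of tg /b",   NULL };     /* child of root */
        struct entity group_a = { "se of tg /b/a", &group_b };
        struct entity task_se = { "task se",       &group_a };
        struct entity *se;

        se = &task_se;
        for_each_entity_grouped(se)
            printf("%s\n", se->name);   /* task se, se of tg /b/a, se of tg /b */

        se = &task_se;
        for_each_entity_flat(se)
            printf("%s\n", se->name);   /* task se only */
        return 0;
    }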
514 static inline struct sched_entity *parent_entity(struct sched_entity *se)
520 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
534 static int se_is_idle(struct sched_entity *se)
536 return task_has_idle_policy(task_of(se));
572 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
574 return (s64)(se->vruntime - cfs_rq->min_vruntime);
599 * se->vruntime):
639 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
641 unsigned long weight = scale_load_down(se->load.weight);
642 s64 key = entity_key(cfs_rq, se);
649 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
651 unsigned long weight = scale_load_down(se->load.weight);
652 s64 key = entity_key(cfs_rq, se);
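avg_vruntime_add() and avg_vruntime_sub() (lines 639-652) keep two running sums per cfs_rq, the weight-scaled keys and the weights themselves, so the weighted average vruntime can be produced without walking the tree. A simplified user-space model of that bookkeeping, with the rounding details of the kernel's avg_vruntime() omitted:

    #include <stdint.h>

    /* Model of the sums behind avg_vruntime_add()/_sub(): with
     * key_i = v_i - min_vruntime, the queue keeps sum(w_i * key_i) and
     * sum(w_i), and the weighted average vruntime is
     *   V = min_vruntime + sum(w_i * key_i) / sum(w_i).
     * Field and function names are local to this sketch. */
    struct rq_model {
        int64_t  sum_weighted_key;   /* models cfs_rq->avg_vruntime */
        int64_t  sum_weight;         /* models cfs_rq->avg_load     */
        uint64_t min_vruntime;
    };

    void add_entity(struct rq_model *rq, int64_t key, int64_t weight)
    {
        rq->sum_weighted_key += key * weight;
        rq->sum_weight       += weight;
    }

    void sub_entity(struct rq_model *rq, int64_t key, int64_t weight)
    {
        rq->sum_weighted_key -= key * weight;
        rq->sum_weight       -= weight;
    }

    uint64_t avg_vruntime_model(const struct rq_model *rq)
    {
        if (!rq->sum_weight)
            return rq->min_vruntime;
        return rq->min_vruntime + rq->sum_weighted_key / rq->sum_weight;
    }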
710 static s64 entity_lag(u64 avruntime, struct sched_entity *se)
714 vlag = avruntime - se->vruntime;
715 limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
720 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
722 SCHED_WARN_ON(!se->on_rq);
724 se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
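update_entity_lag() (lines 710-724) records how far the entity sits below the queue's weighted average: vlag = V - vruntime, clamped to roughly two slices' worth of virtual time (line 715) so no entity can bank an unbounded credit or debt. A minimal sketch of that computation, with the limit passed in rather than derived:

    #include <stdint.h>

    /* Model of update_entity_lag(): vlag = V - vruntime, clamped to
     * +/- vlimit, where the kernel derives vlimit from
     * calc_delta_fair(max(2 * slice, TICK_NSEC), se). */
    int64_t lag_model(uint64_t V, uint64_t vruntime, int64_t vlimit)
    {
        int64_t vlag = (int64_t)(V - vruntime);

        if (vlag > vlimit)
            vlag = vlimit;
        else if (vlag < -vlimit)
            vlag = -vlimit;
        return vlag;
    }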
741 * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
744 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
757 return avg >= entity_key(cfs_rq, se) * load;
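entity_eligible() (line 757) decides whether an entity has received less than its fair share, i.e. whether v_i <= V. As the comment at line 741 notes, comparing against a divided average would lose precision, so the test is done on the summed form instead. A small sketch of the equivalent comparison:

    #include <stdbool.h>
    #include <stdint.h>

    /* Model of the division-free eligibility test:
     *   sum_j w_j * (v_j - v_min)  >=  (v_i - v_min) * sum_j w_j
     * is the same condition as V >= v_i, but avoids the rounding of an
     * explicit division.  Parameter names are local to this sketch. */
    bool eligible_model(int64_t sum_weighted_key, int64_t sum_weight,
                        int64_t key_i /* v_i - min_vruntime */)
    {
        return sum_weighted_key >= key_i * sum_weight;
    }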
776 struct sched_entity *se = __pick_first_entity(cfs_rq);
788 if (se) {
790 vruntime = se->vruntime;
792 vruntime = min_vruntime(vruntime, se->vruntime);
807 static inline void __update_min_deadline(struct sched_entity *se, struct rb_node *node)
811 if (deadline_gt(min_deadline, se, rse))
812 se->min_deadline = rse->min_deadline;
817 * se->min_deadline = min(se->deadline, left->min_deadline, right->min_deadline)
819 static inline bool min_deadline_update(struct sched_entity *se, bool exit)
821 u64 old_min_deadline = se->min_deadline;
822 struct rb_node *node = &se->run_node;
824 se->min_deadline = se->deadline;
825 __update_min_deadline(se, node->rb_right);
826 __update_min_deadline(se, node->rb_left);
828 return se->min_deadline == old_min_deadline;
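min_deadline_update() (lines 819-828) maintains the augmented rbtree invariant quoted at line 817: each node caches the earliest deadline found in its subtree. A plain binary-tree sketch of that propagation step (the kernel hooks it into the rbtree's augmented callbacks):

    #include <stdint.h>

    /* Model of the augmentation: node->min_deadline =
     * min(node->deadline, left->min_deadline, right->min_deadline).
     * Type and field names are local to this sketch. */
    struct node_model {
        uint64_t deadline;
        uint64_t min_deadline;
        struct node_model *left, *right;
    };

    void update_min_deadline(struct node_model *n)
    {
        n->min_deadline = n->deadline;
        if (n->left && n->left->min_deadline < n->min_deadline)
            n->min_deadline = n->left->min_deadline;
        if (n->right && n->right->min_deadline < n->min_deadline)
            n->min_deadline = n->right->min_deadline;
    }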
837 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
839 avg_vruntime_add(cfs_rq, se);
840 se->min_deadline = se->deadline;
841 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
845 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
847 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
849 avg_vruntime_sub(cfs_rq, se);
877 * se->min_deadline = min(se->deadline, se->{left,right}->min_deadline)
900 struct sched_entity *se = __node_2_se(node);
905 if (!entity_eligible(cfs_rq, se)) {
913 if (!best || deadline_gt(deadline, best, se))
914 best = se;
917 * Every se in a left branch is eligible, keep track of the
931 if (left->min_deadline == se->min_deadline)
936 if (se->deadline == se->min_deadline)
956 struct sched_entity *se = __node_2_se(node);
959 if (se->deadline == se->min_deadline)
960 return se;
964 __node_2_se(node->rb_left)->min_deadline == se->min_deadline) {
977 struct sched_entity *se = __pick_eevdf(cfs_rq);
979 if (!se) {
987 return se;
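pick_eevdf() (lines 977-987) returns the entity the EEVDF rule selects: among the eligible entities on the timeline, the one with the earliest virtual deadline. The tree walk at lines 900-964 finds it in O(log n) using the min_deadline augmentation; the naive linear scan below is only a reference for the result it computes, not the kernel's algorithm:

    #include <stddef.h>
    #include <stdint.h>

    /* Reference model of the EEVDF pick: earliest deadline among the
     * entities whose vruntime does not exceed the weighted average V.
     * Returns NULL if nothing is eligible.  Types are local to this sketch. */
    struct ent_model {
        uint64_t vruntime;
        uint64_t deadline;
    };

    const struct ent_model *
    pick_eevdf_reference(const struct ent_model *e, size_t n, uint64_t V)
    {
        const struct ent_model *best = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
            if (e[i].vruntime > V)          /* not eligible */
                continue;
            if (!best || e[i].deadline < best->deadline)
                best = &e[i];
        }
        return best;
    }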
1019 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
1025 static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
1027 if ((s64)(se->vruntime - se->deadline) < 0)
1035 se->slice = sysctl_sched_base_slice;
1040 se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
1047 clear_buddies(cfs_rq, se);
1059 void init_entity_runnable_average(struct sched_entity *se)
1061 struct sched_avg *sa = &se->avg;
1071 if (entity_is_task(se))
1072 sa->load_avg = scale_load_down(se->load.weight);
1081 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
1105 struct sched_entity *se = &p->se;
1106 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1107 struct sched_avg *sa = &se->avg;
1116 attach_entity_load_avg(cfs_rq, se);
1122 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
1128 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
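The comment matched at line 1081 gives the seeding formula for a new task's util_avg: the parent cfs_rq's utilization is split in proportion to the task's weight relative to the queue's load. Line 1128 shows the numerator; the kernel also caps the result, which is not visible in these matches. A small arithmetic sketch, with the cap passed in as an assumed parameter:

    #include <stdint.h>

    /* Model of the seeding in the comment at line 1081:
     *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
     * The cap is an assumed parameter here; its actual value is computed
     * elsewhere in the enclosing function. */
    uint64_t init_util_model(uint64_t cfs_util_avg, uint64_t cfs_load_avg,
                             uint64_t se_weight, uint64_t cap)
    {
        uint64_t util = cfs_util_avg * se_weight / (cfs_load_avg + 1);

        return util > cap ? cap : util;
    }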
1142 void init_entity_runnable_average(struct sched_entity *se)
1191 delta_exec = update_curr_se(rq, &curr->se);
1225 update_curr(cfs_rq_of(&rq->curr->se));
1229 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1237 stats = __schedstats_from_se(se);
1239 if (entity_is_task(se))
1240 p = task_of(se);
1246 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1254 stats = __schedstats_from_se(se);
1257 * When the sched_schedstat changes from 0 to 1, some sched se
1258 * may already be in the runqueue, the se->statistics.wait_start
1265 if (entity_is_task(se))
1266 p = task_of(se);
1272 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1280 stats = __schedstats_from_se(se);
1282 if (entity_is_task(se))
1283 tsk = task_of(se);
1292 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1301 if (se != cfs_rq->curr)
1302 update_stats_wait_start_fair(cfs_rq, se);
1305 update_stats_enqueue_sleeper_fair(cfs_rq, se);
1309 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1319 if (se != cfs_rq->curr)
1320 update_stats_wait_end_fair(cfs_rq, se);
1322 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
1323 struct task_struct *tsk = task_of(se);
1341 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1346 se->exec_start = rq_clock_task(rq_of(cfs_rq));
2729 now = p->se.exec_start;
2730 runtime = p->se.sum_exec_runtime;
2740 delta = p->se.avg.load_sum;
3265 u64 runtime = p->se.sum_exec_runtime;
3502 if (unlikely(p->se.sum_exec_runtime != runtime)) {
3503 u64 diff = p->se.sum_exec_runtime - runtime;
3575 now = curr->se.sum_exec_runtime;
3642 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3644 update_load_add(&cfs_rq->load, se->load.weight);
3646 if (entity_is_task(se)) {
3649 account_numa_enqueue(rq, task_of(se));
3650 list_add(&se->group_node, &rq->cfs_tasks);
3654 if (se_is_idle(se))
3659 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3661 update_load_sub(&cfs_rq->load, se->load.weight);
3663 if (entity_is_task(se)) {
3664 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3665 list_del_init(&se->group_node);
3669 if (se_is_idle(se))
3723 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3725 cfs_rq->avg.load_avg += se->avg.load_avg;
3726 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3730 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3732 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3733 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3740 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3742 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3745 static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
3748 unsigned long old_weight = se->load.weight;
3828 if (avruntime != se->vruntime) {
3829 vlag = entity_lag(avruntime, se);
3831 se->vruntime = avruntime - vlag;
3846 vslice = (s64)(se->deadline - avruntime);
3848 se->deadline = avruntime + vslice;
3851 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
3854 bool curr = cfs_rq->curr == se;
3857 if (se->on_rq) {
3862 __dequeue_entity(cfs_rq, se);
3863 update_load_sub(&cfs_rq->load, se->load.weight);
3865 dequeue_load_avg(cfs_rq, se);
3867 if (se->on_rq) {
3868 reweight_eevdf(se, avruntime, weight);
3871 * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
3872 * we need to scale se->vlag when w_i changes.
3874 se->vlag = div_s64(se->vlag * se->load.weight, weight);
3877 update_load_set(&se->load, weight);
3881 u32 divider = get_pelt_divider(&se->avg);
3883 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
3887 enqueue_load_avg(cfs_rq, se);
3888 if (se->on_rq) {
3889 update_load_add(&cfs_rq->load, se->load.weight);
3891 __enqueue_entity(cfs_rq, se);
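reweight_entity() (lines 3851-3891) changes an entity's weight while preserving what it is owed. For a queued entity that means reweight_eevdf() (line 3868) recomputes vruntime and deadline; for one that is not queued, the stored vlag is rescaled as in the comment at lines 3871-3874, because vlag keeps V - v_i while the real lag is w_i * (V - v_i). A minimal sketch of that rescaling:

    #include <stdint.h>

    /* Model of the vlag rescaling at line 3874: keeping
     * new_w * vlag' == old_w * vlag (up to integer rounding) preserves the
     * entity's weighted lag across the weight change. */
    int64_t rescale_vlag(int64_t vlag, uint64_t old_w, uint64_t new_w)
    {
        return vlag * (int64_t)old_w / (int64_t)new_w;
    }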
3906 struct sched_entity *se = &p->se;
3907 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3908 struct load_weight *load = &se->load;
3910 reweight_entity(cfs_rq, se, lw->weight);
4030 static void update_cfs_group(struct sched_entity *se)
4032 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
4046 if (unlikely(se->load.weight != shares))
4047 reweight_entity(cfs_rq_of(se), se, shares);
4051 static inline void update_cfs_group(struct sched_entity *se)
4186 void set_task_rq_fair(struct sched_entity *se,
4202 if (!(se->avg.last_update_time && prev))
4208 __update_load_avg_blocked_se(p_last_update_time, se);
4209 se->avg.last_update_time = n_last_update_time;
4280 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4282 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
4290 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4297 se->avg.util_avg = gcfs_rq->avg.util_avg;
4298 new_sum = se->avg.util_avg * divider;
4299 delta_sum = (long)new_sum - (long)se->avg.util_sum;
4300 se->avg.util_sum = new_sum;
4312 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4314 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
4322 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4328 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
4329 new_sum = se->avg.runnable_avg * divider;
4330 delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
4331 se->avg.runnable_sum = new_sum;
4342 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4356 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4366 runnable_sum += se->avg.load_sum;
4378 /* But make sure to not inflate se's runnable */
4379 runnable_sum = min(se->avg.load_sum, load_sum);
4388 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
4391 load_sum = se_weight(se) * runnable_sum;
4394 delta_avg = load_avg - se->avg.load_avg;
4398 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
4400 se->avg.load_sum = runnable_sum;
4401 se->avg.load_avg = load_avg;
4416 static inline int propagate_entity_load_avg(struct sched_entity *se)
4420 if (entity_is_task(se))
4423 gcfs_rq = group_cfs_rq(se);
4429 cfs_rq = cfs_rq_of(se);
4433 update_tg_cfs_util(cfs_rq, se, gcfs_rq);
4434 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
4435 update_tg_cfs_load(cfs_rq, se, gcfs_rq);
4438 trace_pelt_se_tp(se);
4447 static inline bool skip_blocked_update(struct sched_entity *se)
4449 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
4455 if (se->avg.load_avg || se->avg.util_avg)
4477 static inline int propagate_entity_load_avg(struct sched_entity *se)
4487 static inline void migrate_se_pelt_lag(struct sched_entity *se)
4494 if (load_avg_is_decayed(&se->avg))
4497 cfs_rq = cfs_rq_of(se);
4563 __update_load_avg_blocked_se(now, se);
4566 static void migrate_se_pelt_lag(struct sched_entity *se) {}
4612 * Because of rounding, se->util_sum might ends up being +1 more than
4651 * @se: sched_entity to attach
4656 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4659 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4665 * When we attach the @se to the @cfs_rq, we must align the decay
4671 se->avg.last_update_time = cfs_rq->avg.last_update_time;
4672 se->avg.period_contrib = cfs_rq->avg.period_contrib;
4680 se->avg.util_sum = se->avg.util_avg * divider;
4682 se->avg.runnable_sum = se->avg.runnable_avg * divider;
4684 se->avg.load_sum = se->avg.load_avg * divider;
4685 if (se_weight(se) < se->avg.load_sum)
4686 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
4688 se->avg.load_sum = 1;
4690 enqueue_load_avg(cfs_rq, se);
4691 cfs_rq->avg.util_avg += se->avg.util_avg;
4692 cfs_rq->avg.util_sum += se->avg.util_sum;
4693 cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
4694 cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
4696 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
4706 * @se: sched_entity to detach
4711 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4713 dequeue_load_avg(cfs_rq, se);
4714 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4715 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
4720 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
4721 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
4726 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
4742 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4751 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
4752 __update_load_avg_se(now, cfs_rq, se);
4755 decayed |= propagate_entity_load_avg(se);
4757 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
4766 attach_entity_load_avg(cfs_rq, se);
4774 detach_entity_load_avg(cfs_rq, se);
4788 static void sync_entity_load_avg(struct sched_entity *se)
4790 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4794 __update_load_avg_blocked_se(last_update_time, se);
4801 static void remove_entity_load_avg(struct sched_entity *se)
4803 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4812 sync_entity_load_avg(se);
4816 cfs_rq->removed.util_avg += se->avg.util_avg;
4817 cfs_rq->removed.load_avg += se->avg.load_avg;
4818 cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
4836 return READ_ONCE(p->se.avg.util_avg);
4841 struct util_est ue = READ_ONCE(p->se.avg.util_est);
4919 ue = p->se.avg.util_est;
4979 WRITE_ONCE(p->se.avg.util_est, ue);
4981 trace_sched_util_est_se_tp(&p->se);
5149 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
5154 static inline void remove_entity_load_avg(struct sched_entity *se) {}
5157 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5159 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5180 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5185 se->slice = sysctl_sched_base_slice;
5186 vslice = calc_delta_fair(se->slice, se);
5200 lag = se->vlag;
5258 lag *= load + scale_load_down(se->load.weight);
5264 se->vruntime = vruntime - lag;
5277 se->deadline = se->vruntime + vslice;
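place_entity() (lines 5180-5277) re-inserts an entity so that the lag it is owed survives the insertion itself: adding weight w at vruntime v moves the average V, so the stored lag is first inflated by (W + w) / W (line 5258) before being subtracted from V (line 5264). The worked example below checks that arithmetic with plain doubles; the values are illustrative, not taken from the kernel:

    #include <stdio.h>

    int main(void)
    {
        double W = 3072.0;   /* queue weight before insertion (illustrative) */
        double w = 1024.0;   /* weight of the incoming entity                */
        double V = 1000.0;   /* weighted average vruntime before insertion   */
        double l = 50.0;     /* lag the entity is owed                       */

        double v  = V - l * (W + w) / W;          /* placement, as at 5258-5264 */
        double V2 = (W * V + w * v) / (W + w);    /* average after insertion    */

        /* prints: post-insertion lag = 50.0 (expected 50.0) */
        printf("post-insertion lag = %.1f (expected %.1f)\n", V2 - v, l);
        return 0;
    }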
5286 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5288 bool curr = cfs_rq->curr == se;
5295 place_entity(cfs_rq, se, flags);
5308 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
5309 se_update_runnable(se);
5315 update_cfs_group(se);
5322 place_entity(cfs_rq, se, flags);
5324 account_entity_enqueue(cfs_rq, se);
5328 se->exec_start = 0;
5331 update_stats_enqueue_fair(cfs_rq, se, flags);
5333 __enqueue_entity(cfs_rq, se);
5334 se->on_rq = 1;
5353 static void __clear_buddies_next(struct sched_entity *se)
5355 for_each_sched_entity(se) {
5356 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5357 if (cfs_rq->next != se)
5364 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
5366 if (cfs_rq->next == se)
5367 __clear_buddies_next(se);
5373 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5377 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
5394 update_load_avg(cfs_rq, se, action);
5395 se_update_runnable(se);
5397 update_stats_dequeue_fair(cfs_rq, se, flags);
5399 clear_buddies(cfs_rq, se);
5401 update_entity_lag(cfs_rq, se);
5402 if (se != cfs_rq->curr)
5403 __dequeue_entity(cfs_rq, se);
5404 se->on_rq = 0;
5405 account_entity_dequeue(cfs_rq, se);
5410 update_cfs_group(se);
5413 * Now advance min_vruntime if @se was the entity holding it back,
5426 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
5428 clear_buddies(cfs_rq, se);
5431 if (se->on_rq) {
5437 update_stats_wait_end_fair(cfs_rq, se);
5438 __dequeue_entity(cfs_rq, se);
5439 update_load_avg(cfs_rq, se, UPDATE_TG);
5444 se->vlag = se->deadline;
5447 update_stats_curr_start(cfs_rq, se);
5448 cfs_rq->curr = se;
5456 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
5459 stats = __schedstats_from_se(se);
5462 se->sum_exec_runtime - se->prev_sum_exec_runtime));
5465 se->prev_sum_exec_runtime = se->sum_exec_runtime;
5770 struct sched_entity *se;
5794 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5803 for_each_sched_entity(se) {
5804 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5806 if (!se->on_rq)
5809 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
5811 if (cfs_rq_is_idle(group_cfs_rq(se)))
5819 se = parent_entity(se);
5824 for_each_sched_entity(se) {
5825 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5827 if (!se->on_rq)
5830 update_load_avg(qcfs_rq, se, 0);
5831 se_update_runnable(se);
5833 if (cfs_rq_is_idle(group_cfs_rq(se)))
5840 /* At this point se is NULL and we are at root level*/
5859 struct sched_entity *se;
5862 se = cfs_rq->tg->se[cpu_of(rq)];
5886 for_each_sched_entity(se) {
5887 if (list_add_leaf_cfs_rq(cfs_rq_of(se)))
5895 for_each_sched_entity(se) {
5896 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5898 if (se->on_rq)
5900 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
5902 if (cfs_rq_is_idle(group_cfs_rq(se)))
5913 for_each_sched_entity(se) {
5914 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5916 update_load_avg(qcfs_rq, se, UPDATE_TG);
5917 se_update_runnable(se);
5919 if (cfs_rq_is_idle(group_cfs_rq(se)))
5930 /* At this point se is NULL and we are at root level*/
6632 struct sched_entity *se = &p->se;
6637 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
6638 u64 slice = se->slice;
6739 struct sched_entity *se = &p->se;
6759 for_each_sched_entity(se) {
6760 if (se->on_rq)
6762 cfs_rq = cfs_rq_of(se);
6763 enqueue_entity(cfs_rq, se, flags);
6778 for_each_sched_entity(se) {
6779 cfs_rq = cfs_rq_of(se);
6781 update_load_avg(cfs_rq, se, UPDATE_TG);
6782 se_update_runnable(se);
6783 update_cfs_group(se);
6796 /* At this point se is NULL and we are at root level*/
6822 static void set_next_buddy(struct sched_entity *se);
6832 struct sched_entity *se = &p->se;
6839 for_each_sched_entity(se) {
6840 cfs_rq = cfs_rq_of(se);
6841 dequeue_entity(cfs_rq, se, flags);
6856 se = parent_entity(se);
6861 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
6862 set_next_buddy(se);
6868 for_each_sched_entity(se) {
6869 cfs_rq = cfs_rq_of(se);
6871 update_load_avg(cfs_rq, se, UPDATE_TG);
6872 se_update_runnable(se);
6873 update_cfs_group(se);
6887 /* At this point se is NULL and we are at root level*/
6943 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6966 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6973 lsub_positive(&runnable, p->se.avg.runnable_avg);
7208 sync_entity_load_avg(&p->se);
7565 sync_entity_load_avg(&p->se);
7789 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
7990 sync_entity_load_avg(&p->se);
8233 struct sched_entity *se = &p->se;
8236 remove_entity_load_avg(se);
8248 migrate_se_pelt_lag(se);
8252 se->avg.last_update_time = 0;
8259 remove_entity_load_avg(&p->se);
8272 static void set_next_buddy(struct sched_entity *se)
8274 for_each_sched_entity(se) {
8275 if (SCHED_WARN_ON(!se->on_rq))
8277 if (se_is_idle(se))
8279 cfs_rq_of(se)->next = se;
8289 struct sched_entity *se = &curr->se, *pse = &p->se;
8294 if (unlikely(se == pse))
8327 find_matching_se(&se, &pse);
8330 cse_is_idle = se_is_idle(se);
8348 cfs_rq = cfs_rq_of(se);
8351 * XXX pick_eevdf(cfs_rq) != se ?
8365 struct sched_entity *se;
8387 se = pick_next_entity(cfs_rq, curr);
8388 cfs_rq = group_cfs_rq(se);
8391 return task_of(se);
8399 struct sched_entity *se;
8450 se = pick_next_entity(cfs_rq, curr);
8451 cfs_rq = group_cfs_rq(se);
8454 p = task_of(se);
8462 struct sched_entity *pse = &prev->se;
8464 while (!(cfs_rq = is_same_group(se, pse))) {
8465 int se_depth = se->depth;
8473 set_next_entity(cfs_rq_of(se), se);
8474 se = parent_entity(se);
8479 set_next_entity(cfs_rq, se);
8489 se = pick_next_entity(cfs_rq, NULL);
8490 set_next_entity(cfs_rq, se);
8491 cfs_rq = group_cfs_rq(se);
8494 p = task_of(se);
8503 list_move(&p->se.group_node, &rq->cfs_tasks);
8550 struct sched_entity *se = &prev->se;
8553 for_each_sched_entity(se) {
8554 cfs_rq = cfs_rq_of(se);
8555 put_prev_entity(cfs_rq, se);
8566 struct sched_entity *se = &curr->se;
8574 clear_buddies(cfs_rq, se);
8588 se->deadline += calc_delta_fair(se->slice, se);
8593 struct sched_entity *se = &p->se;
8596 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
8600 set_next_buddy(se);
8836 (&p->se == cfs_rq_of(&p->se)->next))
8852 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
9039 &env->src_rq->cfs_tasks, se.group_node) {
9104 p = list_last_entry(tasks, struct task_struct, se.group_node);
9159 list_add(&p->se.group_node, &env->tasks);
9185 list_move(&p->se.group_node, tasks);
9238 p = list_first_entry(tasks, struct task_struct, se.group_node);
9239 list_del_init(&p->se.group_node);
9334 struct sched_entity *se;
9347 se = cfs_rq->tg->se[cpu];
9348 if (se && !skip_blocked_update(se))
9349 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
9374 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
9382 for_each_sched_entity(se) {
9383 cfs_rq = cfs_rq_of(se);
9384 WRITE_ONCE(cfs_rq->h_load_next, se);
9389 if (!se) {
9394 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
9396 load = div64_ul(load * se->avg.load_avg,
9398 cfs_rq = group_cfs_rq(se);
9409 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
9427 return p->se.avg.load_avg;
10179 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
12484 __entity_slice_used(struct sched_entity *se, int min_nr_tasks)
12486 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
12487 u64 slice = se->slice;
12513 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
12520 static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
12523 for_each_sched_entity(se) {
12524 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12538 struct sched_entity *se = &p->se;
12543 se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
12550 const struct sched_entity *sea = &a->se;
12551 const struct sched_entity *seb = &b->se;
12560 * Find an se in the hierarchy for tasks a and b, such that the se's
12584 * Find delta after normalizing se's vruntime with its cfs_rq's
12620 struct sched_entity *se = &curr->se;
12622 for_each_sched_entity(se) {
12623 cfs_rq = cfs_rq_of(se);
12624 entity_tick(cfs_rq, se, queued);
12643 struct sched_entity *se = &p->se, *curr;
12655 place_entity(cfs_rq, se, ENQUEUE_INITIAL);
12689 static void propagate_entity_cfs_rq(struct sched_entity *se)
12691 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12700 se = se->parent;
12702 for_each_sched_entity(se) {
12703 cfs_rq = cfs_rq_of(se);
12705 update_load_avg(cfs_rq, se, UPDATE_TG);
12715 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
12718 static void detach_entity_cfs_rq(struct sched_entity *se)
12720 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12729 if (!se->avg.last_update_time)
12734 update_load_avg(cfs_rq, se, 0);
12735 detach_entity_load_avg(cfs_rq, se);
12737 propagate_entity_cfs_rq(se);
12740 static void attach_entity_cfs_rq(struct sched_entity *se)
12742 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12745 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
12746 attach_entity_load_avg(cfs_rq, se);
12748 propagate_entity_cfs_rq(se);
12753 struct sched_entity *se = &p->se;
12755 detach_entity_cfs_rq(se);
12760 struct sched_entity *se = &p->se;
12762 attach_entity_cfs_rq(se);
12794 struct sched_entity *se = &p->se;
12802 list_move(&se->group_node, &rq->cfs_tasks);
12806 for_each_sched_entity(se) {
12807 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12809 set_next_entity(cfs_rq, se);
12837 /* Tell se's cfs_rq has been changed -- migrated */
12838 p->se.avg.last_update_time = 0;
12851 if (tg->se)
12852 kfree(tg->se[i]);
12856 kfree(tg->se);
12861 struct sched_entity *se;
12868 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
12869 if (!tg->se)
12882 se = kzalloc_node(sizeof(struct sched_entity_stats),
12884 if (!se)
12888 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
12889 init_entity_runnable_average(se);
12902 struct sched_entity *se;
12909 se = tg->se[i];
12912 attach_entity_cfs_rq(se);
12927 if (tg->se[cpu])
12928 remove_entity_load_avg(tg->se[cpu]);
12946 struct sched_entity *se, int cpu,
12956 tg->se[cpu] = se;
12958 /* se could be NULL for root_task_group */
12959 if (!se)
12963 se->cfs_rq = &rq->cfs;
12964 se->depth = 0;
12966 se->cfs_rq = parent->my_q;
12967 se->depth = parent->depth + 1;
12970 se->my_q = cfs_rq;
12972 update_load_set(&se->load, NICE_0_LOAD);
12973 se->parent = parent;
12987 if (!tg->se[0])
12998 struct sched_entity *se = tg->se[i];
13004 for_each_sched_entity(se) {
13005 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
13006 update_cfs_group(se);
13049 struct sched_entity *se = tg->se[i];
13061 if (se->on_rq) {
13062 parent_cfs_rq = cfs_rq_of(se);
13074 for_each_sched_entity(se) {
13075 struct cfs_rq *cfs_rq = cfs_rq_of(se);
13077 if (!se->on_rq)
13119 struct sched_entity *se = &task->se;
13127 rr_interval = NS_TO_JIFFIES(se->slice);