Lines Matching refs:avg (all hits are in kernel/sched/fair.c; each entry gives the source line number, the matching code, and the enclosing function, with a trailing "local" marking a local-variable declaration)

674 	s64 avg = cfs_rq->avg_vruntime;  in avg_vruntime()  local
680 avg += entity_key(cfs_rq, curr) * weight; in avg_vruntime()
686 if (avg < 0) in avg_vruntime()
687 avg -= (load - 1); in avg_vruntime()
688 avg = div_s64(avg, load); in avg_vruntime()
691 return cfs_rq->min_vruntime + avg; in avg_vruntime()
747 s64 avg = cfs_rq->avg_vruntime; in entity_eligible() local
753 avg += entity_key(cfs_rq, curr) * weight; in entity_eligible()
757 return avg >= entity_key(cfs_rq, se) * load; in entity_eligible()
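
The 674-691 and 747-757 hits are the EEVDF zero-lag bookkeeping: avg_vruntime() maintains a load-weighted sum of each entity's vruntime offset from min_vruntime, folds in the currently running entity, and divides by the total weight with rounding toward negative infinity; entity_eligible() compares one entity's weighted key against that same sum. A minimal sketch reassembled from the fragments above; the avg_load accumulator and the curr/on_rq guard are assumed context, not shown in this listing:

/* Sketch only: recombines the listed fragments; cfs_rq->avg_load is an
 * assumed accumulator of the queued entities' scaled weight. */
static u64 avg_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	s64 avg = cfs_rq->avg_vruntime;
	long load = cfs_rq->avg_load;

	if (curr && curr->on_rq) {
		unsigned long weight = scale_load_down(curr->load.weight);

		/* fold in the running entity; it is not part of the tree sums */
		avg += entity_key(cfs_rq, curr) * weight;
		load += weight;
	}

	if (load) {
		/* round toward -inf so a negative average stays pessimistic */
		if (avg < 0)
			avg -= (load - 1);
		avg = div_s64(avg, load);
	}

	return cfs_rq->min_vruntime + avg;
}

entity_eligible() builds the same weighted sum and, per the 757 hit, returns avg >= entity_key(cfs_rq, se) * load, i.e. an entity stays eligible while its weighted key does not exceed the queue average.
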
1061 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
1107 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
1109 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
1122 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1127 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
1128 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
1129 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
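
The post_init_entity_util_avg() hits (1107-1129) show how a new entity's utilization is seeded from its runqueue: the queue's util_avg is scaled by the entity's weight relative to the queue's load_avg, with half the spare capacity, (cpu_scale - util_avg) / 2, acting as a cap. A sketch of just that seeding, assuming the surrounding cap clamp and else branch which the listing does not show:

/* sa points at &se->avg and cap at half the spare capacity, per the hits above */
long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;

if (cfs_rq->avg.util_avg != 0) {
	/* inherit a weight-proportional share of the queue's utilization */
	sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
	sa->util_avg /= (cfs_rq->avg.load_avg + 1);

	if (sa->util_avg > cap)		/* assumed clamp */
		sa->util_avg = cap;
} else {
	sa->util_avg = cap;		/* assumed fallback */
}
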
2714 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
3615 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3616 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3622 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3623 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3625 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in dequeue_load_avg()
3626 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in dequeue_load_avg()
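
The enqueue_load_avg()/dequeue_load_avg() hits (3615-3626) are effectively the whole pair: enqueue adds the entity's PELT load contribution into the runqueue's sched_avg, dequeue removes it with sub_positive() and then clamps load_sum so it never falls below load_avg * PELT_MIN_DIVIDER, keeping the sum/avg pair mutually consistent. Reassembled as a sketch (the static inline signatures are assumed):

static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->avg.load_avg += se->avg.load_avg;
	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
}

static inline void dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
	/* don't let the sum decay below what the average implies */
	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
				     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
}
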
3771 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3773 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3888 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
3995 return u64_u32_load_copy(cfs_rq->avg.last_update_time, in cfs_rq_last_update_time()
4030 if (!load_avg_is_decayed(&cfs_rq->avg)) in cfs_rq_is_decayed()
4055 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
4065 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
4090 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
4097 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
4170 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
4181 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_util()
4185 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
4186 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
4187 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
4188 se->avg.util_sum = new_sum; in update_tg_cfs_util()
4191 add_positive(&cfs_rq->avg.util_avg, delta_avg); in update_tg_cfs_util()
4192 add_positive(&cfs_rq->avg.util_sum, delta_sum); in update_tg_cfs_util()
4195 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in update_tg_cfs_util()
4196 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in update_tg_cfs_util()
4202 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
4213 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_runnable()
4216 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
4217 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
4218 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
4219 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
4222 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); in update_tg_cfs_runnable()
4223 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); in update_tg_cfs_runnable()
4225 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in update_tg_cfs_runnable()
4226 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in update_tg_cfs_runnable()
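
update_tg_cfs_util() (4170-4196) and update_tg_cfs_runnable() (4202-4226) follow the same propagation shape: copy the child runqueue's average onto the group entity, rebuild the matching _sum from the parent's PELT divider, then apply the avg/sum deltas to the parent with add_positive() and the usual PELT_MIN_DIVIDER floor. A sketch of the util variant; the early return when nothing changed is assumed, and the runnable variant mirrors it field for field:

static inline void update_tg_cfs_util(struct cfs_rq *cfs_rq,
				      struct sched_entity *se,
				      struct cfs_rq *gcfs_rq)
{
	long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
	u32 new_sum, divider;

	if (!delta_avg)		/* assumed guard: child utilization unchanged */
		return;

	divider = get_pelt_divider(&cfs_rq->avg);

	/* the group entity mirrors its child runqueue */
	se->avg.util_avg = gcfs_rq->avg.util_avg;
	new_sum = se->avg.util_avg * divider;
	delta_sum = (long)new_sum - (long)se->avg.util_sum;
	se->avg.util_sum = new_sum;

	/* the change is then pushed up into the parent runqueue */
	add_positive(&cfs_rq->avg.util_avg, delta_avg);
	add_positive(&cfs_rq->avg.util_sum, delta_sum);

	cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
				     cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
}

update_tg_cfs_load() (4247-4294) does the same for weighted load, but first has to re-derive a runnable_sum for the group entity, which is why its hits are more involved.
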
4247 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_load()
4254 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
4262 load_sum = div_u64(gcfs_rq->avg.load_sum, in update_tg_cfs_load()
4267 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
4276 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
4282 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
4286 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
4288 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
4289 se->avg.load_avg = load_avg; in update_tg_cfs_load()
4290 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_load()
4291 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_load()
4293 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in update_tg_cfs_load()
4294 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in update_tg_cfs_load()
4343 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
4382 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
4476 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
4481 u32 divider = get_pelt_divider(&cfs_rq->avg); in update_cfs_rq_load_avg()
4550 u32 divider = get_pelt_divider(&cfs_rq->avg); in attach_entity_load_avg()
4559 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4560 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4568 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4570 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4572 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4573 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4574 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4576 se->avg.load_sum = 1; in attach_entity_load_avg()
4579 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4580 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4581 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4582 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4584 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4602 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4603 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4605 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in detach_entity_load_avg()
4606 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4608 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4609 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4611 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in detach_entity_load_avg()
4612 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4614 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
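
attach_entity_load_avg() (4550-4584) and detach_entity_load_avg() (4602-4614) are the other mirror pair: attach synchronizes the entity's PELT window with the runqueue (last_update_time, period_contrib), rebuilds the entity's *_sum fields from its *_avg values and the runqueue divider, then adds util, runnable and load contributions in and propagates load_sum up the task-group hierarchy; detach subtracts the same pieces with sub_positive() plus the PELT_MIN_DIVIDER floors and propagates -load_sum. A sketch of the detach side; whether dequeue_load_avg() is called from here is an assumption, the rest recombines the listed fragments:

static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* assumed: weighted load is peeled off via the 3622-3626 helper */
	dequeue_load_avg(cfs_rq, se);

	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
	cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
				     cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);

	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
					 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);

	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
}
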
4639 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4645 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4704 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4705 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4706 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4712 return cfs_rq->avg.runnable_avg; in cfs_rq_runnable_avg()
4717 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
4724 return READ_ONCE(p->se.avg.util_avg); in task_util()
4729 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4748 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_enqueue()
4750 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_enqueue()
4764 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_dequeue()
4766 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_dequeue()
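
The util_est hits (4724-4766 and below) track estimated utilization alongside the raw PELT signal: task_util() is just READ_ONCE(p->se.avg.util_avg), _task_util_est() reads the task's util_est snapshot, and the enqueue/dequeue helpers keep a per-runqueue total in cfs_rq->avg.util_est.enqueued. A sketch of the enqueue side; only the read and the WRITE_ONCE appear in the listing, the += of the task's estimate is assumed:

static inline void util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p)
{
	unsigned int enqueued;

	/* add this task's estimated utilization to the runqueue total */
	enqueued  = cfs_rq->avg.util_est.enqueued;
	enqueued += _task_util_est(p);	/* assumed, mirrored on the dequeue side */
	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
}

util_est_dequeue() presumably subtracts the same estimate, and util_est_update() (4807-4867) reads and rewrites p->se.avg.util_est itself.
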
4807 ue = p->se.avg.util_est; in util_est_update()
4867 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
6831 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6835 load = READ_ONCE(cfs_rq->avg.load_avg); in cpu_load_without()
6854 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6858 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_runnable_without()
6861 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
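
The *_without() helpers at 6831-6861 estimate what a CPU's load or runnable average would look like with task p removed: if p is not accounted on that CPU (different task_cpu(), or last_update_time == 0 meaning it was never attached), the raw runqueue value is used; otherwise p's own contribution is subtracted, saturating at zero via lsub_positive(). A sketch of the runnable variant, with the cfs_rq lookup and the early-return shape assumed:

static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	unsigned long runnable;

	/* task not queued here, or never attached to PELT: nothing to discount */
	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
		return READ_ONCE(cfs_rq->avg.runnable_avg);

	runnable = READ_ONCE(cfs_rq->avg.runnable_avg);

	/* discount this task's contribution, clamping at zero */
	lsub_positive(&runnable, p->se.avg.runnable_avg);

	return runnable;
}

cpu_util() and cpu_util_without() (7590-7677) apply the same pattern to util_avg, and per the 7594/7612 hits cpu_util() also reads runnable_avg and util_est.enqueued.
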
7590 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
7594 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_util()
7612 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); in cpu_util()
7677 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
8140 se->avg.last_update_time = 0; in migrate_task_rq_fair()
9133 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
9136 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
9279 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
9292 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9310 return p->se.avg.load_avg; in task_h_load()
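
task_h_load() shows up twice because it has a group-scheduling and a flat variant: with task groups, the 9279/9292 hits show the task's load_avg being scaled by the hierarchical h_load of its cfs_rq; without groups, the 9310 hit simply returns p->se.avg.load_avg. A sketch of the group variant; the call to update_cfs_rq_h_load() and the "+ 1" in the divisor are assumptions, since the continuation line is not part of this listing:

static unsigned long task_h_load(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_cfs_rq_h_load(cfs_rq);			/* assumed refresh of cfs_rq->h_load */
	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
			cfs_rq_load_avg(cfs_rq) + 1);	/* "+ 1" assumed, guards against div-by-zero */
}
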
10062 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
12612 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
12721 p->se.avg.last_update_time = 0; in task_change_group_fair()