/* kernel/sched/pelt.h (openbmc/linux, xref revision e2f3e35f) */
#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

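/*
 * Thermal pressure: a PELT signal tracking the capacity currently lost to
 * thermal throttling, so that capacity checks see a decayed view of the
 * clamped capacity rather than an instantaneous one.
 */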
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 thermal_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif

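/*
 * IRQ/steal time: rq->avg_irq accounts time spent in IRQ context and
 * paravirt steal time, i.e. time stolen from the tasks running on the CPU
 * and therefore not available as compute capacity to them.
 */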
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

#define PELT_MIN_DIVIDER	(LOAD_AVG_MAX - 1024)

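/*
 * The divider for turning a *_sum into a *_avg: the geometric series can
 * accumulate at most LOAD_AVG_MAX - 1024 over complete 1024us periods, plus
 * whatever has been accrued in the current, partial period (period_contrib).
 */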
static inline u32 get_pelt_divider(struct sched_avg *avg)
{
	return PELT_MIN_DIVIDER + avg->period_contrib;
}

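/*
 * util_est interaction: clearing UTIL_AVG_UNCHANGED in util_est.enqueued
 * records that the sched_entity's util_avg has been updated since the
 * estimate was last taken, so the utilization estimation code knows the
 * estimate may need refreshing.
 */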
static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid the store if the flag has already been cleared */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Clear the flag to report that util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}

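/*
 * The PELT clock the signals are accrued against: rq->clock_pelt minus the
 * idle time "lost" while the rq was running fully busy (see
 * update_idle_rq_clock_pelt() below).
 */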
static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

/* The rq is idle, we can sync to clock_task */
static inline void _update_idle_rq_clock_pelt(struct rq *rq)
{
	rq->clock_pelt = rq_clock_task(rq);

	u64_u32_store(rq->clock_idle, rq_clock(rq));
	/* Paired with smp_rmb in migrate_se_pelt_lag() */
	smp_wmb();
	u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
}

/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time, but then syncs back to
 * clock_task when the rq is idle.
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
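/*
 * Worked example (illustrative numbers, not from the source): with
 * arch_scale_cpu_capacity() = 512 (half of SCHED_CAPACITY_SCALE = 1024) and
 * the CPU at full frequency, cap_scale() halves the delta, so a 2ms
 * wall-clock running delta advances clock_pelt by only ~1ms, matching the
 * "half capacity" row of the diagram above.
 */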
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		_update_idle_rq_clock_pelt(rq);
		return;
	}

	/*
	 * When a rq runs at a lower compute capacity, it will need
	 * more time to do the same amount of work than it would at
	 * max capacity. In order to be invariant, we scale the delta
	 * to reflect how much work has really been done.
	 * Running longer results in stealing idle time, which
	 * disturbs the load signal compared to max capacity. This
	 * stolen idle time will be automatically reflected when the
	 * rq becomes idle and the clock is synced with
	 * rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation.
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}

/*
 * When the rq becomes idle, we have to check whether it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * As an optimization and to account for rounding, we don't take the position
 * in the current window (period_contrib) into account and we use the upper
 * bound of util_sum to decide.
 */
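/*
 * For scale: assuming the default 32ms half-life, for which sched-pelt.h
 * generates LOAD_AVG_MAX = 47742, the divider below evaluates to
 * (47742 - 1024) * 1024 - 47742 = 47791490.
 */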
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;
	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle
	 * phase would be present at max capacity. As soon as the
	 * utilization of a rq has reached the maximum value, it is
	 * considered as an always running rq without idle time to
	 * steal. This potential idle time is considered as lost in
	 * this case. We keep track of this lost idle time compared
	 * to rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;

	_update_idle_rq_clock_pelt(rq);
}

#ifdef CONFIG_CFS_BANDWIDTH
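/*
 * Snapshot, taken at idle entry, of how much PELT time this cfs_rq has spent
 * throttled; U64_MAX flags "currently throttled". migrate_se_pelt_lag() uses
 * this to estimate how far the blocked entities' signals lag behind.
 */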
static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	u64 throttled;

	if (unlikely(cfs_rq->throttle_count))
		throttled = U64_MAX;
	else
		throttled = cfs_rq->throttled_clock_pelt_time;

	u64_u32_store(cfs_rq->throttled_pelt_idle, throttled);
}

/* rq_clock_pelt(), normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
}
#else
static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else
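/*
 * !CONFIG_SMP: these no-op stubs let the scheduler code call the PELT
 * helpers unconditionally when building without SMP support.
 */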

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
#endif