// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 *  Move PELT related code from fair.c into this pelt.c file
 *  Author: Vincent Guittot <vincent.guittot@linaro.org>
 */

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table which covers y^n (n < PERIOD)
	 * to achieve a constant time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
	return val;
}
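
/*
 * A worked example (illustration only; numbers assume the upstream
 * constants LOAD_AVG_PERIOD == 32 and LOAD_AVG_MAX == 47742, and the
 * upstream runnable_avg_yN_inv[] table):
 *
 *   decay_load(47742, 68):
 *     68 / 32 == 2  ->  val >>= 2        (two full half-life periods)
 *     68 % 32 == 4  ->  val = val * y^4  (via runnable_avg_yN_inv[4])
 *
 *   i.e. 47742 * y^68 == (47742 >> 2) * y^4 ~= 11935 * 0.917 ~= 10944
 */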

static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3; /* y^0 == 1 */

	/*
	 * c1 = d1 y^p
	 */
	c1 = decay_load((u64)d1, periods);

	/*
	 *            p-1
	 * c2 = 1024 \Sum y^n
	 *            n=1
	 *
	 *              inf        inf
	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
	 *              n=0        n=p
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}
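
/*
 * A worked example (illustration only, assuming LOAD_AVG_MAX == 47742):
 *
 *   __accumulate_pelt_segments(2, 512, 300):
 *     c1 = decay_load(512, 2)                  ~= 512 * y^2 ~= 490
 *     c2 = 47742 - decay_load(47742, 2) - 1024 ~= 1024 * y  ~= 1002
 *     c3 = 300                                    (y^0 == 1)
 *
 *   giving roughly 490 + 1002 + 300 ~= 1792 units of accrued time.
 */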

/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +					(Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_sum =
			decay_load(sa->runnable_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		if (load) {
			/*
			 * This relies on the:
			 *
			 * if (!load)
			 *	runnable = running = 0;
			 *
			 * clause from ___update_load_sum(); this results in
			 * the below usage of @contrib disappearing entirely,
			 * so no point in calculating it.
			 */
			contrib = __accumulate_pelt_segments(periods,
					1024 - sa->period_contrib, delta);
		}
	}
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

	return periods;
}
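
/*
 * A worked trace of accumulate_sum() (illustration only): with
 * sa->period_contrib == 512 and an incoming delta == 2100 (1024ns units):
 *
 *   delta   = 2100 + 512 = 2612
 *   periods = 2612 / 1024 = 2   -> old *_sum values decay by y^2
 *   delta  %= 1024 -> 564       -> d3, the head of the new open period
 *   contrib = __accumulate_pelt_segments(2, 1024 - 512, 564)
 *
 * so d1 == 512 closes the old period, one full 1024-unit period sits in
 * between, and 564 units land in the current, still-open period
 * (512 + 1024 + 564 == 2100).
 */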

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *      p0            p1           p2
 *     (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
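
/*
 * A quick numerical sanity check (illustration only): y = 0.5^(1/32)
 * ~= 0.97857, so a segment ~8 periods (~8ms) old still carries
 * y^8 ~= 0.841 of its weight, one 32 periods old carries exactly half,
 * and one 64 periods old carries a quarter.
 */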
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		  unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;

	sa->last_update_time += delta << 10;

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has been already dequeued but cfs_rq->curr still points to it.
	 * This means that weight will be 0 but not running for a sched_entity
	 * but also for a cfs_rq if the latter becomes idle. As an example,
	 * this happens during idle_balance() which calls
	 * update_blocked_averages().
	 *
	 * Also see the comment in accumulate_sum().
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;

	return 1;
}
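
/*
 * A worked example of the unit handling above (illustration only): with
 * delta == 2500000ns, delta >> 10 == 2441 units of ~1us each, and
 * last_update_time advances by 2441 << 10 == 2499584ns; the 416ns
 * remainder is deliberately left behind so it gets accounted on the next
 * update rather than lost to truncation.
 */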

/*
 * When syncing *_avg with *_sum, we must take into account the current
 * position in the PELT segment otherwise the remaining part of the segment
 * will be considered as idle time whereas it's not yet elapsed and this will
 * generate unwanted oscillation in the range [1002..1024[.
 *
 * The max value of *_sum varies with the position in the time segment and is
 * equal to:
 *
 *   LOAD_AVG_MAX*y + sa->period_contrib
 *
 * which can be simplified into:
 *
 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
 *
 * because LOAD_AVG_MAX*y == LOAD_AVG_MAX-1024
 *
 * The same care must be taken when a sched entity is added, updated or
 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
 * and the cfs rq, to which they are attached, have the same position in the
 * time segment because they use the same clock. This means that we can use
 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
 * if it's more convenient.
 */
static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load)
{
	u32 divider = get_pelt_divider(sa);

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
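
/*
 * A worked example of the divider (illustration only, assuming
 * LOAD_AVG_MAX == 47742 and the formula in the comment above): with
 * sa->period_contrib == 512 the divider is 47742 - 1024 + 512 == 47230.
 * An always-running entity accrues util_sum == divider <<
 * SCHED_CAPACITY_SHIFT, so util_avg saturates at (47230 << 10) / 47230
 * == 1024, i.e. full capacity, regardless of where in the segment the
 * sync happens.
 */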

/*
 * sched_entity:
 *
 *   task:
 *     se_weight()   = se->load.weight
 *     se_runnable() = !!on_rq
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = grq->h_nr_running
 *
 *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
 *   runnable_avg = runnable_sum
 *
 *   load_sum := runnable
 *   load_avg = se_weight(se) * load_sum
 *
 * cfs_rq:
 *
 *   runnable_sum = \Sum se->avg.runnable_sum
 *   runnable_avg = \Sum se->avg.runnable_avg
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 */

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se));
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
				cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se));
		cfs_se_util_change(&se->avg);
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
				cfs_rq->h_nr_running,
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1);
		trace_pelt_cfs_tp(cfs_rq);
		return 1;
	}

	return 0;
}

/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_rt,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_rt, 1);
		trace_pelt_rt_tp(rq);
		return 1;
	}

	return 0;
}

/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_dl,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_dl, 1);
		trace_pelt_dl_tp(rq);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
/*
 * thermal:
 *
 *   load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
 *
 *   util_avg and runnable_load_avg are not supported and meaningless.
 *
 * Unlike rt/dl utilization tracking, which tracks the time spent by a cpu
 * running a rt/dl task through util_avg, the average thermal pressure is
 * tracked through load_avg. This is because the thermal pressure signal is
 * a time weighted "delta" capacity unlike util_avg which is binary.
 * "delta capacity" =  actual capacity  -
 *			capped capacity of a cpu due to a thermal event.
 */

int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	if (___update_load_sum(now, &rq->avg_thermal,
			       capacity,
			       capacity,
			       capacity)) {
		___update_load_avg(&rq->avg_thermal, 1);
		trace_pelt_thermal_tp(rq);
		return 1;
	}

	return 0;
}
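
/*
 * A worked example (illustration only, per the "delta capacity"
 * definition above): on a CPU whose original capacity is 1024, a thermal
 * cap to 768 means update_thermal_load_avg() is fed the lost capacity,
 * i.e. 1024 - 768 == 256, and avg_thermal.load_avg then converges
 * towards 256 for as long as the cap stays in place.
 */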
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 *
 */

int update_irq_load_avg(struct rq *rq, u64 running)
{
	int ret = 0;

	/*
	 * We can't use clock_pelt because irq time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));

	/*
	 * We know the time that has been used by interrupts since the last
	 * update but we don't know when. Let's be pessimistic and assume that
	 * interrupts have happened just before the update. This is not so far
	 * from reality because an interrupt will most probably wake up a task
	 * and trigger an update of the rq clock, during which the metric is
	 * updated.
	 * We start to decay with normal context time and then we add the
	 * interrupt context time.
	 * We can safely remove running from rq->clock because
	 * rq->clock += delta with delta >= running
	 */
	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
				0,
				0,
				0);
	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
				1,
				1,
				1);

	if (ret) {
		___update_load_avg(&rq->avg_irq, 1);
		trace_pelt_irq_tp(rq);
	}

	return ret;
}
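
/*
 * A worked example of the two-step update above (illustration only): if
 * rq->clock has advanced by 5ms since the last update and interrupts used
 * 1ms of that, the first ___update_load_sum() call decays avg_irq across
 * the 4ms treated as idle (up to rq->clock - running), and the second
 * call accrues the trailing 1ms as fully busy (up to rq->clock).
 */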
#endif