// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 *  Move PELT-related code from fair.c into this pelt.c file
 *  Author: Vincent Guittot <vincent.guittot@linaro.org>
 */

#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * Since y^PERIOD = 1/2, we can split
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * and use a look-up table that covers y^n (n < PERIOD),
	 * which makes decay_load() constant time.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
	return val;
}
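/*
 * Worked example (editor's illustration, not part of the original
 * source): with y^32 == 1/2, every block of LOAD_AVG_PERIOD periods
 * simply halves the value, and the look-up table supplies the
 * fractional remainder:
 *
 *   decay_load(4096, 32) == 2048      (one half-life)
 *   decay_load(4096, 64) == 1024      (two half-lives)
 *   decay_load(4096, 16) ~= 2896      (4096 * y^16, y^16 == 1/sqrt(2))
 *
 * The n > LOAD_AVG_PERIOD * 63 cut-off returns 0 directly: the value
 * would be right-shifted by at least 63 bits anyway, and the check
 * also keeps the shift count within the width of a u64.
 */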

static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3; /* y^0 == 1 */

	/*
	 * c1 = d1 y^p
	 */
	c1 = decay_load((u64)d1, periods);

	/*
	 *            p-1
	 * c2 = 1024 \Sum y^n
	 *            n=1
	 *
	 *              inf        inf
	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
	 *              n=0        n=p
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}
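/*
 * Worked example (editor's illustration, not part of the original
 * source): for p = 2 periods crossed, with d1 = 512 and d3 = 256:
 *
 *   c1 = decay_load(512, 2)                  ~=  490  (512 * y^2)
 *   c2 = LOAD_AVG_MAX
 *        - decay_load(LOAD_AVG_MAX, 2)
 *        - 1024                              ~= 1000  (about 1024 * y)
 *   c3 = 256
 *
 * giving a contribution of roughly 1746 for the 1792us that elapsed;
 * the shortfall is the decay already applied to the older segments.
 */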

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +					(Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_load_sum =
			decay_load(sa->runnable_load_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		contrib = __accumulate_pelt_segments(periods,
				1024 - sa->period_contrib, delta);
	}
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_load_sum += runnable * contrib;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

	return periods;
}
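/*
 * Worked example (editor's illustration, not part of the original
 * source): with sa->period_contrib == 800 and an incoming delta of
 * 3000 (~us units):
 *
 *   delta   = 3000 + 800 = 3800  ->  periods = 3
 *   d1      = 1024 - 800 =  224  (closes the old partial period)
 *   d3      = 3800 % 1024 = 728  (opens the new partial period)
 *   contrib = __accumulate_pelt_segments(3, 224, 728)
 *
 * and sa->period_contrib becomes 728 for the next update.
 */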

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *      p0            p1           p2
 *     (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our coefficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have a new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
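 *
 * Worked example (editor's illustration, not part of the original
 * source): an entity runnable for the full last 32 periods and idle
 * before that accumulates
 *
 *   1024 * (y^0 + y^1 + ... + y^31) = 1024 * (1 - y^32)/(1 - y)
 *                                  ~= LOAD_AVG_MAX / 2 ~= 23871
 *
 * i.e. about half of the LOAD_AVG_MAX (~47742) that an always-runnable
 * entity converges to.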
 */
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		  unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;

	sa->last_update_time += delta << 10;
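	/*
	 * Worked example (editor's illustration, not part of the
	 * original source): delta = 5000ns yields 5000 >> 10 = 4 units
	 * of ~1us; the remaining 5000 - (4 << 10) = 904ns are not added
	 * to last_update_time and are picked up by the next update.
	 */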

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has already been dequeued while cfs_rq->curr still points to it.
	 * This means that weight can be 0 while running is still set, both for
	 * a sched_entity and for a cfs_rq if the latter becomes idle. As an
	 * example, this happens during idle_balance(), which calls
	 * update_blocked_averages().
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;

	return 1;
}

static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
{
	u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
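/*
 * Worked example (editor's illustration, not part of the original
 * source): the divider is the maximum *_sum attainable given the
 * current partial period. LOAD_AVG_MAX assumes a full 1024us in the
 * current period, so the missing 1024 - period_contrib is subtracted
 * back out. For an always-running entity, util_sum converges to
 * roughly divider << SCHED_CAPACITY_SHIFT, hence
 *
 *   util_avg = util_sum / divider ~= 1024
 *
 * i.e. full capacity (SCHED_CAPACITY_SCALE).
 */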

/*
 * sched_entity:
 *
 *   task:
 *     se_runnable() == se_weight()
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
 *
 *   load_sum := runnable_sum
 *   load_avg = se_weight(se) * runnable_avg
 *
 *   runnable_load_sum := runnable_sum
 *   runnable_load_avg = se_runnable(se) * runnable_avg
 *
 * XXX collapse load_sum and runnable_load_sum
 *
 * cfs_rq:
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 *
 *   runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
 *   runnable_load_avg = \Sum se->avg.runnable_load_avg
 */
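/*
 * Worked example (editor's illustration, not part of the original
 * source): a task with se_weight() == 1024 (nice 0) that is runnable
 * about half the time settles at a runnable fraction of ~0.5, so
 *
 *   load_avg = se_weight(se) * runnable_avg ~= 512
 *
 * while an always-runnable task of the same weight settles at
 * load_avg ~= 1024.
 */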

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		return 1;
	}

	return 0;
}

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
				cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		cfs_se_util_change(&se->avg);
		return 1;
	}

	return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
				scale_load_down(cfs_rq->runnable_weight),
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1, 1);
		return 1;
	}

	return 0;
}

/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 *   load_avg and runnable_load_avg are not supported and meaningless.
 */

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_rt,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_rt, 1, 1);
		return 1;
	}

	return 0;
}

/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 */

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_dl,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_dl, 1, 1);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 */

int update_irq_load_avg(struct rq *rq, u64 running)
{
	int ret = 0;

	/*
	 * We can't use clock_pelt because irq time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
	running = cap_scale(running, arch_scale_cpu_capacity(NULL, cpu_of(rq)));

	/*
	 * We know how much time has been used by interrupts since the last
	 * update, but we don't know when. Let's be pessimistic and assume the
	 * interrupts happened just before this update. This is not far from
	 * reality, because an interrupt will most probably wake up a task and
	 * trigger an update of the rq clock, during which the metric is
	 * updated.
	 * We start to decay with normal context time and then we add the
	 * interrupt context time.
	 * We can safely remove running from rq->clock because
	 * rq->clock += delta with delta >= running.
	 */
	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
				0,
				0,
				0);
	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
				1,
				1,
				1);
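
	/*
	 * Worked example (editor's illustration, not part of the
	 * original source): if rq->clock advanced by 10ms since the
	 * last update and 1ms of that was spent in interrupts, the
	 * first call above decays avg_irq across the 9ms of normal
	 * context (load/runnable/running all 0), and the second
	 * accumulates the trailing 1ms as busy time (all 1).
	 */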

	if (ret)
		___update_load_avg(&rq->avg_irq, 1, 1);

	return ret;
}
#endif