// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Move PELT related code from fair.c into this pelt.c file
 * Author: Vincent Guittot <vincent.guittot@linaro.org>
 */

#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

#include <trace/events/sched.h>

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table that covers y^n (n < PERIOD), which
	 * gives us a constant-time decay_load().
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
	return val;
}

static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3; /* y^0 == 1 */

	/*
	 * c1 = d1 y^p
	 */
	c1 = decay_load((u64)d1, periods);

	/*
	 *            p-1
	 * c2 = 1024 \Sum y^n
	 *            n=1
	 *
	 *              inf        inf
	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
	 *              n=0        n=p
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}
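/*
 * Illustrative sketch of the math above (an editor-added, compiled-out
 * userspace example, not part of the kernel build): it recomputes the
 * inverse table entry with floating point instead of using
 * runnable_avg_yN_inv[], and assumes LOAD_AVG_PERIOD == 32. The name
 * sketch_decay() is hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <math.h>

#define SKETCH_PERIOD	32	/* stands in for LOAD_AVG_PERIOD */

static uint64_t sketch_decay(uint64_t val, unsigned int n)
{
	/* anything decayed through more than 63 half-lives is noise */
	if (n > SKETCH_PERIOD * 63)
		return 0;

	/* y^SKETCH_PERIOD == 1/2: fold whole half-lives into a shift */
	val >>= n / SKETCH_PERIOD;
	n %= SKETCH_PERIOD;

	/* remaining factor y^n (n < PERIOD) as 32-bit fixed point */
	double f = pow(0.5, (double)n / SKETCH_PERIOD) * 4294967296.0;
	uint32_t inv = f >= 4294967295.0 ? UINT32_MAX : (uint32_t)f;

	/* mirrors mul_u64_u32_shr(val, inv, 32); __int128 avoids overflow */
	return (uint64_t)(((unsigned __int128)val * inv) >> 32);
}

int main(void)
{
	/* one full period halves the value: prints ~23871 */
	printf("%llu\n", (unsigned long long)sketch_decay(47742, 32));
	return 0;
}
#endif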
#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +					(Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_load_sum =
			decay_load(sa->runnable_load_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		if (load) {
			/*
			 * This relies on the:
			 *
			 *   if (!load)
			 *       runnable = running = 0;
			 *
			 * clause from ___update_load_sum(); this results in
			 * the below usage of @contrib disappearing entirely,
			 * so no point in calculating it.
			 */
			contrib = __accumulate_pelt_segments(periods,
					1024 - sa->period_contrib, delta);
		}
	}
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_load_sum += runnable * contrib;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

	return periods;
}
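/*
 * Worked example for the diagram above (editor-added, compiled out, not
 * kernel code): assume sa->period_contrib == 300us from the previous
 * update and a new delta of 3000us. The numbers and the helper
 * sketch_contrib() are hypothetical; it reuses sketch_decay() from the
 * earlier sketch and hard-codes LOAD_AVG_MAX == 47742.
 */
#if 0
static uint32_t sketch_contrib(void)
{
	uint32_t period_contrib = 300;	/* leftover from the last update */
	uint64_t delta = 3000;		/* newly elapsed time, in us */
	uint32_t d1, d3, c1, c2, c3;
	uint64_t periods;

	delta += period_contrib;	/* 3300 */
	periods = delta / 1024;		/* p == 3 boundaries crossed */
	d1 = 1024 - period_contrib;	/* 724us closing the old period */
	d3 = delta % 1024;		/* 228us into the current period */

	c1 = (uint32_t)sketch_decay(d1, periods);	/* d1 * y^3 */
	/* 1024 * (y^1 + y^2), via the infinite-sum identity above */
	c2 = 47742 - (uint32_t)sketch_decay(47742, periods) - 1024;
	c3 = d3;					/* d3 * y^0 */

	return c1 + c2 + c3;	/* the @contrib accumulate_sum() computes */
}
#endif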
/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series. To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		   unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;

	sa->last_update_time += delta << 10;

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has already been dequeued but cfs_rq->curr still points to it.
	 * This means that weight will be 0 but not running for a sched_entity
	 * but also for a cfs_rq if the latter becomes idle. As an example,
	 * this happens during idle_balance() which calls
	 * update_blocked_averages().
	 *
	 * Also see the comment in accumulate_sum().
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;

	return 1;
}

static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
{
	u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}

/*
 * sched_entity:
 *
 *   task:
 *     se_runnable() == se_weight()
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
 *
 *   load_sum := runnable_sum
 *   load_avg = se_weight(se) * runnable_avg
 *
 *   runnable_load_sum := runnable_sum
 *   runnable_load_avg = se_runnable(se) * runnable_avg
 *
 * XXX collapse load_sum and runnable_load_sum
 *
 * cfs_rq:
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 *
 *   runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
 *   runnable_load_avg = \Sum se->avg.runnable_load_avg
 */

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
				cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		cfs_se_util_change(&se->avg);
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
				scale_load_down(cfs_rq->runnable_weight),
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1, 1);
		trace_pelt_cfs_tp(cfs_rq);
		return 1;
	}

	return 0;
}
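/*
 * Editor-added sketch (compiled out, not kernel code) of why
 * ___update_load_avg() divides by LOAD_AVG_MAX - 1024 +
 * sa->period_contrib: a 100%-running entity saturates util_sum at the
 * fully decayed geometric series minus the not-yet-elapsed part of the
 * current period, so that maximum maps exactly to SCHED_CAPACITY_SCALE.
 * The numbers and sketch_divider() are hypothetical.
 */
#if 0
#include <stdint.h>
#include <assert.h>

static void sketch_divider(void)
{
	uint32_t period_contrib = 512;	/* halfway into the current period */
	/* LOAD_AVG_MAX == 47742; the current period contributed only 512us */
	uint32_t divider = 47742 - 1024 + period_contrib;
	/* util_sum accrues contrib << SCHED_CAPACITY_SHIFT (== 10) */
	uint64_t util_sum_max = (uint64_t)divider << 10;

	/* an always-running entity therefore reads as full capacity */
	assert(util_sum_max / divider == 1024);
}
#endif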
328 * 329 */ 330 331 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) 332 { 333 if (___update_load_sum(now, &rq->avg_rt, 334 running, 335 running, 336 running)) { 337 338 ___update_load_avg(&rq->avg_rt, 1, 1); 339 trace_pelt_rt_tp(rq); 340 return 1; 341 } 342 343 return 0; 344 } 345 346 /* 347 * dl_rq: 348 * 349 * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked 350 * util_sum = cpu_scale * load_sum 351 * runnable_load_sum = load_sum 352 * 353 */ 354 355 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) 356 { 357 if (___update_load_sum(now, &rq->avg_dl, 358 running, 359 running, 360 running)) { 361 362 ___update_load_avg(&rq->avg_dl, 1, 1); 363 trace_pelt_dl_tp(rq); 364 return 1; 365 } 366 367 return 0; 368 } 369 370 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 371 /* 372 * irq: 373 * 374 * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked 375 * util_sum = cpu_scale * load_sum 376 * runnable_load_sum = load_sum 377 * 378 */ 379 380 int update_irq_load_avg(struct rq *rq, u64 running) 381 { 382 int ret = 0; 383 384 /* 385 * We can't use clock_pelt because irq time is not accounted in 386 * clock_task. Instead we directly scale the running time to 387 * reflect the real amount of computation 388 */ 389 running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq))); 390 running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq))); 391 392 /* 393 * We know the time that has been used by interrupt since last update 394 * but we don't when. Let be pessimistic and assume that interrupt has 395 * happened just before the update. This is not so far from reality 396 * because interrupt will most probably wake up task and trig an update 397 * of rq clock during which the metric is updated. 398 * We start to decay with normal context time and then we add the 399 * interrupt context time. 400 * We can safely remove running from rq->clock because 401 * rq->clock += delta with delta >= running 402 */ 403 ret = ___update_load_sum(rq->clock - running, &rq->avg_irq, 404 0, 405 0, 406 0); 407 ret += ___update_load_sum(rq->clock, &rq->avg_irq, 408 1, 409 1, 410 1); 411 412 if (ret) { 413 ___update_load_avg(&rq->avg_irq, 1, 1); 414 trace_pelt_irq_tp(rq); 415 } 416 417 return ret; 418 } 419 #endif 420