// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */
#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
 * Applies only when SCHED_IDLE tasks compete with normal tasks.
 *
 * (default: 0.75 msec)
 */
unsigned int sysctl_sched_idle_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;
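/*
 * A worked example of the scaling above (a sketch, assuming the default
 * SCHED_TUNABLESCALING_LOG on an 8-CPU machine): the factor is
 * 1 + ilog2(8) = 4, so sysctl_sched_latency becomes 4 * 6ms = 24ms and
 * sysctl_sched_min_granularity becomes 4 * 0.75ms = 3ms. Their ratio,
 * and hence sched_nr_latency, stays 6ms / 0.75ms = 8. Machines with
 * more than 8 CPUs are clamped to the same factor by
 * get_update_sysctl_factor() below.
 */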
/*
 * SCHED_OTHER wake-up granularity.
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 *
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
	int _shift = 0;

	if (kstrtoint(str, 0, &_shift))
		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");

	sched_thermal_decay_shift = clamp(_shift, 0, 10);
	return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);

#ifdef CONFIG_SMP
/*
 * For asym packing, by default the lower numbered CPU has higher priority.
 */
int __weak arch_asym_cpu_priority(int cpu)
{
	return -cpu;
}

/*
 * The margin used when comparing utilization with CPU capacity.
 *
 * (default: ~20%)
 */
#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

/*
 * The margin used when comparing CPU capacities.
 * Is 'cap1' noticeably greater than 'cap2'?
 *
 * (default: ~5%)
 */
#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
#endif

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}
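/*
 * A worked example of the capacity margins above (a sketch, assuming a
 * CPU capacity of 1024): fits_capacity(800, 1024) holds because
 * 800 * 1280 = 1024000 < 1024 * 1024 = 1048576, while
 * fits_capacity(820, 1024) does not (820 * 1280 = 1049600), i.e. the
 * effective cut-off sits at 1024/1.25 = ~80% utilization. Likewise
 * capacity_greater(1024, 1000) is false since 1024 * 1024 = 1048576 <
 * 1000 * 1078 = 1078000; cap1 must exceed cap2 by more than ~5% to
 * count as noticeably greater.
 */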
/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void __init sched_init_granularity(void)
{
	update_sysctl();
}

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}
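/*
 * A worked example of the fixed-point division used by __calc_delta()
 * below (a sketch, assuming 64-bit, where scale_load_down() of
 * NICE_0_LOAD is 1024): for a runqueue weight of 2048 (two nice-0
 * tasks), inv_weight = WMULT_CONST / 2048 ~= 2^21. Then for
 * delta_exec = 1000000ns and weight = NICE_0_LOAD:
 *
 *   fact  = 1024 * inv_weight ~= 2^31
 *   delta = (1000000 * fact) >> 32 ~= 500000ns
 *
 * i.e. delta_exec * 1024/2048 (modulo rounding), computed without a
 * 64-bit division on the hot path.
 */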
/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	u32 fact_hi = (u32)(fact >> 32);
	int shift = WMULT_SHIFT;
	int fs;

	__update_inv_weight(lw);

	if (unlikely(fact_hi)) {
		fs = fls(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	fact = mul_u32_u32(fact, lw->inv_weight);

	fact_hi = (u32)(fact >> 32);
	if (fact_hi) {
		fs = fls(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
	if (!path)
		return;

	if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
		autogroup_path(cfs_rq->tg, path, len);
	else if (cfs_rq && cfs_rq->tg->css.cgroup)
		cgroup_path(cfs_rq->tg->css.cgroup, path, len);
	else
		strlcpy(path, "(null)", len);
}

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	int cpu = cpu_of(rq);

	if (cfs_rq->on_list)
		return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;

	cfs_rq->on_list = 1;

	/*
	 * Ensure we either appear before our parent (if already
	 * enqueued) or force our parent to appear after us when it is
	 * enqueued. The fact that we always enqueue bottom-up
	 * reduces this to two cases and a special case for the root
	 * cfs_rq. Furthermore, it also means that we will always reset
	 * tmp_alone_branch either when the branch is connected
	 * to a tree or when we reach the top of the tree.
	 */
	if (cfs_rq->tg->parent &&
	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
		/*
		 * If parent is already on the list, we add the child
		 * just before. Thanks to circular linked property of
		 * the list, this means to put the child at the tail
		 * of the list that starts by parent.
		 */
		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
		/*
		 * The branch is now connected to its tree so we can
		 * reset tmp_alone_branch to the beginning of the
		 * list.
		 */
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		return true;
	}

	if (!cfs_rq->tg->parent) {
		/*
		 * cfs rq without parent should be put
		 * at the tail of the list.
		 */
		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
			&rq->leaf_cfs_rq_list);
		/*
		 * We have reached the top of a tree so we can reset
		 * tmp_alone_branch to the beginning of the list.
		 */
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		return true;
	}

	/*
	 * The parent has not already been added so we want to
	 * make sure that it will be put after us.
	 * tmp_alone_branch points to the beginning of the branch
	 * where we will add parent.
	 */
	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
	/*
	 * update tmp_alone_branch to point to the new beginning
	 * of the branch
	 */
	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
	return false;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		struct rq *rq = rq_of(cfs_rq);

		/*
		 * With cfs_rq being unthrottled/throttled during an enqueue,
		 * it can happen that tmp_alone_branch points to a leaf that
		 * we finally want to delete. In this case, tmp_alone_branch
		 * moves to the prev element but it will point to
		 * rq->leaf_cfs_rq_list at the end of the enqueue.
		 */
		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;

		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
				 leaf_cfs_rq_list)
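/*
 * A worked example of the ordering invariant above (a sketch): take a
 * hierarchy root -> A -> A/B on one CPU, enqueued bottom-up. Adding B
 * first leaves it alone on a dangling branch with tmp_alone_branch
 * pointing at it; adding A inserts A behind B on that branch; adding
 * the root cfs_rq at the tail finally connects the branch and resets
 * tmp_alone_branch to rq->leaf_cfs_rq_list. The resulting walk order,
 * B before A before root, guarantees a child is always visited before
 * its parent.
 */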
/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return se->cfs_rq;

	return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can be made between sibling entities that are
	 * in the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find their ancestors that are
	 * siblings of a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = (*se)->depth;
	pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

static int tg_is_idle(struct task_group *tg)
{
	return tg->idle > 0;
}

static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
{
	return cfs_rq->idle > 0;
}

static int se_is_idle(struct sched_entity *se)
{
	if (entity_is_task(se))
		return task_has_idle_policy(task_of(se));
	return cfs_rq_is_idle(group_cfs_rq(se));
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
	if (path)
		strlcpy(path, "(null)", len);
}

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	return true;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
}

#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

static inline int tg_is_idle(struct task_group *tg)
{
	return 0;
}

static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
{
	return 0;
}

static int se_is_idle(struct sched_entity *se)
{
	return 0;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline bool entity_before(struct sched_entity *a,
				 struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

#define __node_2_se(node) \
	rb_entry((node), struct sched_entity, run_node)
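/*
 * A worked example of the wraparound-safe comparisons above (a sketch):
 * vruntime is an unsigned 64-bit counter and may eventually wrap. With
 * 8-bit arithmetic for brevity, take a = 250 and b = 4, where b has
 * wrapped past a. Then (s64)(a - b) corresponds to (s8)(250 - 4) =
 * (s8)246 = -10 < 0, so entity_before() still correctly reports that a
 * runs before b; a plain a < b comparison would get this case wrong.
 */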
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);

	u64 vruntime = cfs_rq->min_vruntime;

	if (curr) {
		if (curr->on_rq)
			vruntime = curr->vruntime;
		else
			curr = NULL;
	}

	if (leftmost) { /* non-empty tree */
		struct sched_entity *se = __node_2_se(leftmost);

		if (!curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
{
	return entity_before(__node_2_se(a), __node_2_se(b));
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);

	if (!left)
		return NULL;

	return __node_2_se(left);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return __node_2_se(next);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);

	if (!last)
		return NULL;

	return __node_2_se(last);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_update_scaling(void)
{
	unsigned int factor = get_update_sysctl_factor();

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}

static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);
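/*
 * A worked example of the period calculation above (a sketch, assuming
 * the unscaled defaults of 6ms latency, 0.75ms min granularity and
 * hence sched_nr_latency = 8): with 4 runnable tasks the period stays
 * 6ms, so equal-weight tasks get 1.5ms slices; with 16 runnable tasks
 * the period stretches to 16 * 0.75ms = 12ms, keeping each slice at
 * the 0.75ms floor instead of shrinking it to 6ms/16.
 */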
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned int nr_running = cfs_rq->nr_running;
	struct sched_entity *init_se = se;
	unsigned int min_gran;
	u64 slice;

	if (sched_feat(ALT_PERIOD))
		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;

	slice = __sched_period(nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;
		struct cfs_rq *qcfs_rq;

		qcfs_rq = cfs_rq_of(se);
		load = &qcfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = qcfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = __calc_delta(slice, se->load.weight, load);
	}

	if (sched_feat(BASE_SLICE)) {
		if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq))
			min_gran = sysctl_sched_idle_min_granularity;
		else
			min_gran = sysctl_sched_min_granularity;

		slice = max_t(u64, slice, min_gran);
	}

	return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
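/*
 * A worked example of the slice calculation above (a sketch, assuming
 * the unscaled 6ms default period and no group hierarchy): three
 * runnable nice-0 tasks each get s = 6ms * 1024/3072 = 2ms of wall
 * time. If one task instead has twice the weight (2048), it gets
 * 6ms * 2048/4096 = 3ms while the other two get 1.5ms each; its
 * vruntime slice per sched_vslice() is nevertheless 3ms * 1024/2048 =
 * 1.5ms, i.e. equal weight-normalized progress for everyone.
 */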
#include "pelt.h"
#ifdef CONFIG_SMP

static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
static unsigned long capacity_of(int cpu);

/* Give a new sched_entity initial runnable values so its load looks heavy in its infancy */
void init_entity_runnable_average(struct sched_entity *se)
{
	struct sched_avg *sa = &se->avg;

	memset(sa, 0, sizeof(*sa));

	/*
	 * Tasks are initialized with full load to be seen as heavy tasks until
	 * they get a chance to stabilize to their real load level.
	 * Group entities are initialized with zero load to reflect the fact that
	 * nothing has been attached to the task group yet.
	 */
	if (entity_is_task(se))
		sa->load_avg = scale_load_down(se->load.weight);

	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

static void attach_entity_cfs_rq(struct sched_entity *se);

/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the left utilization budget:
 *
 *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task and cpu_scale the CPU capacity.
 *
 * For example, for a CPU with 1024 of capacity, the simplest series from
 * the beginning would be like:
 *
 *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
void post_init_entity_util_avg(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct sched_avg *sa = &se->avg;
	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;

	if (cap > 0) {
		if (cfs_rq->avg.util_avg != 0) {
			sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
			sa->util_avg /= (cfs_rq->avg.load_avg + 1);

			if (sa->util_avg > cap)
				sa->util_avg = cap;
		} else {
			sa->util_avg = cap;
		}
	}

	sa->runnable_avg = sa->util_avg;

	if (p->sched_class != &fair_sched_class) {
		/*
		 * For !fair tasks do:
		 *
		update_cfs_rq_load_avg(now, cfs_rq);
		attach_entity_load_avg(cfs_rq, se);
		switched_from_fair(rq, p);
		 *
		 * such that the next switched_to_fair() has the
		 * expected state.
		 */
		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
		return;
	}

	attach_entity_cfs_rq(se);
}

#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct task_struct *p)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	if (schedstat_enabled()) {
		struct sched_statistics *stats;

		stats = __schedstats_from_se(curr);
		__schedstat_set(stats->exec_max,
				max(delta_exec, stats->exec_max));
	}

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq->exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cgroup_account_cputime(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
	update_curr(cfs_rq_of(&rq->curr->se));
}
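/*
 * A worked example of the vruntime accounting above (a sketch): after
 * running for delta_exec = 1ms, a nice-0 task (weight 1024) advances
 * its vruntime by exactly 1ms, while a task of weight 2048 advances by
 * 1ms * 1024/2048 = 0.5ms per calc_delta_fair(). Heavier tasks thus
 * accumulate vruntime more slowly and are picked again sooner by the
 * leftmost-first rb-tree.
 */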
static inline void
update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_se(se);

	if (entity_is_task(se))
		p = task_of(se);

	__update_stats_wait_start(rq_of(cfs_rq), p, stats);
}

static inline void
update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_se(se);

	/*
	 * When sched_schedstat changes from 0 to 1, some sched entities
	 * may already be on the runqueue with a wait_start of 0, which
	 * would make the computed delta wrong. We need to avoid this
	 * scenario.
	 */
	if (unlikely(!schedstat_val(stats->wait_start)))
		return;

	if (entity_is_task(se))
		p = task_of(se);

	__update_stats_wait_end(rq_of(cfs_rq), p, stats);
}

static inline void
update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_statistics *stats;
	struct task_struct *tsk = NULL;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_se(se);

	if (entity_is_task(se))
		tsk = task_of(se);

	__update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats);
}

/*
 * Task is being enqueued - update stats:
 */
static inline void
update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start_fair(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper_fair(cfs_rq, se);
}

static inline void
update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end_fair(cfs_rq, se);

	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
		struct task_struct *tsk = task_of(se);
		unsigned int state;

		/* XXX racy against TTWU */
		state = READ_ONCE(tsk->__state);
		if (state & TASK_INTERRUPTIBLE)
			__schedstat_set(tsk->stats.sleep_start,
					rq_clock(rq_of(cfs_rq)));
		if (state & TASK_UNINTERRUPTIBLE)
			__schedstat_set(tsk->stats.block_start,
					rq_clock(rq_of(cfs_rq)));
	}
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;
struct numa_group {
	refcount_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;
	int active_nodes;

	struct rcu_head rcu;
	unsigned long total_faults;
	unsigned long max_faults_cpu;
	/*
	 * faults[] array is split into two regions: faults_mem and faults_cpu.
	 *
	 * faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
	 */
	unsigned long faults[];
};

/*
 * For functions that can be called in multiple contexts that permit reading
 * ->numa_group (see struct task_struct for locking rules).
 */
static struct numa_group *deref_task_numa_group(struct task_struct *p)
{
	return rcu_dereference_check(p->numa_group, p == current ||
		(lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
}

static struct numa_group *deref_curr_numa_group(struct task_struct *p)
{
	return rcu_dereference_protected(p->numa_group, p == current);
}

static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}

static unsigned int task_scan_start(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long period = smin;
	struct numa_group *ng;

	/* Scale the maximum scan period with the amount of shared memory. */
	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	if (ng) {
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);

		period *= refcount_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;
	}
	rcu_read_unlock();

	return max(smin, period);
}
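/*
 * A worked example of the scan-rate sizing above (a sketch, assuming
 * 4K pages and the 256MB default scan size): a task with a 1GB RSS
 * covers its address space in task_nr_scan_windows() = 1GB/256MB = 4
 * windows, so scan = 1000ms / 4 = 250ms. The floor is 1000ms /
 * (2560/256) = 100ms, hence task_scan_min() returns 250ms: the full
 * RSS gets scanned in roughly one scan_period_min without exceeding
 * MAX_SCAN_WINDOW worth of PTEs per second.
 */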
static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long smax;
	struct numa_group *ng;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);

	/* Scale the maximum scan period with the amount of shared memory. */
	ng = deref_curr_numa_group(p);
	if (ng) {
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);
		unsigned long period = smax;

		period *= refcount_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;

		smax = max(smax, period);
	}

	return max(smin, smax);
}

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
	struct numa_group *ng;
	pid_t gid = 0;

	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	if (ng)
		gid = ng->gid;
	rcu_read_unlock();

	return gid;
}

/*
 * The averaged statistics, shared & private, memory & CPU,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	struct numa_group *ng = deref_task_numa_group(p);

	if (!ng)
		return 0;

	return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
	return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] +
		group->faults[task_faults_idx(NUMA_CPU, nid, 1)];
}

static inline unsigned long group_faults_priv(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
	}

	return faults;
}

static inline unsigned long group_faults_shared(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
	}

	return faults;
}
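/*
 * A worked example of the faults[] indexing above (a sketch, assuming
 * a two-node system, i.e. nr_node_ids = 2, with NUMA_MEM = 0 and
 * NUMA_CPU = 1 in enum numa_faults_stats): the averaged statistics are
 * laid out as
 *
 *   [mem/node0/shared, mem/node0/priv, mem/node1/shared, mem/node1/priv,
 *    cpu/node0/shared, cpu/node0/priv, cpu/node1/shared, cpu/node1/priv]
 *
 * so task_faults_idx(NUMA_CPU, 1, 1) = 2 * (1 * 2 + 1) + 1 = 7, the
 * last slot of that first half; the second half repeats the same
 * layout for the current-window buffers.
 */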
/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
					int maxdist, bool task)
{
	unsigned long score = 0;
	int node;

	/*
	 * All nodes are directly connected, and the same distance
	 * from each other. No need for fancy placement algorithms.
	 */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return 0;

	/*
	 * This code is called for each node, introducing N^2 complexity,
	 * which should be ok given the number of nodes rarely exceeds 8.
	 */
	for_each_online_node(node) {
		unsigned long faults;
		int dist = node_distance(nid, node);

		/*
		 * The furthest away nodes in the system are not interesting
		 * for placement; nid was already counted.
		 */
		if (dist == sched_max_numa_distance || node == nid)
			continue;

		/*
		 * On systems with a backplane NUMA topology, compare groups
		 * of nodes, and move tasks towards the group with the most
		 * memory accesses. When comparing two nodes at distance
		 * "hoplimit", only nodes closer by than "hoplimit" are part
		 * of each group. Skip other nodes.
		 */
		if (sched_numa_topology_type == NUMA_BACKPLANE &&
					dist >= maxdist)
			continue;

		/* Add up the faults from nearby nodes. */
		if (task)
			faults = task_faults(p, node);
		else
			faults = group_faults(p, node);

		/*
		 * On systems with a glueless mesh NUMA topology, there are
		 * no fixed "groups of nodes". Instead, nodes that are not
		 * directly connected bounce traffic through intermediate
		 * nodes; a numa_group can occupy any set of nodes.
		 * The further away a node is, the less the faults count.
		 * This seems to result in good task placement.
		 */
		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
			faults *= (sched_max_numa_distance - dist);
			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
		}

		score += faults;
	}

	return score;
}
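/*
 * A worked example of the glueless-mesh scaling above (a sketch,
 * assuming LOCAL_DISTANCE = 10 and sched_max_numa_distance = 40): a
 * node at distance 20 from nid contributes faults * (40 - 20) /
 * (40 - 10), i.e. two thirds of its faults, while a node at distance
 * 30 contributes only one third. Faults therefore pull a task towards
 * nearby nodes much more strongly than towards distant ones.
 */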
/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
					int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	faults = task_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, true);

	return 1000 * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid,
					 int dist)
{
	struct numa_group *ng = deref_task_numa_group(p);
	unsigned long faults, total_faults;

	if (!ng)
		return 0;

	total_faults = ng->total_faults;

	if (!total_faults)
		return 0;

	faults = group_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, false);

	return 1000 * faults / total_faults;
}

bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
				int src_nid, int dst_cpu)
{
	struct numa_group *ng = deref_curr_numa_group(p);
	int dst_nid = cpu_to_node(dst_cpu);
	int last_cpupid, this_cpupid;

	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);

	/*
	 * Allow first faults or private faults to migrate immediately early in
	 * the lifetime of a task. The magic number 4 is based on waiting for
	 * two full passes of the "multi-stage node selection" test that is
	 * executed below.
	 */
	if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
	    (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
		return true;

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(n)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadratic squishes small probabilities, making it less likely
	 * we act on an unlikely task<->page relation.
	 */
	if (!cpupid_pid_unset(last_cpupid) &&
				cpupid_to_nid(last_cpupid) != dst_nid)
		return false;

	/* Always allow migrate on private faults */
	if (cpupid_match_pid(p, last_cpupid))
		return true;

	/* A shared fault, but p->numa_group has not been set up yet. */
	if (!ng)
		return true;

	/*
	 * Destination node is much more heavily used than the source
	 * node? Allow migration.
	 */
	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
					ACTIVE_NODE_FRACTION)
		return true;

	/*
	 * Distribute memory according to CPU & memory use on each node,
	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
	 *
	 * faults_cpu(dst)   3   faults_cpu(src)
	 * --------------- * - > ---------------
	 * faults_mem(dst)   4   faults_mem(src)
	 */
	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}

/*
 * 'numa_type' describes the node at the moment of load balancing.
 */
enum numa_type {
	/* The node has spare capacity that can be used to run more tasks. */
	node_has_spare = 0,
	/*
	 * The node is fully used and the tasks don't compete for more CPU
	 * cycles. Nevertheless, some tasks might wait before running.
	 */
	node_fully_busy,
	/*
	 * The node is overloaded and can't provide expected CPU cycles to all
	 * tasks.
	 */
	node_overloaded
};

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long load;
	unsigned long runnable;
	unsigned long util;
	/* Total compute capacity of CPUs on a node */
	unsigned long compute_capacity;
	unsigned int nr_running;
	unsigned int weight;
	enum numa_type node_type;
	int idle_cpu;
};

static inline bool is_core_idle(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	int sibling;

	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
		if (cpu == sibling)
			continue;

		if (!idle_cpu(sibling))
			return false;
	}
#endif

	return true;
}

struct task_numa_env {
	struct task_struct *p;

	int src_cpu, src_nid;
	int dst_cpu, dst_nid;

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct;
	int dist;

	struct task_struct *best_task;
	long best_imp;
	int best_cpu;
};

static unsigned long cpu_load(struct rq *rq);
static unsigned long cpu_runnable(struct rq *rq);
static inline long adjust_numa_imbalance(int imbalance,
					int dst_running, int dst_weight);

static inline enum
numa_type numa_classify(unsigned int imbalance_pct,
			 struct numa_stats *ns)
{
	if ((ns->nr_running > ns->weight) &&
	    (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
	     ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
		return node_overloaded;

	if ((ns->nr_running < ns->weight) ||
	    (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
	     ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
		return node_has_spare;

	return node_fully_busy;
}
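/*
 * A worked example of the classification above (a sketch, assuming a
 * 4-CPU node, so weight = 4 and compute_capacity = 4096, and an
 * imbalance_pct of 112): with 5 runnable tasks and util = 3800 the
 * node is overloaded, since 4096 * 100 = 409600 < 3800 * 112 = 425600.
 * With 3 runnable tasks the nr_running < weight test alone classifies
 * it as node_has_spare, regardless of utilization.
 */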
#ifdef CONFIG_SCHED_SMT
/* Forward declarations of select_idle_sibling helpers */
static inline bool test_idle_cores(int cpu, bool def);
static inline int numa_idle_core(int idle_core, int cpu)
{
	if (!static_branch_likely(&sched_smt_present) ||
	    idle_core >= 0 || !test_idle_cores(cpu, false))
		return idle_core;

	/*
	 * Prefer cores instead of packing HT siblings
	 * and triggering future load balancing.
	 */
	if (is_core_idle(cpu))
		idle_core = cpu;

	return idle_core;
}
#else
static inline int numa_idle_core(int idle_core, int cpu)
{
	return idle_core;
}
#endif

/*
 * Gather all necessary information to make NUMA balancing placement
 * decisions that are compatible with the standard load balancer. This
 * borrows code and logic from update_sg_lb_stats but sharing a
 * common implementation is impractical.
 */
static void update_numa_stats(struct task_numa_env *env,
			      struct numa_stats *ns, int nid,
			      bool find_idle)
{
	int cpu, idle_core = -1;

	memset(ns, 0, sizeof(*ns));
	ns->idle_cpu = -1;

	rcu_read_lock();
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->load += cpu_load(rq);
		ns->runnable += cpu_runnable(rq);
		ns->util += cpu_util_cfs(cpu);
		ns->nr_running += rq->cfs.h_nr_running;
		ns->compute_capacity += capacity_of(cpu);

		if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
			if (READ_ONCE(rq->numa_migrate_on) ||
			    !cpumask_test_cpu(cpu, env->p->cpus_ptr))
				continue;

			if (ns->idle_cpu == -1)
				ns->idle_cpu = cpu;

			idle_core = numa_idle_core(idle_core, cpu);
		}
	}
	rcu_read_unlock();

	ns->weight = cpumask_weight(cpumask_of_node(nid));

	ns->node_type = numa_classify(env->imbalance_pct, ns);

	if (idle_core >= 0)
		ns->idle_cpu = idle_core;
}

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	struct rq *rq = cpu_rq(env->dst_cpu);

	/* Check if run-queue part of active NUMA balance. */
	if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
		int cpu;
		int start = env->dst_cpu;

		/* Find alternative idle CPU. */
		for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) {
			if (cpu == env->best_cpu || !idle_cpu(cpu) ||
			    !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
				continue;
			}

			env->dst_cpu = cpu;
			rq = cpu_rq(env->dst_cpu);
			if (!xchg(&rq->numa_migrate_on, 1))
				goto assign;
		}

		/* Failed to find an alternative idle CPU */
		return;
	}

assign:
	/*
	 * Clear previous best_cpu/rq numa-migrate flag, since task now
	 * found a better CPU to move/swap.
	 */
	if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
		rq = cpu_rq(env->best_cpu);
		WRITE_ONCE(rq->numa_migrate_on, 0);
	}

	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}
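/*
 * A worked example of the imbalance check in load_too_imbalanced()
 * below (a sketch, assuming equal src and dst capacities, which cancel
 * out of the comparison): with src_stats.load = 6 and dst_stats.load =
 * 2 in load units, the current imbalance is |2 - 6| = 4. Moving a task
 * of load 2 gives src_load = 4 and dst_load = 4, so the new imbalance
 * is 0 and the move is allowed; moving a task of load 6 would give
 * |8 - 0| = 8 > 4 and be rejected for making things worse.
 */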
static bool load_too_imbalanced(long src_load, long dst_load,
				struct task_numa_env *env)
{
	long imb, old_imb;
	long orig_src_load, orig_dst_load;
	long src_capacity, dst_capacity;

	/*
	 * The load is corrected for the CPU capacity available on each node.
	 *
	 * src_load        dst_load
	 * ------------ vs ---------
	 * src_capacity    dst_capacity
	 */
	src_capacity = env->src_stats.compute_capacity;
	dst_capacity = env->dst_stats.compute_capacity;

	imb = abs(dst_load * src_capacity - src_load * dst_capacity);

	orig_src_load = env->src_stats.load;
	orig_dst_load = env->dst_stats.load;

	old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);

	/* Would this change make things worse? */
	return (imb > old_imb);
}

/*
 * Maximum NUMA importance can be 1998 (2*999);
 * SMALLIMP @ 30 would be close to 1998/64.
 * Used to deter task migration.
 */
#define SMALLIMP	30

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task was migrated to the target dst_cpu, taking
 * into account that it might be best to exchange the task running on the
 * dst_cpu with the source task.
 */
static bool task_numa_compare(struct task_numa_env *env,
			      long taskimp, long groupimp, bool maymove)
{
	struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
	struct rq *dst_rq = cpu_rq(env->dst_cpu);
	long imp = p_ng ? groupimp : taskimp;
	struct task_struct *cur;
	long src_load, dst_load;
	int dist = env->dist;
	long moveimp = imp;
	long load;
	bool stopsearch = false;

	if (READ_ONCE(dst_rq->numa_migrate_on))
		return false;

	rcu_read_lock();
	cur = rcu_dereference(dst_rq->curr);
	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
		cur = NULL;

	/*
	 * Because we have preemption enabled we can get migrated around and
	 * end up trying to select ourselves (current == env->p) as a swap
	 * candidate.
	 */
	if (cur == env->p) {
		stopsearch = true;
		goto unlock;
	}

	if (!cur) {
		if (maymove && moveimp >= env->best_imp)
			goto assign;
		else
			goto unlock;
	}

	/* Skip this swap candidate if cannot move to the source cpu. */
	if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
		goto unlock;

	/*
	 * Skip this swap candidate if it is not moving to its preferred
	 * node and the best task is.
	 */
	if (env->best_task &&
	    env->best_task->numa_preferred_nid == env->src_nid &&
	    cur->numa_preferred_nid != env->src_nid) {
		goto unlock;
	}

	/*
	 * "imp" is the fault differential for the source task between the
	 * source and destination node. Calculate the total differential for
	 * the source task and potential destination task. The more negative
	 * the value is, the more remote accesses that would be expected to
	 * be incurred if the tasks were swapped.
	 *
	 * If dst and source tasks are in the same NUMA group, or not
	 * in any group then look only at task weights.
	 */
	cur_ng = rcu_dereference(cur->numa_group);
	if (cur_ng == p_ng) {
		imp = taskimp + task_weight(cur, env->src_nid, dist) -
		      task_weight(cur, env->dst_nid, dist);
		/*
		 * Add some hysteresis to prevent swapping the
		 * tasks within a group over tiny differences.
		 */
		if (cur_ng)
			imp -= imp / 16;
	} else {
		/*
		 * Compare the group weights. If a task is all by itself
		 * (not part of a group), use the task weight instead.
		 */
		if (cur_ng && p_ng)
			imp += group_weight(cur, env->src_nid, dist) -
			       group_weight(cur, env->dst_nid, dist);
		else
			imp += task_weight(cur, env->src_nid, dist) -
			       task_weight(cur, env->dst_nid, dist);
	}

	/* Discourage picking a task already on its preferred node */
	if (cur->numa_preferred_nid == env->dst_nid)
		imp -= imp / 16;

	/*
	 * Encourage picking a task that moves to its preferred node.
	 * This potentially makes imp larger than its maximum of
	 * 1998 (see SMALLIMP and task_weight for why) but in this
	 * case, it does not matter.
	 */
	if (cur->numa_preferred_nid == env->src_nid)
		imp += imp / 8;

	if (maymove && moveimp > imp && moveimp > env->best_imp) {
		imp = moveimp;
		cur = NULL;
		goto assign;
	}

	/*
	 * Prefer swapping with a task moving to its preferred node over a
	 * task that is not.
	 */
	if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
	    env->best_task->numa_preferred_nid != env->src_nid) {
		goto assign;
	}

	/*
	 * If the NUMA importance is less than SMALLIMP,
	 * task migration might only result in ping pong
	 * of tasks and also hurt performance due to cache
	 * misses.
	 */
	if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
		goto unlock;

	/*
	 * In the overloaded case, try and keep the load balanced.
	 */
	load = task_h_load(env->p) - task_h_load(cur);
	if (!load)
		goto assign;

	dst_load = env->dst_stats.load + load;
	src_load = env->src_stats.load - load;

	if (load_too_imbalanced(src_load, dst_load, env))
		goto unlock;

assign:
	/* Evaluate an idle CPU for a task numa move. */
	if (!cur) {
		int cpu = env->dst_stats.idle_cpu;

		/* Nothing cached so current CPU went idle since the search. */
		if (cpu < 0)
			cpu = env->dst_cpu;

		/*
		 * If the CPU is no longer truly idle and the previous best CPU
		 * is, keep using it.
		 */
		if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
		    idle_cpu(env->best_cpu)) {
			cpu = env->best_cpu;
		}

		env->dst_cpu = cpu;
	}

	task_numa_assign(env, cur, imp);

	/*
	 * If a move to idle is allowed because there is capacity or load
	 * balance improves then stop the search. While a better swap
	 * candidate may exist, a search is not free.
	 */
	if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
		stopsearch = true;

	/*
	 * If a swap candidate must be identified and the current best task
	 * moves its preferred node then stop the search.
	 */
	if (!maymove && env->best_task &&
	    env->best_task->numa_preferred_nid == env->src_nid) {
		stopsearch = true;
	}
unlock:
	rcu_read_unlock();

	return stopsearch;
}
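/*
 * A worked example of the SMALLIMP filter above (a sketch): suppose the
 * current best candidate was assigned with best_imp = 40 and a new swap
 * candidate scores imp = 50. Although 50 >= SMALLIMP, it is not greater
 * than best_imp + SMALLIMP/2 = 40 + 15 = 55, so the candidate is
 * dropped; only an improvement of at least ~SMALLIMP/2 over the current
 * best justifies churning tasks between nodes.
 */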
static void task_numa_find_cpu(struct task_numa_env *env,
			       long taskimp, long groupimp)
{
	bool maymove = false;
	int cpu;

	/*
	 * If dst node has spare capacity, then check if there is an
	 * imbalance that would be overruled by the load balancer.
	 */
	if (env->dst_stats.node_type == node_has_spare) {
		unsigned int imbalance;
		int src_running, dst_running;

		/*
		 * Would movement cause an imbalance? Note that if src has
		 * more running tasks, the imbalance is ignored as the
		 * move improves the imbalance from the perspective of the
		 * CPU load balancer.
		 */
		src_running = env->src_stats.nr_running - 1;
		dst_running = env->dst_stats.nr_running + 1;
		imbalance = max(0, dst_running - src_running);
		imbalance = adjust_numa_imbalance(imbalance, dst_running,
							env->dst_stats.weight);

		/* Use idle CPU if there is no imbalance */
		if (!imbalance) {
			maymove = true;
			if (env->dst_stats.idle_cpu >= 0) {
				env->dst_cpu = env->dst_stats.idle_cpu;
				task_numa_assign(env, NULL, 0);
				return;
			}
		}
	} else {
		long src_load, dst_load, load;
		/*
		 * If the improvement from just moving env->p direction is better
		 * than swapping tasks around, check if a move is possible.
		 */
		load = task_h_load(env->p);
		dst_load = env->dst_stats.load + load;
		src_load = env->src_stats.load - load;
		maymove = !load_too_imbalanced(src_load, dst_load, env);
	}

	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
		/* Skip this CPU if the source task cannot migrate */
		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
			continue;

		env->dst_cpu = cpu;
		if (task_numa_compare(env, taskimp, groupimp, maymove))
			break;
	}
}

static int task_numa_migrate(struct task_struct *p)
{
	struct task_numa_env env = {
		.p = p,

		.src_cpu = task_cpu(p),
		.src_nid = task_node(p),

		.imbalance_pct = 112,

		.best_task = NULL,
		.best_imp = 0,
		.best_cpu = -1,
	};
	unsigned long taskweight, groupweight;
	struct sched_domain *sd;
	long taskimp, groupimp;
	struct numa_group *ng;
	struct rq *best_rq;
	int nid, ret, dist;

	/*
	 * Pick the lowest SD_NUMA domain, as that would have the smallest
	 * imbalance and would be the first to start moving tasks about.
	 *
	 * And we want to avoid any moving of tasks about, as that would create
	 * random movement of tasks -- counter the numa conditions we're trying
	 * to satisfy here.
	 */
	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
	if (sd)
		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
	rcu_read_unlock();

	/*
	 * Cpusets can break the scheduler domain tree into smaller
	 * balance domains, some of which do not cross NUMA boundaries.
	 * Tasks that are "trapped" in such domains cannot be migrated
	 * elsewhere, so there is no point in (re)trying.
	 */
	if (unlikely(!sd)) {
		sched_setnuma(p, task_node(p));
		return -EINVAL;
	}

	env.dst_nid = p->numa_preferred_nid;
	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
	taskweight = task_weight(p, env.src_nid, dist);
	groupweight = group_weight(p, env.src_nid, dist);
	update_numa_stats(&env, &env.src_stats, env.src_nid, false);
	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
	update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);

	/* Try to find a spot on the preferred nid.
*/ 1977 task_numa_find_cpu(&env, taskimp, groupimp); 1978 1979 /* 1980 * Look at other nodes in these cases: 1981 * - there is no space available on the preferred_nid 1982 * - the task is part of a numa_group that is interleaved across 1983 * multiple NUMA nodes; in order to better consolidate the group, 1984 * we need to check other locations. 1985 */ 1986 ng = deref_curr_numa_group(p); 1987 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { 1988 for_each_online_node(nid) { 1989 if (nid == env.src_nid || nid == p->numa_preferred_nid) 1990 continue; 1991 1992 dist = node_distance(env.src_nid, env.dst_nid); 1993 if (sched_numa_topology_type == NUMA_BACKPLANE && 1994 dist != env.dist) { 1995 taskweight = task_weight(p, env.src_nid, dist); 1996 groupweight = group_weight(p, env.src_nid, dist); 1997 } 1998 1999 /* Only consider nodes where both task and groups benefit */ 2000 taskimp = task_weight(p, nid, dist) - taskweight; 2001 groupimp = group_weight(p, nid, dist) - groupweight; 2002 if (taskimp < 0 && groupimp < 0) 2003 continue; 2004 2005 env.dist = dist; 2006 env.dst_nid = nid; 2007 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); 2008 task_numa_find_cpu(&env, taskimp, groupimp); 2009 } 2010 } 2011 2012 /* 2013 * If the task is part of a workload that spans multiple NUMA nodes, 2014 * and is migrating into one of the workload's active nodes, remember 2015 * this node as the task's preferred numa node, so the workload can 2016 * settle down. 2017 * A task that migrated to a second choice node will be better off 2018 * trying for a better one later. Do not set the preferred node here. 2019 */ 2020 if (ng) { 2021 if (env.best_cpu == -1) 2022 nid = env.src_nid; 2023 else 2024 nid = cpu_to_node(env.best_cpu); 2025 2026 if (nid != p->numa_preferred_nid) 2027 sched_setnuma(p, nid); 2028 } 2029 2030 /* No better CPU than the current one was found. */ 2031 if (env.best_cpu == -1) { 2032 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); 2033 return -EAGAIN; 2034 } 2035 2036 best_rq = cpu_rq(env.best_cpu); 2037 if (env.best_task == NULL) { 2038 ret = migrate_task_to(p, env.best_cpu); 2039 WRITE_ONCE(best_rq->numa_migrate_on, 0); 2040 if (ret != 0) 2041 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); 2042 return ret; 2043 } 2044 2045 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); 2046 WRITE_ONCE(best_rq->numa_migrate_on, 0); 2047 2048 if (ret != 0) 2049 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); 2050 put_task_struct(env.best_task); 2051 return ret; 2052 } 2053 2054 /* Attempt to migrate a task to a CPU on the preferred node. */ 2055 static void numa_migrate_preferred(struct task_struct *p) 2056 { 2057 unsigned long interval = HZ; 2058 2059 /* This task has no NUMA fault statistics yet */ 2060 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) 2061 return; 2062 2063 /* Periodically retry migrating the task to the preferred node */ 2064 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); 2065 p->numa_migrate_retry = jiffies + interval; 2066 2067 /* Success if task is already running on preferred CPU */ 2068 if (task_node(p) == p->numa_preferred_nid) 2069 return; 2070 2071 /* Otherwise, try migrate to a CPU on the preferred node */ 2072 task_numa_migrate(p); 2073 } 2074 2075 /* 2076 * Find out how many nodes the workload is actively running on. Do this by 2077 * tracking the nodes from which NUMA hinting faults are triggered. 
This can 2078 * be different from the set of nodes where the workload's memory is currently 2079 * located. 2080 */ 2081 static void numa_group_count_active_nodes(struct numa_group *numa_group) 2082 { 2083 unsigned long faults, max_faults = 0; 2084 int nid, active_nodes = 0; 2085 2086 for_each_online_node(nid) { 2087 faults = group_faults_cpu(numa_group, nid); 2088 if (faults > max_faults) 2089 max_faults = faults; 2090 } 2091 2092 for_each_online_node(nid) { 2093 faults = group_faults_cpu(numa_group, nid); 2094 if (faults * ACTIVE_NODE_FRACTION > max_faults) 2095 active_nodes++; 2096 } 2097 2098 numa_group->max_faults_cpu = max_faults; 2099 numa_group->active_nodes = active_nodes; 2100 } 2101 2102 /* 2103 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS 2104 * increments. The more local the fault statistics are, the higher the scan 2105 * period will be for the next scan window. If local/(local+remote) ratio is 2106 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) 2107 * the scan period will decrease. Aim for 70% local accesses. 2108 */ 2109 #define NUMA_PERIOD_SLOTS 10 2110 #define NUMA_PERIOD_THRESHOLD 7 2111 2112 /* 2113 * Increase the scan period (slow down scanning) if the majority of 2114 * our memory is already on our local node, or if the majority of 2115 * the page accesses are shared with other processes. 2116 * Otherwise, decrease the scan period. 2117 */ 2118 static void update_task_scan_period(struct task_struct *p, 2119 unsigned long shared, unsigned long private) 2120 { 2121 unsigned int period_slot; 2122 int lr_ratio, ps_ratio; 2123 int diff; 2124 2125 unsigned long remote = p->numa_faults_locality[0]; 2126 unsigned long local = p->numa_faults_locality[1]; 2127 2128 /* 2129 * If there were no recorded hinting faults then either the task is 2130 * completely idle or all activity is in areas that are not of interest 2131 * to automatic numa balancing. Related to that, if there were failed 2132 * migrations then it implies we are migrating too quickly or the local 2133 * node is overloaded. In either case, scan slower. 2134 */ 2135 if (local + shared == 0 || p->numa_faults_locality[2]) { 2136 p->numa_scan_period = min(p->numa_scan_period_max, 2137 p->numa_scan_period << 1); 2138 2139 p->mm->numa_next_scan = jiffies + 2140 msecs_to_jiffies(p->numa_scan_period); 2141 2142 return; 2143 } 2144 2145 /* 2146 * Prepare to scale scan period relative to the current period. 2147 * == NUMA_PERIOD_THRESHOLD scan period stays the same 2148 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster) 2149 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower) 2150 */ 2151 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); 2152 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); 2153 ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared); 2154 2155 if (ps_ratio >= NUMA_PERIOD_THRESHOLD) { 2156 /* 2157 * Most memory accesses are local. There is no need to 2158 * do fast NUMA scanning, since memory is already local. 2159 */ 2160 int slot = ps_ratio - NUMA_PERIOD_THRESHOLD; 2161 if (!slot) 2162 slot = 1; 2163 diff = slot * period_slot; 2164 } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) { 2165 /* 2166 * Most memory accesses are shared with other tasks. 2167 * There is no point in continuing fast NUMA scanning, 2168 * since other tasks may just move the memory elsewhere.
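 *
 * (A purely illustrative walk-through of the slot arithmetic shared
 * by all three branches below -- the numbers are examples, not
 * defaults: with p->numa_scan_period = 1000ms, period_slot =
 * DIV_ROUND_UP(1000, 10) = 100ms; an lr_ratio of 9 then gives
 * slot = 9 - 7 = 2, so diff = +200ms and the next scan period is
 * 1200ms, i.e. scanning slows down.)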
2169 */ 2170 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD; 2171 if (!slot) 2172 slot = 1; 2173 diff = slot * period_slot; 2174 } else { 2175 /* 2176 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS, 2177 * yet they are not on the local NUMA node. Speed up 2178 * NUMA scanning to get the memory moved over. 2179 */ 2180 int ratio = max(lr_ratio, ps_ratio); 2181 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; 2182 } 2183 2184 p->numa_scan_period = clamp(p->numa_scan_period + diff, 2185 task_scan_min(p), task_scan_max(p)); 2186 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2187 } 2188 2189 /* 2190 * Get the fraction of time the task has been running since the last 2191 * NUMA placement cycle. The scheduler keeps similar statistics, but 2192 * decays those on a 32ms period, which is orders of magnitude off 2193 * from the dozens-of-seconds NUMA balancing period. Use the scheduler 2194 * stats only if the task is so new there are no NUMA statistics yet. 2195 */ 2196 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) 2197 { 2198 u64 runtime, delta, now; 2199 /* Use the start of this time slice to avoid calculations. */ 2200 now = p->se.exec_start; 2201 runtime = p->se.sum_exec_runtime; 2202 2203 if (p->last_task_numa_placement) { 2204 delta = runtime - p->last_sum_exec_runtime; 2205 *period = now - p->last_task_numa_placement; 2206 2207 /* Avoid time going backwards, prevent potential divide error: */ 2208 if (unlikely((s64)*period < 0)) 2209 *period = 0; 2210 } else { 2211 delta = p->se.avg.load_sum; 2212 *period = LOAD_AVG_MAX; 2213 } 2214 2215 p->last_sum_exec_runtime = runtime; 2216 p->last_task_numa_placement = now; 2217 2218 return delta; 2219 } 2220 2221 /* 2222 * Determine the preferred nid for a task in a numa_group. This needs to 2223 * be done in a way that produces consistent results with group_weight, 2224 * otherwise workloads might not converge. 2225 */ 2226 static int preferred_group_nid(struct task_struct *p, int nid) 2227 { 2228 nodemask_t nodes; 2229 int dist; 2230 2231 /* Direct connections between all NUMA nodes. */ 2232 if (sched_numa_topology_type == NUMA_DIRECT) 2233 return nid; 2234 2235 /* 2236 * On a system with glueless mesh NUMA topology, group_weight 2237 * scores nodes according to the number of NUMA hinting faults on 2238 * both the node itself, and on nearby nodes. 2239 */ 2240 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 2241 unsigned long score, max_score = 0; 2242 int node, max_node = nid; 2243 2244 dist = sched_max_numa_distance; 2245 2246 for_each_online_node(node) { 2247 score = group_weight(p, node, dist); 2248 if (score > max_score) { 2249 max_score = score; 2250 max_node = node; 2251 } 2252 } 2253 return max_node; 2254 } 2255 2256 /* 2257 * Finding the preferred nid in a system with NUMA backplane 2258 * interconnect topology is more involved. The goal is to locate 2259 * tasks from numa_groups near each other in the system, and 2260 * untangle workloads from different sides of the system. This requires 2261 * searching down the hierarchy of node groups, recursively searching 2262 * inside the highest scoring group of nodes. The nodemask tricks 2263 * keep the complexity of the search down. 2264 */ 2265 nodes = node_online_map; 2266 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { 2267 unsigned long max_faults = 0; 2268 nodemask_t max_group = NODE_MASK_NONE; 2269 int a, b; 2270 2271 /* Are there nodes at this distance from each other? 
*/ 2272 if (!find_numa_distance(dist)) 2273 continue; 2274 2275 for_each_node_mask(a, nodes) { 2276 unsigned long faults = 0; 2277 nodemask_t this_group; 2278 nodes_clear(this_group); 2279 2280 /* Sum group's NUMA faults; includes a==b case. */ 2281 for_each_node_mask(b, nodes) { 2282 if (node_distance(a, b) < dist) { 2283 faults += group_faults(p, b); 2284 node_set(b, this_group); 2285 node_clear(b, nodes); 2286 } 2287 } 2288 2289 /* Remember the top group. */ 2290 if (faults > max_faults) { 2291 max_faults = faults; 2292 max_group = this_group; 2293 /* 2294 * subtle: at the smallest distance there is 2295 * just one node left in each "group", the 2296 * winner is the preferred nid. 2297 */ 2298 nid = a; 2299 } 2300 } 2301 /* Next round, evaluate the nodes within max_group. */ 2302 if (!max_faults) 2303 break; 2304 nodes = max_group; 2305 } 2306 return nid; 2307 } 2308 2309 static void task_numa_placement(struct task_struct *p) 2310 { 2311 int seq, nid, max_nid = NUMA_NO_NODE; 2312 unsigned long max_faults = 0; 2313 unsigned long fault_types[2] = { 0, 0 }; 2314 unsigned long total_faults; 2315 u64 runtime, period; 2316 spinlock_t *group_lock = NULL; 2317 struct numa_group *ng; 2318 2319 /* 2320 * The p->mm->numa_scan_seq field gets updated without 2321 * exclusive access. Use READ_ONCE() here to ensure 2322 * that the field is read in a single access: 2323 */ 2324 seq = READ_ONCE(p->mm->numa_scan_seq); 2325 if (p->numa_scan_seq == seq) 2326 return; 2327 p->numa_scan_seq = seq; 2328 p->numa_scan_period_max = task_scan_max(p); 2329 2330 total_faults = p->numa_faults_locality[0] + 2331 p->numa_faults_locality[1]; 2332 runtime = numa_get_avg_runtime(p, &period); 2333 2334 /* If the task is part of a group prevent parallel updates to group stats */ 2335 ng = deref_curr_numa_group(p); 2336 if (ng) { 2337 group_lock = &ng->lock; 2338 spin_lock_irq(group_lock); 2339 } 2340 2341 /* Find the node with the highest number of faults */ 2342 for_each_online_node(nid) { 2343 /* Keep track of the offsets in numa_faults array */ 2344 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx; 2345 unsigned long faults = 0, group_faults = 0; 2346 int priv; 2347 2348 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { 2349 long diff, f_diff, f_weight; 2350 2351 mem_idx = task_faults_idx(NUMA_MEM, nid, priv); 2352 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv); 2353 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv); 2354 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv); 2355 2356 /* Decay existing window, copy faults since last scan */ 2357 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; 2358 fault_types[priv] += p->numa_faults[membuf_idx]; 2359 p->numa_faults[membuf_idx] = 0; 2360 2361 /* 2362 * Normalize the faults_from, so all tasks in a group 2363 * count according to CPU use, instead of by the raw 2364 * number of faults. Tasks with little runtime have 2365 * little over-all impact on throughput, and thus their 2366 * faults are less important. 
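 *
 * Informally, the lines below compute (a sketch of the existing
 * code, not new math):
 *
 *              runtime << 16     numa_faults[cpubuf_idx]
 *   f_weight = -------------  *  -----------------------
 *               period + 1          total_faults + 1
 *
 * so a task that ran for the whole period keeps its full
 * fixed-point share of the faults, while a mostly idle task
 * contributes almost nothing.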
2367 */ 2368 f_weight = div64_u64(runtime << 16, period + 1); 2369 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / 2370 (total_faults + 1); 2371 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; 2372 p->numa_faults[cpubuf_idx] = 0; 2373 2374 p->numa_faults[mem_idx] += diff; 2375 p->numa_faults[cpu_idx] += f_diff; 2376 faults += p->numa_faults[mem_idx]; 2377 p->total_numa_faults += diff; 2378 if (ng) { 2379 /* 2380 * safe because we can only change our own group 2381 * 2382 * mem_idx represents the offset for a given 2383 * nid and priv in a specific region because it 2384 * is at the beginning of the numa_faults array. 2385 */ 2386 ng->faults[mem_idx] += diff; 2387 ng->faults[cpu_idx] += f_diff; 2388 ng->total_faults += diff; 2389 group_faults += ng->faults[mem_idx]; 2390 } 2391 } 2392 2393 if (!ng) { 2394 if (faults > max_faults) { 2395 max_faults = faults; 2396 max_nid = nid; 2397 } 2398 } else if (group_faults > max_faults) { 2399 max_faults = group_faults; 2400 max_nid = nid; 2401 } 2402 } 2403 2404 if (ng) { 2405 numa_group_count_active_nodes(ng); 2406 spin_unlock_irq(group_lock); 2407 max_nid = preferred_group_nid(p, max_nid); 2408 } 2409 2410 if (max_faults) { 2411 /* Set the new preferred node */ 2412 if (max_nid != p->numa_preferred_nid) 2413 sched_setnuma(p, max_nid); 2414 } 2415 2416 update_task_scan_period(p, fault_types[0], fault_types[1]); 2417 } 2418 2419 static inline int get_numa_group(struct numa_group *grp) 2420 { 2421 return refcount_inc_not_zero(&grp->refcount); 2422 } 2423 2424 static inline void put_numa_group(struct numa_group *grp) 2425 { 2426 if (refcount_dec_and_test(&grp->refcount)) 2427 kfree_rcu(grp, rcu); 2428 } 2429 2430 static void task_numa_group(struct task_struct *p, int cpupid, int flags, 2431 int *priv) 2432 { 2433 struct numa_group *grp, *my_grp; 2434 struct task_struct *tsk; 2435 bool join = false; 2436 int cpu = cpupid_to_cpu(cpupid); 2437 int i; 2438 2439 if (unlikely(!deref_curr_numa_group(p))) { 2440 unsigned int size = sizeof(struct numa_group) + 2441 NR_NUMA_HINT_FAULT_STATS * 2442 nr_node_ids * sizeof(unsigned long); 2443 2444 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 2445 if (!grp) 2446 return; 2447 2448 refcount_set(&grp->refcount, 1); 2449 grp->active_nodes = 1; 2450 grp->max_faults_cpu = 0; 2451 spin_lock_init(&grp->lock); 2452 grp->gid = p->pid; 2453 2454 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2455 grp->faults[i] = p->numa_faults[i]; 2456 2457 grp->total_faults = p->total_numa_faults; 2458 2459 grp->nr_tasks++; 2460 rcu_assign_pointer(p->numa_group, grp); 2461 } 2462 2463 rcu_read_lock(); 2464 tsk = READ_ONCE(cpu_rq(cpu)->curr); 2465 2466 if (!cpupid_match_pid(tsk, cpupid)) 2467 goto no_join; 2468 2469 grp = rcu_dereference(tsk->numa_group); 2470 if (!grp) 2471 goto no_join; 2472 2473 my_grp = deref_curr_numa_group(p); 2474 if (grp == my_grp) 2475 goto no_join; 2476 2477 /* 2478 * Only join the other group if it's bigger; if we're the bigger group, 2479 * the other task will join us. 2480 */ 2481 if (my_grp->nr_tasks > grp->nr_tasks) 2482 goto no_join; 2483 2484 /* 2485 * Tie-break on the grp address. 2486 */ 2487 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) 2488 goto no_join; 2489 2490 /* Always join threads in the same process.
*/ 2491 if (tsk->mm == current->mm) 2492 join = true; 2493 2494 /* Simple filter to avoid false positives due to PID collisions */ 2495 if (flags & TNF_SHARED) 2496 join = true; 2497 2498 /* Update priv based on whether false sharing was detected */ 2499 *priv = !join; 2500 2501 if (join && !get_numa_group(grp)) 2502 goto no_join; 2503 2504 rcu_read_unlock(); 2505 2506 if (!join) 2507 return; 2508 2509 BUG_ON(irqs_disabled()); 2510 double_lock_irq(&my_grp->lock, &grp->lock); 2511 2512 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { 2513 my_grp->faults[i] -= p->numa_faults[i]; 2514 grp->faults[i] += p->numa_faults[i]; 2515 } 2516 my_grp->total_faults -= p->total_numa_faults; 2517 grp->total_faults += p->total_numa_faults; 2518 2519 my_grp->nr_tasks--; 2520 grp->nr_tasks++; 2521 2522 spin_unlock(&my_grp->lock); 2523 spin_unlock_irq(&grp->lock); 2524 2525 rcu_assign_pointer(p->numa_group, grp); 2526 2527 put_numa_group(my_grp); 2528 return; 2529 2530 no_join: 2531 rcu_read_unlock(); 2532 return; 2533 } 2534 2535 /* 2536 * Get rid of NUMA statistics associated with a task (either current or dead). 2537 * If @final is set, the task is dead and has reached refcount zero, so we can 2538 * safely free all relevant data structures. Otherwise, there might be 2539 * concurrent reads from places like load balancing and procfs, and we should 2540 * reset the data back to default state without freeing ->numa_faults. 2541 */ 2542 void task_numa_free(struct task_struct *p, bool final) 2543 { 2544 /* safe: p either is current or is being freed by current */ 2545 struct numa_group *grp = rcu_dereference_raw(p->numa_group); 2546 unsigned long *numa_faults = p->numa_faults; 2547 unsigned long flags; 2548 int i; 2549 2550 if (!numa_faults) 2551 return; 2552 2553 if (grp) { 2554 spin_lock_irqsave(&grp->lock, flags); 2555 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2556 grp->faults[i] -= p->numa_faults[i]; 2557 grp->total_faults -= p->total_numa_faults; 2558 2559 grp->nr_tasks--; 2560 spin_unlock_irqrestore(&grp->lock, flags); 2561 RCU_INIT_POINTER(p->numa_group, NULL); 2562 put_numa_group(grp); 2563 } 2564 2565 if (final) { 2566 p->numa_faults = NULL; 2567 kfree(numa_faults); 2568 } else { 2569 p->total_numa_faults = 0; 2570 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2571 numa_faults[i] = 0; 2572 } 2573 } 2574 2575 /* 2576 * Got a PROT_NONE fault for a page on @node. 
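 *
 * (@last_cpupid packs the CPU and pid bits of the page's previous
 * accessor; task_numa_fault() below compares it against current to
 * classify the access as private -- the same task touching the page
 * again -- or potentially shared.)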
2577 */ 2578 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) 2579 { 2580 struct task_struct *p = current; 2581 bool migrated = flags & TNF_MIGRATED; 2582 int cpu_node = task_node(current); 2583 int local = !!(flags & TNF_FAULT_LOCAL); 2584 struct numa_group *ng; 2585 int priv; 2586 2587 if (!static_branch_likely(&sched_numa_balancing)) 2588 return; 2589 2590 /* for example, ksmd faulting in a user's mm */ 2591 if (!p->mm) 2592 return; 2593 2594 /* Allocate buffer to track faults on a per-node basis */ 2595 if (unlikely(!p->numa_faults)) { 2596 int size = sizeof(*p->numa_faults) * 2597 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; 2598 2599 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); 2600 if (!p->numa_faults) 2601 return; 2602 2603 p->total_numa_faults = 0; 2604 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2605 } 2606 2607 /* 2608 * First accesses are treated as private, otherwise consider accesses 2609 * to be private if the accessing pid has not changed 2610 */ 2611 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { 2612 priv = 1; 2613 } else { 2614 priv = cpupid_match_pid(p, last_cpupid); 2615 if (!priv && !(flags & TNF_NO_GROUP)) 2616 task_numa_group(p, last_cpupid, flags, &priv); 2617 } 2618 2619 /* 2620 * If a workload spans multiple NUMA nodes, a shared fault that 2621 * occurs wholly within the set of nodes that the workload is 2622 * actively using should be counted as local. This allows the 2623 * scan rate to slow down when a workload has settled down. 2624 */ 2625 ng = deref_curr_numa_group(p); 2626 if (!priv && !local && ng && ng->active_nodes > 1 && 2627 numa_is_active_node(cpu_node, ng) && 2628 numa_is_active_node(mem_node, ng)) 2629 local = 1; 2630 2631 /* 2632 * Retry to migrate task to preferred node periodically, in case it 2633 * previously failed, or the scheduler moved us. 2634 */ 2635 if (time_after(jiffies, p->numa_migrate_retry)) { 2636 task_numa_placement(p); 2637 numa_migrate_preferred(p); 2638 } 2639 2640 if (migrated) 2641 p->numa_pages_migrated += pages; 2642 if (flags & TNF_MIGRATE_FAIL) 2643 p->numa_faults_locality[2] += pages; 2644 2645 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; 2646 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; 2647 p->numa_faults_locality[local] += pages; 2648 } 2649 2650 static void reset_ptenuma_scan(struct task_struct *p) 2651 { 2652 /* 2653 * We only did a read acquisition of the mmap sem, so 2654 * p->mm->numa_scan_seq is written to without exclusive access 2655 * and the update is not guaranteed to be atomic. That's not 2656 * much of an issue though, since this is just used for 2657 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not 2658 * expensive, to avoid any form of compiler optimizations: 2659 */ 2660 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); 2661 p->mm->numa_scan_offset = 0; 2662 } 2663 2664 /* 2665 * The expensive part of numa migration is done from task_work context. 2666 * Triggered from task_tick_numa(). 
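 *
 * In outline (a summary of the code below, nothing new): rate-limit
 * via mm->numa_next_scan, then walk a bounded window of this mm's
 * VMAs and mark eligible ranges with change_prot_numa(); the
 * hinting faults that result are accounted in task_numa_fault()
 * above.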
2667 */ 2668 static void task_numa_work(struct callback_head *work) 2669 { 2670 unsigned long migrate, next_scan, now = jiffies; 2671 struct task_struct *p = current; 2672 struct mm_struct *mm = p->mm; 2673 u64 runtime = p->se.sum_exec_runtime; 2674 struct vm_area_struct *vma; 2675 unsigned long start, end; 2676 unsigned long nr_pte_updates = 0; 2677 long pages, virtpages; 2678 2679 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); 2680 2681 work->next = work; 2682 /* 2683 * Who cares about NUMA placement when they're dying. 2684 * 2685 * NOTE: make sure not to dereference p->mm before this check, 2686 * exit_task_work() happens _after_ exit_mm() so we could be called 2687 * without p->mm even though we still had it when we enqueued this 2688 * work. 2689 */ 2690 if (p->flags & PF_EXITING) 2691 return; 2692 2693 if (!mm->numa_next_scan) { 2694 mm->numa_next_scan = now + 2695 msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2696 } 2697 2698 /* 2699 * Enforce maximal scan/migration frequency.. 2700 */ 2701 migrate = mm->numa_next_scan; 2702 if (time_before(now, migrate)) 2703 return; 2704 2705 if (p->numa_scan_period == 0) { 2706 p->numa_scan_period_max = task_scan_max(p); 2707 p->numa_scan_period = task_scan_start(p); 2708 } 2709 2710 next_scan = now + msecs_to_jiffies(p->numa_scan_period); 2711 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) 2712 return; 2713 2714 /* 2715 * Delay this task enough that another task of this mm will likely win 2716 * the next time around. 2717 */ 2718 p->node_stamp += 2 * TICK_NSEC; 2719 2720 start = mm->numa_scan_offset; 2721 pages = sysctl_numa_balancing_scan_size; 2722 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ 2723 virtpages = pages * 8; /* Scan up to this much virtual space */ 2724 if (!pages) 2725 return; 2726 2727 2728 if (!mmap_read_trylock(mm)) 2729 return; 2730 vma = find_vma(mm, start); 2731 if (!vma) { 2732 reset_ptenuma_scan(p); 2733 start = 0; 2734 vma = mm->mmap; 2735 } 2736 for (; vma; vma = vma->vm_next) { 2737 if (!vma_migratable(vma) || !vma_policy_mof(vma) || 2738 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { 2739 continue; 2740 } 2741 2742 /* 2743 * Shared library pages mapped by multiple processes are not 2744 * migrated as it is expected they are cache replicated. Avoid 2745 * hinting faults in read-only file-backed mappings or the vdso 2746 * as migrating the pages will be of marginal benefit. 2747 */ 2748 if (!vma->vm_mm || 2749 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 2750 continue; 2751 2752 /* 2753 * Skip inaccessible VMAs to avoid any confusion between 2754 * PROT_NONE and NUMA hinting ptes 2755 */ 2756 if (!vma_is_accessible(vma)) 2757 continue; 2758 2759 do { 2760 start = max(start, vma->vm_start); 2761 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 2762 end = min(end, vma->vm_end); 2763 nr_pte_updates = change_prot_numa(vma, start, end); 2764 2765 /* 2766 * Try to scan sysctl_numa_balancing_scan_size worth of 2767 * hpages that have at least one present PTE that 2768 * is not already pte-numa. If the VMA contains 2769 * areas that are unused or already full of prot_numa 2770 * PTEs, scan up to virtpages, to skip through those 2771 * areas faster.
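 *
 * For a sense of scale, assuming the default scan size of
 * 256MB and 4K pages: pages = 256 << (20 - 12) = 65536 PTE
 * updates per pass, while up to virtpages = 8 * 65536 = 524288
 * pages of virtual space may be skimmed while looking for them.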
2772 */ 2773 if (nr_pte_updates) 2774 pages -= (end - start) >> PAGE_SHIFT; 2775 virtpages -= (end - start) >> PAGE_SHIFT; 2776 2777 start = end; 2778 if (pages <= 0 || virtpages <= 0) 2779 goto out; 2780 2781 cond_resched(); 2782 } while (end != vma->vm_end); 2783 } 2784 2785 out: 2786 /* 2787 * It is possible to reach the end of the VMA list but the last few 2788 * VMAs are not guaranteed to be vma_migratable. If they are not, we 2789 * would find the !migratable VMA on the next scan but not reset the 2790 * scanner to the start so check it now. 2791 */ 2792 if (vma) 2793 mm->numa_scan_offset = start; 2794 else 2795 reset_ptenuma_scan(p); 2796 mmap_read_unlock(mm); 2797 2798 /* 2799 * Make sure tasks use at least 32x as much time to run other code 2800 * than they used here, to limit NUMA PTE scanning overhead to 3% max. 2801 * Usually update_task_scan_period slows down scanning enough; on an 2802 * overloaded system we need to limit overhead on a per task basis. 2803 */ 2804 if (unlikely(p->se.sum_exec_runtime != runtime)) { 2805 u64 diff = p->se.sum_exec_runtime - runtime; 2806 p->node_stamp += 32 * diff; 2807 } 2808 } 2809 2810 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 2811 { 2812 int mm_users = 0; 2813 struct mm_struct *mm = p->mm; 2814 2815 if (mm) { 2816 mm_users = atomic_read(&mm->mm_users); 2817 if (mm_users == 1) { 2818 mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2819 mm->numa_scan_seq = 0; 2820 } 2821 } 2822 p->node_stamp = 0; 2823 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; 2824 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 2825 /* Protect against double add, see task_tick_numa and task_numa_work */ 2826 p->numa_work.next = &p->numa_work; 2827 p->numa_faults = NULL; 2828 RCU_INIT_POINTER(p->numa_group, NULL); 2829 p->last_task_numa_placement = 0; 2830 p->last_sum_exec_runtime = 0; 2831 2832 init_task_work(&p->numa_work, task_numa_work); 2833 2834 /* New address space, reset the preferred nid */ 2835 if (!(clone_flags & CLONE_VM)) { 2836 p->numa_preferred_nid = NUMA_NO_NODE; 2837 return; 2838 } 2839 2840 /* 2841 * New thread, keep existing numa_preferred_nid which should be copied 2842 * already by arch_dup_task_struct but stagger when scans start. 2843 */ 2844 if (mm) { 2845 unsigned int delay; 2846 2847 delay = min_t(unsigned int, task_scan_max(current), 2848 current->numa_scan_period * mm_users * NSEC_PER_MSEC); 2849 delay += 2 * TICK_NSEC; 2850 p->node_stamp = delay; 2851 } 2852 } 2853 2854 /* 2855 * Drive the periodic memory faults.. 2856 */ 2857 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2858 { 2859 struct callback_head *work = &curr->numa_work; 2860 u64 period, now; 2861 2862 /* 2863 * We don't care about NUMA placement if we don't have memory. 2864 */ 2865 if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) 2866 return; 2867 2868 /* 2869 * Using runtime rather than walltime has the dual advantage that 2870 * we (mostly) drive the selection from busy threads and that the 2871 * task needs to have done some actual work before we bother with 2872 * NUMA placement.
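 *
 * (For a sense of scale: with a scan period of 1000ms, the work
 * is queued roughly once per second of CPU time consumed by the
 * task, however long that takes in wall-clock terms.)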
2873 */ 2874 now = curr->se.sum_exec_runtime; 2875 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; 2876 2877 if (now > curr->node_stamp + period) { 2878 if (!curr->node_stamp) 2879 curr->numa_scan_period = task_scan_start(curr); 2880 curr->node_stamp += period; 2881 2882 if (!time_before(jiffies, curr->mm->numa_next_scan)) 2883 task_work_add(curr, work, TWA_RESUME); 2884 } 2885 } 2886 2887 static void update_scan_period(struct task_struct *p, int new_cpu) 2888 { 2889 int src_nid = cpu_to_node(task_cpu(p)); 2890 int dst_nid = cpu_to_node(new_cpu); 2891 2892 if (!static_branch_likely(&sched_numa_balancing)) 2893 return; 2894 2895 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) 2896 return; 2897 2898 if (src_nid == dst_nid) 2899 return; 2900 2901 /* 2902 * Allow resets if faults have been trapped before one scan 2903 * has completed. This is most likely due to a new task that 2904 * is pulled cross-node due to wakeups or load balancing. 2905 */ 2906 if (p->numa_scan_seq) { 2907 /* 2908 * Avoid scan adjustments if moving to the preferred 2909 * node or if the task was not previously running on 2910 * the preferred node. 2911 */ 2912 if (dst_nid == p->numa_preferred_nid || 2913 (p->numa_preferred_nid != NUMA_NO_NODE && 2914 src_nid != p->numa_preferred_nid)) 2915 return; 2916 } 2917 2918 p->numa_scan_period = task_scan_start(p); 2919 } 2920 2921 #else 2922 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2923 { 2924 } 2925 2926 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) 2927 { 2928 } 2929 2930 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) 2931 { 2932 } 2933 2934 static inline void update_scan_period(struct task_struct *p, int new_cpu) 2935 { 2936 } 2937 2938 #endif /* CONFIG_NUMA_BALANCING */ 2939 2940 static void 2941 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2942 { 2943 update_load_add(&cfs_rq->load, se->load.weight); 2944 #ifdef CONFIG_SMP 2945 if (entity_is_task(se)) { 2946 struct rq *rq = rq_of(cfs_rq); 2947 2948 account_numa_enqueue(rq, task_of(se)); 2949 list_add(&se->group_node, &rq->cfs_tasks); 2950 } 2951 #endif 2952 cfs_rq->nr_running++; 2953 if (se_is_idle(se)) 2954 cfs_rq->idle_nr_running++; 2955 } 2956 2957 static void 2958 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2959 { 2960 update_load_sub(&cfs_rq->load, se->load.weight); 2961 #ifdef CONFIG_SMP 2962 if (entity_is_task(se)) { 2963 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); 2964 list_del_init(&se->group_node); 2965 } 2966 #endif 2967 cfs_rq->nr_running--; 2968 if (se_is_idle(se)) 2969 cfs_rq->idle_nr_running--; 2970 } 2971 2972 /* 2973 * Signed add and clamp on underflow. 2974 * 2975 * Explicitly do a load-store to ensure the intermediate value never hits 2976 * memory. This allows lockless observations without ever seeing the negative 2977 * values. 2978 */ 2979 #define add_positive(_ptr, _val) do { \ 2980 typeof(_ptr) ptr = (_ptr); \ 2981 typeof(_val) val = (_val); \ 2982 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 2983 \ 2984 res = var + val; \ 2985 \ 2986 if (val < 0 && res > var) \ 2987 res = 0; \ 2988 \ 2989 WRITE_ONCE(*ptr, res); \ 2990 } while (0) 2991 2992 /* 2993 * Unsigned subtract and clamp on underflow. 2994 * 2995 * Explicitly do a load-store to ensure the intermediate value never hits 2996 * memory. This allows lockless observations without ever seeing the negative 2997 * values. 
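 *
 * E.g., with hypothetical values for an unsigned long: 3 - 5
 * would wrap to a huge number; because the wrapped result is
 * larger than the old value (res > var), it is clamped to 0
 * instead, so lockless readers only ever observe 3 or 0.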
2998 */ 2999 #define sub_positive(_ptr, _val) do { \ 3000 typeof(_ptr) ptr = (_ptr); \ 3001 typeof(*ptr) val = (_val); \ 3002 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 3003 res = var - val; \ 3004 if (res > var) \ 3005 res = 0; \ 3006 WRITE_ONCE(*ptr, res); \ 3007 } while (0) 3008 3009 /* 3010 * Remove and clamp on negative, from a local variable. 3011 * 3012 * A variant of sub_positive(), which does not use explicit load-store 3013 * and is thus optimized for local variable updates. 3014 */ 3015 #define lsub_positive(_ptr, _val) do { \ 3016 typeof(_ptr) ptr = (_ptr); \ 3017 *ptr -= min_t(typeof(*ptr), *ptr, _val); \ 3018 } while (0) 3019 3020 #ifdef CONFIG_SMP 3021 static inline void 3022 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3023 { 3024 cfs_rq->avg.load_avg += se->avg.load_avg; 3025 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; 3026 } 3027 3028 static inline void 3029 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3030 { 3031 u32 divider = get_pelt_divider(&se->avg); 3032 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); 3033 cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider; 3034 } 3035 #else 3036 static inline void 3037 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 3038 static inline void 3039 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 3040 #endif 3041 3042 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, 3043 unsigned long weight) 3044 { 3045 if (se->on_rq) { 3046 /* commit outstanding execution time */ 3047 if (cfs_rq->curr == se) 3048 update_curr(cfs_rq); 3049 update_load_sub(&cfs_rq->load, se->load.weight); 3050 } 3051 dequeue_load_avg(cfs_rq, se); 3052 3053 update_load_set(&se->load, weight); 3054 3055 #ifdef CONFIG_SMP 3056 do { 3057 u32 divider = get_pelt_divider(&se->avg); 3058 3059 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); 3060 } while (0); 3061 #endif 3062 3063 enqueue_load_avg(cfs_rq, se); 3064 if (se->on_rq) 3065 update_load_add(&cfs_rq->load, se->load.weight); 3066 3067 } 3068 3069 void reweight_task(struct task_struct *p, int prio) 3070 { 3071 struct sched_entity *se = &p->se; 3072 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3073 struct load_weight *load = &se->load; 3074 unsigned long weight = scale_load(sched_prio_to_weight[prio]); 3075 3076 reweight_entity(cfs_rq, se, weight); 3077 load->inv_weight = sched_prio_to_wmult[prio]; 3078 } 3079 3080 #ifdef CONFIG_FAIR_GROUP_SCHED 3081 #ifdef CONFIG_SMP 3082 /* 3083 * All this does is approximate the hierarchical proportion which includes that 3084 * global sum we all love to hate. 3085 * 3086 * That is, the weight of a group entity is the proportional share of the 3087 * group weight based on the group runqueue weights. That is: 3088 * 3089 * tg->weight * grq->load.weight 3090 * ge->load.weight = ----------------------------- (1) 3091 * \Sum grq->load.weight 3092 * 3093 * Now, because that sum is prohibitively expensive to compute (been 3094 * there, done that) we approximate it with this average stuff. The average 3095 * moves slower and therefore the approximation is cheaper and more stable.
3096 * 3097 * So instead of the above, we substitute: 3098 * 3099 * grq->load.weight -> grq->avg.load_avg (2) 3100 * 3101 * which yields the following: 3102 * 3103 * tg->weight * grq->avg.load_avg 3104 * ge->load.weight = ------------------------------ (3) 3105 * tg->load_avg 3106 * 3107 * Where: tg->load_avg ~= \Sum grq->avg.load_avg 3108 * 3109 * That is shares_avg, and it is right (given the approximation (2)). 3110 * 3111 * The problem with it is that because the average is slow -- it was designed 3112 * to be exactly that of course -- this leads to transients in boundary 3113 * conditions. Specifically, the case where the group was idle and we start 3114 * one task. It takes time for our CPU's grq->avg.load_avg to build up, 3115 * yielding bad latency etc.. 3116 * 3117 * Now, in that special case (1) reduces to: 3118 * 3119 * tg->weight * grq->load.weight 3120 * ge->load.weight = ----------------------------- = tg->weight (4) 3121 * grq->load.weight 3122 * 3123 * That is, the sum collapses because all other CPUs are idle; the UP scenario. 3124 * 3125 * So what we do is modify our approximation (3) to approach (4) in the (near) 3126 * UP case, like: 3127 * 3128 * ge->load.weight = 3129 * 3130 * tg->weight * grq->load.weight 3131 * --------------------------------------------------- (5) 3132 * tg->load_avg - grq->avg.load_avg + grq->load.weight 3133 * 3134 * But because grq->load.weight can drop to 0, resulting in a divide by zero, 3135 * we need to use grq->avg.load_avg as its lower bound, which then gives: 3136 * 3137 * 3138 * tg->weight * grq->load.weight 3139 * ge->load.weight = ----------------------------- (6) 3140 * tg_load_avg' 3141 * 3142 * Where: 3143 * 3144 * tg_load_avg' = tg->load_avg - grq->avg.load_avg + 3145 * max(grq->load.weight, grq->avg.load_avg) 3146 * 3147 * And that is shares_weight and is icky. In the (near) UP case it approaches 3148 * (4) while in the normal case it approaches (3). It consistently 3149 * overestimates the ge->load.weight and therefore: 3150 * 3151 * \Sum ge->load.weight >= tg->weight 3152 * 3153 * hence icky! 3154 */ 3155 static long calc_group_shares(struct cfs_rq *cfs_rq) 3156 { 3157 long tg_weight, tg_shares, load, shares; 3158 struct task_group *tg = cfs_rq->tg; 3159 3160 tg_shares = READ_ONCE(tg->shares); 3161 3162 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); 3163 3164 tg_weight = atomic_long_read(&tg->load_avg); 3165 3166 /* Ensure tg_weight >= load */ 3167 tg_weight -= cfs_rq->tg_load_avg_contrib; 3168 tg_weight += load; 3169 3170 shares = (tg_shares * load); 3171 if (tg_weight) 3172 shares /= tg_weight; 3173 3174 /* 3175 * MIN_SHARES has to be unscaled here to support per-CPU partitioning 3176 * of a group with small tg->shares value. It is a floor value which is 3177 * assigned as a minimum load.weight to the sched_entity representing 3178 * the group on a CPU. 3179 * 3180 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 3181 * on an 8-core system with 8 tasks each runnable on one CPU shares has 3182 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In 3183 * case no task is runnable on a CPU MIN_SHARES=2 should be returned 3184 * instead of 0. 3185 */ 3186 return clamp_t(long, shares, MIN_SHARES, tg_shares); 3187 } 3188 #endif /* CONFIG_SMP */ 3189 3190 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); 3191 3192 /* 3193 * Recomputes the group entity based on the current state of its group 3194 * runqueue.
3195 */ 3196 static void update_cfs_group(struct sched_entity *se) 3197 { 3198 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3199 long shares; 3200 3201 if (!gcfs_rq) 3202 return; 3203 3204 if (throttled_hierarchy(gcfs_rq)) 3205 return; 3206 3207 #ifndef CONFIG_SMP 3208 shares = READ_ONCE(gcfs_rq->tg->shares); 3209 3210 if (likely(se->load.weight == shares)) 3211 return; 3212 #else 3213 shares = calc_group_shares(gcfs_rq); 3214 #endif 3215 3216 reweight_entity(cfs_rq_of(se), se, shares); 3217 } 3218 3219 #else /* CONFIG_FAIR_GROUP_SCHED */ 3220 static inline void update_cfs_group(struct sched_entity *se) 3221 { 3222 } 3223 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3224 3225 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) 3226 { 3227 struct rq *rq = rq_of(cfs_rq); 3228 3229 if (&rq->cfs == cfs_rq) { 3230 /* 3231 * There are a few boundary cases this might miss but it should 3232 * get called often enough that that should (hopefully) not be 3233 * a real problem. 3234 * 3235 * It will not get called when we go idle, because the idle 3236 * thread is a different class (!fair), nor will the utilization 3237 * number include things like RT tasks. 3238 * 3239 * As is, the util number is not freq-invariant (we'd have to 3240 * implement arch_scale_freq_capacity() for that). 3241 * 3242 * See cpu_util_cfs(). 3243 */ 3244 cpufreq_update_util(rq, flags); 3245 } 3246 } 3247 3248 #ifdef CONFIG_SMP 3249 #ifdef CONFIG_FAIR_GROUP_SCHED 3250 /* 3251 * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list 3252 * immediately before a parent cfs_rq, and cfs_rqs are removed from the list 3253 * bottom-up, we only have to test whether the cfs_rq before us on the list 3254 * is our child. 3255 * If cfs_rq is not on the list, test whether a child needs to be added to 3256 * connect a branch to the tree (see list_add_leaf_cfs_rq() for details). 3257 */ 3258 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) 3259 { 3260 struct cfs_rq *prev_cfs_rq; 3261 struct list_head *prev; 3262 3263 if (cfs_rq->on_list) { 3264 prev = cfs_rq->leaf_cfs_rq_list.prev; 3265 } else { 3266 struct rq *rq = rq_of(cfs_rq); 3267 3268 prev = rq->tmp_alone_branch; 3269 } 3270 3271 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); 3272 3273 return (prev_cfs_rq->tg->parent == cfs_rq->tg); 3274 } 3275 3276 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 3277 { 3278 if (cfs_rq->load.weight) 3279 return false; 3280 3281 if (cfs_rq->avg.load_sum) 3282 return false; 3283 3284 if (cfs_rq->avg.util_sum) 3285 return false; 3286 3287 if (cfs_rq->avg.runnable_sum) 3288 return false; 3289 3290 if (child_cfs_rq_on_list(cfs_rq)) 3291 return false; 3292 3293 /* 3294 * _avg must be zero when _sum is zero because _avg = _sum / divider 3295 * Make sure that rounding and/or propagation of PELT values never 3296 * break this. 3297 */ 3298 SCHED_WARN_ON(cfs_rq->avg.load_avg || 3299 cfs_rq->avg.util_avg || 3300 cfs_rq->avg.runnable_avg); 3301 3302 return true; 3303 } 3304 3305 /** 3306 * update_tg_load_avg - update the tg's load avg 3307 * @cfs_rq: the cfs_rq whose avg changed 3308 * 3309 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. 3310 * However, because tg->load_avg is a global value there are performance 3311 * considerations. 3312 * 3313 * In order to avoid having to look at the other cfs_rq's, we use a 3314 * differential update where we store the last value we propagated. This in 3315 * turn allows skipping updates if the differential is 'small'.
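 *
 * Concretely, per the code below: the delta is folded into
 * tg->load_avg only once it exceeds 1/64th (~1.5%) of the last
 * propagated contribution, so small fluctuations never touch the
 * shared atomic.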
3316 * 3317 * Updating tg's load_avg is necessary before update_cfs_share(). 3318 */ 3319 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) 3320 { 3321 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; 3322 3323 /* 3324 * No need to update load_avg for root_task_group as it is not used. 3325 */ 3326 if (cfs_rq->tg == &root_task_group) 3327 return; 3328 3329 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { 3330 atomic_long_add(delta, &cfs_rq->tg->load_avg); 3331 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; 3332 } 3333 } 3334 3335 /* 3336 * Called within set_task_rq() right before setting a task's CPU. The 3337 * caller only guarantees p->pi_lock is held; no other assumptions, 3338 * including the state of rq->lock, should be made. 3339 */ 3340 void set_task_rq_fair(struct sched_entity *se, 3341 struct cfs_rq *prev, struct cfs_rq *next) 3342 { 3343 u64 p_last_update_time; 3344 u64 n_last_update_time; 3345 3346 if (!sched_feat(ATTACH_AGE_LOAD)) 3347 return; 3348 3349 /* 3350 * We are supposed to update the task to "current" time, then it's up to 3351 * date and ready to go to new CPU/cfs_rq. But we have difficulty in 3352 * getting what current time is, so simply throw away the out-of-date 3353 * time. This will result in the wakee task being less decayed, but giving 3354 * the wakee more load doesn't sound bad. 3355 */ 3356 if (!(se->avg.last_update_time && prev)) 3357 return; 3358 3359 #ifndef CONFIG_64BIT 3360 { 3361 u64 p_last_update_time_copy; 3362 u64 n_last_update_time_copy; 3363 3364 do { 3365 p_last_update_time_copy = prev->load_last_update_time_copy; 3366 n_last_update_time_copy = next->load_last_update_time_copy; 3367 3368 smp_rmb(); 3369 3370 p_last_update_time = prev->avg.last_update_time; 3371 n_last_update_time = next->avg.last_update_time; 3372 3373 } while (p_last_update_time != p_last_update_time_copy || 3374 n_last_update_time != n_last_update_time_copy); 3375 } 3376 #else 3377 p_last_update_time = prev->avg.last_update_time; 3378 n_last_update_time = next->avg.last_update_time; 3379 #endif 3380 __update_load_avg_blocked_se(p_last_update_time, se); 3381 se->avg.last_update_time = n_last_update_time; 3382 } 3383 3384 3385 /* 3386 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to 3387 * propagate its contribution. The key to this propagation is the invariant 3388 * that for each group: 3389 * 3390 * ge->avg == grq->avg (1) 3391 * 3392 * _IFF_ we look at the pure running and runnable sums. Because they 3393 * represent the very same entity, just at different points in the hierarchy. 3394 * 3395 * Per the above, update_tg_cfs_util() and update_tg_cfs_runnable() are trivial 3396 * and simply copy the running/runnable sum over (but still wrong, because 3397 * the group entity and group rq do not have their PELT windows aligned). 3398 * 3399 * However, update_tg_cfs_load() is more complex. So we have: 3400 * 3401 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2) 3402 * 3403 * And since, like util, the runnable part should be directly transferable, 3404 * the following would _appear_ to be the straightforward approach: 3405 * 3406 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3) 3407 * 3408 * And per (1) we have: 3409 * 3410 * ge->avg.runnable_avg == grq->avg.runnable_avg 3411 * 3412 * Which gives: 3413 * 3414 * ge->load.weight * grq->avg.load_avg 3415 * ge->avg.load_avg = ----------------------------------- (4) 3416 * grq->load.weight 3417 * 3418 * Except that is wrong!
3419 * 3420 * Because while for entities historical weight is not important and we 3421 * really only care about our future and therefore can consider a pure 3422 * runnable sum, runqueues can NOT do this. 3423 * 3424 * We specifically want runqueues to have a load_avg that includes 3425 * historical weights. Those represent the blocked load, the load we expect 3426 * to (shortly) return to us. This only works by keeping the weights as 3427 * integral part of the sum. We therefore cannot decompose as per (3). 3428 * 3429 * Another reason this doesn't work is that runnable isn't a 0-sum entity. 3430 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the 3431 * rq itself is runnable anywhere between 2/3 and 1 depending on how the 3432 * runnable section of these tasks overlap (or not). If they were to perfectly 3433 * align the rq as a whole would be runnable 2/3 of the time. If however we 3434 * always have at least 1 runnable task, the rq as a whole is always runnable. 3435 * 3436 * So we'll have to approximate.. :/ 3437 * 3438 * Given the constraint: 3439 * 3440 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX 3441 * 3442 * We can construct a rule that adds runnable to a rq by assuming minimal 3443 * overlap. 3444 * 3445 * On removal, we'll assume each task is equally runnable; which yields: 3446 * 3447 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight 3448 * 3449 * XXX: only do this for the part of runnable > running ? 3450 * 3451 */ 3452 3453 static inline void 3454 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3455 { 3456 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; 3457 u32 divider; 3458 3459 /* Nothing to update */ 3460 if (!delta) 3461 return; 3462 3463 /* 3464 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3465 * See ___update_load_avg() for details. 3466 */ 3467 divider = get_pelt_divider(&cfs_rq->avg); 3468 3469 /* Set new sched_entity's utilization */ 3470 se->avg.util_avg = gcfs_rq->avg.util_avg; 3471 se->avg.util_sum = se->avg.util_avg * divider; 3472 3473 /* Update parent cfs_rq utilization */ 3474 add_positive(&cfs_rq->avg.util_avg, delta); 3475 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider; 3476 } 3477 3478 static inline void 3479 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3480 { 3481 long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; 3482 u32 divider; 3483 3484 /* Nothing to update */ 3485 if (!delta) 3486 return; 3487 3488 /* 3489 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3490 * See ___update_load_avg() for details. 3491 */ 3492 divider = get_pelt_divider(&cfs_rq->avg); 3493 3494 /* Set new sched_entity's runnable */ 3495 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; 3496 se->avg.runnable_sum = se->avg.runnable_avg * divider; 3497 3498 /* Update parent cfs_rq runnable */ 3499 add_positive(&cfs_rq->avg.runnable_avg, delta); 3500 cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider; 3501 } 3502 3503 static inline void 3504 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3505 { 3506 long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; 3507 unsigned long load_avg; 3508 u64 load_sum = 0; 3509 u32 divider; 3510 3511 if (!runnable_sum) 3512 return; 3513 3514 gcfs_rq->prop_runnable_sum = 0; 3515 3516 /* 3517 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 
3518 * See ___update_load_avg() for details. 3519 */ 3520 divider = get_pelt_divider(&cfs_rq->avg); 3521 3522 if (runnable_sum >= 0) { 3523 /* 3524 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until 3525 * the CPU is saturated running == runnable. 3526 */ 3527 runnable_sum += se->avg.load_sum; 3528 runnable_sum = min_t(long, runnable_sum, divider); 3529 } else { 3530 /* 3531 * Estimate the new unweighted runnable_sum of the gcfs_rq by 3532 * assuming all tasks are equally runnable. 3533 */ 3534 if (scale_load_down(gcfs_rq->load.weight)) { 3535 load_sum = div_s64(gcfs_rq->avg.load_sum, 3536 scale_load_down(gcfs_rq->load.weight)); 3537 } 3538 3539 /* But make sure to not inflate se's runnable */ 3540 runnable_sum = min(se->avg.load_sum, load_sum); 3541 } 3542 3543 /* 3544 * runnable_sum can't be lower than running_sum. 3545 * Rescale running sum to be in the same range as runnable sum 3546 * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT] 3547 * runnable_sum is in [0 : LOAD_AVG_MAX] 3548 */ 3549 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; 3550 runnable_sum = max(runnable_sum, running_sum); 3551 3552 load_sum = (s64)se_weight(se) * runnable_sum; 3553 load_avg = div_s64(load_sum, divider); 3554 3555 se->avg.load_sum = runnable_sum; 3556 3557 delta = load_avg - se->avg.load_avg; 3558 if (!delta) 3559 return; 3560 3561 se->avg.load_avg = load_avg; 3562 3563 add_positive(&cfs_rq->avg.load_avg, delta); 3564 cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider; 3565 } 3566 3567 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) 3568 { 3569 cfs_rq->propagate = 1; 3570 cfs_rq->prop_runnable_sum += runnable_sum; 3571 } 3572 3573 /* Update task and its cfs_rq load average */ 3574 static inline int propagate_entity_load_avg(struct sched_entity *se) 3575 { 3576 struct cfs_rq *cfs_rq, *gcfs_rq; 3577 3578 if (entity_is_task(se)) 3579 return 0; 3580 3581 gcfs_rq = group_cfs_rq(se); 3582 if (!gcfs_rq->propagate) 3583 return 0; 3584 3585 gcfs_rq->propagate = 0; 3586 3587 cfs_rq = cfs_rq_of(se); 3588 3589 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); 3590 3591 update_tg_cfs_util(cfs_rq, se, gcfs_rq); 3592 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); 3593 update_tg_cfs_load(cfs_rq, se, gcfs_rq); 3594 3595 trace_pelt_cfs_tp(cfs_rq); 3596 trace_pelt_se_tp(se); 3597 3598 return 1; 3599 } 3600 3601 /* 3602 * Check if we need to update the load and the utilization of a blocked 3603 * group_entity: 3604 */ 3605 static inline bool skip_blocked_update(struct sched_entity *se) 3606 { 3607 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3608 3609 /* 3610 * If the sched_entity still has non-zero load or utilization, we have to 3611 * decay it: 3612 */ 3613 if (se->avg.load_avg || se->avg.util_avg) 3614 return false; 3615 3616 /* 3617 * If there is a pending propagation, we have to update the load and 3618 * the utilization of the sched_entity: 3619 */ 3620 if (gcfs_rq->propagate) 3621 return false; 3622 3623 /* 3624 * Otherwise, the load and the utilization of the sched_entity are 3625 * already zero and there is no pending propagation, so it will be a 3626 * waste of time to try to decay it: 3627 */ 3628 return true; 3629 } 3630 3631 #else /* CONFIG_FAIR_GROUP_SCHED */ 3632 3633 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {} 3634 3635 static inline int propagate_entity_load_avg(struct sched_entity *se) 3636 { 3637 return 0; 3638 } 3639 3640 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} 3641 3642
#endif /* CONFIG_FAIR_GROUP_SCHED */ 3643 3644 /** 3645 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages 3646 * @now: current time, as per cfs_rq_clock_pelt() 3647 * @cfs_rq: cfs_rq to update 3648 * 3649 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) 3650 * avg. The immediate corollary is that all (fair) tasks must be attached, see 3651 * post_init_entity_util_avg(). 3652 * 3653 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example. 3654 * 3655 * Returns true if the load decayed or we removed load. 3656 * 3657 * Since both these conditions indicate a changed cfs_rq->avg.load we should 3658 * call update_tg_load_avg() when this function returns true. 3659 */ 3660 static inline int 3661 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 3662 { 3663 unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0; 3664 struct sched_avg *sa = &cfs_rq->avg; 3665 int decayed = 0; 3666 3667 if (cfs_rq->removed.nr) { 3668 unsigned long r; 3669 u32 divider = get_pelt_divider(&cfs_rq->avg); 3670 3671 raw_spin_lock(&cfs_rq->removed.lock); 3672 swap(cfs_rq->removed.util_avg, removed_util); 3673 swap(cfs_rq->removed.load_avg, removed_load); 3674 swap(cfs_rq->removed.runnable_avg, removed_runnable); 3675 cfs_rq->removed.nr = 0; 3676 raw_spin_unlock(&cfs_rq->removed.lock); 3677 3678 r = removed_load; 3679 sub_positive(&sa->load_avg, r); 3680 sa->load_sum = sa->load_avg * divider; 3681 3682 r = removed_util; 3683 sub_positive(&sa->util_avg, r); 3684 sa->util_sum = sa->util_avg * divider; 3685 3686 r = removed_runnable; 3687 sub_positive(&sa->runnable_avg, r); 3688 sa->runnable_sum = sa->runnable_avg * divider; 3689 3690 /* 3691 * removed_runnable is the unweighted version of removed_load so we 3692 * can use it to estimate removed_load_sum. 3693 */ 3694 add_tg_cfs_propagate(cfs_rq, 3695 -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT); 3696 3697 decayed = 1; 3698 } 3699 3700 decayed |= __update_load_avg_cfs_rq(now, cfs_rq); 3701 3702 #ifndef CONFIG_64BIT 3703 smp_wmb(); 3704 cfs_rq->load_last_update_time_copy = sa->last_update_time; 3705 #endif 3706 3707 return decayed; 3708 } 3709 3710 /** 3711 * attach_entity_load_avg - attach this entity to its cfs_rq load avg 3712 * @cfs_rq: cfs_rq to attach to 3713 * @se: sched_entity to attach 3714 * 3715 * Must call update_cfs_rq_load_avg() before this, since we rely on 3716 * cfs_rq->avg.last_update_time being current. 3717 */ 3718 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3719 { 3720 /* 3721 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3722 * See ___update_load_avg() for details. 3723 */ 3724 u32 divider = get_pelt_divider(&cfs_rq->avg); 3725 3726 /* 3727 * When we attach the @se to the @cfs_rq, we must align the decay 3728 * window because without that, really weird and wonderful things can 3729 * happen. 3730 * 3731 * XXX illustrate 3732 */ 3733 se->avg.last_update_time = cfs_rq->avg.last_update_time; 3734 se->avg.period_contrib = cfs_rq->avg.period_contrib; 3735 3736 /* 3737 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new 3738 * period_contrib. This isn't strictly correct, but since we're 3739 * entirely outside of the PELT hierarchy, nobody cares if we truncate 3740 * _sum a little. 
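 *
 * That is, each _sum below is rebuilt as _avg * divider, where the
 * divider comes from get_pelt_divider() and is roughly
 * LOAD_AVG_MAX - 1024 + period_contrib.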
*/ 3742 se->avg.util_sum = se->avg.util_avg * divider; 3743 3744 se->avg.runnable_sum = se->avg.runnable_avg * divider; 3745 3746 se->avg.load_sum = divider; 3747 if (se_weight(se)) { 3748 se->avg.load_sum = 3749 div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); 3750 } 3751 3752 enqueue_load_avg(cfs_rq, se); 3753 cfs_rq->avg.util_avg += se->avg.util_avg; 3754 cfs_rq->avg.util_sum += se->avg.util_sum; 3755 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; 3756 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; 3757 3758 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); 3759 3760 cfs_rq_util_change(cfs_rq, 0); 3761 3762 trace_pelt_cfs_tp(cfs_rq); 3763 } 3764 3765 /** 3766 * detach_entity_load_avg - detach this entity from its cfs_rq load avg 3767 * @cfs_rq: cfs_rq to detach from 3768 * @se: sched_entity to detach 3769 * 3770 * Must call update_cfs_rq_load_avg() before this, since we rely on 3771 * cfs_rq->avg.last_update_time being current. 3772 */ 3773 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3774 { 3775 /* 3776 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3777 * See ___update_load_avg() for details. 3778 */ 3779 u32 divider = get_pelt_divider(&cfs_rq->avg); 3780 3781 dequeue_load_avg(cfs_rq, se); 3782 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); 3783 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider; 3784 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); 3785 cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider; 3786 3787 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); 3788 3789 cfs_rq_util_change(cfs_rq, 0); 3790 3791 trace_pelt_cfs_tp(cfs_rq); 3792 } 3793 3794 /* 3795 * Optional action to be done while updating the load average 3796 */ 3797 #define UPDATE_TG 0x1 3798 #define SKIP_AGE_LOAD 0x2 3799 #define DO_ATTACH 0x4 3800 3801 /* Update task and its cfs_rq load average */ 3802 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3803 { 3804 u64 now = cfs_rq_clock_pelt(cfs_rq); 3805 int decayed; 3806 3807 /* 3808 * Track task load average for carrying it to new CPU after migration, and 3809 * track group sched_entity load average for task_h_load calc in migration 3810 */ 3811 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) 3812 __update_load_avg_se(now, cfs_rq, se); 3813 3814 decayed = update_cfs_rq_load_avg(now, cfs_rq); 3815 decayed |= propagate_entity_load_avg(se); 3816 3817 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { 3818 3819 /* 3820 * DO_ATTACH means we're here from enqueue_entity(). 3821 * !last_update_time means we've passed through 3822 * migrate_task_rq_fair() indicating we migrated. 3823 * 3824 * IOW we're enqueueing a task on a new CPU.
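 *
 * (migrate_task_rq_fair() zeroes se->avg.last_update_time on the
 * way out; seeing it zero here is what distinguishes this from a
 * plain enqueue.)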
3825 */ 3826 attach_entity_load_avg(cfs_rq, se); 3827 update_tg_load_avg(cfs_rq); 3828 3829 } else if (decayed) { 3830 cfs_rq_util_change(cfs_rq, 0); 3831 3832 if (flags & UPDATE_TG) 3833 update_tg_load_avg(cfs_rq); 3834 } 3835 } 3836 3837 #ifndef CONFIG_64BIT 3838 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3839 { 3840 u64 last_update_time_copy; 3841 u64 last_update_time; 3842 3843 do { 3844 last_update_time_copy = cfs_rq->load_last_update_time_copy; 3845 smp_rmb(); 3846 last_update_time = cfs_rq->avg.last_update_time; 3847 } while (last_update_time != last_update_time_copy); 3848 3849 return last_update_time; 3850 } 3851 #else 3852 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3853 { 3854 return cfs_rq->avg.last_update_time; 3855 } 3856 #endif 3857 3858 /* 3859 * Synchronize entity load avg of dequeued entity without locking 3860 * the previous rq. 3861 */ 3862 static void sync_entity_load_avg(struct sched_entity *se) 3863 { 3864 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3865 u64 last_update_time; 3866 3867 last_update_time = cfs_rq_last_update_time(cfs_rq); 3868 __update_load_avg_blocked_se(last_update_time, se); 3869 } 3870 3871 /* 3872 * Task first catches up with cfs_rq, and then subtract 3873 * itself from the cfs_rq (task must be off the queue now). 3874 */ 3875 static void remove_entity_load_avg(struct sched_entity *se) 3876 { 3877 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3878 unsigned long flags; 3879 3880 /* 3881 * tasks cannot exit without having gone through wake_up_new_task() -> 3882 * post_init_entity_util_avg() which will have added things to the 3883 * cfs_rq, so we can remove unconditionally. 3884 */ 3885 3886 sync_entity_load_avg(se); 3887 3888 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); 3889 ++cfs_rq->removed.nr; 3890 cfs_rq->removed.util_avg += se->avg.util_avg; 3891 cfs_rq->removed.load_avg += se->avg.load_avg; 3892 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; 3893 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); 3894 } 3895 3896 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) 3897 { 3898 return cfs_rq->avg.runnable_avg; 3899 } 3900 3901 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) 3902 { 3903 return cfs_rq->avg.load_avg; 3904 } 3905 3906 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf); 3907 3908 static inline unsigned long task_util(struct task_struct *p) 3909 { 3910 return READ_ONCE(p->se.avg.util_avg); 3911 } 3912 3913 static inline unsigned long _task_util_est(struct task_struct *p) 3914 { 3915 struct util_est ue = READ_ONCE(p->se.avg.util_est); 3916 3917 return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED)); 3918 } 3919 3920 static inline unsigned long task_util_est(struct task_struct *p) 3921 { 3922 return max(task_util(p), _task_util_est(p)); 3923 } 3924 3925 #ifdef CONFIG_UCLAMP_TASK 3926 static inline unsigned long uclamp_task_util(struct task_struct *p) 3927 { 3928 return clamp(task_util_est(p), 3929 uclamp_eff_value(p, UCLAMP_MIN), 3930 uclamp_eff_value(p, UCLAMP_MAX)); 3931 } 3932 #else 3933 static inline unsigned long uclamp_task_util(struct task_struct *p) 3934 { 3935 return task_util_est(p); 3936 } 3937 #endif 3938 3939 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, 3940 struct task_struct *p) 3941 { 3942 unsigned int enqueued; 3943 3944 if (!sched_feat(UTIL_EST)) 3945 return; 3946 3947 /* Update root cfs_rq's estimated utilization */ 3948 enqueued = cfs_rq->avg.util_est.enqueued; 3949 enqueued += 
_task_util_est(p);
	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);

	trace_sched_util_est_cfs_tp(cfs_rq);
}

static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
				    struct task_struct *p)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Update root cfs_rq's estimated utilization */
	enqueued  = cfs_rq->avg.util_est.enqueued;
	enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);

	trace_sched_util_est_cfs_tp(cfs_rq);
}

#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)

/*
 * Check if a (signed) value is within a specified (unsigned) margin,
 * based on the observation that:
 *
 *	abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
 *
 * NOTE: this only works when value + margin < INT_MAX.
 */
static inline bool within_margin(int value, int margin)
{
	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}

static inline void util_est_update(struct cfs_rq *cfs_rq,
				   struct task_struct *p,
				   bool task_sleep)
{
	long last_ewma_diff, last_enqueued_diff;
	struct util_est ue;

	if (!sched_feat(UTIL_EST))
		return;

	/*
	 * Skip update of task's estimated utilization when the task has not
	 * yet completed an activation, e.g. being migrated.
	 */
	if (!task_sleep)
		return;

	/*
	 * If the PELT values haven't changed since enqueue time,
	 * skip the util_est update.
	 */
	ue = p->se.avg.util_est;
	if (ue.enqueued & UTIL_AVG_UNCHANGED)
		return;

	last_enqueued_diff = ue.enqueued;

	/*
	 * Reset EWMA on utilization increases, the moving average is used only
	 * to smooth utilization decreases.
	 */
	ue.enqueued = task_util(p);
	if (sched_feat(UTIL_EST_FASTUP)) {
		if (ue.ewma < ue.enqueued) {
			ue.ewma = ue.enqueued;
			goto done;
		}
	}

	/*
	 * Skip update of task's estimated utilization when its members are
	 * already within ~1% of its last activation value.
	 */
	last_ewma_diff = ue.enqueued - ue.ewma;
	last_enqueued_diff -= ue.enqueued;
	if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
		if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
			goto done;

		return;
	}

	/*
	 * To avoid overestimation of actual task utilization, skip the update
	 * if we cannot guarantee there is idle time on this CPU.
	 */
	if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
		return;

	/*
	 * Update Task's estimated utilization
	 *
	 * When *p completes an activation we can consolidate another sample
	 * of the task size.
This is done by storing the current PELT value 4050 * as ue.enqueued and by using this value to update the Exponential 4051 * Weighted Moving Average (EWMA): 4052 * 4053 * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1) 4054 * = w * task_util(p) + ewma(t-1) - w * ewma(t-1) 4055 * = w * (task_util(p) - ewma(t-1)) + ewma(t-1) 4056 * = w * ( last_ewma_diff ) + ewma(t-1) 4057 * = w * (last_ewma_diff + ewma(t-1) / w) 4058 * 4059 * Where 'w' is the weight of new samples, which is configured to be 4060 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT) 4061 */ 4062 ue.ewma <<= UTIL_EST_WEIGHT_SHIFT; 4063 ue.ewma += last_ewma_diff; 4064 ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; 4065 done: 4066 ue.enqueued |= UTIL_AVG_UNCHANGED; 4067 WRITE_ONCE(p->se.avg.util_est, ue); 4068 4069 trace_sched_util_est_se_tp(&p->se); 4070 } 4071 4072 static inline int task_fits_capacity(struct task_struct *p, 4073 unsigned long capacity) 4074 { 4075 return fits_capacity(uclamp_task_util(p), capacity); 4076 } 4077 4078 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) 4079 { 4080 if (!static_branch_unlikely(&sched_asym_cpucapacity)) 4081 return; 4082 4083 if (!p || p->nr_cpus_allowed == 1) { 4084 rq->misfit_task_load = 0; 4085 return; 4086 } 4087 4088 if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { 4089 rq->misfit_task_load = 0; 4090 return; 4091 } 4092 4093 /* 4094 * Make sure that misfit_task_load will not be null even if 4095 * task_h_load() returns 0. 4096 */ 4097 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); 4098 } 4099 4100 #else /* CONFIG_SMP */ 4101 4102 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 4103 { 4104 return true; 4105 } 4106 4107 #define UPDATE_TG 0x0 4108 #define SKIP_AGE_LOAD 0x0 4109 #define DO_ATTACH 0x0 4110 4111 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) 4112 { 4113 cfs_rq_util_change(cfs_rq, 0); 4114 } 4115 4116 static inline void remove_entity_load_avg(struct sched_entity *se) {} 4117 4118 static inline void 4119 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 4120 static inline void 4121 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 4122 4123 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf) 4124 { 4125 return 0; 4126 } 4127 4128 static inline void 4129 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 4130 4131 static inline void 4132 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 4133 4134 static inline void 4135 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, 4136 bool task_sleep) {} 4137 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} 4138 4139 #endif /* CONFIG_SMP */ 4140 4141 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) 4142 { 4143 #ifdef CONFIG_SCHED_DEBUG 4144 s64 d = se->vruntime - cfs_rq->min_vruntime; 4145 4146 if (d < 0) 4147 d = -d; 4148 4149 if (d > 3*sysctl_sched_latency) 4150 schedstat_inc(cfs_rq->nr_spread_over); 4151 #endif 4152 } 4153 4154 static void 4155 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) 4156 { 4157 u64 vruntime = cfs_rq->min_vruntime; 4158 4159 /* 4160 * The 'current' period is already promised to the current tasks, 4161 * however the extra weight of the new task will slow them down a 4162 * little, place the new task so that it fits in the slot that 4163 * stays open at the end. 
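	 *
	 * Concretely, in the code below: a forked task is debited one vslice
	 * and starts *after* min_vruntime (START_DEBIT), while a waking
	 * sleeper is credited up to one latency period (halved under
	 * GENTLE_FAIR_SLEEPERS, and only one minimum granularity for
	 * SCHED_IDLE entities) and is placed *before* it.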
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial) {
		unsigned long thresh;

		if (se_is_idle(se))
			thresh = sysctl_sched_min_granularity;
		else
			thresh = sysctl_sched_latency;

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	se->vruntime = max_vruntime(se->vruntime, vruntime);
}

static void check_enqueue_throttle(struct cfs_rq *cfs_rq);

static inline bool cfs_bandwidth_used(void);

/*
 * MIGRATION
 *
 *	dequeue
 *	  update_curr()
 *	    update_min_vruntime()
 *	  vruntime -= min_vruntime
 *
 *	enqueue
 *	  update_curr()
 *	    update_min_vruntime()
 *	  vruntime += min_vruntime
 *
 * this way the vruntime transition between RQs is done when both
 * min_vruntime are up-to-date.
 *
 * WAKEUP (remote)
 *
 *	->migrate_task_rq_fair() (p->state == TASK_WAKING)
 *	  vruntime -= min_vruntime
 *
 *	enqueue
 *	  update_curr()
 *	    update_min_vruntime()
 *	  vruntime += min_vruntime
 *
 * this way we work with a possibly stale min_vruntime on the originating
 * CPU (we cannot update it without taking the remote rq->lock) but with an
 * up-to-date min_vruntime on the destination CPU.
 */

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
	bool curr = cfs_rq->curr == se;

	/*
	 * If we're the current task, we must renormalise before calling
	 * update_curr().
	 */
	if (renorm && curr)
		se->vruntime += cfs_rq->min_vruntime;

	update_curr(cfs_rq);

	/*
	 * Otherwise, renormalise after, such that we're placed at the current
	 * moment in time, instead of some random moment in the past. Being
	 * placed in the past could significantly boost this task to the
	 * fairness detriment of existing tasks.
	 */
	if (renorm && !curr)
		se->vruntime += cfs_rq->min_vruntime;

	/*
	 * When enqueuing a sched_entity, we must:
	 *   - Update loads to have both entity and cfs_rq synced with now.
	 *   - Add its load to cfs_rq->runnable_avg
	 *   - For group_entity, update its weight to reflect the new share of
	 *     its group cfs_rq
	 *   - Add its new weight to cfs_rq->load.weight
	 */
	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
	se_update_runnable(se);
	update_cfs_group(se);
	account_entity_enqueue(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP)
		place_entity(cfs_rq, se, 0);

	check_schedstat_required();
	update_stats_enqueue_fair(cfs_rq, se, flags);
	check_spread(cfs_rq, se);
	if (!curr)
		__enqueue_entity(cfs_rq, se);
	se->on_rq = 1;

	/*
	 * When bandwidth control is enabled, the cfs_rq might have been
	 * removed from the leaf list because a parent was throttled while
	 * cfs_rq->nr_running > 1. Try to add it back unconditionally.
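	 *
	 * (list_add_leaf_cfs_rq() tolerates the unconditional call: it bails
	 * out early when the cfs_rq is already on the list.)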
4276 */ 4277 if (cfs_rq->nr_running == 1 || cfs_bandwidth_used()) 4278 list_add_leaf_cfs_rq(cfs_rq); 4279 4280 if (cfs_rq->nr_running == 1) 4281 check_enqueue_throttle(cfs_rq); 4282 } 4283 4284 static void __clear_buddies_last(struct sched_entity *se) 4285 { 4286 for_each_sched_entity(se) { 4287 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4288 if (cfs_rq->last != se) 4289 break; 4290 4291 cfs_rq->last = NULL; 4292 } 4293 } 4294 4295 static void __clear_buddies_next(struct sched_entity *se) 4296 { 4297 for_each_sched_entity(se) { 4298 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4299 if (cfs_rq->next != se) 4300 break; 4301 4302 cfs_rq->next = NULL; 4303 } 4304 } 4305 4306 static void __clear_buddies_skip(struct sched_entity *se) 4307 { 4308 for_each_sched_entity(se) { 4309 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4310 if (cfs_rq->skip != se) 4311 break; 4312 4313 cfs_rq->skip = NULL; 4314 } 4315 } 4316 4317 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) 4318 { 4319 if (cfs_rq->last == se) 4320 __clear_buddies_last(se); 4321 4322 if (cfs_rq->next == se) 4323 __clear_buddies_next(se); 4324 4325 if (cfs_rq->skip == se) 4326 __clear_buddies_skip(se); 4327 } 4328 4329 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); 4330 4331 static void 4332 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 4333 { 4334 /* 4335 * Update run-time statistics of the 'current'. 4336 */ 4337 update_curr(cfs_rq); 4338 4339 /* 4340 * When dequeuing a sched_entity, we must: 4341 * - Update loads to have both entity and cfs_rq synced with now. 4342 * - Subtract its load from the cfs_rq->runnable_avg. 4343 * - Subtract its previous weight from cfs_rq->load.weight. 4344 * - For group entity, update its weight to reflect the new share 4345 * of its group cfs_rq. 4346 */ 4347 update_load_avg(cfs_rq, se, UPDATE_TG); 4348 se_update_runnable(se); 4349 4350 update_stats_dequeue_fair(cfs_rq, se, flags); 4351 4352 clear_buddies(cfs_rq, se); 4353 4354 if (se != cfs_rq->curr) 4355 __dequeue_entity(cfs_rq, se); 4356 se->on_rq = 0; 4357 account_entity_dequeue(cfs_rq, se); 4358 4359 /* 4360 * Normalize after update_curr(); which will also have moved 4361 * min_vruntime if @se is the one holding it back. But before doing 4362 * update_min_vruntime() again, which will discount @se's position and 4363 * can move min_vruntime forward still more. 4364 */ 4365 if (!(flags & DEQUEUE_SLEEP)) 4366 se->vruntime -= cfs_rq->min_vruntime; 4367 4368 /* return excess runtime on last dequeue */ 4369 return_cfs_rq_runtime(cfs_rq); 4370 4371 update_cfs_group(se); 4372 4373 /* 4374 * Now advance min_vruntime if @se was the entity holding it back, 4375 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be 4376 * put back on, and if we advance min_vruntime, we'll be placed back 4377 * further than we started -- ie. we'll be penalized. 
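	 *
	 * (IOW: skip the update only for a save/restore cycle in which the
	 * entity will be put straight back at its current position.)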
	 */
	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
		update_min_vruntime(cfs_rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;
	struct sched_entity *se;
	s64 delta;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_curr(rq_of(cfs_rq));
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (delta_exec < sysctl_sched_min_granularity)
		return;

	se = __pick_first_entity(cfs_rq);
	delta = curr->vruntime - se->vruntime;

	if (delta < 0)
		return;

	if (delta > ideal_runtime)
		resched_curr(rq_of(cfs_rq));
}

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	clear_buddies(cfs_rq, se);

	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end_fair(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
		update_load_avg(cfs_rq, se, UPDATE_TG);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;

	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (schedstat_enabled() &&
	    rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
		struct sched_statistics *stats;

		stats = __schedstats_from_se(se);
		__schedstat_set(stats->slice_max,
				max((u64)stats->slice_max,
				    se->sum_exec_runtime - se->prev_sum_exec_runtime));
	}

	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

/*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
 * 2) pick the "next" process, since someone really wants that to run
 * 3) pick the "last" process, for cache locality
 * 4) do not run the "skip" process, if something else is available
 */
static struct sched_entity *
pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	struct sched_entity *left = __pick_first_entity(cfs_rq);
	struct sched_entity *se;

	/*
	 * If curr is set we have to see if it's left of the leftmost entity
	 * still in the tree, provided there was anything in the tree at all.
	 */
	if (!left || (curr && entity_before(curr, left)))
		left = curr;

	se = left; /* ideally we run the leftmost entity */

	/*
	 * Avoid running the skip buddy, if running something else can
	 * be done without getting too unfair.
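	 *
	 * ('Too unfair' is judged by wakeup_preempt_entity(): a result < 1
	 * below means the candidate trails the leftmost entity by no more
	 * than one (weight-scaled) wakeup granularity of vruntime.)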
4489 */ 4490 if (cfs_rq->skip && cfs_rq->skip == se) { 4491 struct sched_entity *second; 4492 4493 if (se == curr) { 4494 second = __pick_first_entity(cfs_rq); 4495 } else { 4496 second = __pick_next_entity(se); 4497 if (!second || (curr && entity_before(curr, second))) 4498 second = curr; 4499 } 4500 4501 if (second && wakeup_preempt_entity(second, left) < 1) 4502 se = second; 4503 } 4504 4505 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) { 4506 /* 4507 * Someone really wants this to run. If it's not unfair, run it. 4508 */ 4509 se = cfs_rq->next; 4510 } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) { 4511 /* 4512 * Prefer last buddy, try to return the CPU to a preempted task. 4513 */ 4514 se = cfs_rq->last; 4515 } 4516 4517 return se; 4518 } 4519 4520 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); 4521 4522 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) 4523 { 4524 /* 4525 * If still on the runqueue then deactivate_task() 4526 * was not called and update_curr() has to be done: 4527 */ 4528 if (prev->on_rq) 4529 update_curr(cfs_rq); 4530 4531 /* throttle cfs_rqs exceeding runtime */ 4532 check_cfs_rq_runtime(cfs_rq); 4533 4534 check_spread(cfs_rq, prev); 4535 4536 if (prev->on_rq) { 4537 update_stats_wait_start_fair(cfs_rq, prev); 4538 /* Put 'current' back into the tree. */ 4539 __enqueue_entity(cfs_rq, prev); 4540 /* in !on_rq case, update occurred at dequeue */ 4541 update_load_avg(cfs_rq, prev, 0); 4542 } 4543 cfs_rq->curr = NULL; 4544 } 4545 4546 static void 4547 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) 4548 { 4549 /* 4550 * Update run-time statistics of the 'current'. 4551 */ 4552 update_curr(cfs_rq); 4553 4554 /* 4555 * Ensure that runnable average is periodically updated. 4556 */ 4557 update_load_avg(cfs_rq, curr, UPDATE_TG); 4558 update_cfs_group(curr); 4559 4560 #ifdef CONFIG_SCHED_HRTICK 4561 /* 4562 * queued ticks are scheduled to match the slice, so don't bother 4563 * validating it and just reschedule. 4564 */ 4565 if (queued) { 4566 resched_curr(rq_of(cfs_rq)); 4567 return; 4568 } 4569 /* 4570 * don't let the period tick interfere with the hrtick preemption 4571 */ 4572 if (!sched_feat(DOUBLE_TICK) && 4573 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) 4574 return; 4575 #endif 4576 4577 if (cfs_rq->nr_running > 1) 4578 check_preempt_tick(cfs_rq, curr); 4579 } 4580 4581 4582 /************************************************** 4583 * CFS bandwidth control machinery 4584 */ 4585 4586 #ifdef CONFIG_CFS_BANDWIDTH 4587 4588 #ifdef CONFIG_JUMP_LABEL 4589 static struct static_key __cfs_bandwidth_used; 4590 4591 static inline bool cfs_bandwidth_used(void) 4592 { 4593 return static_key_false(&__cfs_bandwidth_used); 4594 } 4595 4596 void cfs_bandwidth_usage_inc(void) 4597 { 4598 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used); 4599 } 4600 4601 void cfs_bandwidth_usage_dec(void) 4602 { 4603 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); 4604 } 4605 #else /* CONFIG_JUMP_LABEL */ 4606 static bool cfs_bandwidth_used(void) 4607 { 4608 return true; 4609 } 4610 4611 void cfs_bandwidth_usage_inc(void) {} 4612 void cfs_bandwidth_usage_dec(void) {} 4613 #endif /* CONFIG_JUMP_LABEL */ 4614 4615 /* 4616 * default period for cfs group bandwidth. 
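 * (this is also the value the cpu.cfs_period_us cgroup knob reports by
 * default, i.e. 100000us)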
4617 * default: 0.1s, units: nanoseconds 4618 */ 4619 static inline u64 default_cfs_period(void) 4620 { 4621 return 100000000ULL; 4622 } 4623 4624 static inline u64 sched_cfs_bandwidth_slice(void) 4625 { 4626 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; 4627 } 4628 4629 /* 4630 * Replenish runtime according to assigned quota. We use sched_clock_cpu 4631 * directly instead of rq->clock to avoid adding additional synchronization 4632 * around rq->lock. 4633 * 4634 * requires cfs_b->lock 4635 */ 4636 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) 4637 { 4638 s64 runtime; 4639 4640 if (unlikely(cfs_b->quota == RUNTIME_INF)) 4641 return; 4642 4643 cfs_b->runtime += cfs_b->quota; 4644 runtime = cfs_b->runtime_snap - cfs_b->runtime; 4645 if (runtime > 0) { 4646 cfs_b->burst_time += runtime; 4647 cfs_b->nr_burst++; 4648 } 4649 4650 cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); 4651 cfs_b->runtime_snap = cfs_b->runtime; 4652 } 4653 4654 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 4655 { 4656 return &tg->cfs_bandwidth; 4657 } 4658 4659 /* returns 0 on failure to allocate runtime */ 4660 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, 4661 struct cfs_rq *cfs_rq, u64 target_runtime) 4662 { 4663 u64 min_amount, amount = 0; 4664 4665 lockdep_assert_held(&cfs_b->lock); 4666 4667 /* note: this is a positive sum as runtime_remaining <= 0 */ 4668 min_amount = target_runtime - cfs_rq->runtime_remaining; 4669 4670 if (cfs_b->quota == RUNTIME_INF) 4671 amount = min_amount; 4672 else { 4673 start_cfs_bandwidth(cfs_b); 4674 4675 if (cfs_b->runtime > 0) { 4676 amount = min(cfs_b->runtime, min_amount); 4677 cfs_b->runtime -= amount; 4678 cfs_b->idle = 0; 4679 } 4680 } 4681 4682 cfs_rq->runtime_remaining += amount; 4683 4684 return cfs_rq->runtime_remaining > 0; 4685 } 4686 4687 /* returns 0 on failure to allocate runtime */ 4688 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4689 { 4690 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4691 int ret; 4692 4693 raw_spin_lock(&cfs_b->lock); 4694 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); 4695 raw_spin_unlock(&cfs_b->lock); 4696 4697 return ret; 4698 } 4699 4700 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4701 { 4702 /* dock delta_exec before expiring quota (as it could span periods) */ 4703 cfs_rq->runtime_remaining -= delta_exec; 4704 4705 if (likely(cfs_rq->runtime_remaining > 0)) 4706 return; 4707 4708 if (cfs_rq->throttled) 4709 return; 4710 /* 4711 * if we're unable to extend our runtime we resched so that the active 4712 * hierarchy can be throttled 4713 */ 4714 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) 4715 resched_curr(rq_of(cfs_rq)); 4716 } 4717 4718 static __always_inline 4719 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4720 { 4721 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 4722 return; 4723 4724 __account_cfs_rq_runtime(cfs_rq, delta_exec); 4725 } 4726 4727 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 4728 { 4729 return cfs_bandwidth_used() && cfs_rq->throttled; 4730 } 4731 4732 /* check whether cfs_rq, or any parent, is throttled */ 4733 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 4734 { 4735 return cfs_bandwidth_used() && cfs_rq->throttle_count; 4736 } 4737 4738 /* 4739 * Ensure that neither of the group entities corresponding to src_cpu or 4740 * dest_cpu are members of a throttled 
hierarchy when performing group 4741 * load-balance operations. 4742 */ 4743 static inline int throttled_lb_pair(struct task_group *tg, 4744 int src_cpu, int dest_cpu) 4745 { 4746 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; 4747 4748 src_cfs_rq = tg->cfs_rq[src_cpu]; 4749 dest_cfs_rq = tg->cfs_rq[dest_cpu]; 4750 4751 return throttled_hierarchy(src_cfs_rq) || 4752 throttled_hierarchy(dest_cfs_rq); 4753 } 4754 4755 static int tg_unthrottle_up(struct task_group *tg, void *data) 4756 { 4757 struct rq *rq = data; 4758 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4759 4760 cfs_rq->throttle_count--; 4761 if (!cfs_rq->throttle_count) { 4762 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - 4763 cfs_rq->throttled_clock_task; 4764 4765 /* Add cfs_rq with load or one or more already running entities to the list */ 4766 if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running) 4767 list_add_leaf_cfs_rq(cfs_rq); 4768 } 4769 4770 return 0; 4771 } 4772 4773 static int tg_throttle_down(struct task_group *tg, void *data) 4774 { 4775 struct rq *rq = data; 4776 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4777 4778 /* group is entering throttled state, stop time */ 4779 if (!cfs_rq->throttle_count) { 4780 cfs_rq->throttled_clock_task = rq_clock_task(rq); 4781 list_del_leaf_cfs_rq(cfs_rq); 4782 } 4783 cfs_rq->throttle_count++; 4784 4785 return 0; 4786 } 4787 4788 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) 4789 { 4790 struct rq *rq = rq_of(cfs_rq); 4791 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4792 struct sched_entity *se; 4793 long task_delta, idle_task_delta, dequeue = 1; 4794 4795 raw_spin_lock(&cfs_b->lock); 4796 /* This will start the period timer if necessary */ 4797 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { 4798 /* 4799 * We have raced with bandwidth becoming available, and if we 4800 * actually throttled the timer might not unthrottle us for an 4801 * entire period. We additionally needed to make sure that any 4802 * subsequent check_cfs_rq_runtime calls agree not to throttle 4803 * us, as we may commit to do cfs put_prev+pick_next, so we ask 4804 * for 1ns of runtime rather than just check cfs_b. 4805 */ 4806 dequeue = 0; 4807 } else { 4808 list_add_tail_rcu(&cfs_rq->throttled_list, 4809 &cfs_b->throttled_cfs_rq); 4810 } 4811 raw_spin_unlock(&cfs_b->lock); 4812 4813 if (!dequeue) 4814 return false; /* Throttle no longer required. 
*/ 4815 4816 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; 4817 4818 /* freeze hierarchy runnable averages while throttled */ 4819 rcu_read_lock(); 4820 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); 4821 rcu_read_unlock(); 4822 4823 task_delta = cfs_rq->h_nr_running; 4824 idle_task_delta = cfs_rq->idle_h_nr_running; 4825 for_each_sched_entity(se) { 4826 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4827 /* throttled entity or throttle-on-deactivate */ 4828 if (!se->on_rq) 4829 goto done; 4830 4831 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); 4832 4833 if (cfs_rq_is_idle(group_cfs_rq(se))) 4834 idle_task_delta = cfs_rq->h_nr_running; 4835 4836 qcfs_rq->h_nr_running -= task_delta; 4837 qcfs_rq->idle_h_nr_running -= idle_task_delta; 4838 4839 if (qcfs_rq->load.weight) { 4840 /* Avoid re-evaluating load for this entity: */ 4841 se = parent_entity(se); 4842 break; 4843 } 4844 } 4845 4846 for_each_sched_entity(se) { 4847 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4848 /* throttled entity or throttle-on-deactivate */ 4849 if (!se->on_rq) 4850 goto done; 4851 4852 update_load_avg(qcfs_rq, se, 0); 4853 se_update_runnable(se); 4854 4855 if (cfs_rq_is_idle(group_cfs_rq(se))) 4856 idle_task_delta = cfs_rq->h_nr_running; 4857 4858 qcfs_rq->h_nr_running -= task_delta; 4859 qcfs_rq->idle_h_nr_running -= idle_task_delta; 4860 } 4861 4862 /* At this point se is NULL and we are at root level*/ 4863 sub_nr_running(rq, task_delta); 4864 4865 done: 4866 /* 4867 * Note: distribution will already see us throttled via the 4868 * throttled-list. rq->lock protects completion. 4869 */ 4870 cfs_rq->throttled = 1; 4871 cfs_rq->throttled_clock = rq_clock(rq); 4872 return true; 4873 } 4874 4875 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) 4876 { 4877 struct rq *rq = rq_of(cfs_rq); 4878 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4879 struct sched_entity *se; 4880 long task_delta, idle_task_delta; 4881 4882 se = cfs_rq->tg->se[cpu_of(rq)]; 4883 4884 cfs_rq->throttled = 0; 4885 4886 update_rq_clock(rq); 4887 4888 raw_spin_lock(&cfs_b->lock); 4889 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; 4890 list_del_rcu(&cfs_rq->throttled_list); 4891 raw_spin_unlock(&cfs_b->lock); 4892 4893 /* update hierarchical throttle state */ 4894 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); 4895 4896 /* Nothing to run but something to decay (on_list)? 
Complete the branch */ 4897 if (!cfs_rq->load.weight) { 4898 if (cfs_rq->on_list) 4899 goto unthrottle_throttle; 4900 return; 4901 } 4902 4903 task_delta = cfs_rq->h_nr_running; 4904 idle_task_delta = cfs_rq->idle_h_nr_running; 4905 for_each_sched_entity(se) { 4906 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4907 4908 if (se->on_rq) 4909 break; 4910 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); 4911 4912 if (cfs_rq_is_idle(group_cfs_rq(se))) 4913 idle_task_delta = cfs_rq->h_nr_running; 4914 4915 qcfs_rq->h_nr_running += task_delta; 4916 qcfs_rq->idle_h_nr_running += idle_task_delta; 4917 4918 /* end evaluation on encountering a throttled cfs_rq */ 4919 if (cfs_rq_throttled(qcfs_rq)) 4920 goto unthrottle_throttle; 4921 } 4922 4923 for_each_sched_entity(se) { 4924 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4925 4926 update_load_avg(qcfs_rq, se, UPDATE_TG); 4927 se_update_runnable(se); 4928 4929 if (cfs_rq_is_idle(group_cfs_rq(se))) 4930 idle_task_delta = cfs_rq->h_nr_running; 4931 4932 qcfs_rq->h_nr_running += task_delta; 4933 qcfs_rq->idle_h_nr_running += idle_task_delta; 4934 4935 /* end evaluation on encountering a throttled cfs_rq */ 4936 if (cfs_rq_throttled(qcfs_rq)) 4937 goto unthrottle_throttle; 4938 4939 /* 4940 * One parent has been throttled and cfs_rq removed from the 4941 * list. Add it back to not break the leaf list. 4942 */ 4943 if (throttled_hierarchy(qcfs_rq)) 4944 list_add_leaf_cfs_rq(qcfs_rq); 4945 } 4946 4947 /* At this point se is NULL and we are at root level*/ 4948 add_nr_running(rq, task_delta); 4949 4950 unthrottle_throttle: 4951 /* 4952 * The cfs_rq_throttled() breaks in the above iteration can result in 4953 * incomplete leaf list maintenance, resulting in triggering the 4954 * assertion below. 4955 */ 4956 for_each_sched_entity(se) { 4957 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4958 4959 if (list_add_leaf_cfs_rq(qcfs_rq)) 4960 break; 4961 } 4962 4963 assert_list_leaf_cfs_rq(rq); 4964 4965 /* Determine whether we need to wake up potentially idle CPU: */ 4966 if (rq->curr == rq->idle && rq->cfs.nr_running) 4967 resched_curr(rq); 4968 } 4969 4970 static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b) 4971 { 4972 struct cfs_rq *cfs_rq; 4973 u64 runtime, remaining = 1; 4974 4975 rcu_read_lock(); 4976 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, 4977 throttled_list) { 4978 struct rq *rq = rq_of(cfs_rq); 4979 struct rq_flags rf; 4980 4981 rq_lock_irqsave(rq, &rf); 4982 if (!cfs_rq_throttled(cfs_rq)) 4983 goto next; 4984 4985 /* By the above check, this should never be true */ 4986 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); 4987 4988 raw_spin_lock(&cfs_b->lock); 4989 runtime = -cfs_rq->runtime_remaining + 1; 4990 if (runtime > cfs_b->runtime) 4991 runtime = cfs_b->runtime; 4992 cfs_b->runtime -= runtime; 4993 remaining = cfs_b->runtime; 4994 raw_spin_unlock(&cfs_b->lock); 4995 4996 cfs_rq->runtime_remaining += runtime; 4997 4998 /* we check whether we're throttled above */ 4999 if (cfs_rq->runtime_remaining > 0) 5000 unthrottle_cfs_rq(cfs_rq); 5001 5002 next: 5003 rq_unlock_irqrestore(rq, &rf); 5004 5005 if (!remaining) 5006 break; 5007 } 5008 rcu_read_unlock(); 5009 } 5010 5011 /* 5012 * Responsible for refilling a task_group's bandwidth and unthrottling its 5013 * cfs_rqs as appropriate. If there has been no activity within the last 5014 * period the timer is deactivated until scheduling resumes; cfs_b->idle is 5015 * used to track this state. 
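 *
 * Returns non-zero when the timer can be deactivated; the caller,
 * sched_cfs_period_timer(), treats that as 'idle' and clears
 * cfs_b->period_active.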
5016 */ 5017 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) 5018 { 5019 int throttled; 5020 5021 /* no need to continue the timer with no bandwidth constraint */ 5022 if (cfs_b->quota == RUNTIME_INF) 5023 goto out_deactivate; 5024 5025 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 5026 cfs_b->nr_periods += overrun; 5027 5028 /* Refill extra burst quota even if cfs_b->idle */ 5029 __refill_cfs_bandwidth_runtime(cfs_b); 5030 5031 /* 5032 * idle depends on !throttled (for the case of a large deficit), and if 5033 * we're going inactive then everything else can be deferred 5034 */ 5035 if (cfs_b->idle && !throttled) 5036 goto out_deactivate; 5037 5038 if (!throttled) { 5039 /* mark as potentially idle for the upcoming period */ 5040 cfs_b->idle = 1; 5041 return 0; 5042 } 5043 5044 /* account preceding periods in which throttling occurred */ 5045 cfs_b->nr_throttled += overrun; 5046 5047 /* 5048 * This check is repeated as we release cfs_b->lock while we unthrottle. 5049 */ 5050 while (throttled && cfs_b->runtime > 0) { 5051 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5052 /* we can't nest cfs_b->lock while distributing bandwidth */ 5053 distribute_cfs_runtime(cfs_b); 5054 raw_spin_lock_irqsave(&cfs_b->lock, flags); 5055 5056 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 5057 } 5058 5059 /* 5060 * While we are ensured activity in the period following an 5061 * unthrottle, this also covers the case in which the new bandwidth is 5062 * insufficient to cover the existing bandwidth deficit. (Forcing the 5063 * timer to remain active while there are any throttled entities.) 5064 */ 5065 cfs_b->idle = 0; 5066 5067 return 0; 5068 5069 out_deactivate: 5070 return 1; 5071 } 5072 5073 /* a cfs_rq won't donate quota below this amount */ 5074 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; 5075 /* minimum remaining period time to redistribute slack quota */ 5076 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; 5077 /* how long we wait to gather additional slack before distributing */ 5078 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; 5079 5080 /* 5081 * Are we near the end of the current quota period? 5082 * 5083 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the 5084 * hrtimer base being cleared by hrtimer_start. In the case of 5085 * migrate_hrtimers, base is never cleared, so we are fine. 5086 */ 5087 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) 5088 { 5089 struct hrtimer *refresh_timer = &cfs_b->period_timer; 5090 s64 remaining; 5091 5092 /* if the call-back is running a quota refresh is already occurring */ 5093 if (hrtimer_callback_running(refresh_timer)) 5094 return 1; 5095 5096 /* is a quota refresh about to occur? 
*/ 5097 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); 5098 if (remaining < (s64)min_expire) 5099 return 1; 5100 5101 return 0; 5102 } 5103 5104 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) 5105 { 5106 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; 5107 5108 /* if there's a quota refresh soon don't bother with slack */ 5109 if (runtime_refresh_within(cfs_b, min_left)) 5110 return; 5111 5112 /* don't push forwards an existing deferred unthrottle */ 5113 if (cfs_b->slack_started) 5114 return; 5115 cfs_b->slack_started = true; 5116 5117 hrtimer_start(&cfs_b->slack_timer, 5118 ns_to_ktime(cfs_bandwidth_slack_period), 5119 HRTIMER_MODE_REL); 5120 } 5121 5122 /* we know any runtime found here is valid as update_curr() precedes return */ 5123 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5124 { 5125 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 5126 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; 5127 5128 if (slack_runtime <= 0) 5129 return; 5130 5131 raw_spin_lock(&cfs_b->lock); 5132 if (cfs_b->quota != RUNTIME_INF) { 5133 cfs_b->runtime += slack_runtime; 5134 5135 /* we are under rq->lock, defer unthrottling using a timer */ 5136 if (cfs_b->runtime > sched_cfs_bandwidth_slice() && 5137 !list_empty(&cfs_b->throttled_cfs_rq)) 5138 start_cfs_slack_bandwidth(cfs_b); 5139 } 5140 raw_spin_unlock(&cfs_b->lock); 5141 5142 /* even if it's not valid for return we don't want to try again */ 5143 cfs_rq->runtime_remaining -= slack_runtime; 5144 } 5145 5146 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5147 { 5148 if (!cfs_bandwidth_used()) 5149 return; 5150 5151 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) 5152 return; 5153 5154 __return_cfs_rq_runtime(cfs_rq); 5155 } 5156 5157 /* 5158 * This is done with a timer (instead of inline with bandwidth return) since 5159 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. 5160 */ 5161 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) 5162 { 5163 u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); 5164 unsigned long flags; 5165 5166 /* confirm we're still not at a refresh boundary */ 5167 raw_spin_lock_irqsave(&cfs_b->lock, flags); 5168 cfs_b->slack_started = false; 5169 5170 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { 5171 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5172 return; 5173 } 5174 5175 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) 5176 runtime = cfs_b->runtime; 5177 5178 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5179 5180 if (!runtime) 5181 return; 5182 5183 distribute_cfs_runtime(cfs_b); 5184 } 5185 5186 /* 5187 * When a group wakes up we want to make sure that its quota is not already 5188 * expired/exceeded, otherwise it may be allowed to steal additional ticks of 5189 * runtime as update_curr() throttling can not trigger until it's on-rq. 
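 *
 * Hence the code below re-checks the balance with a zero-delta
 * account_cfs_rq_runtime() call and throttles on the spot if the group
 * is already out of quota.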
5190 */ 5191 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) 5192 { 5193 if (!cfs_bandwidth_used()) 5194 return; 5195 5196 /* an active group must be handled by the update_curr()->put() path */ 5197 if (!cfs_rq->runtime_enabled || cfs_rq->curr) 5198 return; 5199 5200 /* ensure the group is not already throttled */ 5201 if (cfs_rq_throttled(cfs_rq)) 5202 return; 5203 5204 /* update runtime allocation */ 5205 account_cfs_rq_runtime(cfs_rq, 0); 5206 if (cfs_rq->runtime_remaining <= 0) 5207 throttle_cfs_rq(cfs_rq); 5208 } 5209 5210 static void sync_throttle(struct task_group *tg, int cpu) 5211 { 5212 struct cfs_rq *pcfs_rq, *cfs_rq; 5213 5214 if (!cfs_bandwidth_used()) 5215 return; 5216 5217 if (!tg->parent) 5218 return; 5219 5220 cfs_rq = tg->cfs_rq[cpu]; 5221 pcfs_rq = tg->parent->cfs_rq[cpu]; 5222 5223 cfs_rq->throttle_count = pcfs_rq->throttle_count; 5224 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); 5225 } 5226 5227 /* conditionally throttle active cfs_rq's from put_prev_entity() */ 5228 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5229 { 5230 if (!cfs_bandwidth_used()) 5231 return false; 5232 5233 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) 5234 return false; 5235 5236 /* 5237 * it's possible for a throttled entity to be forced into a running 5238 * state (e.g. set_curr_task), in this case we're finished. 5239 */ 5240 if (cfs_rq_throttled(cfs_rq)) 5241 return true; 5242 5243 return throttle_cfs_rq(cfs_rq); 5244 } 5245 5246 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) 5247 { 5248 struct cfs_bandwidth *cfs_b = 5249 container_of(timer, struct cfs_bandwidth, slack_timer); 5250 5251 do_sched_cfs_slack_timer(cfs_b); 5252 5253 return HRTIMER_NORESTART; 5254 } 5255 5256 extern const u64 max_cfs_quota_period; 5257 5258 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) 5259 { 5260 struct cfs_bandwidth *cfs_b = 5261 container_of(timer, struct cfs_bandwidth, period_timer); 5262 unsigned long flags; 5263 int overrun; 5264 int idle = 0; 5265 int count = 0; 5266 5267 raw_spin_lock_irqsave(&cfs_b->lock, flags); 5268 for (;;) { 5269 overrun = hrtimer_forward_now(timer, cfs_b->period); 5270 if (!overrun) 5271 break; 5272 5273 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); 5274 5275 if (++count > 3) { 5276 u64 new, old = ktime_to_ns(cfs_b->period); 5277 5278 /* 5279 * Grow period by a factor of 2 to avoid losing precision. 5280 * Precision loss in the quota/period ratio can cause __cfs_schedulable 5281 * to fail. 5282 */ 5283 new = old * 2; 5284 if (new < max_cfs_quota_period) { 5285 cfs_b->period = ns_to_ktime(new); 5286 cfs_b->quota *= 2; 5287 cfs_b->burst *= 2; 5288 5289 pr_warn_ratelimited( 5290 "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", 5291 smp_processor_id(), 5292 div_u64(new, NSEC_PER_USEC), 5293 div_u64(cfs_b->quota, NSEC_PER_USEC)); 5294 } else { 5295 pr_warn_ratelimited( 5296 "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n", 5297 smp_processor_id(), 5298 div_u64(old, NSEC_PER_USEC), 5299 div_u64(cfs_b->quota, NSEC_PER_USEC)); 5300 } 5301 5302 /* reset count so we don't come right back in here */ 5303 count = 0; 5304 } 5305 } 5306 if (idle) 5307 cfs_b->period_active = 0; 5308 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5309 5310 return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; 5311 } 5312 5313 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5314 { 5315 raw_spin_lock_init(&cfs_b->lock); 5316 cfs_b->runtime = 0; 5317 cfs_b->quota = RUNTIME_INF; 5318 cfs_b->period = ns_to_ktime(default_cfs_period()); 5319 cfs_b->burst = 0; 5320 5321 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); 5322 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 5323 cfs_b->period_timer.function = sched_cfs_period_timer; 5324 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 5325 cfs_b->slack_timer.function = sched_cfs_slack_timer; 5326 cfs_b->slack_started = false; 5327 } 5328 5329 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5330 { 5331 cfs_rq->runtime_enabled = 0; 5332 INIT_LIST_HEAD(&cfs_rq->throttled_list); 5333 } 5334 5335 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5336 { 5337 lockdep_assert_held(&cfs_b->lock); 5338 5339 if (cfs_b->period_active) 5340 return; 5341 5342 cfs_b->period_active = 1; 5343 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); 5344 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); 5345 } 5346 5347 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5348 { 5349 /* init_cfs_bandwidth() was not called */ 5350 if (!cfs_b->throttled_cfs_rq.next) 5351 return; 5352 5353 hrtimer_cancel(&cfs_b->period_timer); 5354 hrtimer_cancel(&cfs_b->slack_timer); 5355 } 5356 5357 /* 5358 * Both these CPU hotplug callbacks race against unregister_fair_sched_group() 5359 * 5360 * The race is harmless, since modifying bandwidth settings of unhooked group 5361 * bits doesn't do much. 5362 */ 5363 5364 /* cpu online callback */ 5365 static void __maybe_unused update_runtime_enabled(struct rq *rq) 5366 { 5367 struct task_group *tg; 5368 5369 lockdep_assert_rq_held(rq); 5370 5371 rcu_read_lock(); 5372 list_for_each_entry_rcu(tg, &task_groups, list) { 5373 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 5374 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5375 5376 raw_spin_lock(&cfs_b->lock); 5377 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; 5378 raw_spin_unlock(&cfs_b->lock); 5379 } 5380 rcu_read_unlock(); 5381 } 5382 5383 /* cpu offline callback */ 5384 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) 5385 { 5386 struct task_group *tg; 5387 5388 lockdep_assert_rq_held(rq); 5389 5390 rcu_read_lock(); 5391 list_for_each_entry_rcu(tg, &task_groups, list) { 5392 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5393 5394 if (!cfs_rq->runtime_enabled) 5395 continue; 5396 5397 /* 5398 * clock_task is not advancing so we just need to make sure 5399 * there's some valid quota amount 5400 */ 5401 cfs_rq->runtime_remaining = 1; 5402 /* 5403 * Offline rq is schedulable till CPU is completely disabled 5404 * in take_cpu_down(), so we prevent new cfs throttling here. 
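		 *
		 * (IOW: grant a token amount of runtime and switch accounting
		 * off, so whatever is still queued can drain off the dying
		 * CPU without being throttled again.)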
5405 */ 5406 cfs_rq->runtime_enabled = 0; 5407 5408 if (cfs_rq_throttled(cfs_rq)) 5409 unthrottle_cfs_rq(cfs_rq); 5410 } 5411 rcu_read_unlock(); 5412 } 5413 5414 #else /* CONFIG_CFS_BANDWIDTH */ 5415 5416 static inline bool cfs_bandwidth_used(void) 5417 { 5418 return false; 5419 } 5420 5421 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} 5422 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } 5423 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 5424 static inline void sync_throttle(struct task_group *tg, int cpu) {} 5425 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 5426 5427 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 5428 { 5429 return 0; 5430 } 5431 5432 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 5433 { 5434 return 0; 5435 } 5436 5437 static inline int throttled_lb_pair(struct task_group *tg, 5438 int src_cpu, int dest_cpu) 5439 { 5440 return 0; 5441 } 5442 5443 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5444 5445 #ifdef CONFIG_FAIR_GROUP_SCHED 5446 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 5447 #endif 5448 5449 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 5450 { 5451 return NULL; 5452 } 5453 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5454 static inline void update_runtime_enabled(struct rq *rq) {} 5455 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} 5456 5457 #endif /* CONFIG_CFS_BANDWIDTH */ 5458 5459 /************************************************** 5460 * CFS operations on tasks: 5461 */ 5462 5463 #ifdef CONFIG_SCHED_HRTICK 5464 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) 5465 { 5466 struct sched_entity *se = &p->se; 5467 struct cfs_rq *cfs_rq = cfs_rq_of(se); 5468 5469 SCHED_WARN_ON(task_rq(p) != rq); 5470 5471 if (rq->cfs.h_nr_running > 1) { 5472 u64 slice = sched_slice(cfs_rq, se); 5473 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; 5474 s64 delta = slice - ran; 5475 5476 if (delta < 0) { 5477 if (task_current(rq, p)) 5478 resched_curr(rq); 5479 return; 5480 } 5481 hrtick_start(rq, delta); 5482 } 5483 } 5484 5485 /* 5486 * called from enqueue/dequeue and updates the hrtick when the 5487 * current task is from our class and nr_running is low enough 5488 * to matter. 
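 *
 * ('low enough' means cfs_rq->nr_running < sched_nr_latency, as checked
 * below)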
5489 */ 5490 static void hrtick_update(struct rq *rq) 5491 { 5492 struct task_struct *curr = rq->curr; 5493 5494 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class) 5495 return; 5496 5497 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) 5498 hrtick_start_fair(rq, curr); 5499 } 5500 #else /* !CONFIG_SCHED_HRTICK */ 5501 static inline void 5502 hrtick_start_fair(struct rq *rq, struct task_struct *p) 5503 { 5504 } 5505 5506 static inline void hrtick_update(struct rq *rq) 5507 { 5508 } 5509 #endif 5510 5511 #ifdef CONFIG_SMP 5512 static inline bool cpu_overutilized(int cpu) 5513 { 5514 return !fits_capacity(cpu_util_cfs(cpu), capacity_of(cpu)); 5515 } 5516 5517 static inline void update_overutilized_status(struct rq *rq) 5518 { 5519 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) { 5520 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); 5521 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED); 5522 } 5523 } 5524 #else 5525 static inline void update_overutilized_status(struct rq *rq) { } 5526 #endif 5527 5528 /* Runqueue only has SCHED_IDLE tasks enqueued */ 5529 static int sched_idle_rq(struct rq *rq) 5530 { 5531 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && 5532 rq->nr_running); 5533 } 5534 5535 /* 5536 * Returns true if cfs_rq only has SCHED_IDLE entities enqueued. Note the use 5537 * of idle_nr_running, which does not consider idle descendants of normal 5538 * entities. 5539 */ 5540 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq) 5541 { 5542 return cfs_rq->nr_running && 5543 cfs_rq->nr_running == cfs_rq->idle_nr_running; 5544 } 5545 5546 #ifdef CONFIG_SMP 5547 static int sched_idle_cpu(int cpu) 5548 { 5549 return sched_idle_rq(cpu_rq(cpu)); 5550 } 5551 #endif 5552 5553 /* 5554 * The enqueue_task method is called before nr_running is 5555 * increased. Here we update the fair scheduling stats and 5556 * then put the task into the rbtree: 5557 */ 5558 static void 5559 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) 5560 { 5561 struct cfs_rq *cfs_rq; 5562 struct sched_entity *se = &p->se; 5563 int idle_h_nr_running = task_has_idle_policy(p); 5564 int task_new = !(flags & ENQUEUE_WAKEUP); 5565 5566 /* 5567 * The code below (indirectly) updates schedutil which looks at 5568 * the cfs_rq utilization to select a frequency. 5569 * Let's add the task's estimated utilization to the cfs_rq's 5570 * estimated utilization, before we update schedutil. 5571 */ 5572 util_est_enqueue(&rq->cfs, p); 5573 5574 /* 5575 * If in_iowait is set, the code below may not trigger any cpufreq 5576 * utilization updates, so do it here explicitly with the IOWAIT flag 5577 * passed. 
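	 *
	 * (Schedutil keys its iowait boost off SCHED_CPUFREQ_IOWAIT, so
	 * losing this hint would cost I/O-bound tasks their frequency
	 * ramp-up.)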
5578 */ 5579 if (p->in_iowait) 5580 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); 5581 5582 for_each_sched_entity(se) { 5583 if (se->on_rq) 5584 break; 5585 cfs_rq = cfs_rq_of(se); 5586 enqueue_entity(cfs_rq, se, flags); 5587 5588 cfs_rq->h_nr_running++; 5589 cfs_rq->idle_h_nr_running += idle_h_nr_running; 5590 5591 if (cfs_rq_is_idle(cfs_rq)) 5592 idle_h_nr_running = 1; 5593 5594 /* end evaluation on encountering a throttled cfs_rq */ 5595 if (cfs_rq_throttled(cfs_rq)) 5596 goto enqueue_throttle; 5597 5598 flags = ENQUEUE_WAKEUP; 5599 } 5600 5601 for_each_sched_entity(se) { 5602 cfs_rq = cfs_rq_of(se); 5603 5604 update_load_avg(cfs_rq, se, UPDATE_TG); 5605 se_update_runnable(se); 5606 update_cfs_group(se); 5607 5608 cfs_rq->h_nr_running++; 5609 cfs_rq->idle_h_nr_running += idle_h_nr_running; 5610 5611 if (cfs_rq_is_idle(cfs_rq)) 5612 idle_h_nr_running = 1; 5613 5614 /* end evaluation on encountering a throttled cfs_rq */ 5615 if (cfs_rq_throttled(cfs_rq)) 5616 goto enqueue_throttle; 5617 5618 /* 5619 * One parent has been throttled and cfs_rq removed from the 5620 * list. Add it back to not break the leaf list. 5621 */ 5622 if (throttled_hierarchy(cfs_rq)) 5623 list_add_leaf_cfs_rq(cfs_rq); 5624 } 5625 5626 /* At this point se is NULL and we are at root level*/ 5627 add_nr_running(rq, 1); 5628 5629 /* 5630 * Since new tasks are assigned an initial util_avg equal to 5631 * half of the spare capacity of their CPU, tiny tasks have the 5632 * ability to cross the overutilized threshold, which will 5633 * result in the load balancer ruining all the task placement 5634 * done by EAS. As a way to mitigate that effect, do not account 5635 * for the first enqueue operation of new tasks during the 5636 * overutilized flag detection. 5637 * 5638 * A better way of solving this problem would be to wait for 5639 * the PELT signals of tasks to converge before taking them 5640 * into account, but that is not straightforward to implement, 5641 * and the following generally works well enough in practice. 5642 */ 5643 if (!task_new) 5644 update_overutilized_status(rq); 5645 5646 enqueue_throttle: 5647 if (cfs_bandwidth_used()) { 5648 /* 5649 * When bandwidth control is enabled; the cfs_rq_throttled() 5650 * breaks in the above iteration can result in incomplete 5651 * leaf list maintenance, resulting in triggering the assertion 5652 * below. 5653 */ 5654 for_each_sched_entity(se) { 5655 cfs_rq = cfs_rq_of(se); 5656 5657 if (list_add_leaf_cfs_rq(cfs_rq)) 5658 break; 5659 } 5660 } 5661 5662 assert_list_leaf_cfs_rq(rq); 5663 5664 hrtick_update(rq); 5665 } 5666 5667 static void set_next_buddy(struct sched_entity *se); 5668 5669 /* 5670 * The dequeue_task method is called before nr_running is 5671 * decreased. 
We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	int task_sleep = flags & DEQUEUE_SLEEP;
	int idle_h_nr_running = task_has_idle_policy(p);
	bool was_sched_idle = sched_idle_rq(rq);

	util_est_dequeue(&rq->cfs, p);

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, flags);

		cfs_rq->h_nr_running--;
		cfs_rq->idle_h_nr_running -= idle_h_nr_running;

		if (cfs_rq_is_idle(cfs_rq))
			idle_h_nr_running = 1;

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
			goto dequeue_throttle;

		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			/* Avoid re-evaluating load for this entity: */
			se = parent_entity(se);
			/*
			 * Bias pick_next to pick a task from this cfs_rq, as
			 * p is sleeping when it is within its sched_slice.
			 */
			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
				set_next_buddy(se);
			break;
		}
		flags |= DEQUEUE_SLEEP;
	}

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);

		update_load_avg(cfs_rq, se, UPDATE_TG);
		se_update_runnable(se);
		update_cfs_group(se);

		cfs_rq->h_nr_running--;
		cfs_rq->idle_h_nr_running -= idle_h_nr_running;

		if (cfs_rq_is_idle(cfs_rq))
			idle_h_nr_running = 1;

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
			goto dequeue_throttle;

	}

	/* At this point se is NULL and we are at root level */
	sub_nr_running(rq, 1);

	/* balance early to pull high priority tasks */
	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
		rq->next_balance = jiffies;

dequeue_throttle:
	util_est_update(&rq->cfs, p, task_sleep);
	hrtick_update(rq);
}

#ifdef CONFIG_SMP

/* Working cpumask for: load_balance, load_balance_newidle. */
DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);

#ifdef CONFIG_NO_HZ_COMMON

static struct {
	cpumask_var_t idle_cpus_mask;
	atomic_t nr_cpus;
	int has_blocked;		/* Idle CPUs have blocked load */
	int needs_update;		/* Newly idle CPUs need their next_balance collated */
	unsigned long next_balance;	/* in jiffy units */
	unsigned long next_blocked;	/* Next update of blocked load in jiffies */
} nohz ____cacheline_aligned;

#endif /* CONFIG_NO_HZ_COMMON */

static unsigned long cpu_load(struct rq *rq)
{
	return cfs_rq_load_avg(&rq->cfs);
}

/*
 * cpu_load_without - compute CPU load without any contributions from *p
 * @cpu: the CPU which load is requested
 * @p: the task which load should be discounted
 *
 * The load of a CPU is defined by the load of tasks currently enqueued on that
 * CPU as well as tasks which are currently sleeping after an execution on that
 * CPU.
 *
 * This method returns the load of the specified CPU by discounting the load of
 * the specified task, whenever the task is currently contributing to the CPU
 * load.
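 *
 * (Typical use is wakeup placement, e.g. via find_idlest_group(), where
 * p's own contribution to a candidate CPU's load must not bias the
 * comparison.)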
5780 */ 5781 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) 5782 { 5783 struct cfs_rq *cfs_rq; 5784 unsigned int load; 5785 5786 /* Task has no contribution or is new */ 5787 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 5788 return cpu_load(rq); 5789 5790 cfs_rq = &rq->cfs; 5791 load = READ_ONCE(cfs_rq->avg.load_avg); 5792 5793 /* Discount task's load from CPU's load */ 5794 lsub_positive(&load, task_h_load(p)); 5795 5796 return load; 5797 } 5798 5799 static unsigned long cpu_runnable(struct rq *rq) 5800 { 5801 return cfs_rq_runnable_avg(&rq->cfs); 5802 } 5803 5804 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p) 5805 { 5806 struct cfs_rq *cfs_rq; 5807 unsigned int runnable; 5808 5809 /* Task has no contribution or is new */ 5810 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 5811 return cpu_runnable(rq); 5812 5813 cfs_rq = &rq->cfs; 5814 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); 5815 5816 /* Discount task's runnable from CPU's runnable */ 5817 lsub_positive(&runnable, p->se.avg.runnable_avg); 5818 5819 return runnable; 5820 } 5821 5822 static unsigned long capacity_of(int cpu) 5823 { 5824 return cpu_rq(cpu)->cpu_capacity; 5825 } 5826 5827 static void record_wakee(struct task_struct *p) 5828 { 5829 /* 5830 * Only decay a single time; tasks that have less than 1 wakeup per 5831 * jiffy will not have built up many flips. 5832 */ 5833 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { 5834 current->wakee_flips >>= 1; 5835 current->wakee_flip_decay_ts = jiffies; 5836 } 5837 5838 if (current->last_wakee != p) { 5839 current->last_wakee = p; 5840 current->wakee_flips++; 5841 } 5842 } 5843 5844 /* 5845 * Detect M:N waker/wakee relationships via a switching-frequency heuristic. 5846 * 5847 * A waker of many should wake a different task than the one last awakened 5848 * at a frequency roughly N times higher than one of its wakees. 5849 * 5850 * In order to determine whether we should let the load spread vs consolidating 5851 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one 5852 * partner, and a factor of llc_size higher frequency in the other. 5853 * 5854 * With both conditions met, we can be relatively sure that the relationship is 5855 * non-monogamous, with partner count exceeding socket size. 5856 * 5857 * Waker/wakee being client/server, worker/dispatcher, interrupt source or 5858 * whatever is irrelevant; the spread criterion is that the apparent partner 5859 * count exceeds socket size. 5860 */ 5861 static int wake_wide(struct task_struct *p) 5862 { 5863 unsigned int master = current->wakee_flips; 5864 unsigned int slave = p->wakee_flips; 5865 int factor = __this_cpu_read(sd_llc_size); 5866 5867 if (master < slave) 5868 swap(master, slave); 5869 if (slave < factor || master < slave * factor) 5870 return 0; 5871 return 1; 5872 } 5873 5874 /* 5875 * The purpose of wake_affine() is to quickly determine on which CPU we can run 5876 * soonest. For the purpose of speed we only consider the waking and previous 5877 * CPU. 5878 * 5879 * wake_affine_idle() - only considers 'now', it checks if the waking CPU is 5880 * cache-affine and is (or will be) idle. 5881 * 5882 * wake_affine_weight() - considers the weight to reflect the average 5883 * scheduling latency of the CPUs. This seems to work 5884 * for the overloaded case.
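 *
 * Worked sketch of the wake_affine_weight() arithmetic below
 * (illustrative numbers, assuming WA_BIAS and an imbalance_pct of 117):
 * this_eff_load is scaled by 100 and prev_eff_load by
 * 100 + (117 - 100) / 2 = 108, then each side is cross-multiplied by
 * the other CPU's capacity. Inflating prev_eff_load by ~8% biases the
 * decision toward the waking CPU unless the previous CPU is clearly
 * less loaded.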
5885 */ 5886 static int 5887 wake_affine_idle(int this_cpu, int prev_cpu, int sync) 5888 { 5889 /* 5890 * If this_cpu is idle, it implies the wakeup is from interrupt 5891 * context. Only allow the move if cache is shared. Otherwise an 5892 * interrupt intensive workload could force all tasks onto one 5893 * node depending on the IO topology or IRQ affinity settings. 5894 * 5895 * If the prev_cpu is idle and cache affine then avoid a migration. 5896 * There is no guarantee that the cache hot data from an interrupt 5897 * is more important than cache hot data on the prev_cpu and from 5898 * a cpufreq perspective, it's better to have higher utilisation 5899 * on one CPU. 5900 */ 5901 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) 5902 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; 5903 5904 if (sync && cpu_rq(this_cpu)->nr_running == 1) 5905 return this_cpu; 5906 5907 if (available_idle_cpu(prev_cpu)) 5908 return prev_cpu; 5909 5910 return nr_cpumask_bits; 5911 } 5912 5913 static int 5914 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, 5915 int this_cpu, int prev_cpu, int sync) 5916 { 5917 s64 this_eff_load, prev_eff_load; 5918 unsigned long task_load; 5919 5920 this_eff_load = cpu_load(cpu_rq(this_cpu)); 5921 5922 if (sync) { 5923 unsigned long current_load = task_h_load(current); 5924 5925 if (current_load > this_eff_load) 5926 return this_cpu; 5927 5928 this_eff_load -= current_load; 5929 } 5930 5931 task_load = task_h_load(p); 5932 5933 this_eff_load += task_load; 5934 if (sched_feat(WA_BIAS)) 5935 this_eff_load *= 100; 5936 this_eff_load *= capacity_of(prev_cpu); 5937 5938 prev_eff_load = cpu_load(cpu_rq(prev_cpu)); 5939 prev_eff_load -= task_load; 5940 if (sched_feat(WA_BIAS)) 5941 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; 5942 prev_eff_load *= capacity_of(this_cpu); 5943 5944 /* 5945 * If sync, adjust the weight of prev_eff_load such that if 5946 * prev_eff == this_eff that select_idle_sibling() will consider 5947 * stacking the wakee on top of the waker if no other CPU is 5948 * idle. 5949 */ 5950 if (sync) 5951 prev_eff_load += 1; 5952 5953 return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; 5954 } 5955 5956 static int wake_affine(struct sched_domain *sd, struct task_struct *p, 5957 int this_cpu, int prev_cpu, int sync) 5958 { 5959 int target = nr_cpumask_bits; 5960 5961 if (sched_feat(WA_IDLE)) 5962 target = wake_affine_idle(this_cpu, prev_cpu, sync); 5963 5964 if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits) 5965 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); 5966 5967 schedstat_inc(p->stats.nr_wakeups_affine_attempts); 5968 if (target == nr_cpumask_bits) 5969 return prev_cpu; 5970 5971 schedstat_inc(sd->ttwu_move_affine); 5972 schedstat_inc(p->stats.nr_wakeups_affine); 5973 return target; 5974 } 5975 5976 static struct sched_group * 5977 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu); 5978 5979 /* 5980 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group. 
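 *
 * Selection order implemented below: a SCHED_IDLE CPU is returned
 * immediately; otherwise prefer the idle CPU whose idle state has the
 * smallest exit latency, breaking ties in favor of the most recently
 * idled CPU (likely warmer cache); if no CPU is idle, fall back to the
 * least loaded one.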
5981 */ 5982 static int 5983 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) 5984 { 5985 unsigned long load, min_load = ULONG_MAX; 5986 unsigned int min_exit_latency = UINT_MAX; 5987 u64 latest_idle_timestamp = 0; 5988 int least_loaded_cpu = this_cpu; 5989 int shallowest_idle_cpu = -1; 5990 int i; 5991 5992 /* Check if we have any choice: */ 5993 if (group->group_weight == 1) 5994 return cpumask_first(sched_group_span(group)); 5995 5996 /* Traverse only the allowed CPUs */ 5997 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { 5998 struct rq *rq = cpu_rq(i); 5999 6000 if (!sched_core_cookie_match(rq, p)) 6001 continue; 6002 6003 if (sched_idle_cpu(i)) 6004 return i; 6005 6006 if (available_idle_cpu(i)) { 6007 struct cpuidle_state *idle = idle_get_state(rq); 6008 if (idle && idle->exit_latency < min_exit_latency) { 6009 /* 6010 * We give priority to a CPU whose idle state 6011 * has the smallest exit latency irrespective 6012 * of any idle timestamp. 6013 */ 6014 min_exit_latency = idle->exit_latency; 6015 latest_idle_timestamp = rq->idle_stamp; 6016 shallowest_idle_cpu = i; 6017 } else if ((!idle || idle->exit_latency == min_exit_latency) && 6018 rq->idle_stamp > latest_idle_timestamp) { 6019 /* 6020 * If equal or no active idle state, then 6021 * the most recently idled CPU might have 6022 * a warmer cache. 6023 */ 6024 latest_idle_timestamp = rq->idle_stamp; 6025 shallowest_idle_cpu = i; 6026 } 6027 } else if (shallowest_idle_cpu == -1) { 6028 load = cpu_load(cpu_rq(i)); 6029 if (load < min_load) { 6030 min_load = load; 6031 least_loaded_cpu = i; 6032 } 6033 } 6034 } 6035 6036 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; 6037 } 6038 6039 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, 6040 int cpu, int prev_cpu, int sd_flag) 6041 { 6042 int new_cpu = cpu; 6043 6044 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) 6045 return prev_cpu; 6046 6047 /* 6048 * We need task's util for cpu_util_without, sync it up to 6049 * prev_cpu's last_update_time. 
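 * (SD_BALANCE_FORK is excluded below because a freshly forked task has
 * no PELT history to synchronize yet.)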
6050 */ 6051 if (!(sd_flag & SD_BALANCE_FORK)) 6052 sync_entity_load_avg(&p->se); 6053 6054 while (sd) { 6055 struct sched_group *group; 6056 struct sched_domain *tmp; 6057 int weight; 6058 6059 if (!(sd->flags & sd_flag)) { 6060 sd = sd->child; 6061 continue; 6062 } 6063 6064 group = find_idlest_group(sd, p, cpu); 6065 if (!group) { 6066 sd = sd->child; 6067 continue; 6068 } 6069 6070 new_cpu = find_idlest_group_cpu(group, p, cpu); 6071 if (new_cpu == cpu) { 6072 /* Now try balancing at a lower domain level of 'cpu': */ 6073 sd = sd->child; 6074 continue; 6075 } 6076 6077 /* Now try balancing at a lower domain level of 'new_cpu': */ 6078 cpu = new_cpu; 6079 weight = sd->span_weight; 6080 sd = NULL; 6081 for_each_domain(cpu, tmp) { 6082 if (weight <= tmp->span_weight) 6083 break; 6084 if (tmp->flags & sd_flag) 6085 sd = tmp; 6086 } 6087 } 6088 6089 return new_cpu; 6090 } 6091 6092 static inline int __select_idle_cpu(int cpu, struct task_struct *p) 6093 { 6094 if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) && 6095 sched_cpu_cookie_match(cpu_rq(cpu), p)) 6096 return cpu; 6097 6098 return -1; 6099 } 6100 6101 #ifdef CONFIG_SCHED_SMT 6102 DEFINE_STATIC_KEY_FALSE(sched_smt_present); 6103 EXPORT_SYMBOL_GPL(sched_smt_present); 6104 6105 static inline void set_idle_cores(int cpu, int val) 6106 { 6107 struct sched_domain_shared *sds; 6108 6109 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 6110 if (sds) 6111 WRITE_ONCE(sds->has_idle_cores, val); 6112 } 6113 6114 static inline bool test_idle_cores(int cpu, bool def) 6115 { 6116 struct sched_domain_shared *sds; 6117 6118 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 6119 if (sds) 6120 return READ_ONCE(sds->has_idle_cores); 6121 6122 return def; 6123 } 6124 6125 /* 6126 * Scans the local SMT mask to see if the entire core is idle, and records this 6127 * information in sd_llc_shared->has_idle_cores. 6128 * 6129 * Since SMT siblings share all cache levels, inspecting this limited remote 6130 * state should be fairly cheap. 6131 */ 6132 void __update_idle_core(struct rq *rq) 6133 { 6134 int core = cpu_of(rq); 6135 int cpu; 6136 6137 rcu_read_lock(); 6138 if (test_idle_cores(core, true)) 6139 goto unlock; 6140 6141 for_each_cpu(cpu, cpu_smt_mask(core)) { 6142 if (cpu == core) 6143 continue; 6144 6145 if (!available_idle_cpu(cpu)) 6146 goto unlock; 6147 } 6148 6149 set_idle_cores(core, 1); 6150 unlock: 6151 rcu_read_unlock(); 6152 } 6153 6154 /* 6155 * Scan the entire LLC domain for idle cores; this dynamically switches off if 6156 * there are no idle cores left in the system; tracked through 6157 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. 6158 */ 6159 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) 6160 { 6161 bool idle = true; 6162 int cpu; 6163 6164 if (!static_branch_likely(&sched_smt_present)) 6165 return __select_idle_cpu(core, p); 6166 6167 for_each_cpu(cpu, cpu_smt_mask(core)) { 6168 if (!available_idle_cpu(cpu)) { 6169 idle = false; 6170 if (*idle_cpu == -1) { 6171 if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) { 6172 *idle_cpu = cpu; 6173 break; 6174 } 6175 continue; 6176 } 6177 break; 6178 } 6179 if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr)) 6180 *idle_cpu = cpu; 6181 } 6182 6183 if (idle) 6184 return core; 6185 6186 cpumask_andnot(cpus, cpus, cpu_smt_mask(core)); 6187 return -1; 6188 } 6189 6190 /* 6191 * Scan the local SMT mask for idle CPUs. 
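 *
 * Only siblings allowed by @p's affinity mask and lying within @sd's
 * span are considered; -1 is returned when no sibling of @target is
 * idle.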
6192 */ 6193 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 6194 { 6195 int cpu; 6196 6197 for_each_cpu(cpu, cpu_smt_mask(target)) { 6198 if (!cpumask_test_cpu(cpu, p->cpus_ptr) || 6199 !cpumask_test_cpu(cpu, sched_domain_span(sd))) 6200 continue; 6201 if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) 6202 return cpu; 6203 } 6204 6205 return -1; 6206 } 6207 6208 #else /* CONFIG_SCHED_SMT */ 6209 6210 static inline void set_idle_cores(int cpu, int val) 6211 { 6212 } 6213 6214 static inline bool test_idle_cores(int cpu, bool def) 6215 { 6216 return def; 6217 } 6218 6219 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) 6220 { 6221 return __select_idle_cpu(core, p); 6222 } 6223 6224 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 6225 { 6226 return -1; 6227 } 6228 6229 #endif /* CONFIG_SCHED_SMT */ 6230 6231 /* 6232 * Scan the LLC domain for idle CPUs; this is dynamically regulated by 6233 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the 6234 * average idle time for this rq (as found in rq->avg_idle). 6235 */ 6236 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target) 6237 { 6238 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); 6239 int i, cpu, idle_cpu = -1, nr = INT_MAX; 6240 struct rq *this_rq = this_rq(); 6241 int this = smp_processor_id(); 6242 struct sched_domain *this_sd; 6243 u64 time = 0; 6244 6245 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); 6246 if (!this_sd) 6247 return -1; 6248 6249 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 6250 6251 if (sched_feat(SIS_PROP) && !has_idle_core) { 6252 u64 avg_cost, avg_idle, span_avg; 6253 unsigned long now = jiffies; 6254 6255 /* 6256 * If we're busy, the assumption that the last idle period 6257 * predicts the future is flawed; age away the remaining 6258 * predicted idle time. 6259 */ 6260 if (unlikely(this_rq->wake_stamp < now)) { 6261 while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) { 6262 this_rq->wake_stamp++; 6263 this_rq->wake_avg_idle >>= 1; 6264 } 6265 } 6266 6267 avg_idle = this_rq->wake_avg_idle; 6268 avg_cost = this_sd->avg_scan_cost + 1; 6269 6270 span_avg = sd->span_weight * avg_idle; 6271 if (span_avg > 4*avg_cost) 6272 nr = div_u64(span_avg, avg_cost); 6273 else 6274 nr = 4; 6275 6276 time = cpu_clock(this); 6277 } 6278 6279 for_each_cpu_wrap(cpu, cpus, target + 1) { 6280 if (has_idle_core) { 6281 i = select_idle_core(p, cpu, cpus, &idle_cpu); 6282 if ((unsigned int)i < nr_cpumask_bits) 6283 return i; 6284 6285 } else { 6286 if (!--nr) 6287 return -1; 6288 idle_cpu = __select_idle_cpu(cpu, p); 6289 if ((unsigned int)idle_cpu < nr_cpumask_bits) 6290 break; 6291 } 6292 } 6293 6294 if (has_idle_core) 6295 set_idle_cores(target, false); 6296 6297 if (sched_feat(SIS_PROP) && !has_idle_core) { 6298 time = cpu_clock(this) - time; 6299 6300 /* 6301 * Account for the scan cost of wakeups against the average 6302 * idle time. 6303 */ 6304 this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time); 6305 6306 update_avg(&this_sd->avg_scan_cost, time); 6307 } 6308 6309 return idle_cpu; 6310 } 6311 6312 /* 6313 * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which 6314 * the task fits. If no CPU is big enough, but there are idle ones, try to 6315 * maximize capacity. 
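 *
 * Worked example of the fits_capacity() test used below: a task with
 * task_util == 400 fits a CPU of capacity 512, since 400 * 1280 <
 * 512 * 1024, while task_util == 410 does not (524800 >= 524288); i.e.
 * utilization must stay below ~80% of the CPU's capacity.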
6316 */ 6317 static int 6318 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) 6319 { 6320 unsigned long task_util, best_cap = 0; 6321 int cpu, best_cpu = -1; 6322 struct cpumask *cpus; 6323 6324 cpus = this_cpu_cpumask_var_ptr(select_idle_mask); 6325 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 6326 6327 task_util = uclamp_task_util(p); 6328 6329 for_each_cpu_wrap(cpu, cpus, target) { 6330 unsigned long cpu_cap = capacity_of(cpu); 6331 6332 if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) 6333 continue; 6334 if (fits_capacity(task_util, cpu_cap)) 6335 return cpu; 6336 6337 if (cpu_cap > best_cap) { 6338 best_cap = cpu_cap; 6339 best_cpu = cpu; 6340 } 6341 } 6342 6343 return best_cpu; 6344 } 6345 6346 static inline bool asym_fits_capacity(unsigned long task_util, int cpu) 6347 { 6348 if (static_branch_unlikely(&sched_asym_cpucapacity)) 6349 return fits_capacity(task_util, capacity_of(cpu)); 6350 6351 return true; 6352 } 6353 6354 /* 6355 * Try and locate an idle core/thread in the LLC cache domain. 6356 */ 6357 static int select_idle_sibling(struct task_struct *p, int prev, int target) 6358 { 6359 bool has_idle_core = false; 6360 struct sched_domain *sd; 6361 unsigned long task_util; 6362 int i, recent_used_cpu; 6363 6364 /* 6365 * On asymmetric systems, update the task utilization because we will 6366 * check that the task fits the CPU's capacity. 6367 */ 6368 if (static_branch_unlikely(&sched_asym_cpucapacity)) { 6369 sync_entity_load_avg(&p->se); 6370 task_util = uclamp_task_util(p); 6371 } 6372 6373 /* 6374 * per-cpu select_idle_mask usage 6375 */ 6376 lockdep_assert_irqs_disabled(); 6377 6378 if ((available_idle_cpu(target) || sched_idle_cpu(target)) && 6379 asym_fits_capacity(task_util, target)) 6380 return target; 6381 6382 /* 6383 * If the previous CPU is cache affine and idle, don't be stupid: 6384 */ 6385 if (prev != target && cpus_share_cache(prev, target) && 6386 (available_idle_cpu(prev) || sched_idle_cpu(prev)) && 6387 asym_fits_capacity(task_util, prev)) 6388 return prev; 6389 6390 /* 6391 * Allow a per-cpu kthread to stack with the wakee if the 6392 * kworker thread and the task's previous CPU are the same. 6393 * The assumption is that the wakee queued work for the 6394 * per-cpu kthread, that work is now complete, and the wakeup is 6395 * essentially a sync wakeup. An obvious example of this 6396 * pattern is IO completions. 6397 */ 6398 if (is_per_cpu_kthread(current) && 6399 in_task() && 6400 prev == smp_processor_id() && 6401 this_rq()->nr_running <= 1 && 6402 asym_fits_capacity(task_util, prev)) { 6403 return prev; 6404 } 6405 6406 /* Check a recently used CPU as a potential idle candidate: */ 6407 recent_used_cpu = p->recent_used_cpu; 6408 p->recent_used_cpu = prev; 6409 if (recent_used_cpu != prev && 6410 recent_used_cpu != target && 6411 cpus_share_cache(recent_used_cpu, target) && 6412 (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && 6413 cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) && 6414 asym_fits_capacity(task_util, recent_used_cpu)) { 6415 return recent_used_cpu; 6416 } 6417 6418 /* 6419 * For asymmetric CPU capacity systems, our domain of interest is 6420 * sd_asym_cpucapacity rather than sd_llc. 6421 */ 6422 if (static_branch_unlikely(&sched_asym_cpucapacity)) { 6423 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target)); 6424 /* 6425 * On an asymmetric CPU capacity system where an exclusive 6426 * cpuset defines a symmetric island (i.e.
one unique 6427 * capacity_orig value through the cpuset), the key will be set 6428 * but the CPUs within that cpuset will not have a domain with 6429 * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric 6430 * capacity path. 6431 */ 6432 if (sd) { 6433 i = select_idle_capacity(p, sd, target); 6434 return ((unsigned)i < nr_cpumask_bits) ? i : target; 6435 } 6436 } 6437 6438 sd = rcu_dereference(per_cpu(sd_llc, target)); 6439 if (!sd) 6440 return target; 6441 6442 if (sched_smt_active()) { 6443 has_idle_core = test_idle_cores(target, false); 6444 6445 if (!has_idle_core && cpus_share_cache(prev, target)) { 6446 i = select_idle_smt(p, sd, prev); 6447 if ((unsigned int)i < nr_cpumask_bits) 6448 return i; 6449 } 6450 } 6451 6452 i = select_idle_cpu(p, sd, has_idle_core, target); 6453 if ((unsigned)i < nr_cpumask_bits) 6454 return i; 6455 6456 return target; 6457 } 6458 6459 /* 6460 * cpu_util_without: compute cpu utilization without any contributions from *p 6461 * @cpu: the CPU whose utilization is requested 6462 * @p: the task whose utilization should be discounted 6463 * 6464 * The utilization of a CPU is defined by the utilization of tasks currently 6465 * enqueued on that CPU as well as tasks which are currently sleeping after an 6466 * execution on that CPU. 6467 * 6468 * This method returns the utilization of the specified CPU by discounting the 6469 * utilization of the specified task, whenever the task is currently 6470 * contributing to the CPU utilization. 6471 */ 6472 static unsigned long cpu_util_without(int cpu, struct task_struct *p) 6473 { 6474 struct cfs_rq *cfs_rq; 6475 unsigned int util; 6476 6477 /* Task has no contribution or is new */ 6478 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 6479 return cpu_util_cfs(cpu); 6480 6481 cfs_rq = &cpu_rq(cpu)->cfs; 6482 util = READ_ONCE(cfs_rq->avg.util_avg); 6483 6484 /* Discount task's util from CPU's util */ 6485 lsub_positive(&util, task_util(p)); 6486 6487 /* 6488 * Covered cases: 6489 * 6490 * a) if *p is the only task sleeping on this CPU, then: 6491 * cpu_util (== task_util) > util_est (== 0) 6492 * and thus we return: 6493 * cpu_util_without = (cpu_util - task_util) = 0 6494 * 6495 * b) if other tasks are SLEEPING on this CPU, which is now exiting 6496 * IDLE, then: 6497 * cpu_util >= task_util 6498 * cpu_util > util_est (== 0) 6499 * and thus we discount *p's blocked utilization to return: 6500 * cpu_util_without = (cpu_util - task_util) >= 0 6501 * 6502 * c) if other tasks are RUNNABLE on that CPU and 6503 * util_est > cpu_util 6504 * then we use util_est since it returns a more restrictive 6505 * estimation of the spare capacity on that CPU, by just 6506 * considering the expected utilization of tasks already 6507 * runnable on that CPU. 6508 * 6509 * Cases a) and b) are covered by the above code, while case c) is 6510 * covered by the following code when estimated utilization is 6511 * enabled.
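 *
 * Illustrative numbers for case c): if util is 300 after discounting
 * *p but util_est.enqueued is 450 because other runnable tasks are
 * expected to ramp up, we return max(300, 450) == 450, the more
 * conservative estimate of the CPU's busy time.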
6512 */ 6513 if (sched_feat(UTIL_EST)) { 6514 unsigned int estimated = 6515 READ_ONCE(cfs_rq->avg.util_est.enqueued); 6516 6517 /* 6518 * Despite the following checks we still have a small window 6519 * for a possible race, when an execl's select_task_rq_fair() 6520 * races with LB's detach_task(): 6521 * 6522 * detach_task() 6523 * p->on_rq = TASK_ON_RQ_MIGRATING; 6524 * ---------------------------------- A 6525 * deactivate_task() \ 6526 * dequeue_task() + RaceTime 6527 * util_est_dequeue() / 6528 * ---------------------------------- B 6529 * 6530 * The additional check on "current == p" is required to 6531 * properly fix the execl regression and helps in further 6532 * reducing the chances of the above race. 6533 */ 6534 if (unlikely(task_on_rq_queued(p) || current == p)) 6535 lsub_positive(&estimated, _task_util_est(p)); 6536 6537 util = max(util, estimated); 6538 } 6539 6540 /* 6541 * Utilization (estimated) can exceed the CPU capacity, thus let's 6542 * clamp to the maximum CPU capacity to ensure consistency with 6543 * cpu_util. 6544 */ 6545 return min_t(unsigned long, util, capacity_orig_of(cpu)); 6546 } 6547 6548 /* 6549 * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued) 6550 * to @dst_cpu. 6551 */ 6552 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) 6553 { 6554 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; 6555 unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); 6556 6557 /* 6558 * If @p migrates from @cpu to another, remove its contribution. Or, 6559 * if @p migrates from another CPU to @cpu, add its contribution. In 6560 * the other cases, @cpu is not impacted by the migration, so the 6561 * util_avg should already be correct. 6562 */ 6563 if (task_cpu(p) == cpu && dst_cpu != cpu) 6564 lsub_positive(&util, task_util(p)); 6565 else if (task_cpu(p) != cpu && dst_cpu == cpu) 6566 util += task_util(p); 6567 6568 if (sched_feat(UTIL_EST)) { 6569 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); 6570 6571 /* 6572 * During wake-up, the task isn't enqueued yet and doesn't 6573 * appear in the cfs_rq->avg.util_est.enqueued of any rq, 6574 * so just add it (if needed) to "simulate" what will be 6575 * cpu_util after the task has been enqueued. 6576 */ 6577 if (dst_cpu == cpu) 6578 util_est += _task_util_est(p); 6579 6580 util = max(util, util_est); 6581 } 6582 6583 return min(util, capacity_orig_of(cpu)); 6584 } 6585 6586 /* 6587 * compute_energy(): Estimates the energy that @pd would consume if @p was 6588 * migrated to @dst_cpu. compute_energy() predicts what will be the utilization 6589 * landscape of @pd's CPUs after the task migration, and uses the Energy Model 6590 * to compute what would be the energy if we decided to actually migrate that 6591 * task. 6592 */ 6593 static long 6594 compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) 6595 { 6596 struct cpumask *pd_mask = perf_domain_span(pd); 6597 unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask)); 6598 unsigned long max_util = 0, sum_util = 0; 6599 unsigned long _cpu_cap = cpu_cap; 6600 int cpu; 6601 6602 _cpu_cap -= arch_scale_thermal_pressure(cpumask_first(pd_mask)); 6603 6604 /* 6605 * The capacity state of CPUs of the current rd can be driven by CPUs 6606 * of another rd if they belong to the same pd. So, account for the 6607 * utilization of these CPUs too by masking pd with cpu_online_mask 6608 * instead of the rd span.
6609 * 6610 * If an entire pd is outside of the current rd, it will not appear in 6611 * its pd list and will not be accounted by compute_energy(). 6612 */ 6613 for_each_cpu_and(cpu, pd_mask, cpu_online_mask) { 6614 unsigned long util_freq = cpu_util_next(cpu, p, dst_cpu); 6615 unsigned long cpu_util, util_running = util_freq; 6616 struct task_struct *tsk = NULL; 6617 6618 /* 6619 * When @p is placed on @cpu: 6620 * 6621 * util_running = max(cpu_util, cpu_util_est) + 6622 * max(task_util, _task_util_est) 6623 * 6624 * while cpu_util_next is: max(cpu_util + task_util, 6625 * cpu_util_est + _task_util_est) 6626 */ 6627 if (cpu == dst_cpu) { 6628 tsk = p; 6629 util_running = 6630 cpu_util_next(cpu, p, -1) + task_util_est(p); 6631 } 6632 6633 /* 6634 * Busy time computation: utilization clamping is not 6635 * required since the ratio (sum_util / cpu_capacity) 6636 * is already enough to scale the EM reported power 6637 * consumption at the (eventually clamped) cpu_capacity. 6638 */ 6639 cpu_util = effective_cpu_util(cpu, util_running, cpu_cap, 6640 ENERGY_UTIL, NULL); 6641 6642 sum_util += min(cpu_util, _cpu_cap); 6643 6644 /* 6645 * Performance domain frequency: utilization clamping 6646 * must be considered since it affects the selection 6647 * of the performance domain frequency. 6648 * NOTE: in case RT tasks are running, by default the 6649 * FREQUENCY_UTIL's utilization can be max OPP. 6650 */ 6651 cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap, 6652 FREQUENCY_UTIL, tsk); 6653 max_util = max(max_util, min(cpu_util, _cpu_cap)); 6654 } 6655 6656 return em_cpu_energy(pd->em_pd, max_util, sum_util, _cpu_cap); 6657 } 6658 6659 /* 6660 * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the 6661 * waking task. find_energy_efficient_cpu() looks for the CPU with maximum 6662 * spare capacity in each performance domain and uses it as a potential 6663 * candidate to execute the task. Then, it uses the Energy Model to figure 6664 * out which of the CPU candidates is the most energy-efficient. 6665 * 6666 * The rationale for this heuristic is as follows. In a performance domain, 6667 * all the most energy efficient CPU candidates (according to the Energy 6668 * Model) are those for which we'll request a low frequency. When there are 6669 * several CPUs for which the frequency request will be the same, we don't 6670 * have enough data to break the tie between them, because the Energy Model 6671 * only includes active power costs. With this model, if we assume that 6672 * frequency requests follow utilization (e.g. using schedutil), the CPU with 6673 * the maximum spare capacity in a performance domain is guaranteed to be among 6674 * the best candidates of the performance domain. 6675 * 6676 * In practice, it could be preferable from an energy standpoint to pack 6677 * small tasks on a CPU in order to let other CPUs go in deeper idle states, 6678 * but that could also hurt our chances to go cluster idle, and we have no 6679 * ways to tell with the current Energy Model if this is actually a good 6680 * idea or not. So, find_energy_efficient_cpu() basically favors 6681 * cluster-packing, and spreading inside a cluster. That should at least be 6682 * a good thing for latency, and this is consistent with the idea that most 6683 * of the energy savings of EAS come from the asymmetry of the system, and 6684 * not so much from breaking the tie between identical CPUs. 
That's also the 6685 * reason why EAS is enabled in the topology code only for systems where 6686 * SD_ASYM_CPUCAPACITY is set. 6687 * 6688 * NOTE: Forkees are not accepted in the energy-aware wake-up path because 6689 * they don't have any useful utilization data yet and it's not possible to 6690 * forecast their impact on energy consumption. Consequently, they will be 6691 * placed by find_idlest_cpu() on the least loaded CPU, which might turn out 6692 * to be energy-inefficient in some use-cases. The alternative would be to 6693 * bias new tasks towards specific types of CPUs first, or to try to infer 6694 * their util_avg from the parent task, but those heuristics could hurt 6695 * other use-cases too. So, until someone finds a better way to solve this, 6696 * let's keep things simple by re-using the existing slow path. 6697 */ 6698 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) 6699 { 6700 unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX; 6701 struct root_domain *rd = cpu_rq(smp_processor_id())->rd; 6702 int cpu, best_energy_cpu = prev_cpu, target = -1; 6703 unsigned long cpu_cap, util, base_energy = 0; 6704 struct sched_domain *sd; 6705 struct perf_domain *pd; 6706 6707 rcu_read_lock(); 6708 pd = rcu_dereference(rd->pd); 6709 if (!pd || READ_ONCE(rd->overutilized)) 6710 goto unlock; 6711 6712 /* 6713 * Energy-aware wake-up happens on the lowest sched_domain starting 6714 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu. 6715 */ 6716 sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity)); 6717 while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) 6718 sd = sd->parent; 6719 if (!sd) 6720 goto unlock; 6721 6722 target = prev_cpu; 6723 6724 sync_entity_load_avg(&p->se); 6725 if (!task_util_est(p)) 6726 goto unlock; 6727 6728 for (; pd; pd = pd->next) { 6729 unsigned long cur_delta, spare_cap, max_spare_cap = 0; 6730 bool compute_prev_delta = false; 6731 unsigned long base_energy_pd; 6732 int max_spare_cap_cpu = -1; 6733 6734 for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { 6735 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 6736 continue; 6737 6738 util = cpu_util_next(cpu, p, cpu); 6739 cpu_cap = capacity_of(cpu); 6740 spare_cap = cpu_cap; 6741 lsub_positive(&spare_cap, util); 6742 6743 /* 6744 * Skip CPUs that cannot satisfy the capacity request. 6745 * IOW, placing the task there would make the CPU 6746 * overutilized. Take uclamp into account to see how 6747 * much capacity we can get out of the CPU; this is 6748 * aligned with sched_cpu_util(). 6749 */ 6750 util = uclamp_rq_util_with(cpu_rq(cpu), util, p); 6751 if (!fits_capacity(util, cpu_cap)) 6752 continue; 6753 6754 if (cpu == prev_cpu) { 6755 /* Always use prev_cpu as a candidate. */ 6756 compute_prev_delta = true; 6757 } else if (spare_cap > max_spare_cap) { 6758 /* 6759 * Find the CPU with the maximum spare capacity 6760 * in the performance domain. 6761 */ 6762 max_spare_cap = spare_cap; 6763 max_spare_cap_cpu = cpu; 6764 } 6765 } 6766 6767 if (max_spare_cap_cpu < 0 && !compute_prev_delta) 6768 continue; 6769 6770 /* Compute the 'base' energy of the pd, without @p */ 6771 base_energy_pd = compute_energy(p, -1, pd); 6772 base_energy += base_energy_pd; 6773 6774 /* Evaluate the energy impact of using prev_cpu. 
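 * Both deltas below are measured against base_energy_pd, the pd's
 * energy without @p; a delta that comes out negative can only stem
 * from a concurrent utilization update, so the search is abandoned.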
*/ 6775 if (compute_prev_delta) { 6776 prev_delta = compute_energy(p, prev_cpu, pd); 6777 if (prev_delta < base_energy_pd) 6778 goto unlock; 6779 prev_delta -= base_energy_pd; 6780 best_delta = min(best_delta, prev_delta); 6781 } 6782 6783 /* Evaluate the energy impact of using max_spare_cap_cpu. */ 6784 if (max_spare_cap_cpu >= 0) { 6785 cur_delta = compute_energy(p, max_spare_cap_cpu, pd); 6786 if (cur_delta < base_energy_pd) 6787 goto unlock; 6788 cur_delta -= base_energy_pd; 6789 if (cur_delta < best_delta) { 6790 best_delta = cur_delta; 6791 best_energy_cpu = max_spare_cap_cpu; 6792 } 6793 } 6794 } 6795 rcu_read_unlock(); 6796 6797 /* 6798 * Pick the best CPU if prev_cpu cannot be used, or if it saves at 6799 * least 6% of the energy used by prev_cpu. 6800 */ 6801 if ((prev_delta == ULONG_MAX) || 6802 (prev_delta - best_delta) > ((prev_delta + base_energy) >> 4)) 6803 target = best_energy_cpu; 6804 6805 return target; 6806 6807 unlock: 6808 rcu_read_unlock(); 6809 6810 return target; 6811 } 6812 6813 /* 6814 * select_task_rq_fair: Select target runqueue for the waking task in domains 6815 * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE, 6816 * SD_BALANCE_FORK, or SD_BALANCE_EXEC. 6817 * 6818 * Balances load by selecting the idlest CPU in the idlest group, or under 6819 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. 6820 * 6821 * Returns the target CPU number. 6822 */ 6823 static int 6824 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) 6825 { 6826 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); 6827 struct sched_domain *tmp, *sd = NULL; 6828 int cpu = smp_processor_id(); 6829 int new_cpu = prev_cpu; 6830 int want_affine = 0; 6831 /* SD_flags and WF_flags share the first nibble */ 6832 int sd_flag = wake_flags & 0xF; 6833 6834 /* 6835 * required for stable ->cpus_allowed 6836 */ 6837 lockdep_assert_held(&p->pi_lock); 6838 if (wake_flags & WF_TTWU) { 6839 record_wakee(p); 6840 6841 if (sched_energy_enabled()) { 6842 new_cpu = find_energy_efficient_cpu(p, prev_cpu); 6843 if (new_cpu >= 0) 6844 return new_cpu; 6845 new_cpu = prev_cpu; 6846 } 6847 6848 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr); 6849 } 6850 6851 rcu_read_lock(); 6852 for_each_domain(cpu, tmp) { 6853 /* 6854 * If both 'cpu' and 'prev_cpu' are part of this domain, 6855 * cpu is a valid SD_WAKE_AFFINE target. 6856 */ 6857 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && 6858 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { 6859 if (cpu != prev_cpu) 6860 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); 6861 6862 sd = NULL; /* Prefer wake_affine over balance flags */ 6863 break; 6864 } 6865 6866 /* 6867 * Usually only true for WF_EXEC and WF_FORK, as sched_domains 6868 * usually do not have SD_BALANCE_WAKE set. That means wakeup 6869 * will usually go to the fast path. 6870 */ 6871 if (tmp->flags & sd_flag) 6872 sd = tmp; 6873 else if (!want_affine) 6874 break; 6875 } 6876 6877 if (unlikely(sd)) { 6878 /* Slow path */ 6879 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); 6880 } else if (wake_flags & WF_TTWU) { /* XXX always ? 
*/ 6881 /* Fast path */ 6882 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); 6883 } 6884 rcu_read_unlock(); 6885 6886 return new_cpu; 6887 } 6888 6889 static void detach_entity_cfs_rq(struct sched_entity *se); 6890 6891 /* 6892 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and 6893 * cfs_rq_of(p) references at time of call are still valid and identify the 6894 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. 6895 */ 6896 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) 6897 { 6898 /* 6899 * As blocked tasks retain absolute vruntime the migration needs to 6900 * deal with this by subtracting the old and adding the new 6901 * min_vruntime -- the latter is done by enqueue_entity() when placing 6902 * the task on the new runqueue. 6903 */ 6904 if (READ_ONCE(p->__state) == TASK_WAKING) { 6905 struct sched_entity *se = &p->se; 6906 struct cfs_rq *cfs_rq = cfs_rq_of(se); 6907 u64 min_vruntime; 6908 6909 #ifndef CONFIG_64BIT 6910 u64 min_vruntime_copy; 6911 6912 do { 6913 min_vruntime_copy = cfs_rq->min_vruntime_copy; 6914 smp_rmb(); 6915 min_vruntime = cfs_rq->min_vruntime; 6916 } while (min_vruntime != min_vruntime_copy); 6917 #else 6918 min_vruntime = cfs_rq->min_vruntime; 6919 #endif 6920 6921 se->vruntime -= min_vruntime; 6922 } 6923 6924 if (p->on_rq == TASK_ON_RQ_MIGRATING) { 6925 /* 6926 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' 6927 * rq->lock and can modify state directly. 6928 */ 6929 lockdep_assert_rq_held(task_rq(p)); 6930 detach_entity_cfs_rq(&p->se); 6931 6932 } else { 6933 /* 6934 * We are supposed to update the task to "current" time, so 6935 * that it is up to date and ready to go to the new CPU/cfs_rq. 6936 * But we have difficulty in getting what the current time is, 6937 * so simply throw away the out-of-date time. This will result 6938 * in the wakee task being less decayed, but giving the wakee 6939 * more load does not sound bad. 6940 */ 6941 remove_entity_load_avg(&p->se); 6942 } 6943 6944 /* Tell new CPU we are migrated */ 6945 p->se.avg.last_update_time = 0; 6946 6947 /* We have migrated, no longer consider this task hot */ 6948 p->se.exec_start = 0; 6949 6950 update_scan_period(p, new_cpu); 6951 } 6952 6953 static void task_dead_fair(struct task_struct *p) 6954 { 6955 remove_entity_load_avg(&p->se); 6956 } 6957 6958 static int 6959 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6960 { 6961 if (rq->nr_running) 6962 return 1; 6963 6964 return newidle_balance(rq, rf) != 0; 6965 } 6966 #endif /* CONFIG_SMP */ 6967 6968 static unsigned long wakeup_gran(struct sched_entity *se) 6969 { 6970 unsigned long gran = sysctl_sched_wakeup_granularity; 6971 6972 /* 6973 * Since it's curr that is running now, convert the gran from 6974 * real-time to virtual-time in its units. 6975 * 6976 * By using 'se' instead of 'curr' we penalize light tasks, so 6977 * they get preempted easier. That is, if 'se' < 'curr' then 6978 * the resulting gran will be larger, therefore penalizing the 6979 * lighter task; if on the other hand 'se' > 'curr' then the 6980 * resulting gran will be smaller, again penalizing the lighter task. 6981 * 6982 * This is especially important for buddies when the leftmost 6983 * task is higher priority than the buddy. 6984 */ 6985 return calc_delta_fair(gran, se); 6986 } 6987 6988 /* 6989 * Should 'se' preempt 'curr'.
6990 * 6991 * |s1 6992 * |s2 6993 * |s3 6994 * g 6995 * |<--->|c 6996 * 6997 * w(c, s1) = -1 6998 * w(c, s2) = 0 6999 * w(c, s3) = 1 7000 * 7001 */ 7002 static int 7003 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) 7004 { 7005 s64 gran, vdiff = curr->vruntime - se->vruntime; 7006 7007 if (vdiff <= 0) 7008 return -1; 7009 7010 gran = wakeup_gran(se); 7011 if (vdiff > gran) 7012 return 1; 7013 7014 return 0; 7015 } 7016 7017 static void set_last_buddy(struct sched_entity *se) 7018 { 7019 for_each_sched_entity(se) { 7020 if (SCHED_WARN_ON(!se->on_rq)) 7021 return; 7022 if (se_is_idle(se)) 7023 return; 7024 cfs_rq_of(se)->last = se; 7025 } 7026 } 7027 7028 static void set_next_buddy(struct sched_entity *se) 7029 { 7030 for_each_sched_entity(se) { 7031 if (SCHED_WARN_ON(!se->on_rq)) 7032 return; 7033 if (se_is_idle(se)) 7034 return; 7035 cfs_rq_of(se)->next = se; 7036 } 7037 } 7038 7039 static void set_skip_buddy(struct sched_entity *se) 7040 { 7041 for_each_sched_entity(se) 7042 cfs_rq_of(se)->skip = se; 7043 } 7044 7045 /* 7046 * Preempt the current task with a newly woken task if needed: 7047 */ 7048 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 7049 { 7050 struct task_struct *curr = rq->curr; 7051 struct sched_entity *se = &curr->se, *pse = &p->se; 7052 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 7053 int scale = cfs_rq->nr_running >= sched_nr_latency; 7054 int next_buddy_marked = 0; 7055 int cse_is_idle, pse_is_idle; 7056 7057 if (unlikely(se == pse)) 7058 return; 7059 7060 /* 7061 * This is possible from callers such as attach_tasks(), in which we 7062 * unconditionally check_preempt_curr() after an enqueue (which may have 7063 * led to a throttle). This both saves work and prevents false 7064 * next-buddy nomination below. 7065 */ 7066 if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) 7067 return; 7068 7069 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { 7070 set_next_buddy(pse); 7071 next_buddy_marked = 1; 7072 } 7073 7074 /* 7075 * We can come here with TIF_NEED_RESCHED already set from new task 7076 * wake up path. 7077 * 7078 * Note: this also catches the edge-case of curr being in a throttled 7079 * group (e.g. via set_curr_task), since update_curr() (in the 7080 * enqueue of curr) will have resulted in resched being set. This 7081 * prevents us from potentially nominating it as a false LAST_BUDDY 7082 * below. 7083 */ 7084 if (test_tsk_need_resched(curr)) 7085 return; 7086 7087 /* Idle tasks are by definition preempted by non-idle tasks. */ 7088 if (unlikely(task_has_idle_policy(curr)) && 7089 likely(!task_has_idle_policy(p))) 7090 goto preempt; 7091 7092 /* 7093 * Batch and idle tasks do not preempt non-idle tasks (their preemption 7094 * is driven by the tick): 7095 */ 7096 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) 7097 return; 7098 7099 find_matching_se(&se, &pse); 7100 BUG_ON(!pse); 7101 7102 cse_is_idle = se_is_idle(se); 7103 pse_is_idle = se_is_idle(pse); 7104 7105 /* 7106 * Preempt an idle group in favor of a non-idle group (and don't preempt 7107 * in the inverse case). 7108 */ 7109 if (cse_is_idle && !pse_is_idle) 7110 goto preempt; 7111 if (cse_is_idle != pse_is_idle) 7112 return; 7113 7114 update_curr(cfs_rq_of(se)); 7115 if (wakeup_preempt_entity(se, pse) == 1) { 7116 /* 7117 * Bias pick_next to pick the sched entity that is 7118 * triggering this preemption.
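 * (Unless NEXT_BUDDY already nominated it above: next_buddy_marked
 * avoids repeating the set_next_buddy() walk.)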
7119 */ 7120 if (!next_buddy_marked) 7121 set_next_buddy(pse); 7122 goto preempt; 7123 } 7124 7125 return; 7126 7127 preempt: 7128 resched_curr(rq); 7129 /* 7130 * Only set the backward buddy when the current task is still 7131 * on the rq. This can happen when a wakeup gets interleaved 7132 * with schedule on the ->pre_schedule() or idle_balance() 7133 * point, either of which can drop the rq lock. 7134 * 7135 * Also, during early boot the idle thread is in the fair class, 7136 * for obvious reasons it's a bad idea to schedule back to it. 7137 */ 7138 if (unlikely(!se->on_rq || curr == rq->idle)) 7139 return; 7140 7141 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) 7142 set_last_buddy(se); 7143 } 7144 7145 #ifdef CONFIG_SMP 7146 static struct task_struct *pick_task_fair(struct rq *rq) 7147 { 7148 struct sched_entity *se; 7149 struct cfs_rq *cfs_rq; 7150 7151 again: 7152 cfs_rq = &rq->cfs; 7153 if (!cfs_rq->nr_running) 7154 return NULL; 7155 7156 do { 7157 struct sched_entity *curr = cfs_rq->curr; 7158 7159 /* When we pick for a remote RQ, we'll not have done put_prev_entity() */ 7160 if (curr) { 7161 if (curr->on_rq) 7162 update_curr(cfs_rq); 7163 else 7164 curr = NULL; 7165 7166 if (unlikely(check_cfs_rq_runtime(cfs_rq))) 7167 goto again; 7168 } 7169 7170 se = pick_next_entity(cfs_rq, curr); 7171 cfs_rq = group_cfs_rq(se); 7172 } while (cfs_rq); 7173 7174 return task_of(se); 7175 } 7176 #endif 7177 7178 struct task_struct * 7179 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 7180 { 7181 struct cfs_rq *cfs_rq = &rq->cfs; 7182 struct sched_entity *se; 7183 struct task_struct *p; 7184 int new_tasks; 7185 7186 again: 7187 if (!sched_fair_runnable(rq)) 7188 goto idle; 7189 7190 #ifdef CONFIG_FAIR_GROUP_SCHED 7191 if (!prev || prev->sched_class != &fair_sched_class) 7192 goto simple; 7193 7194 /* 7195 * Because of the set_next_buddy() in dequeue_task_fair() it is rather 7196 * likely that a next task is from the same cgroup as the current. 7197 * 7198 * Therefore attempt to avoid putting and setting the entire cgroup 7199 * hierarchy, only change the part that actually changes. 7200 */ 7201 7202 do { 7203 struct sched_entity *curr = cfs_rq->curr; 7204 7205 /* 7206 * Since we got here without doing put_prev_entity() we also 7207 * have to consider cfs_rq->curr. If it is still a runnable 7208 * entity, update_curr() will update its vruntime, otherwise 7209 * forget we've ever seen it. 7210 */ 7211 if (curr) { 7212 if (curr->on_rq) 7213 update_curr(cfs_rq); 7214 else 7215 curr = NULL; 7216 7217 /* 7218 * This call to check_cfs_rq_runtime() will do the 7219 * throttle and dequeue its entity in the parent(s). 7220 * Therefore the nr_running test will indeed 7221 * be correct. 7222 */ 7223 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { 7224 cfs_rq = &rq->cfs; 7225 7226 if (!cfs_rq->nr_running) 7227 goto idle; 7228 7229 goto simple; 7230 } 7231 } 7232 7233 se = pick_next_entity(cfs_rq, curr); 7234 cfs_rq = group_cfs_rq(se); 7235 } while (cfs_rq); 7236 7237 p = task_of(se); 7238 7239 /* 7240 * Since we haven't yet done put_prev_entity and if the selected task 7241 * is a different task than we started out with, try and touch the 7242 * least amount of cfs_rqs.
7243 */ 7244 if (prev != p) { 7245 struct sched_entity *pse = &prev->se; 7246 7247 while (!(cfs_rq = is_same_group(se, pse))) { 7248 int se_depth = se->depth; 7249 int pse_depth = pse->depth; 7250 7251 if (se_depth <= pse_depth) { 7252 put_prev_entity(cfs_rq_of(pse), pse); 7253 pse = parent_entity(pse); 7254 } 7255 if (se_depth >= pse_depth) { 7256 set_next_entity(cfs_rq_of(se), se); 7257 se = parent_entity(se); 7258 } 7259 } 7260 7261 put_prev_entity(cfs_rq, pse); 7262 set_next_entity(cfs_rq, se); 7263 } 7264 7265 goto done; 7266 simple: 7267 #endif 7268 if (prev) 7269 put_prev_task(rq, prev); 7270 7271 do { 7272 se = pick_next_entity(cfs_rq, NULL); 7273 set_next_entity(cfs_rq, se); 7274 cfs_rq = group_cfs_rq(se); 7275 } while (cfs_rq); 7276 7277 p = task_of(se); 7278 7279 done: __maybe_unused; 7280 #ifdef CONFIG_SMP 7281 /* 7282 * Move the next running task to the front of 7283 * the list, so our cfs_tasks list becomes an MRU 7284 * one. 7285 */ 7286 list_move(&p->se.group_node, &rq->cfs_tasks); 7287 #endif 7288 7289 if (hrtick_enabled_fair(rq)) 7290 hrtick_start_fair(rq, p); 7291 7292 update_misfit_status(p, rq); 7293 7294 return p; 7295 7296 idle: 7297 if (!rf) 7298 return NULL; 7299 7300 new_tasks = newidle_balance(rq, rf); 7301 7302 /* 7303 * Because newidle_balance() releases (and re-acquires) rq->lock, it is 7304 * possible for any higher priority task to appear. In that case we 7305 * must re-start the pick_next_entity() loop. 7306 */ 7307 if (new_tasks < 0) 7308 return RETRY_TASK; 7309 7310 if (new_tasks > 0) 7311 goto again; 7312 7313 /* 7314 * rq is about to be idle, check if we need to update the 7315 * lost_idle_time of clock_pelt 7316 */ 7317 update_idle_rq_clock_pelt(rq); 7318 7319 return NULL; 7320 } 7321 7322 static struct task_struct *__pick_next_task_fair(struct rq *rq) 7323 { 7324 return pick_next_task_fair(rq, NULL, NULL); 7325 } 7326 7327 /* 7328 * Account for a descheduled task: 7329 */ 7330 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) 7331 { 7332 struct sched_entity *se = &prev->se; 7333 struct cfs_rq *cfs_rq; 7334 7335 for_each_sched_entity(se) { 7336 cfs_rq = cfs_rq_of(se); 7337 put_prev_entity(cfs_rq, se); 7338 } 7339 } 7340 7341 /* 7342 * sched_yield() is very simple 7343 * 7344 * The magic of dealing with the ->skip buddy is in pick_next_entity. 7345 */ 7346 static void yield_task_fair(struct rq *rq) 7347 { 7348 struct task_struct *curr = rq->curr; 7349 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 7350 struct sched_entity *se = &curr->se; 7351 7352 /* 7353 * Are we the only task in the tree? 7354 */ 7355 if (unlikely(rq->nr_running == 1)) 7356 return; 7357 7358 clear_buddies(cfs_rq, se); 7359 7360 if (curr->policy != SCHED_BATCH) { 7361 update_rq_clock(rq); 7362 /* 7363 * Update run-time statistics of the 'current'. 7364 */ 7365 update_curr(cfs_rq); 7366 /* 7367 * Tell update_rq_clock() that we've just updated, 7368 * so we don't do microscopic update in schedule() 7369 * and double the fastpath cost. 7370 */ 7371 rq_clock_skip_update(rq); 7372 } 7373 7374 set_skip_buddy(se); 7375 } 7376 7377 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) 7378 { 7379 struct sched_entity *se = &p->se; 7380 7381 /* throttled hierarchies are not runnable */ 7382 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) 7383 return false; 7384 7385 /* Tell the scheduler that we'd really like p's se to run next.
*/ 7386 set_next_buddy(se); 7387 7388 yield_task_fair(rq); 7389 7390 return true; 7391 } 7392 7393 #ifdef CONFIG_SMP 7394 /************************************************** 7395 * Fair scheduling class load-balancing methods. 7396 * 7397 * BASICS 7398 * 7399 * The purpose of load-balancing is to achieve the same basic fairness the 7400 * per-CPU scheduler provides, namely provide a proportional amount of compute 7401 * time to each task. This is expressed in the following equation: 7402 * 7403 * W_i,n/P_i == W_j,n/P_j for all i,j (1) 7404 * 7405 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight 7406 * W_i,0 is defined as: 7407 * 7408 * W_i,0 = \Sum_j w_i,j (2) 7409 * 7410 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight 7411 * is derived from the nice value as per sched_prio_to_weight[]. 7412 * 7413 * The weight average is an exponential decay average of the instantaneous 7414 * weight: 7415 * 7416 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) 7417 * 7418 * C_i is the compute capacity of CPU i, typically it is the 7419 * fraction of 'recent' time available for SCHED_OTHER task execution. But it 7420 * can also include other factors [XXX]. 7421 * 7422 * To achieve this balance we define a measure of imbalance which follows 7423 * directly from (1): 7424 * 7425 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) 7426 * 7427 * We then move tasks around to minimize the imbalance. In the continuous 7428 * function space it is obvious this converges, in the discrete case we get 7429 * a few fun cases generally called infeasible weight scenarios. 7430 * 7431 * [XXX expand on: 7432 * - infeasible weights; 7433 * - local vs global optima in the discrete case. ] 7434 * 7435 * 7436 * SCHED DOMAINS 7437 * 7438 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) 7439 * for all i,j solution, we create a tree of CPUs that follows the hardware 7440 * topology where each level pairs two lower groups (or better). This results 7441 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the 7442 * tree to only the first of the previous level and we decrease the frequency 7443 * of load-balance at each level inv. proportional to the number of CPUs in 7444 * the groups. 7445 * 7446 * This yields: 7447 * 7448 * log_2 n 1 n 7449 * \Sum { --- * --- * 2^i } = O(n) (5) 7450 * i = 0 2^i 2^i 7451 * `- size of each group 7452 * | | `- number of CPUs doing load-balance 7453 * | `- freq 7454 * `- sum over all levels 7455 * 7456 * Coupled with a limit on how many tasks we can migrate every balance pass, 7457 * this makes (5) the runtime complexity of the balancer. 7458 * 7459 * An important property here is that each CPU is still (indirectly) connected 7460 * to every other CPU in at most O(log n) steps: 7461 * 7462 * The adjacency matrix of the resulting graph is given by: 7463 * 7464 * log_2 n 7465 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6) 7466 * k = 0 7467 * 7468 * And you'll find that: 7469 * 7470 * A^(log_2 n)_i,j != 0 for all i,j (7) 7471 * 7472 * Showing there's indeed a path between every CPU in at most O(log n) steps.
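 *
 * (Small worked instance of (6) with n = 4: level 0 yields the edges
 *  0-1 and 2-3, level 1 yields 0-2 and 0-3, so e.g. CPU 1 reaches
 *  CPU 3 through CPU 0 in log_2 4 = 2 steps.)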
7473 * The task movement gives a factor of O(m), giving a convergence complexity 7474 * of: 7475 * 7476 * O(nm log n), n := nr_cpus, m := nr_tasks (8) 7477 * 7478 * 7479 * WORK CONSERVING 7480 * 7481 * In order to avoid CPUs going idle while there's still work to do, new idle 7482 * balancing is more aggressive and has the newly idle CPU iterate up the domain 7483 * tree itself instead of relying on other CPUs to bring it work. 7484 * 7485 * This adds some complexity to both (5) and (8) but it reduces the total idle 7486 * time. 7487 * 7488 * [XXX more?] 7489 * 7490 * 7491 * CGROUPS 7492 * 7493 * Cgroups make a horror show out of (2), instead of a simple sum we get: 7494 * 7495 * s_k,i 7496 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9) 7497 * S_k 7498 * 7499 * Where 7500 * 7501 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) 7502 * 7503 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i. 7504 * 7505 * The big problem is S_k, it's a global sum needed to compute a local (W_i) 7506 * property. 7507 * 7508 * [XXX write more on how we solve this.. _after_ merging pjt's patches that 7509 * rewrite all of this once again.] 7510 */ 7511 7512 static unsigned long __read_mostly max_load_balance_interval = HZ/10; 7513 7514 enum fbq_type { regular, remote, all }; 7515 7516 /* 7517 * 'group_type' describes the group of CPUs at the moment of load balancing. 7518 * 7519 * The enum is ordered by pulling priority, with the group with lowest priority 7520 * first so the group_type can simply be compared when selecting the busiest 7521 * group. See update_sd_pick_busiest(). 7522 */ 7523 enum group_type { 7524 /* The group has spare capacity that can be used to run more tasks. */ 7525 group_has_spare = 0, 7526 /* 7527 * The group is fully used and the tasks don't compete for more CPU 7528 * cycles. Nevertheless, some tasks might wait before running. 7529 */ 7530 group_fully_busy, 7531 /* 7532 * SD_ASYM_CPUCAPACITY only: One task doesn't fit the CPU's capacity 7533 * and must be migrated to a more powerful CPU. 7534 */ 7535 group_misfit_task, 7536 /* 7537 * SD_ASYM_PACKING only: One local CPU with higher capacity is available, 7538 * and the task should be migrated to it instead of running on the 7539 * current CPU. 7540 */ 7541 group_asym_packing, 7542 /* 7543 * The tasks' affinity constraints previously prevented the scheduler 7544 * from balancing the load across the system. 7545 */ 7546 group_imbalanced, 7547 /* 7548 * The CPU is overloaded and can't provide expected CPU cycles to all 7549 * tasks.
7550 */ 7551 group_overloaded 7552 }; 7553 7554 enum migration_type { 7555 migrate_load = 0, 7556 migrate_util, 7557 migrate_task, 7558 migrate_misfit 7559 }; 7560 7561 #define LBF_ALL_PINNED 0x01 7562 #define LBF_NEED_BREAK 0x02 7563 #define LBF_DST_PINNED 0x04 7564 #define LBF_SOME_PINNED 0x08 7565 #define LBF_ACTIVE_LB 0x10 7566 7567 struct lb_env { 7568 struct sched_domain *sd; 7569 7570 struct rq *src_rq; 7571 int src_cpu; 7572 7573 int dst_cpu; 7574 struct rq *dst_rq; 7575 7576 struct cpumask *dst_grpmask; 7577 int new_dst_cpu; 7578 enum cpu_idle_type idle; 7579 long imbalance; 7580 /* The set of CPUs under consideration for load-balancing */ 7581 struct cpumask *cpus; 7582 7583 unsigned int flags; 7584 7585 unsigned int loop; 7586 unsigned int loop_break; 7587 unsigned int loop_max; 7588 7589 enum fbq_type fbq_type; 7590 enum migration_type migration_type; 7591 struct list_head tasks; 7592 }; 7593 7594 /* 7595 * Is this task likely cache-hot: 7596 */ 7597 static int task_hot(struct task_struct *p, struct lb_env *env) 7598 { 7599 s64 delta; 7600 7601 lockdep_assert_rq_held(env->src_rq); 7602 7603 if (p->sched_class != &fair_sched_class) 7604 return 0; 7605 7606 if (unlikely(task_has_idle_policy(p))) 7607 return 0; 7608 7609 /* SMT siblings share cache */ 7610 if (env->sd->flags & SD_SHARE_CPUCAPACITY) 7611 return 0; 7612 7613 /* 7614 * Buddy candidates are cache hot: 7615 */ 7616 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && 7617 (&p->se == cfs_rq_of(&p->se)->next || 7618 &p->se == cfs_rq_of(&p->se)->last)) 7619 return 1; 7620 7621 if (sysctl_sched_migration_cost == -1) 7622 return 1; 7623 7624 /* 7625 * Don't migrate task if the task's cookie does not match 7626 * with the destination CPU's core cookie. 7627 */ 7628 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) 7629 return 1; 7630 7631 if (sysctl_sched_migration_cost == 0) 7632 return 0; 7633 7634 delta = rq_clock_task(env->src_rq) - p->se.exec_start; 7635 7636 return delta < (s64)sysctl_sched_migration_cost; 7637 } 7638 7639 #ifdef CONFIG_NUMA_BALANCING 7640 /* 7641 * Returns 1 if task migration degrades locality. 7642 * Returns 0 if task migration improves locality, i.e. migration is preferred. 7643 * Returns -1 if task migration is not affected by locality. 7644 */ 7645 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) 7646 { 7647 struct numa_group *numa_group = rcu_dereference(p->numa_group); 7648 unsigned long src_weight, dst_weight; 7649 int src_nid, dst_nid, dist; 7650 7651 if (!static_branch_likely(&sched_numa_balancing)) 7652 return -1; 7653 7654 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) 7655 return -1; 7656 7657 src_nid = cpu_to_node(env->src_cpu); 7658 dst_nid = cpu_to_node(env->dst_cpu); 7659 7660 if (src_nid == dst_nid) 7661 return -1; 7662 7663 /* Migrating away from the preferred node is always bad. */ 7664 if (src_nid == p->numa_preferred_nid) { 7665 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) 7666 return 1; 7667 else 7668 return -1; 7669 } 7670 7671 /* Encourage migration to the preferred node. */ 7672 if (dst_nid == p->numa_preferred_nid) 7673 return 0; 7674 7675 /* Leaving a core idle is often worse than degrading locality.
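 * Hence, when the destination CPU is idle, return -1 and let the
 * generic cache-hotness check in task_hot() decide instead.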
*/ 7676 if (env->idle == CPU_IDLE) 7677 return -1; 7678 7679 dist = node_distance(src_nid, dst_nid); 7680 if (numa_group) { 7681 src_weight = group_weight(p, src_nid, dist); 7682 dst_weight = group_weight(p, dst_nid, dist); 7683 } else { 7684 src_weight = task_weight(p, src_nid, dist); 7685 dst_weight = task_weight(p, dst_nid, dist); 7686 } 7687 7688 return dst_weight < src_weight; 7689 } 7690 7691 #else 7692 static inline int migrate_degrades_locality(struct task_struct *p, 7693 struct lb_env *env) 7694 { 7695 return -1; 7696 } 7697 #endif 7698 7699 /* 7700 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 7701 */ 7702 static 7703 int can_migrate_task(struct task_struct *p, struct lb_env *env) 7704 { 7705 int tsk_cache_hot; 7706 7707 lockdep_assert_rq_held(env->src_rq); 7708 7709 /* 7710 * We do not migrate tasks that are: 7711 * 1) throttled_lb_pair, or 7712 * 2) cannot be migrated to this CPU due to cpus_ptr, or 7713 * 3) running (obviously), or 7714 * 4) are cache-hot on their current CPU. 7715 */ 7716 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) 7717 return 0; 7718 7719 /* Disregard pcpu kthreads; they are where they need to be. */ 7720 if (kthread_is_per_cpu(p)) 7721 return 0; 7722 7723 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { 7724 int cpu; 7725 7726 schedstat_inc(p->stats.nr_failed_migrations_affine); 7727 7728 env->flags |= LBF_SOME_PINNED; 7729 7730 /* 7731 * Remember if this task can be migrated to any other CPU in 7732 * our sched_group. We may want to revisit it if we couldn't 7733 * meet load balance goals by pulling other tasks on src_cpu. 7734 * 7735 * Avoid computing new_dst_cpu 7736 * - for NEWLY_IDLE 7737 * - if we have already computed one in current iteration 7738 * - if it's an active balance 7739 */ 7740 if (env->idle == CPU_NEWLY_IDLE || 7741 env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB)) 7742 return 0; 7743 7744 /* Prevent to re-select dst_cpu via env's CPUs: */ 7745 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { 7746 if (cpumask_test_cpu(cpu, p->cpus_ptr)) { 7747 env->flags |= LBF_DST_PINNED; 7748 env->new_dst_cpu = cpu; 7749 break; 7750 } 7751 } 7752 7753 return 0; 7754 } 7755 7756 /* Record that we found at least one task that could run on dst_cpu */ 7757 env->flags &= ~LBF_ALL_PINNED; 7758 7759 if (task_running(env->src_rq, p)) { 7760 schedstat_inc(p->stats.nr_failed_migrations_running); 7761 return 0; 7762 } 7763 7764 /* 7765 * Aggressive migration if: 7766 * 1) active balance 7767 * 2) destination numa is preferred 7768 * 3) task is cache cold, or 7769 * 4) too many balance attempts have failed. 
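	 *
	 * Items 3) and 4) combine below: a cache-hot task is still migrated
	 * once nr_balance_failed exceeds the domain's cache_nice_tries.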
7770 */ 7771 if (env->flags & LBF_ACTIVE_LB) 7772 return 1; 7773 7774 tsk_cache_hot = migrate_degrades_locality(p, env); 7775 if (tsk_cache_hot == -1) 7776 tsk_cache_hot = task_hot(p, env); 7777 7778 if (tsk_cache_hot <= 0 || 7779 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { 7780 if (tsk_cache_hot == 1) { 7781 schedstat_inc(env->sd->lb_hot_gained[env->idle]); 7782 schedstat_inc(p->stats.nr_forced_migrations); 7783 } 7784 return 1; 7785 } 7786 7787 schedstat_inc(p->stats.nr_failed_migrations_hot); 7788 return 0; 7789 } 7790 7791 /* 7792 * detach_task() -- detach the task for the migration specified in env 7793 */ 7794 static void detach_task(struct task_struct *p, struct lb_env *env) 7795 { 7796 lockdep_assert_rq_held(env->src_rq); 7797 7798 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); 7799 set_task_cpu(p, env->dst_cpu); 7800 } 7801 7802 /* 7803 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as 7804 * part of active balancing operations within "domain". 7805 * 7806 * Returns a task if successful and NULL otherwise. 7807 */ 7808 static struct task_struct *detach_one_task(struct lb_env *env) 7809 { 7810 struct task_struct *p; 7811 7812 lockdep_assert_rq_held(env->src_rq); 7813 7814 list_for_each_entry_reverse(p, 7815 &env->src_rq->cfs_tasks, se.group_node) { 7816 if (!can_migrate_task(p, env)) 7817 continue; 7818 7819 detach_task(p, env); 7820 7821 /* 7822 * Right now, this is only the second place where 7823 * lb_gained[env->idle] is updated (other is detach_tasks) 7824 * so we can safely collect stats here rather than 7825 * inside detach_tasks(). 7826 */ 7827 schedstat_inc(env->sd->lb_gained[env->idle]); 7828 return p; 7829 } 7830 return NULL; 7831 } 7832 7833 static const unsigned int sched_nr_migrate_break = 32; 7834 7835 /* 7836 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from 7837 * busiest_rq, as part of a balancing operation within domain "sd". 7838 * 7839 * Returns number of detached tasks if successful and 0 otherwise. 7840 */ 7841 static int detach_tasks(struct lb_env *env) 7842 { 7843 struct list_head *tasks = &env->src_rq->cfs_tasks; 7844 unsigned long util, load; 7845 struct task_struct *p; 7846 int detached = 0; 7847 7848 lockdep_assert_rq_held(env->src_rq); 7849 7850 /* 7851 * Source run queue has been emptied by another CPU, clear 7852 * LBF_ALL_PINNED flag as we will not test any task. 7853 */ 7854 if (env->src_rq->nr_running <= 1) { 7855 env->flags &= ~LBF_ALL_PINNED; 7856 return 0; 7857 } 7858 7859 if (env->imbalance <= 0) 7860 return 0; 7861 7862 while (!list_empty(tasks)) { 7863 /* 7864 * We don't want to steal all, otherwise we may be treated likewise, 7865 * which could at worst lead to a livelock crash. 7866 */ 7867 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) 7868 break; 7869 7870 p = list_last_entry(tasks, struct task_struct, se.group_node); 7871 7872 env->loop++; 7873 /* We've more or less seen every task there is, call it quits */ 7874 if (env->loop > env->loop_max) 7875 break; 7876 7877 /* take a breather every nr_migrate tasks */ 7878 if (env->loop > env->loop_break) { 7879 env->loop_break += sched_nr_migrate_break; 7880 env->flags |= LBF_NEED_BREAK; 7881 break; 7882 } 7883 7884 if (!can_migrate_task(p, env)) 7885 goto next; 7886 7887 switch (env->migration_type) { 7888 case migrate_load: 7889 /* 7890 * Depending of the number of CPUs and tasks and the 7891 * cgroup hierarchy, task_h_load() can return a null 7892 * value. 
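			 * (For instance, a task in a deep and mostly idle
			 * cgroup hierarchy can see its h_load rounded down
			 * to 0 by the successive integer divisions, hence
			 * the clamp to 1 below.)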
Make sure that env->imbalance decreases 7893 * otherwise detach_tasks() will stop only after 7894 * detaching up to loop_max tasks. 7895 */ 7896 load = max_t(unsigned long, task_h_load(p), 1); 7897 7898 if (sched_feat(LB_MIN) && 7899 load < 16 && !env->sd->nr_balance_failed) 7900 goto next; 7901 7902 /* 7903 * Make sure that we don't migrate too much load. 7904 * Nevertheless, let relax the constraint if 7905 * scheduler fails to find a good waiting task to 7906 * migrate. 7907 */ 7908 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance) 7909 goto next; 7910 7911 env->imbalance -= load; 7912 break; 7913 7914 case migrate_util: 7915 util = task_util_est(p); 7916 7917 if (util > env->imbalance) 7918 goto next; 7919 7920 env->imbalance -= util; 7921 break; 7922 7923 case migrate_task: 7924 env->imbalance--; 7925 break; 7926 7927 case migrate_misfit: 7928 /* This is not a misfit task */ 7929 if (task_fits_capacity(p, capacity_of(env->src_cpu))) 7930 goto next; 7931 7932 env->imbalance = 0; 7933 break; 7934 } 7935 7936 detach_task(p, env); 7937 list_add(&p->se.group_node, &env->tasks); 7938 7939 detached++; 7940 7941 #ifdef CONFIG_PREEMPTION 7942 /* 7943 * NEWIDLE balancing is a source of latency, so preemptible 7944 * kernels will stop after the first task is detached to minimize 7945 * the critical section. 7946 */ 7947 if (env->idle == CPU_NEWLY_IDLE) 7948 break; 7949 #endif 7950 7951 /* 7952 * We only want to steal up to the prescribed amount of 7953 * load/util/tasks. 7954 */ 7955 if (env->imbalance <= 0) 7956 break; 7957 7958 continue; 7959 next: 7960 list_move(&p->se.group_node, tasks); 7961 } 7962 7963 /* 7964 * Right now, this is one of only two places we collect this stat 7965 * so we can safely collect detach_one_task() stats here rather 7966 * than inside detach_one_task(). 7967 */ 7968 schedstat_add(env->sd->lb_gained[env->idle], detached); 7969 7970 return detached; 7971 } 7972 7973 /* 7974 * attach_task() -- attach the task detached by detach_task() to its new rq. 7975 */ 7976 static void attach_task(struct rq *rq, struct task_struct *p) 7977 { 7978 lockdep_assert_rq_held(rq); 7979 7980 BUG_ON(task_rq(p) != rq); 7981 activate_task(rq, p, ENQUEUE_NOCLOCK); 7982 check_preempt_curr(rq, p, 0); 7983 } 7984 7985 /* 7986 * attach_one_task() -- attaches the task returned from detach_one_task() to 7987 * its new rq. 7988 */ 7989 static void attach_one_task(struct rq *rq, struct task_struct *p) 7990 { 7991 struct rq_flags rf; 7992 7993 rq_lock(rq, &rf); 7994 update_rq_clock(rq); 7995 attach_task(rq, p); 7996 rq_unlock(rq, &rf); 7997 } 7998 7999 /* 8000 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their 8001 * new rq. 
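 *
 * All of these tasks were dequeued under src_rq's lock by detach_tasks();
 * they are re-enqueued here in a single pass under dst_rq's lock.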
8002 */ 8003 static void attach_tasks(struct lb_env *env) 8004 { 8005 struct list_head *tasks = &env->tasks; 8006 struct task_struct *p; 8007 struct rq_flags rf; 8008 8009 rq_lock(env->dst_rq, &rf); 8010 update_rq_clock(env->dst_rq); 8011 8012 while (!list_empty(tasks)) { 8013 p = list_first_entry(tasks, struct task_struct, se.group_node); 8014 list_del_init(&p->se.group_node); 8015 8016 attach_task(env->dst_rq, p); 8017 } 8018 8019 rq_unlock(env->dst_rq, &rf); 8020 } 8021 8022 #ifdef CONFIG_NO_HZ_COMMON 8023 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) 8024 { 8025 if (cfs_rq->avg.load_avg) 8026 return true; 8027 8028 if (cfs_rq->avg.util_avg) 8029 return true; 8030 8031 return false; 8032 } 8033 8034 static inline bool others_have_blocked(struct rq *rq) 8035 { 8036 if (READ_ONCE(rq->avg_rt.util_avg)) 8037 return true; 8038 8039 if (READ_ONCE(rq->avg_dl.util_avg)) 8040 return true; 8041 8042 if (thermal_load_avg(rq)) 8043 return true; 8044 8045 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 8046 if (READ_ONCE(rq->avg_irq.util_avg)) 8047 return true; 8048 #endif 8049 8050 return false; 8051 } 8052 8053 static inline void update_blocked_load_tick(struct rq *rq) 8054 { 8055 WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies); 8056 } 8057 8058 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) 8059 { 8060 if (!has_blocked) 8061 rq->has_blocked_load = 0; 8062 } 8063 #else 8064 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } 8065 static inline bool others_have_blocked(struct rq *rq) { return false; } 8066 static inline void update_blocked_load_tick(struct rq *rq) {} 8067 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} 8068 #endif 8069 8070 static bool __update_blocked_others(struct rq *rq, bool *done) 8071 { 8072 const struct sched_class *curr_class; 8073 u64 now = rq_clock_pelt(rq); 8074 unsigned long thermal_pressure; 8075 bool decayed; 8076 8077 /* 8078 * update_load_avg() can call cpufreq_update_util(). Make sure that RT, 8079 * DL and IRQ signals have been updated before updating CFS. 8080 */ 8081 curr_class = rq->curr->sched_class; 8082 8083 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 8084 8085 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | 8086 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | 8087 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) | 8088 update_irq_load_avg(rq, 0); 8089 8090 if (others_have_blocked(rq)) 8091 *done = false; 8092 8093 return decayed; 8094 } 8095 8096 #ifdef CONFIG_FAIR_GROUP_SCHED 8097 8098 static bool __update_blocked_fair(struct rq *rq, bool *done) 8099 { 8100 struct cfs_rq *cfs_rq, *pos; 8101 bool decayed = false; 8102 int cpu = cpu_of(rq); 8103 8104 /* 8105 * Iterates the task_group tree in a bottom up fashion, see 8106 * list_add_leaf_cfs_rq() for details. 8107 */ 8108 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { 8109 struct sched_entity *se; 8110 8111 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { 8112 update_tg_load_avg(cfs_rq); 8113 8114 if (cfs_rq == &rq->cfs) 8115 decayed = true; 8116 } 8117 8118 /* Propagate pending load changes to the parent, if any: */ 8119 se = cfs_rq->tg->se[cpu]; 8120 if (se && !skip_blocked_update(se)) 8121 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); 8122 8123 /* 8124 * There can be a lot of idle CPU cgroups. Don't let fully 8125 * decayed cfs_rqs linger on the list. 
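		 *
		 * With the 32ms PELT half-life (see pelt.c), a blocked
		 * contribution of e.g. 1024 decays to ~512 after 32ms and
		 * ~256 after 64ms, so a quiescent cfs_rq reaches zero and
		 * can be dropped after a few hundred ms (illustrative
		 * numbers).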
8126 */ 8127 if (cfs_rq_is_decayed(cfs_rq)) 8128 list_del_leaf_cfs_rq(cfs_rq); 8129 8130 /* Don't need periodic decay once load/util_avg are null */ 8131 if (cfs_rq_has_blocked(cfs_rq)) 8132 *done = false; 8133 } 8134 8135 return decayed; 8136 } 8137 8138 /* 8139 * Compute the hierarchical load factor for cfs_rq and all its ascendants. 8140 * This needs to be done in a top-down fashion because the load of a child 8141 * group is a fraction of its parents load. 8142 */ 8143 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) 8144 { 8145 struct rq *rq = rq_of(cfs_rq); 8146 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; 8147 unsigned long now = jiffies; 8148 unsigned long load; 8149 8150 if (cfs_rq->last_h_load_update == now) 8151 return; 8152 8153 WRITE_ONCE(cfs_rq->h_load_next, NULL); 8154 for_each_sched_entity(se) { 8155 cfs_rq = cfs_rq_of(se); 8156 WRITE_ONCE(cfs_rq->h_load_next, se); 8157 if (cfs_rq->last_h_load_update == now) 8158 break; 8159 } 8160 8161 if (!se) { 8162 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); 8163 cfs_rq->last_h_load_update = now; 8164 } 8165 8166 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { 8167 load = cfs_rq->h_load; 8168 load = div64_ul(load * se->avg.load_avg, 8169 cfs_rq_load_avg(cfs_rq) + 1); 8170 cfs_rq = group_cfs_rq(se); 8171 cfs_rq->h_load = load; 8172 cfs_rq->last_h_load_update = now; 8173 } 8174 } 8175 8176 static unsigned long task_h_load(struct task_struct *p) 8177 { 8178 struct cfs_rq *cfs_rq = task_cfs_rq(p); 8179 8180 update_cfs_rq_h_load(cfs_rq); 8181 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, 8182 cfs_rq_load_avg(cfs_rq) + 1); 8183 } 8184 #else 8185 static bool __update_blocked_fair(struct rq *rq, bool *done) 8186 { 8187 struct cfs_rq *cfs_rq = &rq->cfs; 8188 bool decayed; 8189 8190 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); 8191 if (cfs_rq_has_blocked(cfs_rq)) 8192 *done = false; 8193 8194 return decayed; 8195 } 8196 8197 static unsigned long task_h_load(struct task_struct *p) 8198 { 8199 return p->se.avg.load_avg; 8200 } 8201 #endif 8202 8203 static void update_blocked_averages(int cpu) 8204 { 8205 bool decayed = false, done = true; 8206 struct rq *rq = cpu_rq(cpu); 8207 struct rq_flags rf; 8208 8209 rq_lock_irqsave(rq, &rf); 8210 update_blocked_load_tick(rq); 8211 update_rq_clock(rq); 8212 8213 decayed |= __update_blocked_others(rq, &done); 8214 decayed |= __update_blocked_fair(rq, &done); 8215 8216 update_blocked_load_status(rq, !done); 8217 if (decayed) 8218 cpufreq_update_util(rq, 0); 8219 rq_unlock_irqrestore(rq, &rf); 8220 } 8221 8222 /********** Helpers for find_busiest_group ************************/ 8223 8224 /* 8225 * sg_lb_stats - stats of a sched_group required for load_balancing 8226 */ 8227 struct sg_lb_stats { 8228 unsigned long avg_load; /*Avg load across the CPUs of the group */ 8229 unsigned long group_load; /* Total load over the CPUs of the group */ 8230 unsigned long group_capacity; 8231 unsigned long group_util; /* Total utilization over the CPUs of the group */ 8232 unsigned long group_runnable; /* Total runnable time over the CPUs of the group */ 8233 unsigned int sum_nr_running; /* Nr of tasks running in the group */ 8234 unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */ 8235 unsigned int idle_cpus; 8236 unsigned int group_weight; 8237 enum group_type group_type; 8238 unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */ 8239 unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */ 8240 
#ifdef CONFIG_NUMA_BALANCING 8241 unsigned int nr_numa_running; 8242 unsigned int nr_preferred_running; 8243 #endif 8244 }; 8245 8246 /* 8247 * sd_lb_stats - Structure to store the statistics of a sched_domain 8248 * during load balancing. 8249 */ 8250 struct sd_lb_stats { 8251 struct sched_group *busiest; /* Busiest group in this sd */ 8252 struct sched_group *local; /* Local group in this sd */ 8253 unsigned long total_load; /* Total load of all groups in sd */ 8254 unsigned long total_capacity; /* Total capacity of all groups in sd */ 8255 unsigned long avg_load; /* Average load across all groups in sd */ 8256 unsigned int prefer_sibling; /* tasks should go to sibling first */ 8257 8258 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ 8259 struct sg_lb_stats local_stat; /* Statistics of the local group */ 8260 }; 8261 8262 static inline void init_sd_lb_stats(struct sd_lb_stats *sds) 8263 { 8264 /* 8265 * Skimp on the clearing to avoid duplicate work. We can avoid clearing 8266 * local_stat because update_sg_lb_stats() does a full clear/assignment. 8267 * We must however set busiest_stat::group_type and 8268 * busiest_stat::idle_cpus to the worst busiest group because 8269 * update_sd_pick_busiest() reads these before assignment. 8270 */ 8271 *sds = (struct sd_lb_stats){ 8272 .busiest = NULL, 8273 .local = NULL, 8274 .total_load = 0UL, 8275 .total_capacity = 0UL, 8276 .busiest_stat = { 8277 .idle_cpus = UINT_MAX, 8278 .group_type = group_has_spare, 8279 }, 8280 }; 8281 } 8282 8283 static unsigned long scale_rt_capacity(int cpu) 8284 { 8285 struct rq *rq = cpu_rq(cpu); 8286 unsigned long max = arch_scale_cpu_capacity(cpu); 8287 unsigned long used, free; 8288 unsigned long irq; 8289 8290 irq = cpu_util_irq(rq); 8291 8292 if (unlikely(irq >= max)) 8293 return 1; 8294 8295 /* 8296 * avg_rt.util_avg and avg_dl.util_avg track binary signals 8297 * (running and not running) with weights 0 and 1024 respectively. 8298 * avg_thermal.load_avg tracks thermal pressure and the weighted 8299 * average uses the actual delta max capacity(load). 
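	 *
	 * E.g. (illustrative): max = 1024 with rt util_avg = 100,
	 * dl util_avg = 50 and thermal pressure = 74 leaves free = 800,
	 * which is then scaled below by the share the IRQ time takes.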
8300 */ 8301 used = READ_ONCE(rq->avg_rt.util_avg); 8302 used += READ_ONCE(rq->avg_dl.util_avg); 8303 used += thermal_load_avg(rq); 8304 8305 if (unlikely(used >= max)) 8306 return 1; 8307 8308 free = max - used; 8309 8310 return scale_irq_capacity(free, irq, max); 8311 } 8312 8313 static void update_cpu_capacity(struct sched_domain *sd, int cpu) 8314 { 8315 unsigned long capacity = scale_rt_capacity(cpu); 8316 struct sched_group *sdg = sd->groups; 8317 8318 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu); 8319 8320 if (!capacity) 8321 capacity = 1; 8322 8323 cpu_rq(cpu)->cpu_capacity = capacity; 8324 trace_sched_cpu_capacity_tp(cpu_rq(cpu)); 8325 8326 sdg->sgc->capacity = capacity; 8327 sdg->sgc->min_capacity = capacity; 8328 sdg->sgc->max_capacity = capacity; 8329 } 8330 8331 void update_group_capacity(struct sched_domain *sd, int cpu) 8332 { 8333 struct sched_domain *child = sd->child; 8334 struct sched_group *group, *sdg = sd->groups; 8335 unsigned long capacity, min_capacity, max_capacity; 8336 unsigned long interval; 8337 8338 interval = msecs_to_jiffies(sd->balance_interval); 8339 interval = clamp(interval, 1UL, max_load_balance_interval); 8340 sdg->sgc->next_update = jiffies + interval; 8341 8342 if (!child) { 8343 update_cpu_capacity(sd, cpu); 8344 return; 8345 } 8346 8347 capacity = 0; 8348 min_capacity = ULONG_MAX; 8349 max_capacity = 0; 8350 8351 if (child->flags & SD_OVERLAP) { 8352 /* 8353 * SD_OVERLAP domains cannot assume that child groups 8354 * span the current group. 8355 */ 8356 8357 for_each_cpu(cpu, sched_group_span(sdg)) { 8358 unsigned long cpu_cap = capacity_of(cpu); 8359 8360 capacity += cpu_cap; 8361 min_capacity = min(cpu_cap, min_capacity); 8362 max_capacity = max(cpu_cap, max_capacity); 8363 } 8364 } else { 8365 /* 8366 * !SD_OVERLAP domains can assume that child groups 8367 * span the current group. 8368 */ 8369 8370 group = child->groups; 8371 do { 8372 struct sched_group_capacity *sgc = group->sgc; 8373 8374 capacity += sgc->capacity; 8375 min_capacity = min(sgc->min_capacity, min_capacity); 8376 max_capacity = max(sgc->max_capacity, max_capacity); 8377 group = group->next; 8378 } while (group != child->groups); 8379 } 8380 8381 sdg->sgc->capacity = capacity; 8382 sdg->sgc->min_capacity = min_capacity; 8383 sdg->sgc->max_capacity = max_capacity; 8384 } 8385 8386 /* 8387 * Check whether the capacity of the rq has been noticeably reduced by side 8388 * activity. The imbalance_pct is used for the threshold. 8389 * Return true is the capacity is reduced 8390 */ 8391 static inline int 8392 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) 8393 { 8394 return ((rq->cpu_capacity * sd->imbalance_pct) < 8395 (rq->cpu_capacity_orig * 100)); 8396 } 8397 8398 /* 8399 * Check whether a rq has a misfit task and if it looks like we can actually 8400 * help that task: we can migrate the task to a CPU of higher capacity, or 8401 * the task's current CPU is heavily pressured. 8402 */ 8403 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd) 8404 { 8405 return rq->misfit_task_load && 8406 (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || 8407 check_cpu_capacity(rq, sd)); 8408 } 8409 8410 /* 8411 * Group imbalance indicates (and tries to solve) the problem where balancing 8412 * groups is inadequate due to ->cpus_ptr constraints. 8413 * 8414 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a 8415 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. 
 * Something like:
 *
 *	{ 0 1 2 3 } { 4 5 6 7 }
 *	        *     * * *
 *
 * If we were to balance group-wise we'd place two tasks in the first group and
 * two tasks in the second group. Clearly this is undesired as it will overload
 * cpu 3 and leave one of the CPUs in the second group unused.
 *
 * The current solution to this issue is detecting the skew in the first group
 * by noticing the lower domain failed to reach balance and had difficulty
 * moving tasks due to affinity constraints.
 *
 * When this is detected, the group becomes a candidate for busiest; see
 * update_sd_pick_busiest(). calculate_imbalance() and find_busiest_group()
 * then avoid some of the usual balance conditions to allow it to create an
 * effective group imbalance.
 *
 * This is a somewhat tricky proposition since the next run might not find the
 * group imbalance and decide the groups need to be balanced again. A most
 * subtle and fragile situation.
 */

static inline int sg_imbalanced(struct sched_group *group)
{
	return group->sgc->imbalance;
}

/*
 * group_has_capacity returns true if the group has spare capacity that could
 * be used by some tasks.
 * We consider that a group has spare capacity if the number of tasks is
 * smaller than the number of CPUs or if the utilization is lower than the
 * available capacity for CFS tasks.
 * For the latter, we use a threshold to stabilize the state, to take into
 * account the variance of the tasks' load and to return true if the available
 * capacity is meaningful for the load balancer.
 * As an example, an available capacity of 1% can show up but brings no
 * benefit for the load balance.
 */
static inline bool
group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
	if (sgs->sum_nr_running < sgs->group_weight)
		return true;

	if ((sgs->group_capacity * imbalance_pct) <
			(sgs->group_runnable * 100))
		return false;

	if ((sgs->group_capacity * 100) >
			(sgs->group_util * imbalance_pct))
		return true;

	return false;
}

/*
 * group_is_overloaded returns true if the group has more tasks than it can
 * handle.
 * group_is_overloaded is not equal to !group_has_capacity because a group
 * with exactly the right number of tasks has no spare capacity left but is
 * not overloaded, so both group_has_capacity and group_is_overloaded return
 * false.
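 *
 * E.g. (illustrative, imbalance_pct = 117): a group with group_capacity =
 * 1024, more tasks than CPUs and group_util = 900 is overloaded, since
 * 1024 * 100 < 900 * 117.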
8480 */ 8481 static inline bool 8482 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs) 8483 { 8484 if (sgs->sum_nr_running <= sgs->group_weight) 8485 return false; 8486 8487 if ((sgs->group_capacity * 100) < 8488 (sgs->group_util * imbalance_pct)) 8489 return true; 8490 8491 if ((sgs->group_capacity * imbalance_pct) < 8492 (sgs->group_runnable * 100)) 8493 return true; 8494 8495 return false; 8496 } 8497 8498 static inline enum 8499 group_type group_classify(unsigned int imbalance_pct, 8500 struct sched_group *group, 8501 struct sg_lb_stats *sgs) 8502 { 8503 if (group_is_overloaded(imbalance_pct, sgs)) 8504 return group_overloaded; 8505 8506 if (sg_imbalanced(group)) 8507 return group_imbalanced; 8508 8509 if (sgs->group_asym_packing) 8510 return group_asym_packing; 8511 8512 if (sgs->group_misfit_task_load) 8513 return group_misfit_task; 8514 8515 if (!group_has_capacity(imbalance_pct, sgs)) 8516 return group_fully_busy; 8517 8518 return group_has_spare; 8519 } 8520 8521 /** 8522 * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks 8523 * @dst_cpu: Destination CPU of the load balancing 8524 * @sds: Load-balancing data with statistics of the local group 8525 * @sgs: Load-balancing statistics of the candidate busiest group 8526 * @sg: The candidate busiest group 8527 * 8528 * Check the state of the SMT siblings of both @sds::local and @sg and decide 8529 * if @dst_cpu can pull tasks. 8530 * 8531 * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of 8532 * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks 8533 * only if @dst_cpu has higher priority. 8534 * 8535 * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more 8536 * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority. 8537 * Bigger imbalances in the number of busy CPUs will be dealt with in 8538 * update_sd_pick_busiest(). 8539 * 8540 * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings 8541 * of @dst_cpu are idle and @sg has lower priority. 8542 */ 8543 static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds, 8544 struct sg_lb_stats *sgs, 8545 struct sched_group *sg) 8546 { 8547 #ifdef CONFIG_SCHED_SMT 8548 bool local_is_smt, sg_is_smt; 8549 int sg_busy_cpus; 8550 8551 local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY; 8552 sg_is_smt = sg->flags & SD_SHARE_CPUCAPACITY; 8553 8554 sg_busy_cpus = sgs->group_weight - sgs->idle_cpus; 8555 8556 if (!local_is_smt) { 8557 /* 8558 * If we are here, @dst_cpu is idle and does not have SMT 8559 * siblings. Pull tasks if candidate group has two or more 8560 * busy CPUs. 8561 */ 8562 if (sg_busy_cpus >= 2) /* implies sg_is_smt */ 8563 return true; 8564 8565 /* 8566 * @dst_cpu does not have SMT siblings. @sg may have SMT 8567 * siblings and only one is busy. In such case, @dst_cpu 8568 * can help if it has higher priority and is idle (i.e., 8569 * it has no running tasks). 8570 */ 8571 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu); 8572 } 8573 8574 /* @dst_cpu has SMT siblings. */ 8575 8576 if (sg_is_smt) { 8577 int local_busy_cpus = sds->local->group_weight - 8578 sds->local_stat.idle_cpus; 8579 int busy_cpus_delta = sg_busy_cpus - local_busy_cpus; 8580 8581 if (busy_cpus_delta == 1) 8582 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu); 8583 8584 return false; 8585 } 8586 8587 /* 8588 * @sg does not have SMT siblings. 
Ensure that @sds::local does not end 8589 * up with more than one busy SMT sibling and only pull tasks if there 8590 * are not busy CPUs (i.e., no CPU has running tasks). 8591 */ 8592 if (!sds->local_stat.sum_nr_running) 8593 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu); 8594 8595 return false; 8596 #else 8597 /* Always return false so that callers deal with non-SMT cases. */ 8598 return false; 8599 #endif 8600 } 8601 8602 static inline bool 8603 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, 8604 struct sched_group *group) 8605 { 8606 /* Only do SMT checks if either local or candidate have SMT siblings */ 8607 if ((sds->local->flags & SD_SHARE_CPUCAPACITY) || 8608 (group->flags & SD_SHARE_CPUCAPACITY)) 8609 return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group); 8610 8611 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu); 8612 } 8613 8614 /** 8615 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 8616 * @env: The load balancing environment. 8617 * @group: sched_group whose statistics are to be updated. 8618 * @sgs: variable to hold the statistics for this group. 8619 * @sg_status: Holds flag indicating the status of the sched_group 8620 */ 8621 static inline void update_sg_lb_stats(struct lb_env *env, 8622 struct sd_lb_stats *sds, 8623 struct sched_group *group, 8624 struct sg_lb_stats *sgs, 8625 int *sg_status) 8626 { 8627 int i, nr_running, local_group; 8628 8629 memset(sgs, 0, sizeof(*sgs)); 8630 8631 local_group = group == sds->local; 8632 8633 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 8634 struct rq *rq = cpu_rq(i); 8635 8636 sgs->group_load += cpu_load(rq); 8637 sgs->group_util += cpu_util_cfs(i); 8638 sgs->group_runnable += cpu_runnable(rq); 8639 sgs->sum_h_nr_running += rq->cfs.h_nr_running; 8640 8641 nr_running = rq->nr_running; 8642 sgs->sum_nr_running += nr_running; 8643 8644 if (nr_running > 1) 8645 *sg_status |= SG_OVERLOAD; 8646 8647 if (cpu_overutilized(i)) 8648 *sg_status |= SG_OVERUTILIZED; 8649 8650 #ifdef CONFIG_NUMA_BALANCING 8651 sgs->nr_numa_running += rq->nr_numa_running; 8652 sgs->nr_preferred_running += rq->nr_preferred_running; 8653 #endif 8654 /* 8655 * No need to call idle_cpu() if nr_running is not 0 8656 */ 8657 if (!nr_running && idle_cpu(i)) { 8658 sgs->idle_cpus++; 8659 /* Idle cpu can't have misfit task */ 8660 continue; 8661 } 8662 8663 if (local_group) 8664 continue; 8665 8666 /* Check for a misfit task on the cpu */ 8667 if (env->sd->flags & SD_ASYM_CPUCAPACITY && 8668 sgs->group_misfit_task_load < rq->misfit_task_load) { 8669 sgs->group_misfit_task_load = rq->misfit_task_load; 8670 *sg_status |= SG_OVERLOAD; 8671 } 8672 } 8673 8674 sgs->group_capacity = group->sgc->capacity; 8675 8676 sgs->group_weight = group->group_weight; 8677 8678 /* Check if dst CPU is idle and preferred to this group */ 8679 if (!local_group && env->sd->flags & SD_ASYM_PACKING && 8680 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && 8681 sched_asym(env, sds, sgs, group)) { 8682 sgs->group_asym_packing = 1; 8683 } 8684 8685 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); 8686 8687 /* Computing avg_load makes sense only when group is overloaded */ 8688 if (sgs->group_type == group_overloaded) 8689 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / 8690 sgs->group_capacity; 8691 } 8692 8693 /** 8694 * update_sd_pick_busiest - return 1 on busiest group 8695 * @env: The load balancing environment. 
 * @sds: sched_domain statistics
 * @sg: sched_group candidate to be checked for being the busiest
 * @sgs: sched_group statistics
 *
 * Determine if @sg is a busier group than the previously selected
 * busiest group.
 *
 * Return: %true if @sg is a busier group than the previously selected
 * busiest group. %false otherwise.
 */
static bool update_sd_pick_busiest(struct lb_env *env,
				   struct sd_lb_stats *sds,
				   struct sched_group *sg,
				   struct sg_lb_stats *sgs)
{
	struct sg_lb_stats *busiest = &sds->busiest_stat;

	/* Make sure that there is at least one task to pull */
	if (!sgs->sum_h_nr_running)
		return false;

	/*
	 * Don't try to pull misfit tasks we can't help.
	 * We can use max_capacity here as reduction in capacity on some
	 * CPUs in the group should either be possible to resolve
	 * internally or be covered by avg_load imbalance (eventually).
	 */
	if (sgs->group_type == group_misfit_task &&
	    (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
	     sds->local_stat.group_type != group_has_spare))
		return false;

	if (sgs->group_type > busiest->group_type)
		return true;

	if (sgs->group_type < busiest->group_type)
		return false;

	/*
	 * The candidate and the current busiest group are the same type of
	 * group. Let's check which one is busier, according to that type.
	 */

	switch (sgs->group_type) {
	case group_overloaded:
		/* Select the overloaded group with highest avg_load. */
		if (sgs->avg_load <= busiest->avg_load)
			return false;
		break;

	case group_imbalanced:
		/*
		 * Select the 1st imbalanced group as we don't have any way to
		 * choose one more than another.
		 */
		return false;

	case group_asym_packing:
		/* Prefer to move work away from the lowest priority CPU */
		if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
			return false;
		break;

	case group_misfit_task:
		/*
		 * If we have more than one misfit sg, go with the biggest
		 * misfit load.
		 */
		if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
			return false;
		break;

	case group_fully_busy:
		/*
		 * Select the fully busy group with highest avg_load. In
		 * theory, there is no need to pull tasks from such a group
		 * because tasks have all the compute capacity they need,
		 * but we can still improve the overall throughput by reducing
		 * contention when accessing shared HW resources.
		 *
		 * XXX for now avg_load is not computed and always 0 so we
		 * select the 1st one.
		 */
		if (sgs->avg_load <= busiest->avg_load)
			return false;
		break;

	case group_has_spare:
		/*
		 * Select the non-overloaded group with the lowest number of
		 * idle CPUs and the highest number of running tasks. We could
		 * also compare the spare capacity, which is more stable, but
		 * a group can end up with less spare capacity yet more idle
		 * CPUs, which means less opportunity to pull tasks.
		 */
		if (sgs->idle_cpus > busiest->idle_cpus)
			return false;
		else if ((sgs->idle_cpus == busiest->idle_cpus) &&
			 (sgs->sum_nr_running <= busiest->sum_nr_running))
			return false;

		break;
	}

	/*
	 * Candidate sg has no more than one task per CPU and has higher
	 * per-CPU capacity.
Migrating tasks to less capable CPUs may harm 8803 * throughput. Maximize throughput, power/energy consequences are not 8804 * considered. 8805 */ 8806 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && 8807 (sgs->group_type <= group_fully_busy) && 8808 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu)))) 8809 return false; 8810 8811 return true; 8812 } 8813 8814 #ifdef CONFIG_NUMA_BALANCING 8815 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 8816 { 8817 if (sgs->sum_h_nr_running > sgs->nr_numa_running) 8818 return regular; 8819 if (sgs->sum_h_nr_running > sgs->nr_preferred_running) 8820 return remote; 8821 return all; 8822 } 8823 8824 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 8825 { 8826 if (rq->nr_running > rq->nr_numa_running) 8827 return regular; 8828 if (rq->nr_running > rq->nr_preferred_running) 8829 return remote; 8830 return all; 8831 } 8832 #else 8833 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 8834 { 8835 return all; 8836 } 8837 8838 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 8839 { 8840 return regular; 8841 } 8842 #endif /* CONFIG_NUMA_BALANCING */ 8843 8844 8845 struct sg_lb_stats; 8846 8847 /* 8848 * task_running_on_cpu - return 1 if @p is running on @cpu. 8849 */ 8850 8851 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p) 8852 { 8853 /* Task has no contribution or is new */ 8854 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 8855 return 0; 8856 8857 if (task_on_rq_queued(p)) 8858 return 1; 8859 8860 return 0; 8861 } 8862 8863 /** 8864 * idle_cpu_without - would a given CPU be idle without p ? 8865 * @cpu: the processor on which idleness is tested. 8866 * @p: task which should be ignored. 8867 * 8868 * Return: 1 if the CPU would be idle. 0 otherwise. 8869 */ 8870 static int idle_cpu_without(int cpu, struct task_struct *p) 8871 { 8872 struct rq *rq = cpu_rq(cpu); 8873 8874 if (rq->curr != rq->idle && rq->curr != p) 8875 return 0; 8876 8877 /* 8878 * rq->nr_running can't be used but an updated version without the 8879 * impact of p on cpu must be used instead. The updated nr_running 8880 * be computed and tested before calling idle_cpu_without(). 8881 */ 8882 8883 #ifdef CONFIG_SMP 8884 if (rq->ttwu_pending) 8885 return 0; 8886 #endif 8887 8888 return 1; 8889 } 8890 8891 /* 8892 * update_sg_wakeup_stats - Update sched_group's statistics for wakeup. 8893 * @sd: The sched_domain level to look for idlest group. 8894 * @group: sched_group whose statistics are to be updated. 8895 * @sgs: variable to hold the statistics for this group. 8896 * @p: The task for which we look for the idlest group/CPU. 
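 *
 * Unlike update_sg_lb_stats(), the load, utilization and runnable time
 * of @p are subtracted from each CPU's stats (the *_without() helpers),
 * so the group is evaluated as if @p had already left it.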
8897 */ 8898 static inline void update_sg_wakeup_stats(struct sched_domain *sd, 8899 struct sched_group *group, 8900 struct sg_lb_stats *sgs, 8901 struct task_struct *p) 8902 { 8903 int i, nr_running; 8904 8905 memset(sgs, 0, sizeof(*sgs)); 8906 8907 for_each_cpu(i, sched_group_span(group)) { 8908 struct rq *rq = cpu_rq(i); 8909 unsigned int local; 8910 8911 sgs->group_load += cpu_load_without(rq, p); 8912 sgs->group_util += cpu_util_without(i, p); 8913 sgs->group_runnable += cpu_runnable_without(rq, p); 8914 local = task_running_on_cpu(i, p); 8915 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; 8916 8917 nr_running = rq->nr_running - local; 8918 sgs->sum_nr_running += nr_running; 8919 8920 /* 8921 * No need to call idle_cpu_without() if nr_running is not 0 8922 */ 8923 if (!nr_running && idle_cpu_without(i, p)) 8924 sgs->idle_cpus++; 8925 8926 } 8927 8928 /* Check if task fits in the group */ 8929 if (sd->flags & SD_ASYM_CPUCAPACITY && 8930 !task_fits_capacity(p, group->sgc->max_capacity)) { 8931 sgs->group_misfit_task_load = 1; 8932 } 8933 8934 sgs->group_capacity = group->sgc->capacity; 8935 8936 sgs->group_weight = group->group_weight; 8937 8938 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); 8939 8940 /* 8941 * Computing avg_load makes sense only when group is fully busy or 8942 * overloaded 8943 */ 8944 if (sgs->group_type == group_fully_busy || 8945 sgs->group_type == group_overloaded) 8946 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / 8947 sgs->group_capacity; 8948 } 8949 8950 static bool update_pick_idlest(struct sched_group *idlest, 8951 struct sg_lb_stats *idlest_sgs, 8952 struct sched_group *group, 8953 struct sg_lb_stats *sgs) 8954 { 8955 if (sgs->group_type < idlest_sgs->group_type) 8956 return true; 8957 8958 if (sgs->group_type > idlest_sgs->group_type) 8959 return false; 8960 8961 /* 8962 * The candidate and the current idlest group are the same type of 8963 * group. Let check which one is the idlest according to the type. 8964 */ 8965 8966 switch (sgs->group_type) { 8967 case group_overloaded: 8968 case group_fully_busy: 8969 /* Select the group with lowest avg_load. */ 8970 if (idlest_sgs->avg_load <= sgs->avg_load) 8971 return false; 8972 break; 8973 8974 case group_imbalanced: 8975 case group_asym_packing: 8976 /* Those types are not used in the slow wakeup path */ 8977 return false; 8978 8979 case group_misfit_task: 8980 /* Select group with the highest max capacity */ 8981 if (idlest->sgc->max_capacity >= group->sgc->max_capacity) 8982 return false; 8983 break; 8984 8985 case group_has_spare: 8986 /* Select group with most idle CPUs */ 8987 if (idlest_sgs->idle_cpus > sgs->idle_cpus) 8988 return false; 8989 8990 /* Select group with lowest group_util */ 8991 if (idlest_sgs->idle_cpus == sgs->idle_cpus && 8992 idlest_sgs->group_util <= sgs->group_util) 8993 return false; 8994 8995 break; 8996 } 8997 8998 return true; 8999 } 9000 9001 /* 9002 * Allow a NUMA imbalance if busy CPUs is less than 25% of the domain. 9003 * This is an approximation as the number of running tasks may not be 9004 * related to the number of busy CPUs due to sched_setaffinity. 9005 */ 9006 static inline bool allow_numa_imbalance(int dst_running, int dst_weight) 9007 { 9008 return (dst_running < (dst_weight >> 2)); 9009 } 9010 9011 /* 9012 * find_idlest_group() finds and returns the least busy CPU group within the 9013 * domain. 9014 * 9015 * Assumes p is allowed on at least one CPU in sd. 
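 *
 * For NUMA domains the 25% rule above also applies: e.g. (illustrative)
 * on a 16-CPU node with 3 running tasks (3 < 16 >> 2), a wakee may be
 * kept local even when a remote group looks idler.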
9016 */ 9017 static struct sched_group * 9018 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) 9019 { 9020 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups; 9021 struct sg_lb_stats local_sgs, tmp_sgs; 9022 struct sg_lb_stats *sgs; 9023 unsigned long imbalance; 9024 struct sg_lb_stats idlest_sgs = { 9025 .avg_load = UINT_MAX, 9026 .group_type = group_overloaded, 9027 }; 9028 9029 do { 9030 int local_group; 9031 9032 /* Skip over this group if it has no CPUs allowed */ 9033 if (!cpumask_intersects(sched_group_span(group), 9034 p->cpus_ptr)) 9035 continue; 9036 9037 /* Skip over this group if no cookie matched */ 9038 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) 9039 continue; 9040 9041 local_group = cpumask_test_cpu(this_cpu, 9042 sched_group_span(group)); 9043 9044 if (local_group) { 9045 sgs = &local_sgs; 9046 local = group; 9047 } else { 9048 sgs = &tmp_sgs; 9049 } 9050 9051 update_sg_wakeup_stats(sd, group, sgs, p); 9052 9053 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { 9054 idlest = group; 9055 idlest_sgs = *sgs; 9056 } 9057 9058 } while (group = group->next, group != sd->groups); 9059 9060 9061 /* There is no idlest group to push tasks to */ 9062 if (!idlest) 9063 return NULL; 9064 9065 /* The local group has been skipped because of CPU affinity */ 9066 if (!local) 9067 return idlest; 9068 9069 /* 9070 * If the local group is idler than the selected idlest group 9071 * don't try and push the task. 9072 */ 9073 if (local_sgs.group_type < idlest_sgs.group_type) 9074 return NULL; 9075 9076 /* 9077 * If the local group is busier than the selected idlest group 9078 * try and push the task. 9079 */ 9080 if (local_sgs.group_type > idlest_sgs.group_type) 9081 return idlest; 9082 9083 switch (local_sgs.group_type) { 9084 case group_overloaded: 9085 case group_fully_busy: 9086 9087 /* Calculate allowed imbalance based on load */ 9088 imbalance = scale_load_down(NICE_0_LOAD) * 9089 (sd->imbalance_pct-100) / 100; 9090 9091 /* 9092 * When comparing groups across NUMA domains, it's possible for 9093 * the local domain to be very lightly loaded relative to the 9094 * remote domains but "imbalance" skews the comparison making 9095 * remote CPUs look much more favourable. When considering 9096 * cross-domain, add imbalance to the load on the remote node 9097 * and consider staying local. 9098 */ 9099 9100 if ((sd->flags & SD_NUMA) && 9101 ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load)) 9102 return NULL; 9103 9104 /* 9105 * If the local group is less loaded than the selected 9106 * idlest group don't try and push any tasks. 
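		 *
		 * E.g. (illustrative, imbalance_pct = 117): with NICE_0_LOAD
		 * scaled down to 1024, the slack computed above comes to
		 * 1024 * 17 / 100 = ~174 load units.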
9107 */ 9108 if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance)) 9109 return NULL; 9110 9111 if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load) 9112 return NULL; 9113 break; 9114 9115 case group_imbalanced: 9116 case group_asym_packing: 9117 /* Those type are not used in the slow wakeup path */ 9118 return NULL; 9119 9120 case group_misfit_task: 9121 /* Select group with the highest max capacity */ 9122 if (local->sgc->max_capacity >= idlest->sgc->max_capacity) 9123 return NULL; 9124 break; 9125 9126 case group_has_spare: 9127 if (sd->flags & SD_NUMA) { 9128 #ifdef CONFIG_NUMA_BALANCING 9129 int idlest_cpu; 9130 /* 9131 * If there is spare capacity at NUMA, try to select 9132 * the preferred node 9133 */ 9134 if (cpu_to_node(this_cpu) == p->numa_preferred_nid) 9135 return NULL; 9136 9137 idlest_cpu = cpumask_first(sched_group_span(idlest)); 9138 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid) 9139 return idlest; 9140 #endif 9141 /* 9142 * Otherwise, keep the task on this node to stay close 9143 * its wakeup source and improve locality. If there is 9144 * a real need of migration, periodic load balance will 9145 * take care of it. 9146 */ 9147 if (allow_numa_imbalance(local_sgs.sum_nr_running, sd->span_weight)) 9148 return NULL; 9149 } 9150 9151 /* 9152 * Select group with highest number of idle CPUs. We could also 9153 * compare the utilization which is more stable but it can end 9154 * up that the group has less spare capacity but finally more 9155 * idle CPUs which means more opportunity to run task. 9156 */ 9157 if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus) 9158 return NULL; 9159 break; 9160 } 9161 9162 return idlest; 9163 } 9164 9165 /** 9166 * update_sd_lb_stats - Update sched_domain's statistics for load balancing. 9167 * @env: The load balancing environment. 9168 * @sds: variable to hold the statistics for this sched_domain. 
9169 */ 9170 9171 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) 9172 { 9173 struct sched_domain *child = env->sd->child; 9174 struct sched_group *sg = env->sd->groups; 9175 struct sg_lb_stats *local = &sds->local_stat; 9176 struct sg_lb_stats tmp_sgs; 9177 int sg_status = 0; 9178 9179 do { 9180 struct sg_lb_stats *sgs = &tmp_sgs; 9181 int local_group; 9182 9183 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); 9184 if (local_group) { 9185 sds->local = sg; 9186 sgs = local; 9187 9188 if (env->idle != CPU_NEWLY_IDLE || 9189 time_after_eq(jiffies, sg->sgc->next_update)) 9190 update_group_capacity(env->sd, env->dst_cpu); 9191 } 9192 9193 update_sg_lb_stats(env, sds, sg, sgs, &sg_status); 9194 9195 if (local_group) 9196 goto next_group; 9197 9198 9199 if (update_sd_pick_busiest(env, sds, sg, sgs)) { 9200 sds->busiest = sg; 9201 sds->busiest_stat = *sgs; 9202 } 9203 9204 next_group: 9205 /* Now, start updating sd_lb_stats */ 9206 sds->total_load += sgs->group_load; 9207 sds->total_capacity += sgs->group_capacity; 9208 9209 sg = sg->next; 9210 } while (sg != env->sd->groups); 9211 9212 /* Tag domain that child domain prefers tasks go to siblings first */ 9213 sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING; 9214 9215 9216 if (env->sd->flags & SD_NUMA) 9217 env->fbq_type = fbq_classify_group(&sds->busiest_stat); 9218 9219 if (!env->sd->parent) { 9220 struct root_domain *rd = env->dst_rq->rd; 9221 9222 /* update overload indicator if we are at root domain */ 9223 WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD); 9224 9225 /* Update over-utilization (tipping point, U >= 0) indicator */ 9226 WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED); 9227 trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED); 9228 } else if (sg_status & SG_OVERUTILIZED) { 9229 struct root_domain *rd = env->dst_rq->rd; 9230 9231 WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED); 9232 trace_sched_overutilized_tp(rd, SG_OVERUTILIZED); 9233 } 9234 } 9235 9236 #define NUMA_IMBALANCE_MIN 2 9237 9238 static inline long adjust_numa_imbalance(int imbalance, 9239 int dst_running, int dst_weight) 9240 { 9241 if (!allow_numa_imbalance(dst_running, dst_weight)) 9242 return imbalance; 9243 9244 /* 9245 * Allow a small imbalance based on a simple pair of communicating 9246 * tasks that remain local when the destination is lightly loaded. 9247 */ 9248 if (imbalance <= NUMA_IMBALANCE_MIN) 9249 return 0; 9250 9251 return imbalance; 9252 } 9253 9254 /** 9255 * calculate_imbalance - Calculate the amount of imbalance present within the 9256 * groups of a given sched_domain during load balance. 9257 * @env: load balance environment 9258 * @sds: statistics of the sched_domain whose imbalance is to be calculated. 9259 */ 9260 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 9261 { 9262 struct sg_lb_stats *local, *busiest; 9263 9264 local = &sds->local_stat; 9265 busiest = &sds->busiest_stat; 9266 9267 if (busiest->group_type == group_misfit_task) { 9268 /* Set imbalance to allow misfit tasks to be balanced. */ 9269 env->migration_type = migrate_misfit; 9270 env->imbalance = 1; 9271 return; 9272 } 9273 9274 if (busiest->group_type == group_asym_packing) { 9275 /* 9276 * In case of asym capacity, we will try to migrate all load to 9277 * the preferred CPU. 
9278 */ 9279 env->migration_type = migrate_task; 9280 env->imbalance = busiest->sum_h_nr_running; 9281 return; 9282 } 9283 9284 if (busiest->group_type == group_imbalanced) { 9285 /* 9286 * In the group_imb case we cannot rely on group-wide averages 9287 * to ensure CPU-load equilibrium, try to move any task to fix 9288 * the imbalance. The next load balance will take care of 9289 * balancing back the system. 9290 */ 9291 env->migration_type = migrate_task; 9292 env->imbalance = 1; 9293 return; 9294 } 9295 9296 /* 9297 * Try to use spare capacity of local group without overloading it or 9298 * emptying busiest. 9299 */ 9300 if (local->group_type == group_has_spare) { 9301 if ((busiest->group_type > group_fully_busy) && 9302 !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) { 9303 /* 9304 * If busiest is overloaded, try to fill spare 9305 * capacity. This might end up creating spare capacity 9306 * in busiest or busiest still being overloaded but 9307 * there is no simple way to directly compute the 9308 * amount of load to migrate in order to balance the 9309 * system. 9310 */ 9311 env->migration_type = migrate_util; 9312 env->imbalance = max(local->group_capacity, local->group_util) - 9313 local->group_util; 9314 9315 /* 9316 * In some cases, the group's utilization is max or even 9317 * higher than capacity because of migrations but the 9318 * local CPU is (newly) idle. There is at least one 9319 * waiting task in this overloaded busiest group. Let's 9320 * try to pull it. 9321 */ 9322 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) { 9323 env->migration_type = migrate_task; 9324 env->imbalance = 1; 9325 } 9326 9327 return; 9328 } 9329 9330 if (busiest->group_weight == 1 || sds->prefer_sibling) { 9331 unsigned int nr_diff = busiest->sum_nr_running; 9332 /* 9333 * When prefer sibling, evenly spread running tasks on 9334 * groups. 9335 */ 9336 env->migration_type = migrate_task; 9337 lsub_positive(&nr_diff, local->sum_nr_running); 9338 env->imbalance = nr_diff >> 1; 9339 } else { 9340 9341 /* 9342 * If there is no overload, we just want to even the number of 9343 * idle cpus. 9344 */ 9345 env->migration_type = migrate_task; 9346 env->imbalance = max_t(long, 0, (local->idle_cpus - 9347 busiest->idle_cpus) >> 1); 9348 } 9349 9350 /* Consider allowing a small imbalance between NUMA groups */ 9351 if (env->sd->flags & SD_NUMA) { 9352 env->imbalance = adjust_numa_imbalance(env->imbalance, 9353 busiest->sum_nr_running, busiest->group_weight); 9354 } 9355 9356 return; 9357 } 9358 9359 /* 9360 * Local is fully busy but has to take more load to relieve the 9361 * busiest group 9362 */ 9363 if (local->group_type < group_overloaded) { 9364 /* 9365 * Local will become overloaded so the avg_load metrics are 9366 * finally needed. 9367 */ 9368 9369 local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) / 9370 local->group_capacity; 9371 9372 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) / 9373 sds->total_capacity; 9374 /* 9375 * If the local group is more loaded than the selected 9376 * busiest group don't try to pull any tasks. 9377 */ 9378 if (local->avg_load >= busiest->avg_load) { 9379 env->imbalance = 0; 9380 return; 9381 } 9382 } 9383 9384 /* 9385 * Both group are or will become overloaded and we're trying to get all 9386 * the CPUs to the average_load, so we don't want to push ourselves 9387 * above the average load, nor do we wish to reduce the max loaded CPU 9388 * below the average load. 
At the same time, we also don't want to 9389 * reduce the group load below the group capacity. Thus we look for 9390 * the minimum possible imbalance. 9391 */ 9392 env->migration_type = migrate_load; 9393 env->imbalance = min( 9394 (busiest->avg_load - sds->avg_load) * busiest->group_capacity, 9395 (sds->avg_load - local->avg_load) * local->group_capacity 9396 ) / SCHED_CAPACITY_SCALE; 9397 } 9398 9399 /******* find_busiest_group() helpers end here *********************/ 9400 9401 /* 9402 * Decision matrix according to the local and busiest group type: 9403 * 9404 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded 9405 * has_spare nr_idle balanced N/A N/A balanced balanced 9406 * fully_busy nr_idle nr_idle N/A N/A balanced balanced 9407 * misfit_task force N/A N/A N/A force force 9408 * asym_packing force force N/A N/A force force 9409 * imbalanced force force N/A N/A force force 9410 * overloaded force force N/A N/A force avg_load 9411 * 9412 * N/A : Not Applicable because already filtered while updating 9413 * statistics. 9414 * balanced : The system is balanced for these 2 groups. 9415 * force : Calculate the imbalance as load migration is probably needed. 9416 * avg_load : Only if imbalance is significant enough. 9417 * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite 9418 * different in groups. 9419 */ 9420 9421 /** 9422 * find_busiest_group - Returns the busiest group within the sched_domain 9423 * if there is an imbalance. 9424 * 9425 * Also calculates the amount of runnable load which should be moved 9426 * to restore balance. 9427 * 9428 * @env: The load balancing environment. 9429 * 9430 * Return: - The busiest group if imbalance exists. 9431 */ 9432 static struct sched_group *find_busiest_group(struct lb_env *env) 9433 { 9434 struct sg_lb_stats *local, *busiest; 9435 struct sd_lb_stats sds; 9436 9437 init_sd_lb_stats(&sds); 9438 9439 /* 9440 * Compute the various statistics relevant for load balancing at 9441 * this level. 9442 */ 9443 update_sd_lb_stats(env, &sds); 9444 9445 if (sched_energy_enabled()) { 9446 struct root_domain *rd = env->dst_rq->rd; 9447 9448 if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) 9449 goto out_balanced; 9450 } 9451 9452 local = &sds.local_stat; 9453 busiest = &sds.busiest_stat; 9454 9455 /* There is no busy sibling group to pull tasks from */ 9456 if (!sds.busiest) 9457 goto out_balanced; 9458 9459 /* Misfit tasks should be dealt with regardless of the avg load */ 9460 if (busiest->group_type == group_misfit_task) 9461 goto force_balance; 9462 9463 /* ASYM feature bypasses nice load balance check */ 9464 if (busiest->group_type == group_asym_packing) 9465 goto force_balance; 9466 9467 /* 9468 * If the busiest group is imbalanced the below checks don't 9469 * work because they assume all things are equal, which typically 9470 * isn't true due to cpus_ptr constraints and the like. 9471 */ 9472 if (busiest->group_type == group_imbalanced) 9473 goto force_balance; 9474 9475 /* 9476 * If the local group is busier than the selected busiest group 9477 * don't try and pull any tasks. 9478 */ 9479 if (local->group_type > busiest->group_type) 9480 goto out_balanced; 9481 9482 /* 9483 * When groups are overloaded, use the avg_load to ensure fairness 9484 * between tasks. 9485 */ 9486 if (local->group_type == group_overloaded) { 9487 /* 9488 * If the local group is more loaded than the selected 9489 * busiest group don't try to pull any tasks. 
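		 * E.g. (illustrative): local avg_load 1400 vs busiest 1300
		 * bails out right here; the imbalance_pct check further
		 * below also bails out local 1300 vs busiest 1400, since
		 * 100 * 1400 <= 117 * 1300.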
9490 */ 9491 if (local->avg_load >= busiest->avg_load) 9492 goto out_balanced; 9493 9494 /* XXX broken for overlapping NUMA groups */ 9495 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / 9496 sds.total_capacity; 9497 9498 /* 9499 * Don't pull any tasks if this group is already above the 9500 * domain average load. 9501 */ 9502 if (local->avg_load >= sds.avg_load) 9503 goto out_balanced; 9504 9505 /* 9506 * If the busiest group is more loaded, use imbalance_pct to be 9507 * conservative. 9508 */ 9509 if (100 * busiest->avg_load <= 9510 env->sd->imbalance_pct * local->avg_load) 9511 goto out_balanced; 9512 } 9513 9514 /* Try to move all excess tasks to child's sibling domain */ 9515 if (sds.prefer_sibling && local->group_type == group_has_spare && 9516 busiest->sum_nr_running > local->sum_nr_running + 1) 9517 goto force_balance; 9518 9519 if (busiest->group_type != group_overloaded) { 9520 if (env->idle == CPU_NOT_IDLE) 9521 /* 9522 * If the busiest group is not overloaded (and as a 9523 * result the local one too) but this CPU is already 9524 * busy, let another idle CPU try to pull task. 9525 */ 9526 goto out_balanced; 9527 9528 if (busiest->group_weight > 1 && 9529 local->idle_cpus <= (busiest->idle_cpus + 1)) 9530 /* 9531 * If the busiest group is not overloaded 9532 * and there is no imbalance between this and busiest 9533 * group wrt idle CPUs, it is balanced. The imbalance 9534 * becomes significant if the diff is greater than 1 9535 * otherwise we might end up to just move the imbalance 9536 * on another group. Of course this applies only if 9537 * there is more than 1 CPU per group. 9538 */ 9539 goto out_balanced; 9540 9541 if (busiest->sum_h_nr_running == 1) 9542 /* 9543 * busiest doesn't have any tasks waiting to run 9544 */ 9545 goto out_balanced; 9546 } 9547 9548 force_balance: 9549 /* Looks like there is an imbalance. Compute it */ 9550 calculate_imbalance(env, &sds); 9551 return env->imbalance ? sds.busiest : NULL; 9552 9553 out_balanced: 9554 env->imbalance = 0; 9555 return NULL; 9556 } 9557 9558 /* 9559 * find_busiest_queue - find the busiest runqueue among the CPUs in the group. 9560 */ 9561 static struct rq *find_busiest_queue(struct lb_env *env, 9562 struct sched_group *group) 9563 { 9564 struct rq *busiest = NULL, *rq; 9565 unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1; 9566 unsigned int busiest_nr = 0; 9567 int i; 9568 9569 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 9570 unsigned long capacity, load, util; 9571 unsigned int nr_running; 9572 enum fbq_type rt; 9573 9574 rq = cpu_rq(i); 9575 rt = fbq_classify_rq(rq); 9576 9577 /* 9578 * We classify groups/runqueues into three groups: 9579 * - regular: there are !numa tasks 9580 * - remote: there are numa tasks that run on the 'wrong' node 9581 * - all: there is no distinction 9582 * 9583 * In order to avoid migrating ideally placed numa tasks, 9584 * ignore those when there's better options. 9585 * 9586 * If we ignore the actual busiest queue to migrate another 9587 * task, the next balance pass can still reduce the busiest 9588 * queue by moving tasks around inside the node. 9589 * 9590 * If we cannot move enough load due to this classification 9591 * the next pass will adjust the group classification and 9592 * allow migration of more tasks. 9593 * 9594 * Both cases only affect the total convergence complexity. 
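		 *
		 * E.g. (illustrative): with env->fbq_type == remote, an rq
		 * classified 'all' is skipped (rt > env->fbq_type below),
		 * while 'regular' and 'remote' rqs stay candidates.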
9595 */
9596 if (rt > env->fbq_type)
9597 continue;
9598
9599 nr_running = rq->cfs.h_nr_running;
9600 if (!nr_running)
9601 continue;
9602
9603 capacity = capacity_of(i);
9604
9605 /*
9606 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
9607 * eventually lead to active balancing from high to low capacity.
9608 * Higher per-CPU capacity is considered better than balancing
9609 * average load.
9610 */
9611 if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
9612 !capacity_greater(capacity_of(env->dst_cpu), capacity) &&
9613 nr_running == 1)
9614 continue;
9615
9616 /* Make sure we only pull tasks from a CPU of lower priority */
9617 if ((env->sd->flags & SD_ASYM_PACKING) &&
9618 sched_asym_prefer(i, env->dst_cpu) &&
9619 nr_running == 1)
9620 continue;
9621
9622 switch (env->migration_type) {
9623 case migrate_load:
9624 /*
9625 * When comparing with load imbalance, use cpu_load()
9626 * which is not scaled with the CPU capacity.
9627 */
9628 load = cpu_load(rq);
9629
9630 if (nr_running == 1 && load > env->imbalance &&
9631 !check_cpu_capacity(rq, env->sd))
9632 break;
9633
9634 /*
9635 * For the load comparisons with the other CPUs,
9636 * consider the cpu_load() scaled with the CPU
9637 * capacity, so that the load can be moved away
9638 * from the CPU that is potentially running at a
9639 * lower capacity.
9640 *
9641 * Thus we're looking for max(load_i / capacity_i);
9642 * crosswise multiplication to rid ourselves of the
9643 * division works out to:
9644 * load_i * capacity_j > load_j * capacity_i;
9645 * where j is our previous maximum.
9646 */
9647 if (load * busiest_capacity > busiest_load * capacity) {
9648 busiest_load = load;
9649 busiest_capacity = capacity;
9650 busiest = rq;
9651 }
9652 break;
9653
9654 case migrate_util:
9655 util = cpu_util_cfs(i);
9656
9657 /*
9658 * Don't try to pull utilization from a CPU with one
9659 * running task. Whatever its utilization, we will fail
9660 * to detach the task.
9661 */
9662 if (nr_running <= 1)
9663 continue;
9664
9665 if (busiest_util < util) {
9666 busiest_util = util;
9667 busiest = rq;
9668 }
9669 break;
9670
9671 case migrate_task:
9672 if (busiest_nr < nr_running) {
9673 busiest_nr = nr_running;
9674 busiest = rq;
9675 }
9676 break;
9677
9678 case migrate_misfit:
9679 /*
9680 * For ASYM_CPUCAPACITY domains with misfit tasks we
9681 * simply seek the "biggest" misfit task.
9682 */
9683 if (rq->misfit_task_load > busiest_load) {
9684 busiest_load = rq->misfit_task_load;
9685 busiest = rq;
9686 }
9687
9688 break;
9689
9690 }
9691 }
9692
9693 return busiest;
9694 }
9695
9696 /*
9697 * Max backoff if we encounter pinned tasks. Pretty arbitrary value; the
9698 * exact number doesn't matter, so long as it is large enough.
9699 */
9700 #define MAX_PINNED_INTERVAL 512
9701
9702 static inline bool
9703 asym_active_balance(struct lb_env *env)
9704 {
9705 /*
9706 * ASYM_PACKING needs to force migrate tasks from busy but
9707 * lower priority CPUs in order to pack all tasks in the
9708 * highest priority CPUs.
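*
* For example, with the default arch_asym_cpu_priority() (lower CPU
* number means higher priority), an idle CPU0 is allowed to actively
* pull the sole running task off CPU1, but not the other way around.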
9709 */
9710 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
9711 sched_asym_prefer(env->dst_cpu, env->src_cpu);
9712 }
9713
9714 static inline bool
9715 imbalanced_active_balance(struct lb_env *env)
9716 {
9717 struct sched_domain *sd = env->sd;
9718
9719 /*
9720 * The imbalanced case covers pinned tasks preventing a fair distribution
9721 * of load across the system, as well as an uneven distribution of the
9722 * threads on a system with spare capacity.
9723 */
9724 if ((env->migration_type == migrate_task) &&
9725 (sd->nr_balance_failed > sd->cache_nice_tries+2))
9726 return 1;
9727
9728 return 0;
9729 }
9730
9731 static int need_active_balance(struct lb_env *env)
9732 {
9733 struct sched_domain *sd = env->sd;
9734
9735 if (asym_active_balance(env))
9736 return 1;
9737
9738 if (imbalanced_active_balance(env))
9739 return 1;
9740
9741 /*
9742 * The dst_cpu is idle and the src_cpu has only 1 CFS task.
9743 * It's worth migrating the task if the src_cpu's capacity is reduced
9744 * because of other sched_class tasks or IRQs, provided more capacity
9745 * stays available on dst_cpu.
9746 */
9747 if ((env->idle != CPU_NOT_IDLE) &&
9748 (env->src_rq->cfs.h_nr_running == 1)) {
9749 if ((check_cpu_capacity(env->src_rq, sd)) &&
9750 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
9751 return 1;
9752 }
9753
9754 if (env->migration_type == migrate_misfit)
9755 return 1;
9756
9757 return 0;
9758 }
9759
9760 static int active_load_balance_cpu_stop(void *data);
9761
9762 static int should_we_balance(struct lb_env *env)
9763 {
9764 struct sched_group *sg = env->sd->groups;
9765 int cpu;
9766
9767 /*
9768 * Ensure the balancing environment is consistent; inconsistency can
9769 * happen when the softirq triggers 'during' hotplug.
9770 */
9771 if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
9772 return 0;
9773
9774 /*
9775 * In the newly idle case, we will allow all the CPUs
9776 * to do the newly idle load balance.
9777 */
9778 if (env->idle == CPU_NEWLY_IDLE)
9779 return 1;
9780
9781 /* Try to find the first idle CPU */
9782 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
9783 if (!idle_cpu(cpu))
9784 continue;
9785
9786 /* Are we the first idle CPU? */
9787 return cpu == env->dst_cpu;
9788 }
9789
9790 /* Are we the first CPU of this group? */
9791 return group_balance_cpu(sg) == env->dst_cpu;
9792 }
9793
9794 /*
9795 * Check this_cpu to ensure it is balanced within domain. Attempt to move
9796 * tasks if there is an imbalance.
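*
* Returns the number of tasks moved; *continue_balancing is cleared
* when should_we_balance() decides this CPU should leave balancing at
* this domain level to another CPU.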
9797 */
9798 static int load_balance(int this_cpu, struct rq *this_rq,
9799 struct sched_domain *sd, enum cpu_idle_type idle,
9800 int *continue_balancing)
9801 {
9802 int ld_moved, cur_ld_moved, active_balance = 0;
9803 struct sched_domain *sd_parent = sd->parent;
9804 struct sched_group *group;
9805 struct rq *busiest;
9806 struct rq_flags rf;
9807 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
9808
9809 struct lb_env env = {
9810 .sd = sd,
9811 .dst_cpu = this_cpu,
9812 .dst_rq = this_rq,
9813 .dst_grpmask = sched_group_span(sd->groups),
9814 .idle = idle,
9815 .loop_break = sched_nr_migrate_break,
9816 .cpus = cpus,
9817 .fbq_type = all,
9818 .tasks = LIST_HEAD_INIT(env.tasks),
9819 };
9820
9821 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
9822
9823 schedstat_inc(sd->lb_count[idle]);
9824
9825 redo:
9826 if (!should_we_balance(&env)) {
9827 *continue_balancing = 0;
9828 goto out_balanced;
9829 }
9830
9831 group = find_busiest_group(&env);
9832 if (!group) {
9833 schedstat_inc(sd->lb_nobusyg[idle]);
9834 goto out_balanced;
9835 }
9836
9837 busiest = find_busiest_queue(&env, group);
9838 if (!busiest) {
9839 schedstat_inc(sd->lb_nobusyq[idle]);
9840 goto out_balanced;
9841 }
9842
9843 BUG_ON(busiest == env.dst_rq);
9844
9845 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
9846
9847 env.src_cpu = busiest->cpu;
9848 env.src_rq = busiest;
9849
9850 ld_moved = 0;
9851 /* Clear this flag as soon as we find a pullable task */
9852 env.flags |= LBF_ALL_PINNED;
9853 if (busiest->nr_running > 1) {
9854 /*
9855 * Attempt to move tasks. If find_busiest_group has found
9856 * an imbalance but busiest->nr_running <= 1, the group is
9857 * still unbalanced. ld_moved simply stays zero, so it is
9858 * correctly treated as an imbalance.
9859 */
9860 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
9861
9862 more_balance:
9863 rq_lock_irqsave(busiest, &rf);
9864 update_rq_clock(busiest);
9865
9866 /*
9867 * cur_ld_moved - load moved in current iteration
9868 * ld_moved - cumulative load moved across iterations
9869 */
9870 cur_ld_moved = detach_tasks(&env);
9871
9872 /*
9873 * We've detached some tasks from busiest_rq. Every
9874 * task is marked "TASK_ON_RQ_MIGRATING", so we can safely
9875 * unlock busiest->lock and be sure that nobody can
9876 * manipulate the tasks in parallel.
9877 * See task_rq_lock() family for the details.
9878 */
9879
9880 rq_unlock(busiest, &rf);
9881
9882 if (cur_ld_moved) {
9883 attach_tasks(&env);
9884 ld_moved += cur_ld_moved;
9885 }
9886
9887 local_irq_restore(rf.flags);
9888
9889 if (env.flags & LBF_NEED_BREAK) {
9890 env.flags &= ~LBF_NEED_BREAK;
9891 goto more_balance;
9892 }
9893
9894 /*
9895 * Revisit (affine) tasks on src_cpu that couldn't be moved to
9896 * us and move them to an alternate dst_cpu in our sched_group
9897 * where they can run. The upper limit on how many times we
9898 * iterate on the same src_cpu depends on the number of CPUs in
9899 * our sched_group.
9900 *
9901 * This changes load balance semantics a bit on who can move
9902 * load to a given_cpu. In addition to the given_cpu itself
9903 * (or an ilb_cpu acting on its behalf where given_cpu is
9904 * nohz-idle), we now have balance_cpu in a position to move
9905 * load to given_cpu. In rare situations, this may cause
9906 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
9907 * _independently_ and at the _same_ time to move some load to
9908 * given_cpu) causing excess load to be moved to given_cpu.
This, however, should rarely happen in practice, and
9910 * subsequent load balance cycles should correct any
9911 * excess load moved.
9912 */
9913 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
9914
9915 /* Prevent dst_cpu from being re-selected via env's CPUs */
9916 __cpumask_clear_cpu(env.dst_cpu, env.cpus);
9917
9918 env.dst_rq = cpu_rq(env.new_dst_cpu);
9919 env.dst_cpu = env.new_dst_cpu;
9920 env.flags &= ~LBF_DST_PINNED;
9921 env.loop = 0;
9922 env.loop_break = sched_nr_migrate_break;
9923
9924 /*
9925 * Go back to "more_balance" rather than "redo" since we
9926 * need to continue with the same src_cpu.
9927 */
9928 goto more_balance;
9929 }
9930
9931 /*
9932 * We failed to reach balance because of affinity.
9933 */
9934 if (sd_parent) {
9935 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
9936
9937 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
9938 *group_imbalance = 1;
9939 }
9940
9941 /* All tasks on this runqueue were pinned by CPU affinity */
9942 if (unlikely(env.flags & LBF_ALL_PINNED)) {
9943 __cpumask_clear_cpu(cpu_of(busiest), cpus);
9944 /*
9945 * Attempting to continue load balancing at the current
9946 * sched_domain level only makes sense if there are
9947 * active CPUs remaining as possible busiest CPUs to
9948 * pull load from, which are not contained within the
9949 * destination group that is receiving any migrated
9950 * load.
9951 */
9952 if (!cpumask_subset(cpus, env.dst_grpmask)) {
9953 env.loop = 0;
9954 env.loop_break = sched_nr_migrate_break;
9955 goto redo;
9956 }
9957 goto out_all_pinned;
9958 }
9959 }
9960
9961 if (!ld_moved) {
9962 schedstat_inc(sd->lb_failed[idle]);
9963 /*
9964 * Increment the failure counter only on periodic balance.
9965 * We do not want newidle balance, which can be very
9966 * frequent, to pollute the failure counter, causing
9967 * excessive cache_hot migrations and active balances.
9968 */
9969 if (idle != CPU_NEWLY_IDLE)
9970 sd->nr_balance_failed++;
9971
9972 if (need_active_balance(&env)) {
9973 unsigned long flags;
9974
9975 raw_spin_rq_lock_irqsave(busiest, flags);
9976
9977 /*
9978 * Don't kick active_load_balance_cpu_stop if the
9979 * curr task on the busiest CPU can't be
9980 * moved to this_cpu:
9981 */
9982 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
9983 raw_spin_rq_unlock_irqrestore(busiest, flags);
9984 goto out_one_pinned;
9985 }
9986
9987 /* Record that we found at least one task that could run on this_cpu */
9988 env.flags &= ~LBF_ALL_PINNED;
9989
9990 /*
9991 * ->active_balance synchronizes accesses to
9992 * ->active_balance_work. Once set, it's cleared
9993 * only after active load balance is finished.
9994 */
9995 if (!busiest->active_balance) {
9996 busiest->active_balance = 1;
9997 busiest->push_cpu = this_cpu;
9998 active_balance = 1;
9999 }
10000 raw_spin_rq_unlock_irqrestore(busiest, flags);
10001
10002 if (active_balance) {
10003 stop_one_cpu_nowait(cpu_of(busiest),
10004 active_load_balance_cpu_stop, busiest,
10005 &busiest->active_balance_work);
10006 }
10007 }
10008 } else {
10009 sd->nr_balance_failed = 0;
10010 }
10011
10012 if (likely(!active_balance) || need_active_balance(&env)) {
10013 /* We were unbalanced, so reset the balancing interval */
10014 sd->balance_interval = sd->min_interval;
10015 }
10016
10017 goto out;
10018
10019 out_balanced:
10020 /*
10021 * We reach balance although we may have faced some affinity
10022 * constraints. Clear the imbalance flag only if other tasks got
10023 * a chance to move and fix the imbalance.
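*
* (The flag lives in the parent domain's sched_group_capacity
* (sgc->imbalance), so the next balance pass at the parent level can
* classify this child group as group_imbalanced and work around the
* affinity constraints.)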
10024 */
10025 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
10026 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
10027
10028 if (*group_imbalance)
10029 *group_imbalance = 0;
10030 }
10031
10032 out_all_pinned:
10033 /*
10034 * We reach balance because all tasks are pinned at this level so
10035 * we can't migrate them. Leave the imbalance flag set so that the
10036 * parent level can try to migrate them.
10037 */
10038 schedstat_inc(sd->lb_balanced[idle]);
10039
10040 sd->nr_balance_failed = 0;
10041
10042 out_one_pinned:
10043 ld_moved = 0;
10044
10045 /*
10046 * newidle_balance() disregards balance intervals, so we could
10047 * repeatedly reach this code, which would lead to balance_interval
10048 * skyrocketing in a short amount of time. Skip the balance_interval
10049 * increase logic to avoid that.
10050 */
10051 if (env.idle == CPU_NEWLY_IDLE)
10052 goto out;
10053
10054 /* tune up the balancing interval */
10055 if ((env.flags & LBF_ALL_PINNED &&
10056 sd->balance_interval < MAX_PINNED_INTERVAL) ||
10057 sd->balance_interval < sd->max_interval)
10058 sd->balance_interval *= 2;
10059 out:
10060 return ld_moved;
10061 }
10062
10063 static inline unsigned long
10064 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
10065 {
10066 unsigned long interval = sd->balance_interval;
10067
10068 if (cpu_busy)
10069 interval *= sd->busy_factor;
10070
10071 /* scale ms to jiffies */
10072 interval = msecs_to_jiffies(interval);
10073
10074 /*
10075 * Reduce likelihood of busy balancing at higher domains racing with
10076 * balancing at lower domains by preventing their balancing periods
10077 * from being multiples of each other.
10078 */
10079 if (cpu_busy)
10080 interval -= 1;
10081
10082 interval = clamp(interval, 1UL, max_load_balance_interval);
10083
10084 return interval;
10085 }
10086
10087 static inline void
10088 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
10089 {
10090 unsigned long interval, next;
10091
10092 /* used by idle balance, so cpu_busy = 0 */
10093 interval = get_sd_balance_interval(sd, 0);
10094 next = sd->last_balance + interval;
10095
10096 if (time_after(*next_balance, next))
10097 *next_balance = next;
10098 }
10099
10100 /*
10101 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
10102 * running tasks off the busiest CPU onto idle CPUs. It requires at
10103 * least 1 task to be running on each physical CPU where possible, and
10104 * avoids physical / logical imbalances.
10105 */
10106 static int active_load_balance_cpu_stop(void *data)
10107 {
10108 struct rq *busiest_rq = data;
10109 int busiest_cpu = cpu_of(busiest_rq);
10110 int target_cpu = busiest_rq->push_cpu;
10111 struct rq *target_rq = cpu_rq(target_cpu);
10112 struct sched_domain *sd;
10113 struct task_struct *p = NULL;
10114 struct rq_flags rf;
10115
10116 rq_lock_irq(busiest_rq, &rf);
10117 /*
10118 * Between queueing the stop-work and running it is a hole in which
10119 * CPUs can become inactive. We should not move tasks from or to
10120 * inactive CPUs.
10121 */
10122 if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
10123 goto out_unlock;
10124
10125 /* Make sure the requested CPU hasn't gone down in the meantime: */
10126 if (unlikely(busiest_cpu != smp_processor_id() ||
10127 !busiest_rq->active_balance))
10128 goto out_unlock;
10129
10130 /* Is there any task to move? */
10131 if (busiest_rq->nr_running <= 1)
10132 goto out_unlock;
10133
10134 /*
10135 * This condition is "impossible"; if it occurs
10136 * we need to fix it.
Originally reported by 10137 * Bjorn Helgaas on a 128-CPU setup. 10138 */ 10139 BUG_ON(busiest_rq == target_rq); 10140 10141 /* Search for an sd spanning us and the target CPU. */ 10142 rcu_read_lock(); 10143 for_each_domain(target_cpu, sd) { 10144 if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) 10145 break; 10146 } 10147 10148 if (likely(sd)) { 10149 struct lb_env env = { 10150 .sd = sd, 10151 .dst_cpu = target_cpu, 10152 .dst_rq = target_rq, 10153 .src_cpu = busiest_rq->cpu, 10154 .src_rq = busiest_rq, 10155 .idle = CPU_IDLE, 10156 .flags = LBF_ACTIVE_LB, 10157 }; 10158 10159 schedstat_inc(sd->alb_count); 10160 update_rq_clock(busiest_rq); 10161 10162 p = detach_one_task(&env); 10163 if (p) { 10164 schedstat_inc(sd->alb_pushed); 10165 /* Active balancing done, reset the failure counter. */ 10166 sd->nr_balance_failed = 0; 10167 } else { 10168 schedstat_inc(sd->alb_failed); 10169 } 10170 } 10171 rcu_read_unlock(); 10172 out_unlock: 10173 busiest_rq->active_balance = 0; 10174 rq_unlock(busiest_rq, &rf); 10175 10176 if (p) 10177 attach_one_task(target_rq, p); 10178 10179 local_irq_enable(); 10180 10181 return 0; 10182 } 10183 10184 static DEFINE_SPINLOCK(balancing); 10185 10186 /* 10187 * Scale the max load_balance interval with the number of CPUs in the system. 10188 * This trades load-balance latency on larger machines for less cross talk. 10189 */ 10190 void update_max_interval(void) 10191 { 10192 max_load_balance_interval = HZ*num_online_cpus()/10; 10193 } 10194 10195 static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost) 10196 { 10197 if (cost > sd->max_newidle_lb_cost) { 10198 /* 10199 * Track max cost of a domain to make sure to not delay the 10200 * next wakeup on the CPU. 10201 */ 10202 sd->max_newidle_lb_cost = cost; 10203 sd->last_decay_max_lb_cost = jiffies; 10204 } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) { 10205 /* 10206 * Decay the newidle max times by ~1% per second to ensure that 10207 * it is not outdated and the current max cost is actually 10208 * shorter. 10209 */ 10210 sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256; 10211 sd->last_decay_max_lb_cost = jiffies; 10212 10213 return true; 10214 } 10215 10216 return false; 10217 } 10218 10219 /* 10220 * It checks each scheduling domain to see if it is due to be balanced, 10221 * and initiates a balancing operation if so. 10222 * 10223 * Balancing parameters are set up in init_sched_domains. 10224 */ 10225 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) 10226 { 10227 int continue_balancing = 1; 10228 int cpu = rq->cpu; 10229 int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu); 10230 unsigned long interval; 10231 struct sched_domain *sd; 10232 /* Earliest time when we have to do rebalance again */ 10233 unsigned long next_balance = jiffies + 60*HZ; 10234 int update_next_balance = 0; 10235 int need_serialize, need_decay = 0; 10236 u64 max_cost = 0; 10237 10238 rcu_read_lock(); 10239 for_each_domain(cpu, sd) { 10240 /* 10241 * Decay the newidle max times here because this is a regular 10242 * visit to all the domains. 10243 */ 10244 need_decay = update_newidle_cost(sd, 0); 10245 max_cost += sd->max_newidle_lb_cost; 10246 10247 /* 10248 * Stop the load balance at this level. There is another 10249 * CPU in our sched group which is doing load balancing more 10250 * actively. 
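*
* (Note that the code below keeps iterating (continue rather than
* break) while need_decay is set, so the remaining domains still get
* their newidle cost decayed.)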
10251 */
10252 if (!continue_balancing) {
10253 if (need_decay)
10254 continue;
10255 break;
10256 }
10257
10258 interval = get_sd_balance_interval(sd, busy);
10259
10260 need_serialize = sd->flags & SD_SERIALIZE;
10261 if (need_serialize) {
10262 if (!spin_trylock(&balancing))
10263 goto out;
10264 }
10265
10266 if (time_after_eq(jiffies, sd->last_balance + interval)) {
10267 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
10268 /*
10269 * The LBF_DST_PINNED logic could have changed
10270 * env->dst_cpu, so we can't know our idle
10271 * state even if we migrated tasks. Update it.
10272 */
10273 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
10274 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
10275 }
10276 sd->last_balance = jiffies;
10277 interval = get_sd_balance_interval(sd, busy);
10278 }
10279 if (need_serialize)
10280 spin_unlock(&balancing);
10281 out:
10282 if (time_after(next_balance, sd->last_balance + interval)) {
10283 next_balance = sd->last_balance + interval;
10284 update_next_balance = 1;
10285 }
10286 }
10287 if (need_decay) {
10288 /*
10289 * Ensure the rq-wide value also decays but keep it at a
10290 * reasonable floor to avoid funnies with rq->avg_idle.
10291 */
10292 rq->max_idle_balance_cost =
10293 max((u64)sysctl_sched_migration_cost, max_cost);
10294 }
10295 rcu_read_unlock();
10296
10297 /*
10298 * next_balance will be updated only when there is a need.
10299 * For example, when the CPU is attached to the NULL domain it will
10300 * not be updated.
10301 */
10302 if (likely(update_next_balance))
10303 rq->next_balance = next_balance;
10304
10305 }
10306
10307 static inline int on_null_domain(struct rq *rq)
10308 {
10309 return unlikely(!rcu_dereference_sched(rq->sd));
10310 }
10311
10312 #ifdef CONFIG_NO_HZ_COMMON
10313 /*
10314 * idle load balancing details
10315 * - When one of the busy CPUs notices that there may be an idle rebalancing
10316 * needed, it will kick the idle load balancer, which then does idle
10317 * load balancing for all the idle CPUs.
10318 * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not
10319 * set anywhere yet.
10320 */
10321
10322 static inline int find_new_ilb(void)
10323 {
10324 int ilb;
10325 const struct cpumask *hk_mask;
10326
10327 hk_mask = housekeeping_cpumask(HK_FLAG_MISC);
10328
10329 for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {
10330
10331 if (ilb == smp_processor_id())
10332 continue;
10333
10334 if (idle_cpu(ilb))
10335 return ilb;
10336 }
10337
10338 return nr_cpu_ids;
10339 }
10340
10341 /*
10342 * Kick a CPU to do the nohz balancing if it is time for it. We pick any
10343 * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
10344 */
10345 static void kick_ilb(unsigned int flags)
10346 {
10347 int ilb_cpu;
10348
10349 /*
10350 * Increase nohz.next_balance only if a full ilb is triggered, but
10351 * not if we only update stats.
10352 */
10353 if (flags & NOHZ_BALANCE_KICK)
10354 nohz.next_balance = jiffies+1;
10355
10356 ilb_cpu = find_new_ilb();
10357
10358 if (ilb_cpu >= nr_cpu_ids)
10359 return;
10360
10361 /*
10362 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
10363 * the first flag owns it; cleared by nohz_csd_func().
10364 */
10365 flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
10366 if (flags & NOHZ_KICK_MASK)
10367 return;
10368
10369 /*
10370 * This way we generate an IPI on the target CPU which
10371 * is idle. And the softirq performing nohz idle load balance
10372 * will be run before returning from the IPI.
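*
* (atomic_fetch_or() above returned the old flags: if any NOHZ_KICK_MASK
* bit was already set, another CPU owned the pending csd and we returned
* without queueing a second IPI; the csd stays in flight until
* nohz_csd_func() runs on the target and clears the flags.)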
10373 */
10374 smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
10375 }
10376
10377 /*
10378 * Current decision point for kicking the idle load balancer in the presence
10379 * of idle CPUs in the system.
10380 */
10381 static void nohz_balancer_kick(struct rq *rq)
10382 {
10383 unsigned long now = jiffies;
10384 struct sched_domain_shared *sds;
10385 struct sched_domain *sd;
10386 int nr_busy, i, cpu = rq->cpu;
10387 unsigned int flags = 0;
10388
10389 if (unlikely(rq->idle_balance))
10390 return;
10391
10392 /*
10393 * We may have recently been in ticked or tickless idle mode. At the
10394 * first busy tick after returning from idle, we will update the busy stats.
10395 */
10396 nohz_balance_exit_idle(rq);
10397
10398 /*
10399 * None are in tickless mode and hence there is no need for NOHZ idle
10400 * load balancing.
10401 */
10402 if (likely(!atomic_read(&nohz.nr_cpus)))
10403 return;
10404
10405 if (READ_ONCE(nohz.has_blocked) &&
10406 time_after(now, READ_ONCE(nohz.next_blocked)))
10407 flags = NOHZ_STATS_KICK;
10408
10409 if (time_before(now, nohz.next_balance))
10410 goto out;
10411
10412 if (rq->nr_running >= 2) {
10413 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10414 goto out;
10415 }
10416
10417 rcu_read_lock();
10418
10419 sd = rcu_dereference(rq->sd);
10420 if (sd) {
10421 /*
10422 * If there's a CFS task and the current CPU has reduced
10423 * capacity, kick the ILB to see if there's a better CPU to run
10424 * on.
10425 */
10426 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
10427 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10428 goto unlock;
10429 }
10430 }
10431
10432 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
10433 if (sd) {
10434 /*
10435 * When ASYM_PACKING, see if there's a more preferred CPU
10436 * currently idle; in which case, kick the ILB to move tasks
10437 * around.
10438 */
10439 for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
10440 if (sched_asym_prefer(i, cpu)) {
10441 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10442 goto unlock;
10443 }
10444 }
10445 }
10446
10447 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
10448 if (sd) {
10449 /*
10450 * When ASYM_CPUCAPACITY, see if there's a higher capacity CPU
10451 * to run the misfit task on.
10452 */
10453 if (check_misfit_status(rq, sd)) {
10454 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10455 goto unlock;
10456 }
10457
10458 /*
10459 * For asymmetric systems, we do not want to nicely balance
10460 * cache use; instead we want to embrace asymmetry and only
10461 * ensure tasks have enough CPU capacity.
10462 *
10463 * Skip the LLC logic because it's not relevant in that case.
10464 */
10465 goto unlock;
10466 }
10467
10468 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
10469 if (sds) {
10470 /*
10471 * If there is an imbalance between LLC domains (IOW we could
10472 * increase the overall cache use), we need some less-loaded LLC
10473 * domain to pull some load. Likewise, we may need to spread
10474 * load within the current LLC domain (e.g. packed SMT cores but
10475 * other CPUs are idle). We can't really know from here how busy
10476 * the others are - so just get a nohz balance going if it looks
10477 * like this LLC domain has tasks we could move.
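*
* (nr_busy_cpus is kept up to date by set_cpu_sd_state_busy() and
* set_cpu_sd_state_idle() below; more than one busy CPU in the LLC is
* taken as a hint that spreading load may help.)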
10478 */
10479 nr_busy = atomic_read(&sds->nr_busy_cpus);
10480 if (nr_busy > 1) {
10481 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
10482 goto unlock;
10483 }
10484 }
10485 unlock:
10486 rcu_read_unlock();
10487 out:
10488 if (READ_ONCE(nohz.needs_update))
10489 flags |= NOHZ_NEXT_KICK;
10490
10491 if (flags)
10492 kick_ilb(flags);
10493 }
10494
10495 static void set_cpu_sd_state_busy(int cpu)
10496 {
10497 struct sched_domain *sd;
10498
10499 rcu_read_lock();
10500 sd = rcu_dereference(per_cpu(sd_llc, cpu));
10501
10502 if (!sd || !sd->nohz_idle)
10503 goto unlock;
10504 sd->nohz_idle = 0;
10505
10506 atomic_inc(&sd->shared->nr_busy_cpus);
10507 unlock:
10508 rcu_read_unlock();
10509 }
10510
10511 void nohz_balance_exit_idle(struct rq *rq)
10512 {
10513 SCHED_WARN_ON(rq != this_rq());
10514
10515 if (likely(!rq->nohz_tick_stopped))
10516 return;
10517
10518 rq->nohz_tick_stopped = 0;
10519 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
10520 atomic_dec(&nohz.nr_cpus);
10521
10522 set_cpu_sd_state_busy(rq->cpu);
10523 }
10524
10525 static void set_cpu_sd_state_idle(int cpu)
10526 {
10527 struct sched_domain *sd;
10528
10529 rcu_read_lock();
10530 sd = rcu_dereference(per_cpu(sd_llc, cpu));
10531
10532 if (!sd || sd->nohz_idle)
10533 goto unlock;
10534 sd->nohz_idle = 1;
10535
10536 atomic_dec(&sd->shared->nr_busy_cpus);
10537 unlock:
10538 rcu_read_unlock();
10539 }
10540
10541 /*
10542 * This routine will record that the CPU is going idle with its tick stopped.
10543 * This info will be used in performing idle load balancing in the future.
10544 */
10545 void nohz_balance_enter_idle(int cpu)
10546 {
10547 struct rq *rq = cpu_rq(cpu);
10548
10549 SCHED_WARN_ON(cpu != smp_processor_id());
10550
10551 /* If this CPU is going down, then nothing needs to be done: */
10552 if (!cpu_active(cpu))
10553 return;
10554
10555 /* Spare idle load balancing on CPUs that don't want to be disturbed: */
10556 if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
10557 return;
10558
10559 /*
10560 * Can be set safely without rq->lock held.
10561 * If a clear happens, it will have evaluated the last additions,
10562 * because rq->lock is held during both the check and the clear.
10563 */
10564 rq->has_blocked_load = 1;
10565
10566 /*
10567 * The tick is still stopped but load could have been added in the
10568 * meantime. We set the nohz.has_blocked flag to trigger a check of the
10569 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
10570 * of nohz.has_blocked can only happen after checking the new load.
10571 */
10572 if (rq->nohz_tick_stopped)
10573 goto out;
10574
10575 /* If we're a completely isolated CPU, we don't play: */
10576 if (on_null_domain(rq))
10577 return;
10578
10579 rq->nohz_tick_stopped = 1;
10580
10581 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
10582 atomic_inc(&nohz.nr_cpus);
10583
10584 /*
10585 * Ensures that if nohz_idle_balance() fails to observe our
10586 * @idle_cpus_mask store, it must observe the @has_blocked
10587 * and @needs_update stores.
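*
* (This pairs with the smp_mb() in _nohz_idle_balance(), issued before
* it scans nohz.idle_cpus_mask.)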
10588 */
10589 smp_mb__after_atomic();
10590
10591 set_cpu_sd_state_idle(cpu);
10592
10593 WRITE_ONCE(nohz.needs_update, 1);
10594 out:
10595 /*
10596 * Each time a CPU enters idle, we assume that it has blocked load and
10597 * enable the periodic update of the load of idle CPUs.
10598 */
10599 WRITE_ONCE(nohz.has_blocked, 1);
10600 }
10601
10602 static bool update_nohz_stats(struct rq *rq)
10603 {
10604 unsigned int cpu = rq->cpu;
10605
10606 if (!rq->has_blocked_load)
10607 return false;
10608
10609 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
10610 return false;
10611
10612 if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
10613 return true;
10614
10615 update_blocked_averages(cpu);
10616
10617 return rq->has_blocked_load;
10618 }
10619
10620 /*
10621 * Internal function that runs load balance for all idle CPUs. The load balance
10622 * can be a simple update of blocked load or a complete load balance with
10623 * task movement, depending on the flags.
10624 */
10625 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
10626 enum cpu_idle_type idle)
10627 {
10628 /* Earliest time when we have to do rebalance again */
10629 unsigned long now = jiffies;
10630 unsigned long next_balance = now + 60*HZ;
10631 bool has_blocked_load = false;
10632 int update_next_balance = 0;
10633 int this_cpu = this_rq->cpu;
10634 int balance_cpu;
10635 struct rq *rq;
10636
10637 SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
10638
10639 /*
10640 * We assume there will be no idle load after this update and clear
10641 * the has_blocked flag. If a CPU enters idle in the meantime, it will
10642 * set the has_blocked flag and trigger another update of idle load.
10643 * Because a CPU that becomes idle is added to idle_cpus_mask before
10644 * setting the flag, we are sure not to clear the state and miss
10645 * the load of a newly idle CPU.
10646 *
10647 * The same applies to idle_cpus_mask vs needs_update.
10648 */
10649 if (flags & NOHZ_STATS_KICK)
10650 WRITE_ONCE(nohz.has_blocked, 0);
10651 if (flags & NOHZ_NEXT_KICK)
10652 WRITE_ONCE(nohz.needs_update, 0);
10653
10654 /*
10655 * Ensures that if we miss the CPU, we must see the has_blocked
10656 * store from nohz_balance_enter_idle().
10657 */
10658 smp_mb();
10659
10660 /*
10661 * Start with the next CPU after this_cpu so we will end with this_cpu
10662 * and give other idle CPUs a chance to pull load.
10663 */
10664 for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
10665 if (!idle_cpu(balance_cpu))
10666 continue;
10667
10668 /*
10669 * If this CPU gets work to do, stop the load balancing
10670 * work being done for other CPUs. The next load
10671 * balancing owner will pick it up.
10672 */
10673 if (need_resched()) {
10674 if (flags & NOHZ_STATS_KICK)
10675 has_blocked_load = true;
10676 if (flags & NOHZ_NEXT_KICK)
10677 WRITE_ONCE(nohz.needs_update, 1);
10678 goto abort;
10679 }
10680
10681 rq = cpu_rq(balance_cpu);
10682
10683 if (flags & NOHZ_STATS_KICK)
10684 has_blocked_load |= update_nohz_stats(rq);
10685
10686 /*
10687 * If the time for the next balance is due,
10688 * do the balance.
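*
* (The remote rq's clock is refreshed under its lock first, so that
* the subsequent rebalance_domains() call operates on an up-to-date
* rq clock.)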
10689 */
10690 if (time_after_eq(jiffies, rq->next_balance)) {
10691 struct rq_flags rf;
10692
10693 rq_lock_irqsave(rq, &rf);
10694 update_rq_clock(rq);
10695 rq_unlock_irqrestore(rq, &rf);
10696
10697 if (flags & NOHZ_BALANCE_KICK)
10698 rebalance_domains(rq, CPU_IDLE);
10699 }
10700
10701 if (time_after(next_balance, rq->next_balance)) {
10702 next_balance = rq->next_balance;
10703 update_next_balance = 1;
10704 }
10705 }
10706
10707 /*
10708 * next_balance will be updated only when there is a need.
10709 * For example, when the CPU is attached to the NULL domain it will not
10710 * be updated.
10711 */
10712 if (likely(update_next_balance))
10713 nohz.next_balance = next_balance;
10714
10715 if (flags & NOHZ_STATS_KICK)
10716 WRITE_ONCE(nohz.next_blocked,
10717 now + msecs_to_jiffies(LOAD_AVG_PERIOD));
10718
10719 abort:
10720 /* There is still blocked load, enable periodic update */
10721 if (has_blocked_load)
10722 WRITE_ONCE(nohz.has_blocked, 1);
10723 }
10724
10725 /*
10726 * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
10727 * rebalancing for all the CPUs whose scheduler ticks are stopped.
10728 */
10729 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
10730 {
10731 unsigned int flags = this_rq->nohz_idle_balance;
10732
10733 if (!flags)
10734 return false;
10735
10736 this_rq->nohz_idle_balance = 0;
10737
10738 if (idle != CPU_IDLE)
10739 return false;
10740
10741 _nohz_idle_balance(this_rq, flags, idle);
10742
10743 return true;
10744 }
10745
10746 /*
10747 * Check if we need to run the ILB for updating blocked load before entering
10748 * idle state.
10749 */
10750 void nohz_run_idle_balance(int cpu)
10751 {
10752 unsigned int flags;
10753
10754 flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
10755
10756 /*
10757 * Update the blocked load only if no SCHED_SOFTIRQ is about to happen
10758 * (i.e. NOHZ_STATS_KICK is set), since that softirq would do the same
10759 * update.
10760 */
10761 if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
10762 _nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK, CPU_IDLE);
10763 }
10764
10765 static void nohz_newidle_balance(struct rq *this_rq)
10766 {
10767 int this_cpu = this_rq->cpu;
10768
10769 /*
10770 * This CPU doesn't want to be disturbed by scheduler
10771 * housekeeping.
10772 */
10773 if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
10774 return;
10775
10776 /* Will wake up very soon. No time for doing anything else */
10777 if (this_rq->avg_idle < sysctl_sched_migration_cost)
10778 return;
10779
10780 /* No need to update the blocked load of idle CPUs */
10781 if (!READ_ONCE(nohz.has_blocked) ||
10782 time_before(jiffies, READ_ONCE(nohz.next_blocked)))
10783 return;
10784
10785 /*
10786 * Set the need to trigger ILB in order to update blocked load
10787 * before entering idle state.
10788 */
10789 atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
10790 }
10791
10792 #else /* !CONFIG_NO_HZ_COMMON */
10793 static inline void nohz_balancer_kick(struct rq *rq) { }
10794
10795 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
10796 {
10797 return false;
10798 }
10799
10800 static inline void nohz_newidle_balance(struct rq *this_rq) { }
10801 #endif /* CONFIG_NO_HZ_COMMON */
10802
10803 /*
10804 * newidle_balance is called by schedule() if this_cpu is about to become
10805 * idle. Attempts to pull tasks from other CPUs.
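*
* Called with this_rq's lock held and pinned; the lock is dropped and
* re-acquired while domains are scanned (see the rq_unpin_lock() /
* rq_repin_lock() pair below).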
*
10806 * Returns:
10807 * < 0 - we released the lock and there are !fair tasks present
10808 * 0 - failed, no new tasks
10809 * > 0 - success, new (fair) tasks present
10810 */
10811 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
10812 {
10813 unsigned long next_balance = jiffies + HZ;
10814 int this_cpu = this_rq->cpu;
10815 u64 t0, t1, curr_cost = 0;
10816 struct sched_domain *sd;
10817 int pulled_task = 0;
10818
10819 update_misfit_status(NULL, this_rq);
10820
10821 /*
10822 * There is a task waiting to run. No need to search for one.
10823 * Return 0; the task will be enqueued when switching to idle.
10824 */
10825 if (this_rq->ttwu_pending)
10826 return 0;
10827
10828 /*
10829 * We must set idle_stamp _before_ doing the balance work below, such
10830 * that we measure the duration of that work as idle time.
10831 */
10832 this_rq->idle_stamp = rq_clock(this_rq);
10833
10834 /*
10835 * Do not pull tasks towards !active CPUs...
10836 */
10837 if (!cpu_active(this_cpu))
10838 return 0;
10839
10840 /*
10841 * This is OK, because current is on_cpu, which avoids it being picked
10842 * for load-balance, and preemption/IRQs are still disabled, avoiding
10843 * further scheduler activity on it; and we're being very careful to
10844 * re-start the picking loop.
10845 */
10846 rq_unpin_lock(this_rq, rf);
10847
10848 rcu_read_lock();
10849 sd = rcu_dereference_check_sched_domain(this_rq->sd);
10850
10851 if (!READ_ONCE(this_rq->rd->overload) ||
10852 (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) {
10853
10854 if (sd)
10855 update_next_balance(sd, &next_balance);
10856 rcu_read_unlock();
10857
10858 goto out;
10859 }
10860 rcu_read_unlock();
10861
10862 raw_spin_rq_unlock(this_rq);
10863
10864 t0 = sched_clock_cpu(this_cpu);
10865 update_blocked_averages(this_cpu);
10866
10867 rcu_read_lock();
10868 for_each_domain(this_cpu, sd) {
10869 int continue_balancing = 1;
10870 u64 domain_cost;
10871
10872 update_next_balance(sd, &next_balance);
10873
10874 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
10875 break;
10876
10877 if (sd->flags & SD_BALANCE_NEWIDLE) {
10878
10879 pulled_task = load_balance(this_cpu, this_rq,
10880 sd, CPU_NEWLY_IDLE,
10881 &continue_balancing);
10882
10883 t1 = sched_clock_cpu(this_cpu);
10884 domain_cost = t1 - t0;
10885 update_newidle_cost(sd, domain_cost);
10886
10887 curr_cost += domain_cost;
10888 t0 = t1;
10889 }
10890
10891 /*
10892 * Stop searching for tasks to pull if there are
10893 * now runnable tasks on this rq.
10894 */
10895 if (pulled_task || this_rq->nr_running > 0 ||
10896 this_rq->ttwu_pending)
10897 break;
10898 }
10899 rcu_read_unlock();
10900
10901 raw_spin_rq_lock(this_rq);
10902
10903 if (curr_cost > this_rq->max_idle_balance_cost)
10904 this_rq->max_idle_balance_cost = curr_cost;
10905
10906 /*
10907 * While browsing the domains, we released the rq lock; a task could
10908 * have been enqueued in the meantime. Since we're not going idle,
10909 * pretend we pulled a task.
10910 */
10911 if (this_rq->cfs.h_nr_running && !pulled_task)
10912 pulled_task = 1;
10913
10914 /* Is there a task of a higher priority class?
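(If so, pulled_task is set to -1 below so that the caller restarts its
class selection instead of letting this CPU go idle.)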
*/ 10915 if (this_rq->nr_running != this_rq->cfs.h_nr_running) 10916 pulled_task = -1; 10917 10918 out: 10919 /* Move the next balance forward */ 10920 if (time_after(this_rq->next_balance, next_balance)) 10921 this_rq->next_balance = next_balance; 10922 10923 if (pulled_task) 10924 this_rq->idle_stamp = 0; 10925 else 10926 nohz_newidle_balance(this_rq); 10927 10928 rq_repin_lock(this_rq, rf); 10929 10930 return pulled_task; 10931 } 10932 10933 /* 10934 * run_rebalance_domains is triggered when needed from the scheduler tick. 10935 * Also triggered for nohz idle balancing (with nohz_balancing_kick set). 10936 */ 10937 static __latent_entropy void run_rebalance_domains(struct softirq_action *h) 10938 { 10939 struct rq *this_rq = this_rq(); 10940 enum cpu_idle_type idle = this_rq->idle_balance ? 10941 CPU_IDLE : CPU_NOT_IDLE; 10942 10943 /* 10944 * If this CPU has a pending nohz_balance_kick, then do the 10945 * balancing on behalf of the other idle CPUs whose ticks are 10946 * stopped. Do nohz_idle_balance *before* rebalance_domains to 10947 * give the idle CPUs a chance to load balance. Else we may 10948 * load balance only within the local sched_domain hierarchy 10949 * and abort nohz_idle_balance altogether if we pull some load. 10950 */ 10951 if (nohz_idle_balance(this_rq, idle)) 10952 return; 10953 10954 /* normal load balance */ 10955 update_blocked_averages(this_rq->cpu); 10956 rebalance_domains(this_rq, idle); 10957 } 10958 10959 /* 10960 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 10961 */ 10962 void trigger_load_balance(struct rq *rq) 10963 { 10964 /* 10965 * Don't need to rebalance while attached to NULL domain or 10966 * runqueue CPU is not active 10967 */ 10968 if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq)))) 10969 return; 10970 10971 if (time_after_eq(jiffies, rq->next_balance)) 10972 raise_softirq(SCHED_SOFTIRQ); 10973 10974 nohz_balancer_kick(rq); 10975 } 10976 10977 static void rq_online_fair(struct rq *rq) 10978 { 10979 update_sysctl(); 10980 10981 update_runtime_enabled(rq); 10982 } 10983 10984 static void rq_offline_fair(struct rq *rq) 10985 { 10986 update_sysctl(); 10987 10988 /* Ensure any throttled groups are reachable by pick_next_task */ 10989 unthrottle_offline_cfs_rqs(rq); 10990 } 10991 10992 #endif /* CONFIG_SMP */ 10993 10994 #ifdef CONFIG_SCHED_CORE 10995 static inline bool 10996 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) 10997 { 10998 u64 slice = sched_slice(cfs_rq_of(se), se); 10999 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; 11000 11001 return (rtime * min_nr_tasks > slice); 11002 } 11003 11004 #define MIN_NR_TASKS_DURING_FORCEIDLE 2 11005 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) 11006 { 11007 if (!sched_core_enabled(rq)) 11008 return; 11009 11010 /* 11011 * If runqueue has only one task which used up its slice and 11012 * if the sibling is forced idle, then trigger schedule to 11013 * give forced idle task a chance. 11014 * 11015 * sched_slice() considers only this active rq and it gets the 11016 * whole slice. But during force idle, we have siblings acting 11017 * like a single runqueue and hence we need to consider runnable 11018 * tasks on this CPU and the forced idle CPU. Ideally, we should 11019 * go through the forced idle rq, but that would be a perf hit. 11020 * We can assume that the forced idle CPU has at least 11021 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check 11022 * if we need to give up the CPU. 
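*
* As a concrete reading: with MIN_NR_TASKS_DURING_FORCEIDLE == 2,
* __entity_slice_used() reduces to rtime * 2 > slice, i.e. we resched
* once the current task has consumed more than half of its slice.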
11023 */ 11024 if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 && 11025 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) 11026 resched_curr(rq); 11027 } 11028 11029 /* 11030 * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed. 11031 */ 11032 static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle) 11033 { 11034 for_each_sched_entity(se) { 11035 struct cfs_rq *cfs_rq = cfs_rq_of(se); 11036 11037 if (forceidle) { 11038 if (cfs_rq->forceidle_seq == fi_seq) 11039 break; 11040 cfs_rq->forceidle_seq = fi_seq; 11041 } 11042 11043 cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime; 11044 } 11045 } 11046 11047 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) 11048 { 11049 struct sched_entity *se = &p->se; 11050 11051 if (p->sched_class != &fair_sched_class) 11052 return; 11053 11054 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); 11055 } 11056 11057 bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi) 11058 { 11059 struct rq *rq = task_rq(a); 11060 struct sched_entity *sea = &a->se; 11061 struct sched_entity *seb = &b->se; 11062 struct cfs_rq *cfs_rqa; 11063 struct cfs_rq *cfs_rqb; 11064 s64 delta; 11065 11066 SCHED_WARN_ON(task_rq(b)->core != rq->core); 11067 11068 #ifdef CONFIG_FAIR_GROUP_SCHED 11069 /* 11070 * Find an se in the hierarchy for tasks a and b, such that the se's 11071 * are immediate siblings. 11072 */ 11073 while (sea->cfs_rq->tg != seb->cfs_rq->tg) { 11074 int sea_depth = sea->depth; 11075 int seb_depth = seb->depth; 11076 11077 if (sea_depth >= seb_depth) 11078 sea = parent_entity(sea); 11079 if (sea_depth <= seb_depth) 11080 seb = parent_entity(seb); 11081 } 11082 11083 se_fi_update(sea, rq->core->core_forceidle_seq, in_fi); 11084 se_fi_update(seb, rq->core->core_forceidle_seq, in_fi); 11085 11086 cfs_rqa = sea->cfs_rq; 11087 cfs_rqb = seb->cfs_rq; 11088 #else 11089 cfs_rqa = &task_rq(a)->cfs; 11090 cfs_rqb = &task_rq(b)->cfs; 11091 #endif 11092 11093 /* 11094 * Find delta after normalizing se's vruntime with its cfs_rq's 11095 * min_vruntime_fi, which would have been updated in prior calls 11096 * to se_fi_update(). 11097 */ 11098 delta = (s64)(sea->vruntime - seb->vruntime) + 11099 (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi); 11100 11101 return delta > 0; 11102 } 11103 #else 11104 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {} 11105 #endif 11106 11107 /* 11108 * scheduler tick hitting a task of our scheduling class. 11109 * 11110 * NOTE: This function can be called remotely by the tick offload that 11111 * goes along full dynticks. Therefore no local assumption can be made 11112 * and everything must be accessed through the @rq and @curr passed in 11113 * parameters. 
11114 */
11115 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
11116 {
11117 struct cfs_rq *cfs_rq;
11118 struct sched_entity *se = &curr->se;
11119
11120 for_each_sched_entity(se) {
11121 cfs_rq = cfs_rq_of(se);
11122 entity_tick(cfs_rq, se, queued);
11123 }
11124
11125 if (static_branch_unlikely(&sched_numa_balancing))
11126 task_tick_numa(rq, curr);
11127
11128 update_misfit_status(curr, rq);
11129 update_overutilized_status(task_rq(curr));
11130
11131 task_tick_core(rq, curr);
11132 }
11133
11134 /*
11135 * called on fork with the child task as argument from the parent's context
11136 * - child not yet on the tasklist
11137 * - preemption disabled
11138 */
11139 static void task_fork_fair(struct task_struct *p)
11140 {
11141 struct cfs_rq *cfs_rq;
11142 struct sched_entity *se = &p->se, *curr;
11143 struct rq *rq = this_rq();
11144 struct rq_flags rf;
11145
11146 rq_lock(rq, &rf);
11147 update_rq_clock(rq);
11148
11149 cfs_rq = task_cfs_rq(current);
11150 curr = cfs_rq->curr;
11151 if (curr) {
11152 update_curr(cfs_rq);
11153 se->vruntime = curr->vruntime;
11154 }
11155 place_entity(cfs_rq, se, 1);
11156
11157 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
11158 /*
11159 * Upon rescheduling, sched_class::put_prev_task() will place
11160 * 'current' within the tree based on its new key value.
11161 */
11162 swap(curr->vruntime, se->vruntime);
11163 resched_curr(rq);
11164 }
11165
11166 se->vruntime -= cfs_rq->min_vruntime;
11167 rq_unlock(rq, &rf);
11168 }
11169
11170 /*
11171 * The priority of the task has changed. Check to see if we preempt
11172 * the current task.
11173 */
11174 static void
11175 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
11176 {
11177 if (!task_on_rq_queued(p))
11178 return;
11179
11180 if (rq->cfs.nr_running == 1)
11181 return;
11182
11183 /*
11184 * Reschedule if we are currently running on this runqueue and
11185 * our priority decreased, or if we are not currently running on
11186 * this runqueue and our priority is higher than the current task's.
11187 */
11188 if (task_current(rq, p)) {
11189 if (p->prio > oldprio)
11190 resched_curr(rq);
11191 } else
11192 check_preempt_curr(rq, p, 0);
11193 }
11194
11195 static inline bool vruntime_normalized(struct task_struct *p)
11196 {
11197 struct sched_entity *se = &p->se;
11198
11199 /*
11200 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
11201 * the dequeue_entity(.flags=0) will already have normalized the
11202 * vruntime.
11203 */
11204 if (p->on_rq)
11205 return true;
11206
11207 /*
11208 * When !on_rq, vruntime of the task has usually NOT been normalized.
11209 * But there are some cases where it has already been normalized:
11210 *
11211 * - A forked child that is waiting to be woken up by
11212 * wake_up_new_task().
11213 * - A task that has been woken up by try_to_wake_up() and is
11214 * waiting to actually be woken up by sched_ttwu_pending().
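*
* (These two cases are what the !se->sum_exec_runtime and TASK_WAKING
* checks below detect, respectively.)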
11215 */
11216 if (!se->sum_exec_runtime ||
11217 (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup))
11218 return true;
11219
11220 return false;
11221 }
11222
11223 #ifdef CONFIG_FAIR_GROUP_SCHED
11224 /*
11225 * Propagate the changes of the sched_entity across the tg tree so that
11226 * they are visible to the root
11227 */
11228 static void propagate_entity_cfs_rq(struct sched_entity *se)
11229 {
11230 struct cfs_rq *cfs_rq;
11231
11232 list_add_leaf_cfs_rq(cfs_rq_of(se));
11233
11234 /* Start propagating at the parent */
11235 se = se->parent;
11236
11237 for_each_sched_entity(se) {
11238 cfs_rq = cfs_rq_of(se);
11239
11240 if (!cfs_rq_throttled(cfs_rq)) {
11241 update_load_avg(cfs_rq, se, UPDATE_TG);
11242 list_add_leaf_cfs_rq(cfs_rq);
11243 continue;
11244 }
11245
11246 if (list_add_leaf_cfs_rq(cfs_rq))
11247 break;
11248 }
11249 }
11250 #else
11251 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
11252 #endif
11253
11254 static void detach_entity_cfs_rq(struct sched_entity *se)
11255 {
11256 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11257
11258 /* Catch up with the cfs_rq and remove our load when we leave */
11259 update_load_avg(cfs_rq, se, 0);
11260 detach_entity_load_avg(cfs_rq, se);
11261 update_tg_load_avg(cfs_rq);
11262 propagate_entity_cfs_rq(se);
11263 }
11264
11265 static void attach_entity_cfs_rq(struct sched_entity *se)
11266 {
11267 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11268
11269 #ifdef CONFIG_FAIR_GROUP_SCHED
11270 /*
11271 * Since the real depth could have been changed (only the FAIR
11272 * class maintains a depth value), reset the depth properly.
11273 */
11274 se->depth = se->parent ? se->parent->depth + 1 : 0;
11275 #endif
11276
11277 /* Synchronize entity with its cfs_rq */
11278 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
11279 attach_entity_load_avg(cfs_rq, se);
11280 update_tg_load_avg(cfs_rq);
11281 propagate_entity_cfs_rq(se);
11282 }
11283
11284 static void detach_task_cfs_rq(struct task_struct *p)
11285 {
11286 struct sched_entity *se = &p->se;
11287 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11288
11289 if (!vruntime_normalized(p)) {
11290 /*
11291 * Fix up our vruntime so that the current sleep doesn't
11292 * cause an 'unlimited' sleep bonus.
11293 */
11294 place_entity(cfs_rq, se, 0);
11295 se->vruntime -= cfs_rq->min_vruntime;
11296 }
11297
11298 detach_entity_cfs_rq(se);
11299 }
11300
11301 static void attach_task_cfs_rq(struct task_struct *p)
11302 {
11303 struct sched_entity *se = &p->se;
11304 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11305
11306 attach_entity_cfs_rq(se);
11307
11308 if (!vruntime_normalized(p))
11309 se->vruntime += cfs_rq->min_vruntime;
11310 }
11311
11312 static void switched_from_fair(struct rq *rq, struct task_struct *p)
11313 {
11314 detach_task_cfs_rq(p);
11315 }
11316
11317 static void switched_to_fair(struct rq *rq, struct task_struct *p)
11318 {
11319 attach_task_cfs_rq(p);
11320
11321 if (task_on_rq_queued(p)) {
11322 /*
11323 * We were most likely switched from sched_rt, so
11324 * trigger a reschedule if we are running; otherwise just see
11325 * if we can still preempt the current task.
11326 */
11327 if (task_current(rq, p))
11328 resched_curr(rq);
11329 else
11330 check_preempt_curr(rq, p, 0);
11331 }
11332 }
11333
11334 /* Account for a task changing its policy or group.
11335 *
11336 * This routine is mostly called to set the cfs_rq->curr field when a task
11337 * migrates between groups/classes.
11338 */
11339 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
11340 {
11341 struct sched_entity *se = &p->se;
11342
11343 #ifdef CONFIG_SMP
11344 if (task_on_rq_queued(p)) {
11345 /*
11346 * Move the next running task to the front of the list, so that
11347 * our cfs_tasks list becomes an MRU list.
11348 */
11349 list_move(&se->group_node, &rq->cfs_tasks);
11350 }
11351 #endif
11352
11353 for_each_sched_entity(se) {
11354 struct cfs_rq *cfs_rq = cfs_rq_of(se);
11355
11356 set_next_entity(cfs_rq, se);
11357 /* ensure bandwidth has been allocated on our new cfs_rq */
11358 account_cfs_rq_runtime(cfs_rq, 0);
11359 }
11360 }
11361
11362 void init_cfs_rq(struct cfs_rq *cfs_rq)
11363 {
11364 cfs_rq->tasks_timeline = RB_ROOT_CACHED;
11365 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
11366 #ifndef CONFIG_64BIT
11367 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
11368 #endif
11369 #ifdef CONFIG_SMP
11370 raw_spin_lock_init(&cfs_rq->removed.lock);
11371 #endif
11372 }
11373
11374 #ifdef CONFIG_FAIR_GROUP_SCHED
11375 static void task_set_group_fair(struct task_struct *p)
11376 {
11377 struct sched_entity *se = &p->se;
11378
11379 set_task_rq(p, task_cpu(p));
11380 se->depth = se->parent ? se->parent->depth + 1 : 0;
11381 }
11382
11383 static void task_move_group_fair(struct task_struct *p)
11384 {
11385 detach_task_cfs_rq(p);
11386 set_task_rq(p, task_cpu(p));
11387
11388 #ifdef CONFIG_SMP
11389 /* Tell the se that its cfs_rq has changed -- it has migrated */
11390 p->se.avg.last_update_time = 0;
11391 #endif
11392 attach_task_cfs_rq(p);
11393 }
11394
11395 static void task_change_group_fair(struct task_struct *p, int type)
11396 {
11397 switch (type) {
11398 case TASK_SET_GROUP:
11399 task_set_group_fair(p);
11400 break;
11401
11402 case TASK_MOVE_GROUP:
11403 task_move_group_fair(p);
11404 break;
11405 }
11406 }
11407
11408 void free_fair_sched_group(struct task_group *tg)
11409 {
11410 int i;
11411
11412 for_each_possible_cpu(i) {
11413 if (tg->cfs_rq)
11414 kfree(tg->cfs_rq[i]);
11415 if (tg->se)
11416 kfree(tg->se[i]);
11417 }
11418
11419 kfree(tg->cfs_rq);
11420 kfree(tg->se);
11421 }
11422
11423 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
11424 {
11425 struct sched_entity *se;
11426 struct cfs_rq *cfs_rq;
11427 int i;
11428
11429 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
11430 if (!tg->cfs_rq)
11431 goto err;
11432 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
11433 if (!tg->se)
11434 goto err;
11435
11436 tg->shares = NICE_0_LOAD;
11437
11438 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
11439
11440 for_each_possible_cpu(i) {
11441 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
11442 GFP_KERNEL, cpu_to_node(i));
11443 if (!cfs_rq)
11444 goto err;
11445
11446 se = kzalloc_node(sizeof(struct sched_entity_stats),
11447 GFP_KERNEL, cpu_to_node(i));
11448 if (!se)
11449 goto err_free_rq;
11450
11451 init_cfs_rq(cfs_rq);
11452 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
11453 init_entity_runnable_average(se);
11454 }
11455
11456 return 1;
11457
11458 err_free_rq:
11459 kfree(cfs_rq);
11460 err:
11461 return 0;
11462 }
11463
11464 void online_fair_sched_group(struct task_group *tg)
11465 {
11466 struct sched_entity *se;
11467 struct rq_flags rf;
11468 struct rq *rq;
11469 int i;
11470
11471 for_each_possible_cpu(i) {
11472 rq = cpu_rq(i);
11473 se = tg->se[i];
11474 rq_lock_irq(rq, &rf);
11475 update_rq_clock(rq);
11476 attach_entity_cfs_rq(se);
11477 sync_throttle(tg, i);
11478 rq_unlock_irq(rq, &rf);
11479 }
11480 }
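/*
 * Rough lifecycle sketch for the group-scheduling hooks above and below
 * (call sites live in kernel/sched/core.c; listed here for orientation,
 * not as a contract): alloc_fair_sched_group() builds the per-CPU
 * cfs_rq/se pairs, online_fair_sched_group() attaches them to the PELT
 * hierarchy, unregister_fair_sched_group() unlinks the leaf cfs_rqs on
 * teardown, and free_fair_sched_group() finally releases the memory.
 */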
11481 11482 void unregister_fair_sched_group(struct task_group *tg) 11483 { 11484 unsigned long flags; 11485 struct rq *rq; 11486 int cpu; 11487 11488 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); 11489 11490 for_each_possible_cpu(cpu) { 11491 if (tg->se[cpu]) 11492 remove_entity_load_avg(tg->se[cpu]); 11493 11494 /* 11495 * Only empty task groups can be destroyed; so we can speculatively 11496 * check on_list without danger of it being re-added. 11497 */ 11498 if (!tg->cfs_rq[cpu]->on_list) 11499 continue; 11500 11501 rq = cpu_rq(cpu); 11502 11503 raw_spin_rq_lock_irqsave(rq, flags); 11504 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); 11505 raw_spin_rq_unlock_irqrestore(rq, flags); 11506 } 11507 } 11508 11509 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 11510 struct sched_entity *se, int cpu, 11511 struct sched_entity *parent) 11512 { 11513 struct rq *rq = cpu_rq(cpu); 11514 11515 cfs_rq->tg = tg; 11516 cfs_rq->rq = rq; 11517 init_cfs_rq_runtime(cfs_rq); 11518 11519 tg->cfs_rq[cpu] = cfs_rq; 11520 tg->se[cpu] = se; 11521 11522 /* se could be NULL for root_task_group */ 11523 if (!se) 11524 return; 11525 11526 if (!parent) { 11527 se->cfs_rq = &rq->cfs; 11528 se->depth = 0; 11529 } else { 11530 se->cfs_rq = parent->my_q; 11531 se->depth = parent->depth + 1; 11532 } 11533 11534 se->my_q = cfs_rq; 11535 /* guarantee group entities always have weight */ 11536 update_load_set(&se->load, NICE_0_LOAD); 11537 se->parent = parent; 11538 } 11539 11540 static DEFINE_MUTEX(shares_mutex); 11541 11542 static int __sched_group_set_shares(struct task_group *tg, unsigned long shares) 11543 { 11544 int i; 11545 11546 lockdep_assert_held(&shares_mutex); 11547 11548 /* 11549 * We can't change the weight of the root cgroup. 11550 */ 11551 if (!tg->se[0]) 11552 return -EINVAL; 11553 11554 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); 11555 11556 if (tg->shares == shares) 11557 return 0; 11558 11559 tg->shares = shares; 11560 for_each_possible_cpu(i) { 11561 struct rq *rq = cpu_rq(i); 11562 struct sched_entity *se = tg->se[i]; 11563 struct rq_flags rf; 11564 11565 /* Propagate contribution to hierarchy */ 11566 rq_lock_irqsave(rq, &rf); 11567 update_rq_clock(rq); 11568 for_each_sched_entity(se) { 11569 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); 11570 update_cfs_group(se); 11571 } 11572 rq_unlock_irqrestore(rq, &rf); 11573 } 11574 11575 return 0; 11576 } 11577 11578 int sched_group_set_shares(struct task_group *tg, unsigned long shares) 11579 { 11580 int ret; 11581 11582 mutex_lock(&shares_mutex); 11583 if (tg_is_idle(tg)) 11584 ret = -EINVAL; 11585 else 11586 ret = __sched_group_set_shares(tg, shares); 11587 mutex_unlock(&shares_mutex); 11588 11589 return ret; 11590 } 11591 11592 int sched_group_set_idle(struct task_group *tg, long idle) 11593 { 11594 int i; 11595 11596 if (tg == &root_task_group) 11597 return -EINVAL; 11598 11599 if (idle < 0 || idle > 1) 11600 return -EINVAL; 11601 11602 mutex_lock(&shares_mutex); 11603 11604 if (tg->idle == idle) { 11605 mutex_unlock(&shares_mutex); 11606 return 0; 11607 } 11608 11609 tg->idle = idle; 11610 11611 for_each_possible_cpu(i) { 11612 struct rq *rq = cpu_rq(i); 11613 struct sched_entity *se = tg->se[i]; 11614 struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i]; 11615 bool was_idle = cfs_rq_is_idle(grp_cfs_rq); 11616 long idle_task_delta; 11617 struct rq_flags rf; 11618 11619 rq_lock_irqsave(rq, &rf); 11620 11621 grp_cfs_rq->idle = idle; 11622 if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq))) 11623 goto 

#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void online_fair_sched_group(struct task_group *tg) { }

void unregister_fair_sched_group(struct task_group *tg) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */

static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));

	return rr_interval;
}

/*
 * All the scheduling class methods:
 */
DEFINE_SCHED_CLASS(fair) = {

	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= __pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,
	.set_next_task		= set_next_task_fair,

#ifdef CONFIG_SMP
	.balance		= balance_fair,
	.pick_task		= pick_task_fair,
	.select_task_rq		= select_task_rq_fair,
	.migrate_task_rq	= migrate_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_dead		= task_dead_fair,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

	.update_curr		= update_curr_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_change_group	= task_change_group_fair,
#endif

#ifdef CONFIG_UCLAMP_TASK
	.uclamp_enabled		= 1,
#endif
};
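
/*
 * How the core scheduler consumes this table (hedged sketch; the exact
 * call sites live in kernel/sched/core.c): the methods are not called
 * by name, but indirectly through a task's class pointer, e.g.
 *
 *	p->sched_class->enqueue_task(rq, p, flags);
 *
 * so every SCHED_NORMAL/SCHED_BATCH task is routed to the fair
 * implementations registered above.
 */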

#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq, *pos;

	rcu_read_lock();
	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}

#ifdef CONFIG_NUMA_BALANCING
void show_numa_stats(struct task_struct *p, struct seq_file *m)
{
	int node;
	unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
	struct numa_group *ng;

	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	for_each_online_node(node) {
		if (p->numa_faults) {
			tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
			tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
		}
		if (ng) {
			gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
			gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
		}
		print_numa_stats(m, node, tsf, tpf, gsf, gpf);
	}
	rcu_read_unlock();
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
	nohz.next_balance = jiffies;
	nohz.next_blocked = jiffies;
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
#endif
#endif /* SMP */
}
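
/*
 * Note on the SMP setup above: run_rebalance_domains() is registered as
 * the SCHED_SOFTIRQ handler, so periodic and NOHZ-idle load balancing
 * run in softirq context; the nohz fields seed the first balance times,
 * and idle_cpus_mask tracks which CPUs have gone NOHZ-idle.  This is a
 * summary of this file's balancing paths, not additional behaviour.
 */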

/*
 * Helper functions to facilitate extracting info from tracepoints.
 */

const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq)
{
#ifdef CONFIG_SMP
	return cfs_rq ? &cfs_rq->avg : NULL;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg);

char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)
{
	if (!cfs_rq) {
		if (str)
			strlcpy(str, "(null)", len);
		else
			return NULL;
	}

	cfs_rq_tg_path(cfs_rq, str, len);
	return str;
}
EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path);

int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq)
{
	return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq ? &rq->avg_rt : NULL;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt);

const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq ? &rq->avg_dl : NULL;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl);

const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
{
#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ)
	return rq ? &rq->avg_irq : NULL;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq);

int sched_trace_rq_cpu(struct rq *rq)
{
	return rq ? cpu_of(rq) : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_rq_cpu);

int sched_trace_rq_cpu_capacity(struct rq *rq)
{
	return rq ?
#ifdef CONFIG_SMP
		rq->cpu_capacity
#else
		SCHED_CAPACITY_SCALE
#endif
		: -1;
}
EXPORT_SYMBOL_GPL(sched_trace_rq_cpu_capacity);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
{
#ifdef CONFIG_SMP
	return rd ? rd->span : NULL;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rd_span);

int sched_trace_rq_nr_running(struct rq *rq)
{
	return rq ? rq->nr_running : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);
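
/*
 * Hedged usage sketch for the helpers above: an out-of-tree module could
 * attach a probe to one of the bare scheduler tracepoints and use these
 * exports to decode the opaque pointers.  The tracepoint name pelt_cfs_tp
 * and the probe signature below are assumptions based on
 * include/trace/events/sched.h, not something this file defines:
 *
 *	static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		char path[64];
 *		const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);
 *
 *		if (avg)
 *			pr_info("cpu%d %s: util_avg=%lu\n",
 *				sched_trace_cfs_rq_cpu(cfs_rq),
 *				sched_trace_cfs_rq_path(cfs_rq, path, sizeof(path)),
 *				avg->util_avg);
 *	}
 *	...
 *	register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
 */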