// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
#include <linux/hugetlb_inline.h>
#include <linux/jiffies.h>
#include <linux/mm_api.h>
#include <linux/highmem.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/isolation.h>

#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/mutex_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/ratelimit.h>

#include <asm/switch_to.h>

#include "sched.h"
#include "stats.h"
#include "autogroup.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency			= 6000000ULL;
static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity			= 750000ULL;
static unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;

/*
 * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
 * Applies only when SCHED_IDLE tasks compete with normal tasks.
 *
 * (default: 0.75 msec)
 */
unsigned int sysctl_sched_idle_min_granularity			= 750000ULL;

/*
 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;
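/*
 * For illustration: on a machine with 8 online CPUs and the default
 * SCHED_TUNABLESCALING_LOG, the scaling factor is 1 + ilog2(8) = 4, so the
 * effective sysctl_sched_latency is 24ms and sysctl_sched_min_granularity
 * is 3ms, which keeps sched_nr_latency at 24/3 = 8.
 */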
/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 *
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_wakeup_granularity			= 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity	= 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;

int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
	int _shift = 0;

	if (kstrtoint(str, 0, &_shift))
		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");

	sched_thermal_decay_shift = clamp(_shift, 0, 10);
	return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);

#ifdef CONFIG_SMP
/*
 * For asym packing, by default the lower numbered CPU has higher priority.
 */
int __weak arch_asym_cpu_priority(int cpu)
{
	return -cpu;
}

/*
 * The margin used when comparing utilization with CPU capacity.
 *
 * (default: ~20%)
 */
#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

/*
 * The margin used when comparing CPU capacities:
 * is 'cap1' noticeably greater than 'cap2'?
 *
 * (default: ~5%)
 */
#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
#endif

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}
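/*
 * For illustration: the helpers above clear ->inv_weight so that
 * __update_inv_weight() (below) lazily recomputes it on the next
 * __calc_delta(); e.g. after update_load_set(&lw, w) with a non-zero w that
 * fits 32 bits, the cached value becomes WMULT_CONST / scale_load_down(w).
 */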
/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void __init sched_init_granularity(void)
{
	update_sysctl();
}

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
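 *
 * Worked example for illustration: with delta_exec = 1000000,
 * weight = NICE_0_LOAD and a queue weight whose scale_load_down() value is
 * 2048, inv_weight = WMULT_CONST / 2048 and the result is roughly
 * 1000000 * 1024 / 2048 = 500000, i.e. the ratio weight/lw.weight scales
 * delta_exec by one half.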
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	u32 fact_hi = (u32)(fact >> 32);
	int shift = WMULT_SHIFT;
	int fs;

	__update_inv_weight(lw);

	if (unlikely(fact_hi)) {
		fs = fls(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	fact = mul_u32_u32(fact, lw->inv_weight);

	fact_hi = (u32)(fact >> 32);
	if (fact_hi) {
		fs = fls(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
	if (!path)
		return;

	if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
		autogroup_path(cfs_rq->tg, path, len);
	else if (cfs_rq && cfs_rq->tg->css.cgroup)
		cgroup_path(cfs_rq->tg->css.cgroup, path, len);
	else
		strlcpy(path, "(null)", len);
}

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	int cpu = cpu_of(rq);

	if (cfs_rq->on_list)
		return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;

	cfs_rq->on_list = 1;

	/*
	 * Ensure we either appear before our parent (if already
	 * enqueued) or force our parent to appear after us when it is
	 * enqueued. The fact that we always enqueue bottom-up
	 * reduces this to two cases and a special case for the root
	 * cfs_rq. Furthermore, it also means that we will always reset
	 * tmp_alone_branch either when the branch is connected
	 * to a tree or when we reach the top of the tree.
	 */
	if (cfs_rq->tg->parent &&
	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
		/*
		 * If our parent is already on the list, we add the child
		 * just before. Thanks to the circularly linked property of
		 * the list, this means to put the child at the tail
		 * of the list that starts by parent.
		 */
		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
		/*
		 * The branch is now connected to its tree so we can
		 * reset tmp_alone_branch to the beginning of the
		 * list.
		 */
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		return true;
	}

	if (!cfs_rq->tg->parent) {
		/*
		 * A cfs_rq without a parent should be put
		 * at the tail of the list.
		 */
		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
			&rq->leaf_cfs_rq_list);
		/*
		 * We have reached the top of a tree so we can reset
		 * tmp_alone_branch to the beginning of the list.
		 */
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		return true;
	}

	/*
	 * The parent has not already been added so we want to
	 * make sure that it will be put after us.
	 * tmp_alone_branch points to the beginning of the branch
	 * where we will add the parent.
	 */
	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
	/*
	 * Update tmp_alone_branch to point to the new beginning
	 * of the branch.
	 */
	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
	return false;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		struct rq *rq = rq_of(cfs_rq);

		/*
		 * With cfs_rq being unthrottled/throttled during an enqueue,
		 * it can happen that tmp_alone_branch points to the leaf that
		 * we finally want to delete. In this case, tmp_alone_branch
		 * moves to the prev element but it will point to
		 * rq->leaf_cfs_rq_list at the end of the enqueue.
		 */
		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;

		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
				 leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return se->cfs_rq;

	return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * A preemption test can be made between sibling entities that are in
	 * the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find their ancestors that are
	 * siblings of a common parent.
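	 *
	 * For illustration: if *se sits at depth 3 and *pse at depth 1, *se
	 * is first walked up twice, then both are walked up in lockstep
	 * until they share a cfs_rq.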
	 */

	/* First walk up until both entities are at same depth */
	se_depth = (*se)->depth;
	pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

static int tg_is_idle(struct task_group *tg)
{
	return tg->idle > 0;
}

static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
{
	return cfs_rq->idle > 0;
}

static int se_is_idle(struct sched_entity *se)
{
	if (entity_is_task(se))
		return task_has_idle_policy(task_of(se));
	return cfs_rq_is_idle(group_cfs_rq(se));
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
	if (path)
		strlcpy(path, "(null)", len);
}

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	return true;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
}

#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

static inline int tg_is_idle(struct task_group *tg)
{
	return 0;
}

static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
{
	return 0;
}

static int se_is_idle(struct sched_entity *se)
{
	return 0;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline bool entity_before(struct sched_entity *a,
				 struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

#define __node_2_se(node) \
	rb_entry((node), struct sched_entity, run_node)

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);

	u64 vruntime = cfs_rq->min_vruntime;

	if (curr) {
		if (curr->on_rq)
			vruntime = curr->vruntime;
		else
			curr = NULL;
	}

	if (leftmost) { /* non-empty tree */
		struct sched_entity *se = __node_2_se(leftmost);

		if (!curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards.
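	 *
	 * Note for illustration: max_vruntime()/min_vruntime() above compare
	 * through a signed 64-bit difference, so this stays correct even
	 * when the u64 vruntime counters wrap around.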
	 */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
{
	return entity_before(__node_2_se(a), __node_2_se(b));
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);

	if (!left)
		return NULL;

	return __node_2_se(left);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return __node_2_se(next);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);

	if (!last)
		return NULL;

	return __node_2_se(last);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_update_scaling(void)
{
	unsigned int factor = get_update_sysctl_factor();

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}

static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);
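/*
 * For illustration: with the scaled defaults above (latency 24ms, min
 * granularity 3ms, sched_nr_latency 8), __sched_period() returns 24ms for
 * up to 8 runnable tasks and 12 * 3ms = 36ms for 12 tasks, so a slice
 * never shrinks below the minimum granularity.
 */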
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned int nr_running = cfs_rq->nr_running;
	struct sched_entity *init_se = se;
	unsigned int min_gran;
	u64 slice;

	if (sched_feat(ALT_PERIOD))
		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;

	slice = __sched_period(nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;
		struct cfs_rq *qcfs_rq;

		qcfs_rq = cfs_rq_of(se);
		load = &qcfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = qcfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = __calc_delta(slice, se->load.weight, load);
	}

	if (sched_feat(BASE_SLICE)) {
		if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq))
			min_gran = sysctl_sched_idle_min_granularity;
		else
			min_gran = sysctl_sched_min_granularity;

		slice = max_t(u64, slice, min_gran);
	}

	return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#include "pelt.h"
#ifdef CONFIG_SMP

static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
static unsigned long capacity_of(int cpu);

/* Give a new sched_entity initial runnable values so it starts out looking heavily loaded */
void init_entity_runnable_average(struct sched_entity *se)
{
	struct sched_avg *sa = &se->avg;

	memset(sa, 0, sizeof(*sa));

	/*
	 * Tasks are initialized with full load to be seen as heavy tasks until
	 * they get a chance to stabilize to their real load level.
	 * Group entities are initialized with zero load to reflect the fact
	 * that nothing has been attached to the task group yet.
	 */
	if (entity_is_task(se))
		sa->load_avg = scale_load_down(se->load.weight);

	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

static void attach_entity_cfs_rq(struct sched_entity *se);

/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the left utilization budget:
 *
 *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task and cpu_scale the CPU capacity.
 *
 * For example, for a CPU with 1024 of capacity, a simplest series from
 * the beginning would be like:
 *
 *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
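 *
 * Concrete instance for illustration: with cpu_scale = 1024 and a cfs_rq
 * whose util_avg is already 400, the next new task is capped at
 * (1024 - 400) / 2 = 312.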
 */
void post_init_entity_util_avg(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct sched_avg *sa = &se->avg;
	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;

	if (cap > 0) {
		if (cfs_rq->avg.util_avg != 0) {
			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
			sa->util_avg /= (cfs_rq->avg.load_avg + 1);

			if (sa->util_avg > cap)
				sa->util_avg = cap;
		} else {
			sa->util_avg = cap;
		}
	}

	sa->runnable_avg = sa->util_avg;

	if (p->sched_class != &fair_sched_class) {
		/*
		 * For !fair tasks do:
		 *
		update_cfs_rq_load_avg(now, cfs_rq);
		attach_entity_load_avg(cfs_rq, se);
		switched_from_fair(rq, p);
		 *
		 * such that the next switched_to_fair() has the
		 * expected state.
		 */
		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
		return;
	}

	attach_entity_cfs_rq(se);
}

#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct task_struct *p)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	if (schedstat_enabled()) {
		struct sched_statistics *stats;

		stats = __schedstats_from_se(curr);
		__schedstat_set(stats->exec_max,
				max(delta_exec, stats->exec_max));
	}

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq->exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cgroup_account_cputime(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
	update_curr(cfs_rq_of(&rq->curr->se));
}

static inline void
update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_se(se);

	if (entity_is_task(se))
		p = task_of(se);

	__update_stats_wait_start(rq_of(cfs_rq), p, stats);
}

static inline void
update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_se(se);

	/*
	 * When sched_schedstat changes from 0 to 1, some sched entities
	 * may already be on the runqueue; their ->wait_start will then be
	 * 0, which would make the computed delta wrong. We need to avoid
	 * this scenario.
	 */
	if (unlikely(!schedstat_val(stats->wait_start)))
		return;

	if (entity_is_task(se))
		p = task_of(se);

	__update_stats_wait_end(rq_of(cfs_rq), p, stats);
}

static inline void
update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct sched_statistics *stats;
	struct task_struct *tsk = NULL;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_se(se);

	if (entity_is_task(se))
		tsk = task_of(se);

	__update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats);
}

/*
 * Task is being enqueued - update stats:
 */
static inline void
update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start_fair(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper_fair(cfs_rq, se);
}

static inline void
update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end_fair(cfs_rq, se);

	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
		struct task_struct *tsk = task_of(se);
		unsigned int state;

		/* XXX racy against TTWU */
		state = READ_ONCE(tsk->__state);
		if (state & TASK_INTERRUPTIBLE)
			__schedstat_set(tsk->stats.sleep_start,
					rq_clock(rq_of(cfs_rq)));
		if (state & TASK_UNINTERRUPTIBLE)
			__schedstat_set(tsk->stats.block_start,
					rq_clock(rq_of(cfs_rq)));
	}
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

struct numa_group {
	refcount_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;
	int active_nodes;

	struct rcu_head rcu;
	unsigned long total_faults;
	unsigned long max_faults_cpu;
	/*
	 * faults[] array is split into two regions: faults_mem and faults_cpu.
	 *
	 * faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
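	 *
	 * For illustration: both regions are addressed through
	 * task_faults_idx() below, with NUMA_MEM selecting the faults_mem
	 * region and NUMA_CPU the faults_cpu region.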
	 */
	unsigned long faults[];
};

/*
 * For functions that can be called in multiple contexts that permit reading
 * ->numa_group (see struct task_struct for locking rules).
 */
static struct numa_group *deref_task_numa_group(struct task_struct *p)
{
	return rcu_dereference_check(p->numa_group, p == current ||
		(lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
}

static struct numa_group *deref_curr_numa_group(struct task_struct *p)
{
	return rcu_dereference_protected(p->numa_group, p == current);
}

static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}

static unsigned int task_scan_start(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long period = smin;
	struct numa_group *ng;

	/* Scale the maximum scan period with the amount of shared memory. */
	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	if (ng) {
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);

		period *= refcount_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;
	}
	rcu_read_unlock();

	return max(smin, period);
}
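/*
 * For illustration: a task with a 4GB RSS and the default 256MB scan size
 * gives task_nr_scan_windows() = 16; with windows = 2560/256 = 10 the
 * floor is 1000/10 = 100ms and scan = 1000/16 = 62ms, so task_scan_min()
 * returns max(100, 62) = 100ms.
 */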
static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long smax;
	struct numa_group *ng;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);

	/* Scale the maximum scan period with the amount of shared memory. */
	ng = deref_curr_numa_group(p);
	if (ng) {
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);
		unsigned long period = smax;

		period *= refcount_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;

		smax = max(smax, period);
	}

	return max(smin, smax);
}

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
	struct numa_group *ng;
	pid_t gid = 0;

	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	if (ng)
		gid = ng->gid;
	rcu_read_unlock();

	return gid;
}

/*
 * The averaged statistics, shared & private, memory & CPU,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	struct numa_group *ng = deref_task_numa_group(p);

	if (!ng)
		return 0;

	return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
	return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] +
		group->faults[task_faults_idx(NUMA_CPU, nid, 1)];
}

static inline unsigned long group_faults_priv(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
	}

	return faults;
}

static inline unsigned long group_faults_shared(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
	}

	return faults;
}

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
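 *
 * For illustration: if the busiest node recorded max_faults_cpu = 900, any
 * node with more than 900/3 = 300 CPU faults counts as active.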
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
					int lim_dist, bool task)
{
	unsigned long score = 0;
	int node, max_dist;

	/*
	 * All nodes are directly connected, and the same distance
	 * from each other. No need for fancy placement algorithms.
	 */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return 0;

	/* sched_max_numa_distance may be changed in parallel. */
	max_dist = READ_ONCE(sched_max_numa_distance);
	/*
	 * This code is called for each node, introducing N^2 complexity,
	 * which should be OK given the number of nodes rarely exceeds 8.
	 */
	for_each_online_node(node) {
		unsigned long faults;
		int dist = node_distance(nid, node);

		/*
		 * The furthest away nodes in the system are not interesting
		 * for placement; nid was already counted.
		 */
		if (dist >= max_dist || node == nid)
			continue;

		/*
		 * On systems with a backplane NUMA topology, compare groups
		 * of nodes, and move tasks towards the group with the most
		 * memory accesses. When comparing two nodes at distance
		 * "hoplimit", only nodes closer by than "hoplimit" are part
		 * of each group. Skip other nodes.
		 */
		if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= lim_dist)
			continue;

		/* Add up the faults from nearby nodes. */
		if (task)
			faults = task_faults(p, node);
		else
			faults = group_faults(p, node);

		/*
		 * On systems with a glueless mesh NUMA topology, there are
		 * no fixed "groups of nodes". Instead, nodes that are not
		 * directly connected bounce traffic through intermediate
		 * nodes; a numa_group can occupy any set of nodes.
		 * The further away a node is, the less the faults count.
		 * This seems to result in good task placement.
		 */
		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
			faults *= (max_dist - dist);
			faults /= (max_dist - LOCAL_DISTANCE);
		}

		score += faults;
	}

	return score;
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
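 *
 * For illustration: on a NUMA_DIRECT system (no nearby-node bonus), a task
 * with 300 of its 1000 recorded faults on @nid gets a task_weight() of
 * 1000 * 300 / 1000 = 300.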
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
					int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	faults = task_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, true);

	return 1000 * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid,
					 int dist)
{
	struct numa_group *ng = deref_task_numa_group(p);
	unsigned long faults, total_faults;

	if (!ng)
		return 0;

	total_faults = ng->total_faults;

	if (!total_faults)
		return 0;

	faults = group_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, false);

	return 1000 * faults / total_faults;
}

bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
				int src_nid, int dst_cpu)
{
	struct numa_group *ng = deref_curr_numa_group(p);
	int dst_nid = cpu_to_node(dst_cpu);
	int last_cpupid, this_cpupid;

	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);

	/*
	 * Allow first faults or private faults to migrate immediately early in
	 * the lifetime of a task. The magic number 4 is based on waiting for
	 * two full passes of the "multi-stage node selection" test that is
	 * executed below.
	 */
	if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
	    (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
		return true;

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(n)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadratic squishes small probabilities, making it less likely
	 * we act on an unlikely task<->page relation.
	 */
	if (!cpupid_pid_unset(last_cpupid) &&
				cpupid_to_nid(last_cpupid) != dst_nid)
		return false;

	/* Always allow migrate on private faults */
	if (cpupid_match_pid(p, last_cpupid))
		return true;

	/* A shared fault, but p->numa_group has not been set up yet. */
	if (!ng)
		return true;

	/*
	 * Destination node is much more heavily used than the source
	 * node? Allow migration.
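	 *
	 * For illustration: with ACTIVE_NODE_FRACTION = 3, 400 CPU faults
	 * on the destination versus 100 on the source (400 > 100 * 3)
	 * allows the migration below.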
	 */
	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
	    ACTIVE_NODE_FRACTION)
		return true;

	/*
	 * Distribute memory according to CPU & memory use on each node,
	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
	 *
	 * faults_cpu(dst)   3   faults_cpu(src)
	 * --------------- * - > ---------------
	 * faults_mem(dst)   4   faults_mem(src)
	 */
	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}

/*
 * 'numa_type' describes the node at the moment of load balancing.
 */
enum numa_type {
	/* The node has spare capacity that can be used to run more tasks. */
	node_has_spare = 0,
	/*
	 * The node is fully used and the tasks don't compete for more CPU
	 * cycles. Nevertheless, some tasks might wait before running.
	 */
	node_fully_busy,
	/*
	 * The node is overloaded and can't provide expected CPU cycles to all
	 * tasks.
	 */
	node_overloaded
};

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long load;
	unsigned long runnable;
	unsigned long util;
	/* Total compute capacity of CPUs on a node */
	unsigned long compute_capacity;
	unsigned int nr_running;
	unsigned int weight;
	enum numa_type node_type;
	int idle_cpu;
};

static inline bool is_core_idle(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	int sibling;

	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
		if (cpu == sibling)
			continue;

		if (!idle_cpu(sibling))
			return false;
	}
#endif

	return true;
}

struct task_numa_env {
	struct task_struct *p;

	int src_cpu, src_nid;
	int dst_cpu, dst_nid;
	int imb_numa_nr;

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct;
	int dist;

	struct task_struct *best_task;
	long best_imp;
	int best_cpu;
};

static unsigned long cpu_load(struct rq *rq);
static unsigned long cpu_runnable(struct rq *rq);
static inline long adjust_numa_imbalance(int imbalance,
					 int dst_running, int imb_numa_nr);

static inline enum
numa_type numa_classify(unsigned int imbalance_pct,
			struct numa_stats *ns)
{
	if ((ns->nr_running > ns->weight) &&
	    (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
	     ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
		return node_overloaded;

	if ((ns->nr_running < ns->weight) ||
	    (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
	     ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
		return node_has_spare;

	return node_fully_busy;
}

#ifdef CONFIG_SCHED_SMT
/* Forward declarations of select_idle_sibling helpers */
static inline bool test_idle_cores(int cpu, bool def);
static inline int numa_idle_core(int idle_core, int cpu)
{
	if (!static_branch_likely(&sched_smt_present) ||
	    idle_core >= 0 || !test_idle_cores(cpu, false))
		return idle_core;

	/*
	 * Prefer cores instead of packing HT siblings
	 * and triggering future load balancing.
	 */
	if (is_core_idle(cpu))
		idle_core = cpu;

	return idle_core;
}
#else
static inline int numa_idle_core(int idle_core, int cpu)
{
	return idle_core;
}
#endif

/*
 * Gather all necessary information to make NUMA balancing placement
 * decisions that are compatible with standard load balancer. This
 * borrows code and logic from update_sg_lb_stats but sharing a
 * common implementation is impractical.
 */
static void update_numa_stats(struct task_numa_env *env,
			      struct numa_stats *ns, int nid,
			      bool find_idle)
{
	int cpu, idle_core = -1;

	memset(ns, 0, sizeof(*ns));
	ns->idle_cpu = -1;

	rcu_read_lock();
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->load += cpu_load(rq);
		ns->runnable += cpu_runnable(rq);
		ns->util += cpu_util_cfs(cpu);
		ns->nr_running += rq->cfs.h_nr_running;
		ns->compute_capacity += capacity_of(cpu);

		if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
			if (READ_ONCE(rq->numa_migrate_on) ||
			    !cpumask_test_cpu(cpu, env->p->cpus_ptr))
				continue;

			if (ns->idle_cpu == -1)
				ns->idle_cpu = cpu;

			idle_core = numa_idle_core(idle_core, cpu);
		}
	}
	rcu_read_unlock();

	ns->weight = cpumask_weight(cpumask_of_node(nid));

	ns->node_type = numa_classify(env->imbalance_pct, ns);

	if (idle_core >= 0)
		ns->idle_cpu = idle_core;
}

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	struct rq *rq = cpu_rq(env->dst_cpu);

	/* Check if run-queue part of active NUMA balance. */
	if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
		int cpu;
		int start = env->dst_cpu;

		/* Find alternative idle CPU. */
		for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) {
			if (cpu == env->best_cpu || !idle_cpu(cpu) ||
			    !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
				continue;
			}

			env->dst_cpu = cpu;
			rq = cpu_rq(env->dst_cpu);
			if (!xchg(&rq->numa_migrate_on, 1))
				goto assign;
		}

		/* Failed to find an alternative idle CPU */
		return;
	}

assign:
	/*
	 * Clear previous best_cpu/rq numa-migrate flag, since task now
	 * found a better CPU to move/swap.
	 */
	if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
		rq = cpu_rq(env->best_cpu);
		WRITE_ONCE(rq->numa_migrate_on, 0);
	}

	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}
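/*
 * For illustration: load_too_imbalanced() below compares
 * src_load/src_capacity against dst_load/dst_capacity by cross-multiplying
 * instead of dividing; e.g. src_load = 1000 on capacity 2048 vs
 * dst_load = 600 on capacity 1024 yields
 * imb = |600 * 2048 - 1000 * 1024| = 204800, which is then compared with
 * the same metric computed for the pre-move loads.
 */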
static bool load_too_imbalanced(long src_load, long dst_load,
				struct task_numa_env *env)
{
	long imb, old_imb;
	long orig_src_load, orig_dst_load;
	long src_capacity, dst_capacity;

	/*
	 * The load is corrected for the CPU capacity available on each node.
	 *
	 * src_load        dst_load
	 * ------------ vs ---------
	 * src_capacity    dst_capacity
	 */
	src_capacity = env->src_stats.compute_capacity;
	dst_capacity = env->dst_stats.compute_capacity;

	imb = abs(dst_load * src_capacity - src_load * dst_capacity);

	orig_src_load = env->src_stats.load;
	orig_dst_load = env->dst_stats.load;

	old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);

	/* Would this change make things worse? */
	return (imb > old_imb);
}

/*
 * Maximum NUMA importance can be 1998 (2*999);
 * SMALLIMP @ 30 would be close to 1998/64.
 * Used to deter task migration.
 */
#define SMALLIMP	30

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task was migrated to the target dst_cpu, taking
 * into account that it might be best if the task running on the dst_cpu
 * were exchanged with the source task.
 */
static bool task_numa_compare(struct task_numa_env *env,
			      long taskimp, long groupimp, bool maymove)
{
	struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
	struct rq *dst_rq = cpu_rq(env->dst_cpu);
	long imp = p_ng ? groupimp : taskimp;
	struct task_struct *cur;
	long src_load, dst_load;
	int dist = env->dist;
	long moveimp = imp;
	long load;
	bool stopsearch = false;

	if (READ_ONCE(dst_rq->numa_migrate_on))
		return false;

	rcu_read_lock();
	cur = rcu_dereference(dst_rq->curr);
	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
		cur = NULL;

	/*
	 * Because we have preemption enabled we can get migrated around and
	 * end up trying to select ourselves (current == env->p) as a swap
	 * candidate.
	 */
	if (cur == env->p) {
		stopsearch = true;
		goto unlock;
	}

	if (!cur) {
		if (maymove && moveimp >= env->best_imp)
			goto assign;
		else
			goto unlock;
	}

	/* Skip this swap candidate if it cannot move to the source CPU. */
	if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
		goto unlock;

	/*
	 * Skip this swap candidate if it is not moving to its preferred
	 * node and the best task is.
	 */
	if (env->best_task &&
	    env->best_task->numa_preferred_nid == env->src_nid &&
	    cur->numa_preferred_nid != env->src_nid) {
		goto unlock;
	}

	/*
	 * "imp" is the fault differential for the source task between the
	 * source and destination node. Calculate the total differential for
	 * the source task and potential destination task. The more negative
	 * the value is, the more remote accesses that would be expected to
	 * be incurred if the tasks were swapped.
	 *
	 * If dst and source tasks are in the same NUMA group, or not
	 * in any group then look only at task weights.
	 */
	cur_ng = rcu_dereference(cur->numa_group);
	if (cur_ng == p_ng) {
		imp = taskimp + task_weight(cur, env->src_nid, dist) -
		      task_weight(cur, env->dst_nid, dist);
		/*
		 * Add some hysteresis to prevent swapping the
		 * tasks within a group over tiny differences.
		 */
		if (cur_ng)
			imp -= imp / 16;
	} else {
		/*
		 * Compare the group weights. If a task is all by itself
		 * (not part of a group), use the task weight instead.
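		 *
		 * For illustration: in the same-group case above, a raw
		 * differential of imp = 160 is trimmed by the hysteresis to
		 * 160 - 160/16 = 150 before it competes with best_imp.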
		 */
		if (cur_ng && p_ng)
			imp += group_weight(cur, env->src_nid, dist) -
			       group_weight(cur, env->dst_nid, dist);
		else
			imp += task_weight(cur, env->src_nid, dist) -
			       task_weight(cur, env->dst_nid, dist);
	}

	/* Discourage picking a task already on its preferred node */
	if (cur->numa_preferred_nid == env->dst_nid)
		imp -= imp / 16;

	/*
	 * Encourage picking a task that moves to its preferred node.
	 * This potentially makes imp larger than its maximum of
	 * 1998 (see SMALLIMP and task_weight for why) but in this
	 * case, it does not matter.
	 */
	if (cur->numa_preferred_nid == env->src_nid)
		imp += imp / 8;

	if (maymove && moveimp > imp && moveimp > env->best_imp) {
		imp = moveimp;
		cur = NULL;
		goto assign;
	}

	/*
	 * Prefer swapping with a task moving to its preferred node over a
	 * task that is not.
	 */
	if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
	    env->best_task->numa_preferred_nid != env->src_nid) {
		goto assign;
	}

	/*
	 * If the NUMA importance is less than SMALLIMP,
	 * task migration might only result in ping pong
	 * of tasks and also hurt performance due to cache
	 * misses.
	 */
	if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
		goto unlock;

	/*
	 * In the overloaded case, try and keep the load balanced.
	 */
	load = task_h_load(env->p) - task_h_load(cur);
	if (!load)
		goto assign;

	dst_load = env->dst_stats.load + load;
	src_load = env->src_stats.load - load;

	if (load_too_imbalanced(src_load, dst_load, env))
		goto unlock;

assign:
	/* Evaluate an idle CPU for a task numa move. */
	if (!cur) {
		int cpu = env->dst_stats.idle_cpu;

		/* Nothing cached so current CPU went idle since the search. */
		if (cpu < 0)
			cpu = env->dst_cpu;

		/*
		 * If the CPU is no longer truly idle and the previous best CPU
		 * is, keep using it.
		 */
		if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
		    idle_cpu(env->best_cpu)) {
			cpu = env->best_cpu;
		}

		env->dst_cpu = cpu;
	}

	task_numa_assign(env, cur, imp);

	/*
	 * If a move to idle is allowed because there is capacity or load
	 * balance improves then stop the search. While a better swap
	 * candidate may exist, a search is not free.
	 */
	if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
		stopsearch = true;

	/*
	 * If a swap candidate must be identified and the current best task
	 * moves to its preferred node then stop the search.
	 */
	if (!maymove && env->best_task &&
	    env->best_task->numa_preferred_nid == env->src_nid) {
		stopsearch = true;
	}
unlock:
	rcu_read_unlock();

	return stopsearch;
}

static void task_numa_find_cpu(struct task_numa_env *env,
			       long taskimp, long groupimp)
{
	bool maymove = false;
	int cpu;

	/*
	 * If dst node has spare capacity, then check if there is an
	 * imbalance that would be overruled by the load balancer.
	 */
	if (env->dst_stats.node_type == node_has_spare) {
		unsigned int imbalance;
		int src_running, dst_running;

		/*
		 * Would movement cause an imbalance?
		 * Note that if src has more running tasks, the imbalance is
		 * ignored as the move improves the imbalance from the
		 * perspective of the CPU load balancer.
		 */
		src_running = env->src_stats.nr_running - 1;
		dst_running = env->dst_stats.nr_running + 1;
		imbalance = max(0, dst_running - src_running);
		imbalance = adjust_numa_imbalance(imbalance, dst_running,
						  env->imb_numa_nr);

		/* Use idle CPU if there is no imbalance */
		if (!imbalance) {
			maymove = true;
			if (env->dst_stats.idle_cpu >= 0) {
				env->dst_cpu = env->dst_stats.idle_cpu;
				task_numa_assign(env, NULL, 0);
				return;
			}
		}
	} else {
		long src_load, dst_load, load;
		/*
		 * If the improvement from just moving env->p is better than
		 * swapping tasks around, check if a move is possible.
		 */
		load = task_h_load(env->p);
		dst_load = env->dst_stats.load + load;
		src_load = env->src_stats.load - load;
		maymove = !load_too_imbalanced(src_load, dst_load, env);
	}

	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
		/* Skip this CPU if the source task cannot migrate */
		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
			continue;

		env->dst_cpu = cpu;
		if (task_numa_compare(env, taskimp, groupimp, maymove))
			break;
	}
}

static int task_numa_migrate(struct task_struct *p)
{
	struct task_numa_env env = {
		.p = p,

		.src_cpu = task_cpu(p),
		.src_nid = task_node(p),

		.imbalance_pct = 112,

		.best_task = NULL,
		.best_imp = 0,
		.best_cpu = -1,
	};
	unsigned long taskweight, groupweight;
	struct sched_domain *sd;
	long taskimp, groupimp;
	struct numa_group *ng;
	struct rq *best_rq;
	int nid, ret, dist;

	/*
	 * Pick the lowest SD_NUMA domain, as that would have the smallest
	 * imbalance and would be the first to start moving tasks about.
	 *
	 * And we want to avoid any moving of tasks about, as that would create
	 * random movement of tasks -- counter the numa conditions we're trying
	 * to satisfy here.
	 */
	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
	if (sd) {
		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
		env.imb_numa_nr = sd->imb_numa_nr;
	}
	rcu_read_unlock();

	/*
	 * Cpusets can break the scheduler domain tree into smaller
	 * balance domains, some of which do not cross NUMA boundaries.
	 * Tasks that are "trapped" in such domains cannot be migrated
	 * elsewhere, so there is no point in (re)trying.
	 */
	if (unlikely(!sd)) {
		sched_setnuma(p, task_node(p));
		return -EINVAL;
	}

	env.dst_nid = p->numa_preferred_nid;
	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
	taskweight = task_weight(p, env.src_nid, dist);
	groupweight = group_weight(p, env.src_nid, dist);
	update_numa_stats(&env, &env.src_stats, env.src_nid, false);
	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
	update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);

	/* Try to find a spot on the preferred nid.
*/ 2012 task_numa_find_cpu(&env, taskimp, groupimp); 2013 2014 /* 2015 * Look at other nodes in these cases: 2016 * - there is no space available on the preferred_nid 2017 * - the task is part of a numa_group that is interleaved across 2018 * multiple NUMA nodes; in order to better consolidate the group, 2019 * we need to check other locations. 2020 */ 2021 ng = deref_curr_numa_group(p); 2022 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { 2023 for_each_node_state(nid, N_CPU) { 2024 if (nid == env.src_nid || nid == p->numa_preferred_nid) 2025 continue; 2026 2027 dist = node_distance(env.src_nid, env.dst_nid); 2028 if (sched_numa_topology_type == NUMA_BACKPLANE && 2029 dist != env.dist) { 2030 taskweight = task_weight(p, env.src_nid, dist); 2031 groupweight = group_weight(p, env.src_nid, dist); 2032 } 2033 2034 /* Only consider nodes where both task and groups benefit */ 2035 taskimp = task_weight(p, nid, dist) - taskweight; 2036 groupimp = group_weight(p, nid, dist) - groupweight; 2037 if (taskimp < 0 && groupimp < 0) 2038 continue; 2039 2040 env.dist = dist; 2041 env.dst_nid = nid; 2042 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); 2043 task_numa_find_cpu(&env, taskimp, groupimp); 2044 } 2045 } 2046 2047 /* 2048 * If the task is part of a workload that spans multiple NUMA nodes, 2049 * and is migrating into one of the workload's active nodes, remember 2050 * this node as the task's preferred numa node, so the workload can 2051 * settle down. 2052 * A task that migrated to a second choice node will be better off 2053 * trying for a better one later. Do not set the preferred node here. 2054 */ 2055 if (ng) { 2056 if (env.best_cpu == -1) 2057 nid = env.src_nid; 2058 else 2059 nid = cpu_to_node(env.best_cpu); 2060 2061 if (nid != p->numa_preferred_nid) 2062 sched_setnuma(p, nid); 2063 } 2064 2065 /* No better CPU than the current one was found. */ 2066 if (env.best_cpu == -1) { 2067 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); 2068 return -EAGAIN; 2069 } 2070 2071 best_rq = cpu_rq(env.best_cpu); 2072 if (env.best_task == NULL) { 2073 ret = migrate_task_to(p, env.best_cpu); 2074 WRITE_ONCE(best_rq->numa_migrate_on, 0); 2075 if (ret != 0) 2076 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); 2077 return ret; 2078 } 2079 2080 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); 2081 WRITE_ONCE(best_rq->numa_migrate_on, 0); 2082 2083 if (ret != 0) 2084 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); 2085 put_task_struct(env.best_task); 2086 return ret; 2087 } 2088 2089 /* Attempt to migrate a task to a CPU on the preferred node. */ 2090 static void numa_migrate_preferred(struct task_struct *p) 2091 { 2092 unsigned long interval = HZ; 2093 2094 /* This task has no NUMA fault statistics yet */ 2095 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) 2096 return; 2097 2098 /* Periodically retry migrating the task to the preferred node */ 2099 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); 2100 p->numa_migrate_retry = jiffies + interval; 2101 2102 /* Success if task is already running on preferred CPU */ 2103 if (task_node(p) == p->numa_preferred_nid) 2104 return; 2105 2106 /* Otherwise, try migrate to a CPU on the preferred node */ 2107 task_numa_migrate(p); 2108 } 2109 2110 /* 2111 * Find out how many nodes the workload is actively running on. Do this by 2112 * tracking the nodes from which NUMA hinting faults are triggered. 
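* (For example, with ACTIVE_NODE_FRACTION assumed to be 3, as defined earlier in this file, a node counts as active once it sees more than a third of the hinting faults observed on the busiest node.)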
This can 2113 * be different from the set of nodes where the workload's memory is currently 2114 * located. 2115 */ 2116 static void numa_group_count_active_nodes(struct numa_group *numa_group) 2117 { 2118 unsigned long faults, max_faults = 0; 2119 int nid, active_nodes = 0; 2120 2121 for_each_node_state(nid, N_CPU) { 2122 faults = group_faults_cpu(numa_group, nid); 2123 if (faults > max_faults) 2124 max_faults = faults; 2125 } 2126 2127 for_each_node_state(nid, N_CPU) { 2128 faults = group_faults_cpu(numa_group, nid); 2129 if (faults * ACTIVE_NODE_FRACTION > max_faults) 2130 active_nodes++; 2131 } 2132 2133 numa_group->max_faults_cpu = max_faults; 2134 numa_group->active_nodes = active_nodes; 2135 } 2136 2137 /* 2138 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS 2139 * increments. The more local the fault statistics are, the higher the scan 2140 * period will be for the next scan window. If local/(local+remote) ratio is 2141 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) 2142 * the scan period will decrease. Aim for 70% local accesses. 2143 */ 2144 #define NUMA_PERIOD_SLOTS 10 2145 #define NUMA_PERIOD_THRESHOLD 7 2146 2147 /* 2148 * Increase the scan period (slow down scanning) if the majority of 2149 * our memory is already on our local node, or if the majority of 2150 * the page accesses are shared with other processes. 2151 * Otherwise, decrease the scan period. 2152 */ 2153 static void update_task_scan_period(struct task_struct *p, 2154 unsigned long shared, unsigned long private) 2155 { 2156 unsigned int period_slot; 2157 int lr_ratio, ps_ratio; 2158 int diff; 2159 2160 unsigned long remote = p->numa_faults_locality[0]; 2161 unsigned long local = p->numa_faults_locality[1]; 2162 2163 /* 2164 * If there were no recorded hinting faults then either the task is 2165 * completely idle or all activity is in areas that are not of interest 2166 * to automatic numa balancing. Related to that, if there were failed 2167 * migrations then it implies we are migrating too quickly or the local 2168 * node is overloaded. In either case, scan slower. 2169 */ 2170 if (local + shared == 0 || p->numa_faults_locality[2]) { 2171 p->numa_scan_period = min(p->numa_scan_period_max, 2172 p->numa_scan_period << 1); 2173 2174 p->mm->numa_next_scan = jiffies + 2175 msecs_to_jiffies(p->numa_scan_period); 2176 2177 return; 2178 } 2179 2180 /* 2181 * Prepare to scale scan period relative to the current period. 2182 * == NUMA_PERIOD_THRESHOLD scan period stays the same 2183 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster) 2184 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower) 2185 */ 2186 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); 2187 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); 2188 ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared); 2189 2190 if (ps_ratio >= NUMA_PERIOD_THRESHOLD) { 2191 /* 2192 * Most memory accesses are local. There is no need to 2193 * do fast NUMA scanning, since memory is already local. 2194 */ 2195 int slot = ps_ratio - NUMA_PERIOD_THRESHOLD; 2196 if (!slot) 2197 slot = 1; 2198 diff = slot * period_slot; 2199 } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) { 2200 /* 2201 * Most memory accesses are shared with other tasks. 2202 * There is no point in continuing fast NUMA scanning, 2203 * since other tasks may just move the memory elsewhere.
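* E.g. with NUMA_PERIOD_SLOTS == 10 and NUMA_PERIOD_THRESHOLD == 7, an lr_ratio of 9 gives slot == 2 below, growing the scan period by two tenths of its current value.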
2204 */ 2205 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD; 2206 if (!slot) 2207 slot = 1; 2208 diff = slot * period_slot; 2209 } else { 2210 /* 2211 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS, 2212 * yet they are not on the local NUMA node. Speed up 2213 * NUMA scanning to get the memory moved over. 2214 */ 2215 int ratio = max(lr_ratio, ps_ratio); 2216 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; 2217 } 2218 2219 p->numa_scan_period = clamp(p->numa_scan_period + diff, 2220 task_scan_min(p), task_scan_max(p)); 2221 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2222 } 2223 2224 /* 2225 * Get the fraction of time the task has been running since the last 2226 * NUMA placement cycle. The scheduler keeps similar statistics, but 2227 * decays those on a 32ms period, which is orders of magnitude off 2228 * from the dozens-of-seconds NUMA balancing period. Use the scheduler 2229 * stats only if the task is so new there are no NUMA statistics yet. 2230 */ 2231 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) 2232 { 2233 u64 runtime, delta, now; 2234 /* Use the start of this time slice to avoid calculations. */ 2235 now = p->se.exec_start; 2236 runtime = p->se.sum_exec_runtime; 2237 2238 if (p->last_task_numa_placement) { 2239 delta = runtime - p->last_sum_exec_runtime; 2240 *period = now - p->last_task_numa_placement; 2241 2242 /* Avoid time going backwards, prevent potential divide error: */ 2243 if (unlikely((s64)*period < 0)) 2244 *period = 0; 2245 } else { 2246 delta = p->se.avg.load_sum; 2247 *period = LOAD_AVG_MAX; 2248 } 2249 2250 p->last_sum_exec_runtime = runtime; 2251 p->last_task_numa_placement = now; 2252 2253 return delta; 2254 } 2255 2256 /* 2257 * Determine the preferred nid for a task in a numa_group. This needs to 2258 * be done in a way that produces consistent results with group_weight, 2259 * otherwise workloads might not converge. 2260 */ 2261 static int preferred_group_nid(struct task_struct *p, int nid) 2262 { 2263 nodemask_t nodes; 2264 int dist; 2265 2266 /* Direct connections between all NUMA nodes. */ 2267 if (sched_numa_topology_type == NUMA_DIRECT) 2268 return nid; 2269 2270 /* 2271 * On a system with glueless mesh NUMA topology, group_weight 2272 * scores nodes according to the number of NUMA hinting faults on 2273 * both the node itself, and on nearby nodes. 2274 */ 2275 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 2276 unsigned long score, max_score = 0; 2277 int node, max_node = nid; 2278 2279 dist = sched_max_numa_distance; 2280 2281 for_each_node_state(node, N_CPU) { 2282 score = group_weight(p, node, dist); 2283 if (score > max_score) { 2284 max_score = score; 2285 max_node = node; 2286 } 2287 } 2288 return max_node; 2289 } 2290 2291 /* 2292 * Finding the preferred nid in a system with NUMA backplane 2293 * interconnect topology is more involved. The goal is to locate 2294 * tasks from numa_groups near each other in the system, and 2295 * untangle workloads from different sides of the system. This requires 2296 * searching down the hierarchy of node groups, recursively searching 2297 * inside the highest scoring group of nodes. The nodemask tricks 2298 * keep the complexity of the search down. 2299 */ 2300 nodes = node_states[N_CPU]; 2301 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { 2302 unsigned long max_faults = 0; 2303 nodemask_t max_group = NODE_MASK_NONE; 2304 int a, b; 2305 2306 /* Are there nodes at this distance from each other? 
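* Distances absent from the distance table are skipped outright; e.g. on a topology advertising only distances 10, 20 and 40, the loop body runs for just those three values.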
*/ 2307 if (!find_numa_distance(dist)) 2308 continue; 2309 2310 for_each_node_mask(a, nodes) { 2311 unsigned long faults = 0; 2312 nodemask_t this_group; 2313 nodes_clear(this_group); 2314 2315 /* Sum group's NUMA faults; includes a==b case. */ 2316 for_each_node_mask(b, nodes) { 2317 if (node_distance(a, b) < dist) { 2318 faults += group_faults(p, b); 2319 node_set(b, this_group); 2320 node_clear(b, nodes); 2321 } 2322 } 2323 2324 /* Remember the top group. */ 2325 if (faults > max_faults) { 2326 max_faults = faults; 2327 max_group = this_group; 2328 /* 2329 * subtle: at the smallest distance there is 2330 * just one node left in each "group", the 2331 * winner is the preferred nid. 2332 */ 2333 nid = a; 2334 } 2335 } 2336 /* Next round, evaluate the nodes within max_group. */ 2337 if (!max_faults) 2338 break; 2339 nodes = max_group; 2340 } 2341 return nid; 2342 } 2343 2344 static void task_numa_placement(struct task_struct *p) 2345 { 2346 int seq, nid, max_nid = NUMA_NO_NODE; 2347 unsigned long max_faults = 0; 2348 unsigned long fault_types[2] = { 0, 0 }; 2349 unsigned long total_faults; 2350 u64 runtime, period; 2351 spinlock_t *group_lock = NULL; 2352 struct numa_group *ng; 2353 2354 /* 2355 * The p->mm->numa_scan_seq field gets updated without 2356 * exclusive access. Use READ_ONCE() here to ensure 2357 * that the field is read in a single access: 2358 */ 2359 seq = READ_ONCE(p->mm->numa_scan_seq); 2360 if (p->numa_scan_seq == seq) 2361 return; 2362 p->numa_scan_seq = seq; 2363 p->numa_scan_period_max = task_scan_max(p); 2364 2365 total_faults = p->numa_faults_locality[0] + 2366 p->numa_faults_locality[1]; 2367 runtime = numa_get_avg_runtime(p, &period); 2368 2369 /* If the task is part of a group prevent parallel updates to group stats */ 2370 ng = deref_curr_numa_group(p); 2371 if (ng) { 2372 group_lock = &ng->lock; 2373 spin_lock_irq(group_lock); 2374 } 2375 2376 /* Find the node with the highest number of faults */ 2377 for_each_online_node(nid) { 2378 /* Keep track of the offsets in numa_faults array */ 2379 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx; 2380 unsigned long faults = 0, group_faults = 0; 2381 int priv; 2382 2383 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { 2384 long diff, f_diff, f_weight; 2385 2386 mem_idx = task_faults_idx(NUMA_MEM, nid, priv); 2387 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv); 2388 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv); 2389 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv); 2390 2391 /* Decay existing window, copy faults since last scan */ 2392 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; 2393 fault_types[priv] += p->numa_faults[membuf_idx]; 2394 p->numa_faults[membuf_idx] = 0; 2395 2396 /* 2397 * Normalize the faults_from, so all tasks in a group 2398 * count according to CPU use, instead of by the raw 2399 * number of faults. Tasks with little runtime have 2400 * little over-all impact on throughput, and thus their 2401 * faults are less important. 
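* As a rough example, a task that ran for only half of the placement period gets an f_weight of about half that of a fully busy task, so its buffered CPU faults count roughly half as much below.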
2402 */ 2403 f_weight = div64_u64(runtime << 16, period + 1); 2404 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / 2405 (total_faults + 1); 2406 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; 2407 p->numa_faults[cpubuf_idx] = 0; 2408 2409 p->numa_faults[mem_idx] += diff; 2410 p->numa_faults[cpu_idx] += f_diff; 2411 faults += p->numa_faults[mem_idx]; 2412 p->total_numa_faults += diff; 2413 if (ng) { 2414 /* 2415 * safe because we can only change our own group 2416 * 2417 * mem_idx represents the offset for a given 2418 * nid and priv in a specific region because it 2419 * is at the beginning of the numa_faults array. 2420 */ 2421 ng->faults[mem_idx] += diff; 2422 ng->faults[cpu_idx] += f_diff; 2423 ng->total_faults += diff; 2424 group_faults += ng->faults[mem_idx]; 2425 } 2426 } 2427 2428 if (!ng) { 2429 if (faults > max_faults) { 2430 max_faults = faults; 2431 max_nid = nid; 2432 } 2433 } else if (group_faults > max_faults) { 2434 max_faults = group_faults; 2435 max_nid = nid; 2436 } 2437 } 2438 2439 /* Cannot migrate task to CPU-less node */ 2440 if (max_nid != NUMA_NO_NODE && !node_state(max_nid, N_CPU)) { 2441 int near_nid = max_nid; 2442 int distance, near_distance = INT_MAX; 2443 2444 for_each_node_state(nid, N_CPU) { 2445 distance = node_distance(max_nid, nid); 2446 if (distance < near_distance) { 2447 near_nid = nid; 2448 near_distance = distance; 2449 } 2450 } 2451 max_nid = near_nid; 2452 } 2453 2454 if (ng) { 2455 numa_group_count_active_nodes(ng); 2456 spin_unlock_irq(group_lock); 2457 max_nid = preferred_group_nid(p, max_nid); 2458 } 2459 2460 if (max_faults) { 2461 /* Set the new preferred node */ 2462 if (max_nid != p->numa_preferred_nid) 2463 sched_setnuma(p, max_nid); 2464 } 2465 2466 update_task_scan_period(p, fault_types[0], fault_types[1]); 2467 } 2468 2469 static inline int get_numa_group(struct numa_group *grp) 2470 { 2471 return refcount_inc_not_zero(&grp->refcount); 2472 } 2473 2474 static inline void put_numa_group(struct numa_group *grp) 2475 { 2476 if (refcount_dec_and_test(&grp->refcount)) 2477 kfree_rcu(grp, rcu); 2478 } 2479 2480 static void task_numa_group(struct task_struct *p, int cpupid, int flags, 2481 int *priv) 2482 { 2483 struct numa_group *grp, *my_grp; 2484 struct task_struct *tsk; 2485 bool join = false; 2486 int cpu = cpupid_to_cpu(cpupid); 2487 int i; 2488 2489 if (unlikely(!deref_curr_numa_group(p))) { 2490 unsigned int size = sizeof(struct numa_group) + 2491 NR_NUMA_HINT_FAULT_STATS * 2492 nr_node_ids * sizeof(unsigned long); 2493 2494 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 2495 if (!grp) 2496 return; 2497 2498 refcount_set(&grp->refcount, 1); 2499 grp->active_nodes = 1; 2500 grp->max_faults_cpu = 0; 2501 spin_lock_init(&grp->lock); 2502 grp->gid = p->pid; 2503 2504 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2505 grp->faults[i] = p->numa_faults[i]; 2506 2507 grp->total_faults = p->total_numa_faults; 2508 2509 grp->nr_tasks++; 2510 rcu_assign_pointer(p->numa_group, grp); 2511 } 2512 2513 rcu_read_lock(); 2514 tsk = READ_ONCE(cpu_rq(cpu)->curr); 2515 2516 if (!cpupid_match_pid(tsk, cpupid)) 2517 goto no_join; 2518 2519 grp = rcu_dereference(tsk->numa_group); 2520 if (!grp) 2521 goto no_join; 2522 2523 my_grp = deref_curr_numa_group(p); 2524 if (grp == my_grp) 2525 goto no_join; 2526 2527 /* 2528 * Only join the other group if it's bigger; if we're the bigger group, 2529 * the other task will join us.
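* E.g. when a 2-task group meets a 4-task group, the smaller one dissolves into the larger, so group membership converges rather than oscillates.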
2530 */ 2531 if (my_grp->nr_tasks > grp->nr_tasks) 2532 goto no_join; 2533 2534 /* 2535 * Tie-break on the grp address. 2536 */ 2537 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) 2538 goto no_join; 2539 2540 /* Always join threads in the same process. */ 2541 if (tsk->mm == current->mm) 2542 join = true; 2543 2544 /* Simple filter to avoid false positives due to PID collisions */ 2545 if (flags & TNF_SHARED) 2546 join = true; 2547 2548 /* Update priv based on whether false sharing was detected */ 2549 *priv = !join; 2550 2551 if (join && !get_numa_group(grp)) 2552 goto no_join; 2553 2554 rcu_read_unlock(); 2555 2556 if (!join) 2557 return; 2558 2559 BUG_ON(irqs_disabled()); 2560 double_lock_irq(&my_grp->lock, &grp->lock); 2561 2562 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { 2563 my_grp->faults[i] -= p->numa_faults[i]; 2564 grp->faults[i] += p->numa_faults[i]; 2565 } 2566 my_grp->total_faults -= p->total_numa_faults; 2567 grp->total_faults += p->total_numa_faults; 2568 2569 my_grp->nr_tasks--; 2570 grp->nr_tasks++; 2571 2572 spin_unlock(&my_grp->lock); 2573 spin_unlock_irq(&grp->lock); 2574 2575 rcu_assign_pointer(p->numa_group, grp); 2576 2577 put_numa_group(my_grp); 2578 return; 2579 2580 no_join: 2581 rcu_read_unlock(); 2582 return; 2583 } 2584 2585 /* 2586 * Get rid of NUMA statistics associated with a task (either current or dead). 2587 * If @final is set, the task is dead and has reached refcount zero, so we can 2588 * safely free all relevant data structures. Otherwise, there might be 2589 * concurrent reads from places like load balancing and procfs, and we should 2590 * reset the data back to default state without freeing ->numa_faults. 2591 */ 2592 void task_numa_free(struct task_struct *p, bool final) 2593 { 2594 /* safe: p either is current or is being freed by current */ 2595 struct numa_group *grp = rcu_dereference_raw(p->numa_group); 2596 unsigned long *numa_faults = p->numa_faults; 2597 unsigned long flags; 2598 int i; 2599 2600 if (!numa_faults) 2601 return; 2602 2603 if (grp) { 2604 spin_lock_irqsave(&grp->lock, flags); 2605 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2606 grp->faults[i] -= p->numa_faults[i]; 2607 grp->total_faults -= p->total_numa_faults; 2608 2609 grp->nr_tasks--; 2610 spin_unlock_irqrestore(&grp->lock, flags); 2611 RCU_INIT_POINTER(p->numa_group, NULL); 2612 put_numa_group(grp); 2613 } 2614 2615 if (final) { 2616 p->numa_faults = NULL; 2617 kfree(numa_faults); 2618 } else { 2619 p->total_numa_faults = 0; 2620 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2621 numa_faults[i] = 0; 2622 } 2623 } 2624 2625 /* 2626 * Got a PROT_NONE fault for a page on @node. 
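* Typically reached from the NUMA hinting fault path; @pages is the number of base pages covered by the faulting mapping (1 for a PTE, more for a huge page).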
2627 */ 2628 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) 2629 { 2630 struct task_struct *p = current; 2631 bool migrated = flags & TNF_MIGRATED; 2632 int cpu_node = task_node(current); 2633 int local = !!(flags & TNF_FAULT_LOCAL); 2634 struct numa_group *ng; 2635 int priv; 2636 2637 if (!static_branch_likely(&sched_numa_balancing)) 2638 return; 2639 2640 /* for example, ksmd faulting in a user's mm */ 2641 if (!p->mm) 2642 return; 2643 2644 /* Allocate buffer to track faults on a per-node basis */ 2645 if (unlikely(!p->numa_faults)) { 2646 int size = sizeof(*p->numa_faults) * 2647 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; 2648 2649 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); 2650 if (!p->numa_faults) 2651 return; 2652 2653 p->total_numa_faults = 0; 2654 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2655 } 2656 2657 /* 2658 * First accesses are treated as private, otherwise consider accesses 2659 * to be private if the accessing pid has not changed 2660 */ 2661 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { 2662 priv = 1; 2663 } else { 2664 priv = cpupid_match_pid(p, last_cpupid); 2665 if (!priv && !(flags & TNF_NO_GROUP)) 2666 task_numa_group(p, last_cpupid, flags, &priv); 2667 } 2668 2669 /* 2670 * If a workload spans multiple NUMA nodes, a shared fault that 2671 * occurs wholly within the set of nodes that the workload is 2672 * actively using should be counted as local. This allows the 2673 * scan rate to slow down when a workload has settled down. 2674 */ 2675 ng = deref_curr_numa_group(p); 2676 if (!priv && !local && ng && ng->active_nodes > 1 && 2677 numa_is_active_node(cpu_node, ng) && 2678 numa_is_active_node(mem_node, ng)) 2679 local = 1; 2680 2681 /* 2682 * Retry to migrate task to preferred node periodically, in case it 2683 * previously failed, or the scheduler moved us. 2684 */ 2685 if (time_after(jiffies, p->numa_migrate_retry)) { 2686 task_numa_placement(p); 2687 numa_migrate_preferred(p); 2688 } 2689 2690 if (migrated) 2691 p->numa_pages_migrated += pages; 2692 if (flags & TNF_MIGRATE_FAIL) 2693 p->numa_faults_locality[2] += pages; 2694 2695 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; 2696 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; 2697 p->numa_faults_locality[local] += pages; 2698 } 2699 2700 static void reset_ptenuma_scan(struct task_struct *p) 2701 { 2702 /* 2703 * We only did a read acquisition of the mmap sem, so 2704 * p->mm->numa_scan_seq is written to without exclusive access 2705 * and the update is not guaranteed to be atomic. That's not 2706 * much of an issue though, since this is just used for 2707 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not 2708 * expensive, to avoid any form of compiler optimizations: 2709 */ 2710 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); 2711 p->mm->numa_scan_offset = 0; 2712 } 2713 2714 /* 2715 * The expensive part of numa migration is done from task_work context. 2716 * Triggered from task_tick_numa(). 
2717 */ 2718 static void task_numa_work(struct callback_head *work) 2719 { 2720 unsigned long migrate, next_scan, now = jiffies; 2721 struct task_struct *p = current; 2722 struct mm_struct *mm = p->mm; 2723 u64 runtime = p->se.sum_exec_runtime; 2724 struct vm_area_struct *vma; 2725 unsigned long start, end; 2726 unsigned long nr_pte_updates = 0; 2727 long pages, virtpages; 2728 2729 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); 2730 2731 work->next = work; 2732 /* 2733 * Who cares about NUMA placement when they're dying. 2734 * 2735 * NOTE: make sure not to dereference p->mm before this check, 2736 * exit_task_work() happens _after_ exit_mm() so we could be called 2737 * without p->mm even though we still had it when we enqueued this 2738 * work. 2739 */ 2740 if (p->flags & PF_EXITING) 2741 return; 2742 2743 if (!mm->numa_next_scan) { 2744 mm->numa_next_scan = now + 2745 msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2746 } 2747 2748 /* 2749 * Enforce maximal scan/migration frequency.. 2750 */ 2751 migrate = mm->numa_next_scan; 2752 if (time_before(now, migrate)) 2753 return; 2754 2755 if (p->numa_scan_period == 0) { 2756 p->numa_scan_period_max = task_scan_max(p); 2757 p->numa_scan_period = task_scan_start(p); 2758 } 2759 2760 next_scan = now + msecs_to_jiffies(p->numa_scan_period); 2761 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) 2762 return; 2763 2764 /* 2765 * Delay this task enough that another task of this mm will likely win 2766 * the next time around. 2767 */ 2768 p->node_stamp += 2 * TICK_NSEC; 2769 2770 start = mm->numa_scan_offset; 2771 pages = sysctl_numa_balancing_scan_size; 2772 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ 2773 virtpages = pages * 8; /* Scan up to this much virtual space */ 2774 if (!pages) 2775 return; 2776 2777 2778 if (!mmap_read_trylock(mm)) 2779 return; 2780 vma = find_vma(mm, start); 2781 if (!vma) { 2782 reset_ptenuma_scan(p); 2783 start = 0; 2784 vma = mm->mmap; 2785 } 2786 for (; vma; vma = vma->vm_next) { 2787 if (!vma_migratable(vma) || !vma_policy_mof(vma) || 2788 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { 2789 continue; 2790 } 2791 2792 /* 2793 * Shared library pages mapped by multiple processes are not 2794 * migrated as it is expected they are cache replicated. Avoid 2795 * hinting faults in read-only file-backed mappings or the vdso 2796 * as migrating the pages will be of marginal benefit. 2797 */ 2798 if (!vma->vm_mm || 2799 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 2800 continue; 2801 2802 /* 2803 * Skip inaccessible VMAs to avoid any confusion between 2804 * PROT_NONE and NUMA hinting ptes 2805 */ 2806 if (!vma_is_accessible(vma)) 2807 continue; 2808 2809 do { 2810 start = max(start, vma->vm_start); 2811 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 2812 end = min(end, vma->vm_end); 2813 nr_pte_updates = change_prot_numa(vma, start, end); 2814 2815 /* 2816 * Try to scan sysctl_numa_balancing_scan_size worth of 2817 * hpages that have at least one present PTE that 2818 * is not already pte-numa. If the VMA contains 2819 * areas that are unused or already full of prot_numa 2820 * PTEs, scan up to virtpages, to skip through those 2821 * areas faster.
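* (E.g., assuming the default scan size of 256MB and 4K pages, pages starts at 65536 and virtpages at 524288; the HPAGE_SIZE alignment above also keeps a scan window from stopping in the middle of a huge page.)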
2822 */ 2823 if (nr_pte_updates) 2824 pages -= (end - start) >> PAGE_SHIFT; 2825 virtpages -= (end - start) >> PAGE_SHIFT; 2826 2827 start = end; 2828 if (pages <= 0 || virtpages <= 0) 2829 goto out; 2830 2831 cond_resched(); 2832 } while (end != vma->vm_end); 2833 } 2834 2835 out: 2836 /* 2837 * It is possible to reach the end of the VMA list but the last few 2838 * VMAs are not guaranteed to be vma_migratable. If they are not, we 2839 * would find the !migratable VMA on the next scan but not reset the 2840 * scanner to the start so check it now. 2841 */ 2842 if (vma) 2843 mm->numa_scan_offset = start; 2844 else 2845 reset_ptenuma_scan(p); 2846 mmap_read_unlock(mm); 2847 2848 /* 2849 * Make sure tasks use at least 32x as much time to run other code 2850 * than they used here, to limit NUMA PTE scanning overhead to 3% max. 2851 * Usually update_task_scan_period slows down scanning enough; on an 2852 * overloaded system we need to limit overhead on a per task basis. 2853 */ 2854 if (unlikely(p->se.sum_exec_runtime != runtime)) { 2855 u64 diff = p->se.sum_exec_runtime - runtime; 2856 p->node_stamp += 32 * diff; 2857 } 2858 } 2859 2860 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 2861 { 2862 int mm_users = 0; 2863 struct mm_struct *mm = p->mm; 2864 2865 if (mm) { 2866 mm_users = atomic_read(&mm->mm_users); 2867 if (mm_users == 1) { 2868 mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2869 mm->numa_scan_seq = 0; 2870 } 2871 } 2872 p->node_stamp = 0; 2873 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; 2874 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 2875 /* Protect against double add, see task_tick_numa and task_numa_work */ 2876 p->numa_work.next = &p->numa_work; 2877 p->numa_faults = NULL; 2878 p->numa_pages_migrated = 0; 2879 p->total_numa_faults = 0; 2880 RCU_INIT_POINTER(p->numa_group, NULL); 2881 p->last_task_numa_placement = 0; 2882 p->last_sum_exec_runtime = 0; 2883 2884 init_task_work(&p->numa_work, task_numa_work); 2885 2886 /* New address space, reset the preferred nid */ 2887 if (!(clone_flags & CLONE_VM)) { 2888 p->numa_preferred_nid = NUMA_NO_NODE; 2889 return; 2890 } 2891 2892 /* 2893 * New thread, keep existing numa_preferred_nid which should be copied 2894 * already by arch_dup_task_struct but stagger when scans start. 2895 */ 2896 if (mm) { 2897 unsigned int delay; 2898 2899 delay = min_t(unsigned int, task_scan_max(current), 2900 current->numa_scan_period * mm_users * NSEC_PER_MSEC); 2901 delay += 2 * TICK_NSEC; 2902 p->node_stamp = delay; 2903 } 2904 } 2905 2906 /* 2907 * Drive the periodic memory faults.. 2908 */ 2909 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2910 { 2911 struct callback_head *work = &curr->numa_work; 2912 u64 period, now; 2913 2914 /* 2915 * We don't care about NUMA placement if we don't have memory. 2916 */ 2917 if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) 2918 return; 2919 2920 /* 2921 * Using runtime rather than walltime has the dual advantage that 2922 * we (mostly) drive the selection from busy threads and that the 2923 * task needs to have done some actual work before we bother with 2924 * NUMA placement.
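* E.g. with a 1000ms numa_scan_period, a thread has to accumulate a full second of CPU time, not merely exist for one, before more scan work is queued.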
2925 */ 2926 now = curr->se.sum_exec_runtime; 2927 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; 2928 2929 if (now > curr->node_stamp + period) { 2930 if (!curr->node_stamp) 2931 curr->numa_scan_period = task_scan_start(curr); 2932 curr->node_stamp += period; 2933 2934 if (!time_before(jiffies, curr->mm->numa_next_scan)) 2935 task_work_add(curr, work, TWA_RESUME); 2936 } 2937 } 2938 2939 static void update_scan_period(struct task_struct *p, int new_cpu) 2940 { 2941 int src_nid = cpu_to_node(task_cpu(p)); 2942 int dst_nid = cpu_to_node(new_cpu); 2943 2944 if (!static_branch_likely(&sched_numa_balancing)) 2945 return; 2946 2947 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) 2948 return; 2949 2950 if (src_nid == dst_nid) 2951 return; 2952 2953 /* 2954 * Allow resets if faults have been trapped before one scan 2955 * has completed. This is most likely due to a new task that 2956 * is pulled cross-node due to wakeups or load balancing. 2957 */ 2958 if (p->numa_scan_seq) { 2959 /* 2960 * Avoid scan adjustments if moving to the preferred 2961 * node or if the task was not previously running on 2962 * the preferred node. 2963 */ 2964 if (dst_nid == p->numa_preferred_nid || 2965 (p->numa_preferred_nid != NUMA_NO_NODE && 2966 src_nid != p->numa_preferred_nid)) 2967 return; 2968 } 2969 2970 p->numa_scan_period = task_scan_start(p); 2971 } 2972 2973 #else 2974 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2975 { 2976 } 2977 2978 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) 2979 { 2980 } 2981 2982 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) 2983 { 2984 } 2985 2986 static inline void update_scan_period(struct task_struct *p, int new_cpu) 2987 { 2988 } 2989 2990 #endif /* CONFIG_NUMA_BALANCING */ 2991 2992 static void 2993 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2994 { 2995 update_load_add(&cfs_rq->load, se->load.weight); 2996 #ifdef CONFIG_SMP 2997 if (entity_is_task(se)) { 2998 struct rq *rq = rq_of(cfs_rq); 2999 3000 account_numa_enqueue(rq, task_of(se)); 3001 list_add(&se->group_node, &rq->cfs_tasks); 3002 } 3003 #endif 3004 cfs_rq->nr_running++; 3005 if (se_is_idle(se)) 3006 cfs_rq->idle_nr_running++; 3007 } 3008 3009 static void 3010 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) 3011 { 3012 update_load_sub(&cfs_rq->load, se->load.weight); 3013 #ifdef CONFIG_SMP 3014 if (entity_is_task(se)) { 3015 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); 3016 list_del_init(&se->group_node); 3017 } 3018 #endif 3019 cfs_rq->nr_running--; 3020 if (se_is_idle(se)) 3021 cfs_rq->idle_nr_running--; 3022 } 3023 3024 /* 3025 * Signed add and clamp on underflow. 3026 * 3027 * Explicitly do a load-store to ensure the intermediate value never hits 3028 * memory. This allows lockless observations without ever seeing the negative 3029 * values. 3030 */ 3031 #define add_positive(_ptr, _val) do { \ 3032 typeof(_ptr) ptr = (_ptr); \ 3033 typeof(_val) val = (_val); \ 3034 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 3035 \ 3036 res = var + val; \ 3037 \ 3038 if (val < 0 && res > var) \ 3039 res = 0; \ 3040 \ 3041 WRITE_ONCE(*ptr, res); \ 3042 } while (0) 3043 3044 /* 3045 * Unsigned subtract and clamp on underflow. 3046 * 3047 * Explicitly do a load-store to ensure the intermediate value never hits 3048 * memory. This allows lockless observations without ever seeing the negative 3049 * values. 
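* E.g. for sub_positive() below, with *ptr == 5 and a value of 9, the unsigned difference would wrap; the res > var check catches this and stores 0 instead.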
3050 */ 3051 #define sub_positive(_ptr, _val) do { \ 3052 typeof(_ptr) ptr = (_ptr); \ 3053 typeof(*ptr) val = (_val); \ 3054 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 3055 res = var - val; \ 3056 if (res > var) \ 3057 res = 0; \ 3058 WRITE_ONCE(*ptr, res); \ 3059 } while (0) 3060 3061 /* 3062 * Remove and clamp on negative, from a local variable. 3063 * 3064 * A variant of sub_positive(), which does not use explicit load-store 3065 * and is thus optimized for local variable updates. 3066 */ 3067 #define lsub_positive(_ptr, _val) do { \ 3068 typeof(_ptr) ptr = (_ptr); \ 3069 *ptr -= min_t(typeof(*ptr), *ptr, _val); \ 3070 } while (0) 3071 3072 #ifdef CONFIG_SMP 3073 static inline void 3074 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3075 { 3076 cfs_rq->avg.load_avg += se->avg.load_avg; 3077 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; 3078 } 3079 3080 static inline void 3081 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3082 { 3083 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); 3084 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); 3085 /* See update_cfs_rq_load_avg() */ 3086 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, 3087 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); 3088 } 3089 #else 3090 static inline void 3091 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 3092 static inline void 3093 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 3094 #endif 3095 3096 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, 3097 unsigned long weight) 3098 { 3099 if (se->on_rq) { 3100 /* commit outstanding execution time */ 3101 if (cfs_rq->curr == se) 3102 update_curr(cfs_rq); 3103 update_load_sub(&cfs_rq->load, se->load.weight); 3104 } 3105 dequeue_load_avg(cfs_rq, se); 3106 3107 update_load_set(&se->load, weight); 3108 3109 #ifdef CONFIG_SMP 3110 do { 3111 u32 divider = get_pelt_divider(&se->avg); 3112 3113 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); 3114 } while (0); 3115 #endif 3116 3117 enqueue_load_avg(cfs_rq, se); 3118 if (se->on_rq) 3119 update_load_add(&cfs_rq->load, se->load.weight); 3120 3121 } 3122 3123 void reweight_task(struct task_struct *p, int prio) 3124 { 3125 struct sched_entity *se = &p->se; 3126 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3127 struct load_weight *load = &se->load; 3128 unsigned long weight = scale_load(sched_prio_to_weight[prio]); 3129 3130 reweight_entity(cfs_rq, se, weight); 3131 load->inv_weight = sched_prio_to_wmult[prio]; 3132 } 3133 3134 #ifdef CONFIG_FAIR_GROUP_SCHED 3135 #ifdef CONFIG_SMP 3136 /* 3137 * All this does is approximate the hierarchical proportion which includes that 3138 * global sum we all love to hate. 3139 * 3140 * That is, the weight of a group entity, is the proportional share of the 3141 * group weight based on the group runqueue weights. That is: 3142 * 3143 * tg->weight * grq->load.weight 3144 * ge->load.weight = ----------------------------- (1) 3145 * \Sum grq->load.weight 3146 * 3147 * Now, because computing that sum is prohibitively expensive to compute (been 3148 * there, done that) we approximate it with this average stuff. The average 3149 * moves slower and therefore the approximation is cheaper and more stable. 
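* (For instance, with tg->weight == 1024 and two runqueues of weight 2048 and 1024, (1) gives the two group entities weights of roughly 683 and 341.)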
3150 * 3151 * So instead of the above, we substitute: 3152 * 3153 * grq->load.weight -> grq->avg.load_avg (2) 3154 * 3155 * which yields the following: 3156 * 3157 * tg->weight * grq->avg.load_avg 3158 * ge->load.weight = ------------------------------ (3) 3159 * tg->load_avg 3160 * 3161 * Where: tg->load_avg ~= \Sum grq->avg.load_avg 3162 * 3163 * That is shares_avg, and it is right (given the approximation (2)). 3164 * 3165 * The problem with it is that because the average is slow -- it was designed 3166 * to be exactly that of course -- this leads to transients in boundary 3167 * conditions. In particular, the case where the group was idle and we start the 3168 * one task. It takes time for our CPU's grq->avg.load_avg to build up, 3169 * yielding bad latency etc.. 3170 * 3171 * Now, in that special case (1) reduces to: 3172 * 3173 * tg->weight * grq->load.weight 3174 * ge->load.weight = ----------------------------- = tg->weight (4) 3175 * grq->load.weight 3176 * 3177 * That is, the sum collapses because all other CPUs are idle; the UP scenario. 3178 * 3179 * So what we do is modify our approximation (3) to approach (4) in the (near) 3180 * UP case, like: 3181 * 3182 * ge->load.weight = 3183 * 3184 * tg->weight * grq->load.weight 3185 * --------------------------------------------------- (5) 3186 * tg->load_avg - grq->avg.load_avg + grq->load.weight 3187 * 3188 * But because grq->load.weight can drop to 0, resulting in a divide by zero, 3189 * we need to use grq->avg.load_avg as its lower bound, which then gives: 3190 * 3191 * 3192 * tg->weight * grq->load.weight 3193 * ge->load.weight = ----------------------------- (6) 3194 * tg_load_avg' 3195 * 3196 * Where: 3197 * 3198 * tg_load_avg' = tg->load_avg - grq->avg.load_avg + 3199 * max(grq->load.weight, grq->avg.load_avg) 3200 * 3201 * And that is shares_weight and is icky. In the (near) UP case it approaches 3202 * (4) while in the normal case it approaches (3). It consistently 3203 * overestimates the ge->load.weight and therefore: 3204 * 3205 * \Sum ge->load.weight >= tg->weight 3206 * 3207 * hence icky! 3208 */ 3209 static long calc_group_shares(struct cfs_rq *cfs_rq) 3210 { 3211 long tg_weight, tg_shares, load, shares; 3212 struct task_group *tg = cfs_rq->tg; 3213 3214 tg_shares = READ_ONCE(tg->shares); 3215 3216 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); 3217 3218 tg_weight = atomic_long_read(&tg->load_avg); 3219 3220 /* Ensure tg_weight >= load */ 3221 tg_weight -= cfs_rq->tg_load_avg_contrib; 3222 tg_weight += load; 3223 3224 shares = (tg_shares * load); 3225 if (tg_weight) 3226 shares /= tg_weight; 3227 3228 /* 3229 * MIN_SHARES has to be unscaled here to support per-CPU partitioning 3230 * of a group with small tg->shares value. It is a floor value which is 3231 * assigned as a minimum load.weight to the sched_entity representing 3232 * the group on a CPU. 3233 * 3234 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 3235 * on an 8-core system with 8 tasks each runnable on one CPU shares has 3236 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In 3237 * case no task is runnable on a CPU MIN_SHARES=2 should be returned 3238 * instead of 0. 3239 */ 3240 return clamp_t(long, shares, MIN_SHARES, tg_shares); 3241 } 3242 #endif /* CONFIG_SMP */ 3243 3244 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); 3245 3246 /* 3247 * Recomputes the group entity based on the current state of its group 3248 * runqueue.
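* On SMP the weight comes from calc_group_shares() above; on UP the tg's shares value is used directly, see the #ifdef below.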
3249 */ 3250 static void update_cfs_group(struct sched_entity *se) 3251 { 3252 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3253 long shares; 3254 3255 if (!gcfs_rq) 3256 return; 3257 3258 if (throttled_hierarchy(gcfs_rq)) 3259 return; 3260 3261 #ifndef CONFIG_SMP 3262 shares = READ_ONCE(gcfs_rq->tg->shares); 3263 3264 if (likely(se->load.weight == shares)) 3265 return; 3266 #else 3267 shares = calc_group_shares(gcfs_rq); 3268 #endif 3269 3270 reweight_entity(cfs_rq_of(se), se, shares); 3271 } 3272 3273 #else /* CONFIG_FAIR_GROUP_SCHED */ 3274 static inline void update_cfs_group(struct sched_entity *se) 3275 { 3276 } 3277 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3278 3279 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) 3280 { 3281 struct rq *rq = rq_of(cfs_rq); 3282 3283 if (&rq->cfs == cfs_rq) { 3284 /* 3285 * There are a few boundary cases this might miss but it should 3286 * get called often enough that that should (hopefully) not be 3287 * a real problem. 3288 * 3289 * It will not get called when we go idle, because the idle 3290 * thread is a different class (!fair), nor will the utilization 3291 * number include things like RT tasks. 3292 * 3293 * As is, the util number is not freq-invariant (we'd have to 3294 * implement arch_scale_freq_capacity() for that). 3295 * 3296 * See cpu_util_cfs(). 3297 */ 3298 cpufreq_update_util(rq, flags); 3299 } 3300 } 3301 3302 #ifdef CONFIG_SMP 3303 #ifdef CONFIG_FAIR_GROUP_SCHED 3304 /* 3305 * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list 3306 * immediately before a parent cfs_rq, and cfs_rqs are removed from the list 3307 * bottom-up, we only have to test whether the cfs_rq before us on the list 3308 * is our child. 3309 * If cfs_rq is not on the list, test whether a child needs to be added to 3310 * connect a branch to the tree (see list_add_leaf_cfs_rq() for details). 3311 */ 3312 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) 3313 { 3314 struct cfs_rq *prev_cfs_rq; 3315 struct list_head *prev; 3316 3317 if (cfs_rq->on_list) { 3318 prev = cfs_rq->leaf_cfs_rq_list.prev; 3319 } else { 3320 struct rq *rq = rq_of(cfs_rq); 3321 3322 prev = rq->tmp_alone_branch; 3323 } 3324 3325 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); 3326 3327 return (prev_cfs_rq->tg->parent == cfs_rq->tg); 3328 } 3329 3330 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 3331 { 3332 if (cfs_rq->load.weight) 3333 return false; 3334 3335 if (cfs_rq->avg.load_sum) 3336 return false; 3337 3338 if (cfs_rq->avg.util_sum) 3339 return false; 3340 3341 if (cfs_rq->avg.runnable_sum) 3342 return false; 3343 3344 if (child_cfs_rq_on_list(cfs_rq)) 3345 return false; 3346 3347 /* 3348 * _avg must be null when _sum are null because _avg = _sum / divider 3349 * Make sure that rounding and/or propagation of PELT values never 3350 * break this. 3351 */ 3352 SCHED_WARN_ON(cfs_rq->avg.load_avg || 3353 cfs_rq->avg.util_avg || 3354 cfs_rq->avg.runnable_avg); 3355 3356 return true; 3357 } 3358 3359 /** 3360 * update_tg_load_avg - update the tg's load avg 3361 * @cfs_rq: the cfs_rq whose avg changed 3362 * 3363 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. 3364 * However, because tg->load_avg is a global value there are performance 3365 * considerations. 3366 * 3367 * In order to avoid having to look at the other cfs_rq's, we use a 3368 * differential update where we store the last value we propagated. This in 3369 * turn allows skipping updates if the differential is 'small'.
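* E.g. with a previous contribution of 6400, the global tg->load_avg is only touched once the local average has drifted by more than 6400/64 == 100.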
3370 * 3371 * Updating tg's load_avg is necessary before update_cfs_share(). 3372 */ 3373 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) 3374 { 3375 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; 3376 3377 /* 3378 * No need to update load_avg for root_task_group as it is not used. 3379 */ 3380 if (cfs_rq->tg == &root_task_group) 3381 return; 3382 3383 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { 3384 atomic_long_add(delta, &cfs_rq->tg->load_avg); 3385 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; 3386 } 3387 } 3388 3389 /* 3390 * Called within set_task_rq() right before setting a task's CPU. The 3391 * caller only guarantees p->pi_lock is held; no other assumptions, 3392 * including the state of rq->lock, should be made. 3393 */ 3394 void set_task_rq_fair(struct sched_entity *se, 3395 struct cfs_rq *prev, struct cfs_rq *next) 3396 { 3397 u64 p_last_update_time; 3398 u64 n_last_update_time; 3399 3400 if (!sched_feat(ATTACH_AGE_LOAD)) 3401 return; 3402 3403 /* 3404 * We are supposed to update the task to "current" time, then it's up to 3405 * date and ready to go to new CPU/cfs_rq. But we have difficulty in 3406 * getting what current time is, so simply throw away the out-of-date 3407 * time. This will result in the wakee task being less decayed, but giving 3408 * the wakee more load doesn't sound bad. 3409 */ 3410 if (!(se->avg.last_update_time && prev)) 3411 return; 3412 3413 #ifndef CONFIG_64BIT 3414 { 3415 u64 p_last_update_time_copy; 3416 u64 n_last_update_time_copy; 3417 3418 do { 3419 p_last_update_time_copy = prev->load_last_update_time_copy; 3420 n_last_update_time_copy = next->load_last_update_time_copy; 3421 3422 smp_rmb(); 3423 3424 p_last_update_time = prev->avg.last_update_time; 3425 n_last_update_time = next->avg.last_update_time; 3426 3427 } while (p_last_update_time != p_last_update_time_copy || 3428 n_last_update_time != n_last_update_time_copy); 3429 } 3430 #else 3431 p_last_update_time = prev->avg.last_update_time; 3432 n_last_update_time = next->avg.last_update_time; 3433 #endif 3434 __update_load_avg_blocked_se(p_last_update_time, se); 3435 se->avg.last_update_time = n_last_update_time; 3436 } 3437 3438 /* 3439 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to 3440 * propagate its contribution. The key to this propagation is the invariant 3441 * that for each group: 3442 * 3443 * ge->avg == grq->avg (1) 3444 * 3445 * _IFF_ we look at the pure running and runnable sums. Because they 3446 * represent the very same entity, just at different points in the hierarchy. 3447 * 3448 * Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial 3449 * and simply copy the running/runnable sum over (but still wrong, because 3450 * the group entity and group rq do not have their PELT windows aligned). 3451 * 3452 * However, update_tg_cfs_load() is more complex. So we have: 3453 * 3454 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2) 3455 * 3456 * And since, like util, the runnable part should be directly transferable, 3457 * the following would _appear_ to be the straightforward approach: 3458 * 3459 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3) 3460 * 3461 * And per (1) we have: 3462 * 3463 * ge->avg.runnable_avg == grq->avg.runnable_avg 3464 * 3465 * Which gives: 3466 * 3467 * ge->load.weight * grq->avg.load_avg 3468 * ge->avg.load_avg = ----------------------------------- (4) 3469 * grq->load.weight 3470 * 3471 * Except that is wrong!
3472 * 3473 * Because while for entities historical weight is not important and we 3474 * really only care about our future and therefore can consider a pure 3475 * runnable sum, runqueues can NOT do this. 3476 * 3477 * We specifically want runqueues to have a load_avg that includes 3478 * historical weights. Those represent the blocked load, the load we expect 3479 * to (shortly) return to us. This only works by keeping the weights as 3480 * integral part of the sum. We therefore cannot decompose as per (3). 3481 * 3482 * Another reason this doesn't work is that runnable isn't a 0-sum entity. 3483 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the 3484 * rq itself is runnable anywhere between 2/3 and 1 depending on how the 3485 * runnable section of these tasks overlap (or not). If they were to perfectly 3486 * align the rq as a whole would be runnable 2/3 of the time. If however we 3487 * always have at least 1 runnable task, the rq as a whole is always runnable. 3488 * 3489 * So we'll have to approximate.. :/ 3490 * 3491 * Given the constraint: 3492 * 3493 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX 3494 * 3495 * We can construct a rule that adds runnable to a rq by assuming minimal 3496 * overlap. 3497 * 3498 * On removal, we'll assume each task is equally runnable; which yields: 3499 * 3500 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight 3501 * 3502 * XXX: only do this for the part of runnable > running ? 3503 * 3504 */ 3505 static inline void 3506 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3507 { 3508 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; 3509 u32 new_sum, divider; 3510 3511 /* Nothing to update */ 3512 if (!delta_avg) 3513 return; 3514 3515 /* 3516 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3517 * See ___update_load_avg() for details. 3518 */ 3519 divider = get_pelt_divider(&cfs_rq->avg); 3520 3521 3522 /* Set new sched_entity's utilization */ 3523 se->avg.util_avg = gcfs_rq->avg.util_avg; 3524 new_sum = se->avg.util_avg * divider; 3525 delta_sum = (long)new_sum - (long)se->avg.util_sum; 3526 se->avg.util_sum = new_sum; 3527 3528 /* Update parent cfs_rq utilization */ 3529 add_positive(&cfs_rq->avg.util_avg, delta_avg); 3530 add_positive(&cfs_rq->avg.util_sum, delta_sum); 3531 3532 /* See update_cfs_rq_load_avg() */ 3533 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, 3534 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); 3535 } 3536 3537 static inline void 3538 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3539 { 3540 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; 3541 u32 new_sum, divider; 3542 3543 /* Nothing to update */ 3544 if (!delta_avg) 3545 return; 3546 3547 /* 3548 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3549 * See ___update_load_avg() for details. 
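* The divider lies between PELT_MIN_DIVIDER and LOAD_AVG_MAX, so one unit of an _avg corresponds to roughly LOAD_AVG_MAX worth of _sum.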
3550 */ 3551 divider = get_pelt_divider(&cfs_rq->avg); 3552 3553 /* Set new sched_entity's runnable */ 3554 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; 3555 new_sum = se->avg.runnable_avg * divider; 3556 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; 3557 se->avg.runnable_sum = new_sum; 3558 3559 /* Update parent cfs_rq runnable */ 3560 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); 3561 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); 3562 /* See update_cfs_rq_load_avg() */ 3563 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, 3564 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); 3565 } 3566 3567 static inline void 3568 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3569 { 3570 long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; 3571 unsigned long load_avg; 3572 u64 load_sum = 0; 3573 s64 delta_sum; 3574 u32 divider; 3575 3576 if (!runnable_sum) 3577 return; 3578 3579 gcfs_rq->prop_runnable_sum = 0; 3580 3581 /* 3582 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3583 * See ___update_load_avg() for details. 3584 */ 3585 divider = get_pelt_divider(&cfs_rq->avg); 3586 3587 if (runnable_sum >= 0) { 3588 /* 3589 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until 3590 * the CPU is saturated running == runnable. 3591 */ 3592 runnable_sum += se->avg.load_sum; 3593 runnable_sum = min_t(long, runnable_sum, divider); 3594 } else { 3595 /* 3596 * Estimate the new unweighted runnable_sum of the gcfs_rq by 3597 * assuming all tasks are equally runnable. 3598 */ 3599 if (scale_load_down(gcfs_rq->load.weight)) { 3600 load_sum = div_u64(gcfs_rq->avg.load_sum, 3601 scale_load_down(gcfs_rq->load.weight)); 3602 } 3603 3604 /* But make sure to not inflate se's runnable */ 3605 runnable_sum = min(se->avg.load_sum, load_sum); 3606 } 3607 3608 /* 3609 * runnable_sum can't be lower than running_sum 3610 * Rescale running sum to be in the same range as runnable sum 3611 * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT] 3612 * runnable_sum is in [0 : LOAD_AVG_MAX] 3613 */ 3614 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; 3615 runnable_sum = max(runnable_sum, running_sum); 3616 3617 load_sum = se_weight(se) * runnable_sum; 3618 load_avg = div_u64(load_sum, divider); 3619 3620 delta_avg = load_avg - se->avg.load_avg; 3621 if (!delta_avg) 3622 return; 3623 3624 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; 3625 3626 se->avg.load_sum = runnable_sum; 3627 se->avg.load_avg = load_avg; 3628 add_positive(&cfs_rq->avg.load_avg, delta_avg); 3629 add_positive(&cfs_rq->avg.load_sum, delta_sum); 3630 /* See update_cfs_rq_load_avg() */ 3631 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, 3632 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); 3633 } 3634 3635 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) 3636 { 3637 cfs_rq->propagate = 1; 3638 cfs_rq->prop_runnable_sum += runnable_sum; 3639 } 3640 3641 /* Update task and its cfs_rq load average */ 3642 static inline int propagate_entity_load_avg(struct sched_entity *se) 3643 { 3644 struct cfs_rq *cfs_rq, *gcfs_rq; 3645 3646 if (entity_is_task(se)) 3647 return 0; 3648 3649 gcfs_rq = group_cfs_rq(se); 3650 if (!gcfs_rq->propagate) 3651 return 0; 3652 3653 gcfs_rq->propagate = 0; 3654 3655 cfs_rq = cfs_rq_of(se); 3656 3657 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); 3658 3659 update_tg_cfs_util(cfs_rq, se, gcfs_rq); 3660 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); 3661 
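/* Load goes last: update_tg_cfs_load() consumes and clears the gcfs_rq's prop_runnable_sum that was forwarded up via add_tg_cfs_propagate() above. */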
update_tg_cfs_load(cfs_rq, se, gcfs_rq); 3662 3663 trace_pelt_cfs_tp(cfs_rq); 3664 trace_pelt_se_tp(se); 3665 3666 return 1; 3667 } 3668 3669 /* 3670 * Check if we need to update the load and the utilization of a blocked 3671 * group_entity: 3672 */ 3673 static inline bool skip_blocked_update(struct sched_entity *se) 3674 { 3675 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3676 3677 /* 3678 * If the sched_entity still has a non-zero load or utilization, we have to 3679 * decay it: 3680 */ 3681 if (se->avg.load_avg || se->avg.util_avg) 3682 return false; 3683 3684 /* 3685 * If there is a pending propagation, we have to update the load and 3686 * the utilization of the sched_entity: 3687 */ 3688 if (gcfs_rq->propagate) 3689 return false; 3690 3691 /* 3692 * Otherwise, the load and the utilization of the sched_entity are 3693 * already zero and there is no pending propagation, so it will be a 3694 * waste of time to try to decay it: 3695 */ 3696 return true; 3697 } 3698 3699 #else /* CONFIG_FAIR_GROUP_SCHED */ 3700 3701 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {} 3702 3703 static inline int propagate_entity_load_avg(struct sched_entity *se) 3704 { 3705 return 0; 3706 } 3707 3708 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} 3709 3710 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3711 3712 /** 3713 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages 3714 * @now: current time, as per cfs_rq_clock_pelt() 3715 * @cfs_rq: cfs_rq to update 3716 * 3717 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) 3718 * avg. The immediate corollary is that all (fair) tasks must be attached, see 3719 * post_init_entity_util_avg(). 3720 * 3721 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example. 3722 * 3723 * Return: true if the load decayed or we removed load. 3724 * 3725 * Since both these conditions indicate a changed cfs_rq->avg.load we should 3726 * call update_tg_load_avg() when this function returns true. 3727 */ 3728 static inline int 3729 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 3730 { 3731 unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0; 3732 struct sched_avg *sa = &cfs_rq->avg; 3733 int decayed = 0; 3734 3735 if (cfs_rq->removed.nr) { 3736 unsigned long r; 3737 u32 divider = get_pelt_divider(&cfs_rq->avg); 3738 3739 raw_spin_lock(&cfs_rq->removed.lock); 3740 swap(cfs_rq->removed.util_avg, removed_util); 3741 swap(cfs_rq->removed.load_avg, removed_load); 3742 swap(cfs_rq->removed.runnable_avg, removed_runnable); 3743 cfs_rq->removed.nr = 0; 3744 raw_spin_unlock(&cfs_rq->removed.lock); 3745 3746 r = removed_load; 3747 sub_positive(&sa->load_avg, r); 3748 sub_positive(&sa->load_sum, r * divider); 3749 /* See sa->util_sum below */ 3750 sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER); 3751 3752 r = removed_util; 3753 sub_positive(&sa->util_avg, r); 3754 sub_positive(&sa->util_sum, r * divider); 3755 /* 3756 * Because of rounding, se->util_sum might end up being +1 more than 3757 * cfs->util_sum. Although this is not a problem by itself, detaching 3758 * a lot of tasks with the rounding problem between 2 updates of 3759 * util_avg (~1ms) can make cfs->util_sum become null whereas 3760 * cfs->util_avg is not. 3761 * Check that util_sum is still above its lower bound for the new 3762 * util_avg.
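* (E.g. a util_avg of 10 demands a util_sum of at least 10 * PELT_MIN_DIVIDER.)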
Given that period_contrib might have moved since the last 3763 * sync, we are only sure that util_sum must be above or equal to 3764 * util_avg * minimum possible divider 3765 */ 3766 sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER); 3767 3768 r = removed_runnable; 3769 sub_positive(&sa->runnable_avg, r); 3770 sub_positive(&sa->runnable_sum, r * divider); 3771 /* See sa->util_sum above */ 3772 sa->runnable_sum = max_t(u32, sa->runnable_sum, 3773 sa->runnable_avg * PELT_MIN_DIVIDER); 3774 3775 /* 3776 * removed_runnable is the unweighted version of removed_load so we 3777 * can use it to estimate removed_load_sum. 3778 */ 3779 add_tg_cfs_propagate(cfs_rq, 3780 -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT); 3781 3782 decayed = 1; 3783 } 3784 3785 decayed |= __update_load_avg_cfs_rq(now, cfs_rq); 3786 3787 #ifndef CONFIG_64BIT 3788 smp_wmb(); 3789 cfs_rq->load_last_update_time_copy = sa->last_update_time; 3790 #endif 3791 3792 return decayed; 3793 } 3794 3795 /** 3796 * attach_entity_load_avg - attach this entity to its cfs_rq load avg 3797 * @cfs_rq: cfs_rq to attach to 3798 * @se: sched_entity to attach 3799 * 3800 * Must call update_cfs_rq_load_avg() before this, since we rely on 3801 * cfs_rq->avg.last_update_time being current. 3802 */ 3803 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3804 { 3805 /* 3806 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3807 * See ___update_load_avg() for details. 3808 */ 3809 u32 divider = get_pelt_divider(&cfs_rq->avg); 3810 3811 /* 3812 * When we attach the @se to the @cfs_rq, we must align the decay 3813 * window because without that, really weird and wonderful things can 3814 * happen. 3815 * 3816 * XXX illustrate 3817 */ 3818 se->avg.last_update_time = cfs_rq->avg.last_update_time; 3819 se->avg.period_contrib = cfs_rq->avg.period_contrib; 3820 3821 /* 3822 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new 3823 * period_contrib. This isn't strictly correct, but since we're 3824 * entirely outside of the PELT hierarchy, nobody cares if we truncate 3825 * _sum a little. 3826 */ 3827 se->avg.util_sum = se->avg.util_avg * divider; 3828 3829 se->avg.runnable_sum = se->avg.runnable_avg * divider; 3830 3831 se->avg.load_sum = divider; 3832 if (se_weight(se)) { 3833 se->avg.load_sum = 3834 div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); 3835 } 3836 3837 enqueue_load_avg(cfs_rq, se); 3838 cfs_rq->avg.util_avg += se->avg.util_avg; 3839 cfs_rq->avg.util_sum += se->avg.util_sum; 3840 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; 3841 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; 3842 3843 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); 3844 3845 cfs_rq_util_change(cfs_rq, 0); 3846 3847 trace_pelt_cfs_tp(cfs_rq); 3848 } 3849 3850 /** 3851 * detach_entity_load_avg - detach this entity from its cfs_rq load avg 3852 * @cfs_rq: cfs_rq to detach from 3853 * @se: sched_entity to detach 3854 * 3855 * Must call update_cfs_rq_load_avg() before this, since we rely on 3856 * cfs_rq->avg.last_update_time being current. 
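* Typical callers are the task migration and cgroup-move paths, where the entity is leaving this cfs_rq for another one.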
3857 */ 3858 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3859 { 3860 dequeue_load_avg(cfs_rq, se); 3861 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); 3862 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); 3863 /* See update_cfs_rq_load_avg() */ 3864 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, 3865 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); 3866 3867 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); 3868 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); 3869 /* See update_cfs_rq_load_avg() */ 3870 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, 3871 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); 3872 3873 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); 3874 3875 cfs_rq_util_change(cfs_rq, 0); 3876 3877 trace_pelt_cfs_tp(cfs_rq); 3878 } 3879 3880 /* 3881 * Optional action to be done while updating the load average 3882 */ 3883 #define UPDATE_TG 0x1 3884 #define SKIP_AGE_LOAD 0x2 3885 #define DO_ATTACH 0x4 3886 3887 /* Update task and its cfs_rq load average */ 3888 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3889 { 3890 u64 now = cfs_rq_clock_pelt(cfs_rq); 3891 int decayed; 3892 3893 /* 3894 * Track task load average for carrying it to the new CPU after migration, and 3895 * track group sched_entity load average for the task_h_load() calculation in migration 3896 */ 3897 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) 3898 __update_load_avg_se(now, cfs_rq, se); 3899 3900 decayed = update_cfs_rq_load_avg(now, cfs_rq); 3901 decayed |= propagate_entity_load_avg(se); 3902 3903 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { 3904 3905 /* 3906 * DO_ATTACH means we're here from enqueue_entity(). 3907 * !last_update_time means we've passed through 3908 * migrate_task_rq_fair() indicating we migrated. 3909 * 3910 * IOW we're enqueueing a task on a new CPU. 3911 */ 3912 attach_entity_load_avg(cfs_rq, se); 3913 update_tg_load_avg(cfs_rq); 3914 3915 } else if (decayed) { 3916 cfs_rq_util_change(cfs_rq, 0); 3917 3918 if (flags & UPDATE_TG) 3919 update_tg_load_avg(cfs_rq); 3920 } 3921 } 3922 3923 #ifndef CONFIG_64BIT 3924 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3925 { 3926 u64 last_update_time_copy; 3927 u64 last_update_time; 3928 3929 do { 3930 last_update_time_copy = cfs_rq->load_last_update_time_copy; 3931 smp_rmb(); 3932 last_update_time = cfs_rq->avg.last_update_time; 3933 } while (last_update_time != last_update_time_copy); 3934 3935 return last_update_time; 3936 } 3937 #else 3938 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3939 { 3940 return cfs_rq->avg.last_update_time; 3941 } 3942 #endif 3943 3944 /* 3945 * Synchronize entity load avg of dequeued entity without locking 3946 * the previous rq. 3947 */ 3948 static void sync_entity_load_avg(struct sched_entity *se) 3949 { 3950 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3951 u64 last_update_time; 3952 3953 last_update_time = cfs_rq_last_update_time(cfs_rq); 3954 __update_load_avg_blocked_se(last_update_time, se); 3955 } 3956 3957 /* 3958 * Task first catches up with cfs_rq, and then subtracts 3959 * itself from the cfs_rq (task must be off the queue now).
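 *
 * "Catches up" means sync_entity_load_avg() decays se->avg to the cfs_rq's
 * last_update_time, so the contributions queued in the removed.* buckets
 * below are aged to the same PELT clock that update_cfs_rq_load_avg() will
 * use when it subtracts them.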
3960 */ 3961 static void remove_entity_load_avg(struct sched_entity *se) 3962 { 3963 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3964 unsigned long flags; 3965 3966 /* 3967 * tasks cannot exit without having gone through wake_up_new_task() -> 3968 * post_init_entity_util_avg() which will have added things to the 3969 * cfs_rq, so we can remove unconditionally. 3970 */ 3971 3972 sync_entity_load_avg(se); 3973 3974 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); 3975 ++cfs_rq->removed.nr; 3976 cfs_rq->removed.util_avg += se->avg.util_avg; 3977 cfs_rq->removed.load_avg += se->avg.load_avg; 3978 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; 3979 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); 3980 } 3981 3982 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) 3983 { 3984 return cfs_rq->avg.runnable_avg; 3985 } 3986 3987 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) 3988 { 3989 return cfs_rq->avg.load_avg; 3990 } 3991 3992 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf); 3993 3994 static inline unsigned long task_util(struct task_struct *p) 3995 { 3996 return READ_ONCE(p->se.avg.util_avg); 3997 } 3998 3999 static inline unsigned long _task_util_est(struct task_struct *p) 4000 { 4001 struct util_est ue = READ_ONCE(p->se.avg.util_est); 4002 4003 return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED)); 4004 } 4005 4006 static inline unsigned long task_util_est(struct task_struct *p) 4007 { 4008 return max(task_util(p), _task_util_est(p)); 4009 } 4010 4011 #ifdef CONFIG_UCLAMP_TASK 4012 static inline unsigned long uclamp_task_util(struct task_struct *p) 4013 { 4014 return clamp(task_util_est(p), 4015 uclamp_eff_value(p, UCLAMP_MIN), 4016 uclamp_eff_value(p, UCLAMP_MAX)); 4017 } 4018 #else 4019 static inline unsigned long uclamp_task_util(struct task_struct *p) 4020 { 4021 return task_util_est(p); 4022 } 4023 #endif 4024 4025 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, 4026 struct task_struct *p) 4027 { 4028 unsigned int enqueued; 4029 4030 if (!sched_feat(UTIL_EST)) 4031 return; 4032 4033 /* Update root cfs_rq's estimated utilization */ 4034 enqueued = cfs_rq->avg.util_est.enqueued; 4035 enqueued += _task_util_est(p); 4036 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); 4037 4038 trace_sched_util_est_cfs_tp(cfs_rq); 4039 } 4040 4041 static inline void util_est_dequeue(struct cfs_rq *cfs_rq, 4042 struct task_struct *p) 4043 { 4044 unsigned int enqueued; 4045 4046 if (!sched_feat(UTIL_EST)) 4047 return; 4048 4049 /* Update root cfs_rq's estimated utilization */ 4050 enqueued = cfs_rq->avg.util_est.enqueued; 4051 enqueued -= min_t(unsigned int, enqueued, _task_util_est(p)); 4052 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); 4053 4054 trace_sched_util_est_cfs_tp(cfs_rq); 4055 } 4056 4057 #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100) 4058 4059 /* 4060 * Check if a (signed) value is within a specified (unsigned) margin, 4061 * based on the observation that: 4062 * 4063 * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1) 4064 * 4065 * NOTE: this only works when value + margin < INT_MAX. 
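 *
 * For illustration, with margin = 100: value = -99 gives
 * (unsigned int)(-99 + 99) = 0 < 199, i.e. true, while value = 100 gives
 * 199, which is not < 199, i.e. false -- both matching abs(value) < 100.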
4066 */ 4067 static inline bool within_margin(int value, int margin) 4068 { 4069 return ((unsigned int)(value + margin - 1) < (2 * margin - 1)); 4070 } 4071 4072 static inline void util_est_update(struct cfs_rq *cfs_rq, 4073 struct task_struct *p, 4074 bool task_sleep) 4075 { 4076 long last_ewma_diff, last_enqueued_diff; 4077 struct util_est ue; 4078 4079 if (!sched_feat(UTIL_EST)) 4080 return; 4081 4082 /* 4083 * Skip update of task's estimated utilization when the task has not 4084 * yet completed an activation, e.g. being migrated. 4085 */ 4086 if (!task_sleep) 4087 return; 4088 4089 /* 4090 * If the PELT values haven't changed since enqueue time, 4091 * skip the util_est update. 4092 */ 4093 ue = p->se.avg.util_est; 4094 if (ue.enqueued & UTIL_AVG_UNCHANGED) 4095 return; 4096 4097 last_enqueued_diff = ue.enqueued; 4098 4099 /* 4100 * Reset EWMA on utilization increases; the moving average is used only 4101 * to smooth utilization decreases. 4102 */ 4103 ue.enqueued = task_util(p); 4104 if (sched_feat(UTIL_EST_FASTUP)) { 4105 if (ue.ewma < ue.enqueued) { 4106 ue.ewma = ue.enqueued; 4107 goto done; 4108 } 4109 } 4110 4111 /* 4112 * Skip update of task's estimated utilization when its members are 4113 * already ~1% close to its last activation value. 4114 */ 4115 last_ewma_diff = ue.enqueued - ue.ewma; 4116 last_enqueued_diff -= ue.enqueued; 4117 if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) { 4118 if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN)) 4119 goto done; 4120 4121 return; 4122 } 4123 4124 /* 4125 * To avoid overestimation of actual task utilization, skip updates if 4126 * we cannot guarantee there is idle time on this CPU. 4127 */ 4128 if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) 4129 return; 4130 4131 /* 4132 * Update the task's estimated utilization 4133 * 4134 * When *p completes an activation we can consolidate another sample 4135 * of the task size. This is done by storing the current PELT value 4136 * as ue.enqueued and by using this value to update the Exponential 4137 * Weighted Moving Average (EWMA): 4138 * 4139 * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1) 4140 * = w * task_util(p) + ewma(t-1) - w * ewma(t-1) 4141 * = w * (task_util(p) - ewma(t-1)) + ewma(t-1) 4142 * = w * ( last_ewma_diff ) + ewma(t-1) 4143 * = w * (last_ewma_diff + ewma(t-1) / w) 4144 * 4145 * Where 'w' is the weight of new samples, which is configured to be 4146 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT) 4147 */ 4148 ue.ewma <<= UTIL_EST_WEIGHT_SHIFT; 4149 ue.ewma += last_ewma_diff; 4150 ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; 4151 done: 4152 ue.enqueued |= UTIL_AVG_UNCHANGED; 4153 WRITE_ONCE(p->se.avg.util_est, ue); 4154 4155 trace_sched_util_est_se_tp(&p->se); 4156 } 4157 4158 static inline int task_fits_capacity(struct task_struct *p, 4159 unsigned long capacity) 4160 { 4161 return fits_capacity(uclamp_task_util(p), capacity); 4162 } 4163 4164 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) 4165 { 4166 if (!static_branch_unlikely(&sched_asym_cpucapacity)) 4167 return; 4168 4169 if (!p || p->nr_cpus_allowed == 1) { 4170 rq->misfit_task_load = 0; 4171 return; 4172 } 4173 4174 if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { 4175 rq->misfit_task_load = 0; 4176 return; 4177 } 4178 4179 /* 4180 * Make sure that misfit_task_load will not be null even if 4181 * task_h_load() returns 0.
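 *
 * Load balancing reads rq->misfit_task_load both as a "this rq has a
 * misfit task" flag and as a magnitude, so clamp it to at least 1 to
 * keep the flag visible when task_h_load() rounds down to 0.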
4182 */ 4183 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); 4184 } 4185 4186 #else /* CONFIG_SMP */ 4187 4188 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 4189 { 4190 return true; 4191 } 4192 4193 #define UPDATE_TG 0x0 4194 #define SKIP_AGE_LOAD 0x0 4195 #define DO_ATTACH 0x0 4196 4197 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) 4198 { 4199 cfs_rq_util_change(cfs_rq, 0); 4200 } 4201 4202 static inline void remove_entity_load_avg(struct sched_entity *se) {} 4203 4204 static inline void 4205 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 4206 static inline void 4207 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 4208 4209 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf) 4210 { 4211 return 0; 4212 } 4213 4214 static inline void 4215 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 4216 4217 static inline void 4218 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 4219 4220 static inline void 4221 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, 4222 bool task_sleep) {} 4223 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} 4224 4225 #endif /* CONFIG_SMP */ 4226 4227 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) 4228 { 4229 #ifdef CONFIG_SCHED_DEBUG 4230 s64 d = se->vruntime - cfs_rq->min_vruntime; 4231 4232 if (d < 0) 4233 d = -d; 4234 4235 if (d > 3*sysctl_sched_latency) 4236 schedstat_inc(cfs_rq->nr_spread_over); 4237 #endif 4238 } 4239 4240 static void 4241 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) 4242 { 4243 u64 vruntime = cfs_rq->min_vruntime; 4244 4245 /* 4246 * The 'current' period is already promised to the current tasks, 4247 * however the extra weight of the new task will slow them down a 4248 * little; place the new task so that it fits in the slot that 4249 * stays open at the end. 4250 */ 4251 if (initial && sched_feat(START_DEBIT)) 4252 vruntime += sched_vslice(cfs_rq, se); 4253 4254 /* sleeps up to a single latency don't count. */ 4255 if (!initial) { 4256 unsigned long thresh; 4257 4258 if (se_is_idle(se)) 4259 thresh = sysctl_sched_min_granularity; 4260 else 4261 thresh = sysctl_sched_latency; 4262 4263 /* 4264 * Halve their sleep time's effect, to allow 4265 * for a gentler effect of sleepers: 4266 */ 4267 if (sched_feat(GENTLE_FAIR_SLEEPERS)) 4268 thresh >>= 1; 4269 4270 vruntime -= thresh; 4271 } 4272 4273 /* ensure we never gain time by being placed backwards. */ 4274 se->vruntime = max_vruntime(se->vruntime, vruntime); 4275 } 4276 4277 static void check_enqueue_throttle(struct cfs_rq *cfs_rq); 4278 4279 static inline bool cfs_bandwidth_used(void); 4280 4281 /* 4282 * MIGRATION 4283 * 4284 * dequeue 4285 * update_curr() 4286 * update_min_vruntime() 4287 * vruntime -= min_vruntime 4288 * 4289 * enqueue 4290 * update_curr() 4291 * update_min_vruntime() 4292 * vruntime += min_vruntime 4293 * 4294 * this way the vruntime transition between RQs is done when both 4295 * min_vruntime are up-to-date. 4296 * 4297 * WAKEUP (remote) 4298 * 4299 * ->migrate_task_rq_fair() (p->state == TASK_WAKING) 4300 * vruntime -= min_vruntime 4301 * 4302 * enqueue 4303 * update_curr() 4304 * update_min_vruntime() 4305 * vruntime += min_vruntime 4306 * 4307 * this way we can tolerate a possibly stale min_vruntime on the originating 4308 * CPU, while still re-placing the task against an up-to-date min_vruntime on the destination CPU.
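 *
 * For illustration: a task at vruntime 1100 on a cfs_rq whose min_vruntime
 * is 1000 dequeues with a relative vruntime of 100; enqueued on a cfs_rq
 * whose min_vruntime is 5000 it resumes at 5100, keeping its relative
 * position instead of inheriting thousands of units of unearned lag.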
4309 */ 4310 4311 static void 4312 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 4313 { 4314 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); 4315 bool curr = cfs_rq->curr == se; 4316 4317 /* 4318 * If we're the current task, we must renormalise before calling 4319 * update_curr(). 4320 */ 4321 if (renorm && curr) 4322 se->vruntime += cfs_rq->min_vruntime; 4323 4324 update_curr(cfs_rq); 4325 4326 /* 4327 * Otherwise, renormalise after, such that we're placed at the current 4328 * moment in time, instead of some random moment in the past. Being 4329 * placed in the past could significantly boost this task to the 4330 * fairness detriment of existing tasks. 4331 */ 4332 if (renorm && !curr) 4333 se->vruntime += cfs_rq->min_vruntime; 4334 4335 /* 4336 * When enqueuing a sched_entity, we must: 4337 * - Update loads to have both entity and cfs_rq synced with now. 4338 * - Add its load to cfs_rq->runnable_avg 4339 * - For group_entity, update its weight to reflect the new share of 4340 * its group cfs_rq 4341 * - Add its new weight to cfs_rq->load.weight 4342 */ 4343 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); 4344 se_update_runnable(se); 4345 update_cfs_group(se); 4346 account_entity_enqueue(cfs_rq, se); 4347 4348 if (flags & ENQUEUE_WAKEUP) 4349 place_entity(cfs_rq, se, 0); 4350 4351 check_schedstat_required(); 4352 update_stats_enqueue_fair(cfs_rq, se, flags); 4353 check_spread(cfs_rq, se); 4354 if (!curr) 4355 __enqueue_entity(cfs_rq, se); 4356 se->on_rq = 1; 4357 4358 /* 4359 * When bandwidth control is enabled, the cfs_rq might have been removed 4360 * from the leaf list because a parent was throttled even though 4361 * cfs_rq->nr_running > 1; try to add it back unconditionally. 4362 */ 4363 if (cfs_rq->nr_running == 1 || cfs_bandwidth_used()) 4364 list_add_leaf_cfs_rq(cfs_rq); 4365 4366 if (cfs_rq->nr_running == 1) 4367 check_enqueue_throttle(cfs_rq); 4368 } 4369 4370 static void __clear_buddies_last(struct sched_entity *se) 4371 { 4372 for_each_sched_entity(se) { 4373 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4374 if (cfs_rq->last != se) 4375 break; 4376 4377 cfs_rq->last = NULL; 4378 } 4379 } 4380 4381 static void __clear_buddies_next(struct sched_entity *se) 4382 { 4383 for_each_sched_entity(se) { 4384 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4385 if (cfs_rq->next != se) 4386 break; 4387 4388 cfs_rq->next = NULL; 4389 } 4390 } 4391 4392 static void __clear_buddies_skip(struct sched_entity *se) 4393 { 4394 for_each_sched_entity(se) { 4395 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4396 if (cfs_rq->skip != se) 4397 break; 4398 4399 cfs_rq->skip = NULL; 4400 } 4401 } 4402 4403 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) 4404 { 4405 if (cfs_rq->last == se) 4406 __clear_buddies_last(se); 4407 4408 if (cfs_rq->next == se) 4409 __clear_buddies_next(se); 4410 4411 if (cfs_rq->skip == se) 4412 __clear_buddies_skip(se); 4413 } 4414 4415 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); 4416 4417 static void 4418 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 4419 { 4420 /* 4421 * Update run-time statistics of the 'current'. 4422 */ 4423 update_curr(cfs_rq); 4424 4425 /* 4426 * When dequeuing a sched_entity, we must: 4427 * - Update loads to have both entity and cfs_rq synced with now. 4428 * - Subtract its load from the cfs_rq->runnable_avg. 4429 * - Subtract its previous weight from cfs_rq->load.weight. 4430 * - For group entity, update its weight to reflect the new share 4431 * of its group cfs_rq.
4432 */ 4433 update_load_avg(cfs_rq, se, UPDATE_TG); 4434 se_update_runnable(se); 4435 4436 update_stats_dequeue_fair(cfs_rq, se, flags); 4437 4438 clear_buddies(cfs_rq, se); 4439 4440 if (se != cfs_rq->curr) 4441 __dequeue_entity(cfs_rq, se); 4442 se->on_rq = 0; 4443 account_entity_dequeue(cfs_rq, se); 4444 4445 /* 4446 * Normalize after update_curr(); which will also have moved 4447 * min_vruntime if @se is the one holding it back. But before doing 4448 * update_min_vruntime() again, which will discount @se's position and 4449 * can move min_vruntime forward still more. 4450 */ 4451 if (!(flags & DEQUEUE_SLEEP)) 4452 se->vruntime -= cfs_rq->min_vruntime; 4453 4454 /* return excess runtime on last dequeue */ 4455 return_cfs_rq_runtime(cfs_rq); 4456 4457 update_cfs_group(se); 4458 4459 /* 4460 * Now advance min_vruntime if @se was the entity holding it back, 4461 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be 4462 * put back on, and if we advance min_vruntime, we'll be placed back 4463 * further than we started -- ie. we'll be penalized. 4464 */ 4465 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) 4466 update_min_vruntime(cfs_rq); 4467 } 4468 4469 /* 4470 * Preempt the current task with a newly woken task if needed: 4471 */ 4472 static void 4473 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) 4474 { 4475 unsigned long ideal_runtime, delta_exec; 4476 struct sched_entity *se; 4477 s64 delta; 4478 4479 ideal_runtime = sched_slice(cfs_rq, curr); 4480 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; 4481 if (delta_exec > ideal_runtime) { 4482 resched_curr(rq_of(cfs_rq)); 4483 /* 4484 * The current task ran long enough, ensure it doesn't get 4485 * re-elected due to buddy favours. 4486 */ 4487 clear_buddies(cfs_rq, curr); 4488 return; 4489 } 4490 4491 /* 4492 * Ensure that a task that missed wakeup preemption by a 4493 * narrow margin doesn't have to wait for a full slice. 4494 * This also mitigates buddy induced latencies under load. 4495 */ 4496 if (delta_exec < sysctl_sched_min_granularity) 4497 return; 4498 4499 se = __pick_first_entity(cfs_rq); 4500 delta = curr->vruntime - se->vruntime; 4501 4502 if (delta < 0) 4503 return; 4504 4505 if (delta > ideal_runtime) 4506 resched_curr(rq_of(cfs_rq)); 4507 } 4508 4509 static void 4510 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 4511 { 4512 clear_buddies(cfs_rq, se); 4513 4514 /* 'current' is not kept within the tree. */ 4515 if (se->on_rq) { 4516 /* 4517 * Any task has to be enqueued before it get to execute on 4518 * a CPU. So account for the time it spent waiting on the 4519 * runqueue. 4520 */ 4521 update_stats_wait_end_fair(cfs_rq, se); 4522 __dequeue_entity(cfs_rq, se); 4523 update_load_avg(cfs_rq, se, UPDATE_TG); 4524 } 4525 4526 update_stats_curr_start(cfs_rq, se); 4527 cfs_rq->curr = se; 4528 4529 /* 4530 * Track our maximum slice length, if the CPU's load is at 4531 * least twice that of our own weight (i.e. 
don't track it 4532 * when there are only lesser-weight tasks around): 4533 */ 4534 if (schedstat_enabled() && 4535 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { 4536 struct sched_statistics *stats; 4537 4538 stats = __schedstats_from_se(se); 4539 __schedstat_set(stats->slice_max, 4540 max((u64)stats->slice_max, 4541 se->sum_exec_runtime - se->prev_sum_exec_runtime)); 4542 } 4543 4544 se->prev_sum_exec_runtime = se->sum_exec_runtime; 4545 } 4546 4547 static int 4548 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); 4549 4550 /* 4551 * Pick the next process, keeping these things in mind, in this order: 4552 * 1) keep things fair between processes/task groups 4553 * 2) pick the "next" process, since someone really wants that to run 4554 * 3) pick the "last" process, for cache locality 4555 * 4) do not run the "skip" process, if something else is available 4556 */ 4557 static struct sched_entity * 4558 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) 4559 { 4560 struct sched_entity *left = __pick_first_entity(cfs_rq); 4561 struct sched_entity *se; 4562 4563 /* 4564 * If curr is set we have to see if it's left of the leftmost entity 4565 * still in the tree, provided there was anything in the tree at all. 4566 */ 4567 if (!left || (curr && entity_before(curr, left))) 4568 left = curr; 4569 4570 se = left; /* ideally we run the leftmost entity */ 4571 4572 /* 4573 * Avoid running the skip buddy if running something else can 4574 * be done without getting too unfair. 4575 */ 4576 if (cfs_rq->skip && cfs_rq->skip == se) { 4577 struct sched_entity *second; 4578 4579 if (se == curr) { 4580 second = __pick_first_entity(cfs_rq); 4581 } else { 4582 second = __pick_next_entity(se); 4583 if (!second || (curr && entity_before(curr, second))) 4584 second = curr; 4585 } 4586 4587 if (second && wakeup_preempt_entity(second, left) < 1) 4588 se = second; 4589 } 4590 4591 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) { 4592 /* 4593 * Someone really wants this to run. If it's not unfair, run it. 4594 */ 4595 se = cfs_rq->next; 4596 } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) { 4597 /* 4598 * Prefer last buddy, try to return the CPU to a preempted task. 4599 */ 4600 se = cfs_rq->last; 4601 } 4602 4603 return se; 4604 } 4605 4606 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); 4607 4608 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) 4609 { 4610 /* 4611 * If still on the runqueue then deactivate_task() 4612 * was not called and update_curr() has to be done: 4613 */ 4614 if (prev->on_rq) 4615 update_curr(cfs_rq); 4616 4617 /* throttle cfs_rqs exceeding runtime */ 4618 check_cfs_rq_runtime(cfs_rq); 4619 4620 check_spread(cfs_rq, prev); 4621 4622 if (prev->on_rq) { 4623 update_stats_wait_start_fair(cfs_rq, prev); 4624 /* Put 'current' back into the tree. */ 4625 __enqueue_entity(cfs_rq, prev); 4626 /* in !on_rq case, update occurred at dequeue */ 4627 update_load_avg(cfs_rq, prev, 0); 4628 } 4629 cfs_rq->curr = NULL; 4630 } 4631 4632 static void 4633 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) 4634 { 4635 /* 4636 * Update run-time statistics of the 'current'. 4637 */ 4638 update_curr(cfs_rq); 4639 4640 /* 4641 * Ensure that runnable average is periodically updated.
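 *
 * Even when no enqueue/dequeue happens, the update_load_avg() call below
 * ages both @curr and its cfs_rq once per tick, so the load and
 * utilization averages seen by load balancing do not go stale while a
 * task keeps running.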
4642 */ 4643 update_load_avg(cfs_rq, curr, UPDATE_TG); 4644 update_cfs_group(curr); 4645 4646 #ifdef CONFIG_SCHED_HRTICK 4647 /* 4648 * queued ticks are scheduled to match the slice, so don't bother 4649 * validating it and just reschedule. 4650 */ 4651 if (queued) { 4652 resched_curr(rq_of(cfs_rq)); 4653 return; 4654 } 4655 /* 4656 * don't let the period tick interfere with the hrtick preemption 4657 */ 4658 if (!sched_feat(DOUBLE_TICK) && 4659 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) 4660 return; 4661 #endif 4662 4663 if (cfs_rq->nr_running > 1) 4664 check_preempt_tick(cfs_rq, curr); 4665 } 4666 4667 4668 /************************************************** 4669 * CFS bandwidth control machinery 4670 */ 4671 4672 #ifdef CONFIG_CFS_BANDWIDTH 4673 4674 #ifdef CONFIG_JUMP_LABEL 4675 static struct static_key __cfs_bandwidth_used; 4676 4677 static inline bool cfs_bandwidth_used(void) 4678 { 4679 return static_key_false(&__cfs_bandwidth_used); 4680 } 4681 4682 void cfs_bandwidth_usage_inc(void) 4683 { 4684 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used); 4685 } 4686 4687 void cfs_bandwidth_usage_dec(void) 4688 { 4689 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); 4690 } 4691 #else /* CONFIG_JUMP_LABEL */ 4692 static bool cfs_bandwidth_used(void) 4693 { 4694 return true; 4695 } 4696 4697 void cfs_bandwidth_usage_inc(void) {} 4698 void cfs_bandwidth_usage_dec(void) {} 4699 #endif /* CONFIG_JUMP_LABEL */ 4700 4701 /* 4702 * default period for cfs group bandwidth. 4703 * default: 0.1s, units: nanoseconds 4704 */ 4705 static inline u64 default_cfs_period(void) 4706 { 4707 return 100000000ULL; 4708 } 4709 4710 static inline u64 sched_cfs_bandwidth_slice(void) 4711 { 4712 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; 4713 } 4714 4715 /* 4716 * Replenish runtime according to assigned quota. We use sched_clock_cpu 4717 * directly instead of rq->clock to avoid adding additional synchronization 4718 * around rq->lock. 
4719 * 4720 * requires cfs_b->lock 4721 */ 4722 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) 4723 { 4724 s64 runtime; 4725 4726 if (unlikely(cfs_b->quota == RUNTIME_INF)) 4727 return; 4728 4729 cfs_b->runtime += cfs_b->quota; 4730 runtime = cfs_b->runtime_snap - cfs_b->runtime; 4731 if (runtime > 0) { 4732 cfs_b->burst_time += runtime; 4733 cfs_b->nr_burst++; 4734 } 4735 4736 cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); 4737 cfs_b->runtime_snap = cfs_b->runtime; 4738 } 4739 4740 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 4741 { 4742 return &tg->cfs_bandwidth; 4743 } 4744 4745 /* returns 0 on failure to allocate runtime */ 4746 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, 4747 struct cfs_rq *cfs_rq, u64 target_runtime) 4748 { 4749 u64 min_amount, amount = 0; 4750 4751 lockdep_assert_held(&cfs_b->lock); 4752 4753 /* note: this is a positive sum as runtime_remaining <= 0 */ 4754 min_amount = target_runtime - cfs_rq->runtime_remaining; 4755 4756 if (cfs_b->quota == RUNTIME_INF) 4757 amount = min_amount; 4758 else { 4759 start_cfs_bandwidth(cfs_b); 4760 4761 if (cfs_b->runtime > 0) { 4762 amount = min(cfs_b->runtime, min_amount); 4763 cfs_b->runtime -= amount; 4764 cfs_b->idle = 0; 4765 } 4766 } 4767 4768 cfs_rq->runtime_remaining += amount; 4769 4770 return cfs_rq->runtime_remaining > 0; 4771 } 4772 4773 /* returns 0 on failure to allocate runtime */ 4774 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4775 { 4776 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4777 int ret; 4778 4779 raw_spin_lock(&cfs_b->lock); 4780 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); 4781 raw_spin_unlock(&cfs_b->lock); 4782 4783 return ret; 4784 } 4785 4786 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4787 { 4788 /* dock delta_exec before expiring quota (as it could span periods) */ 4789 cfs_rq->runtime_remaining -= delta_exec; 4790 4791 if (likely(cfs_rq->runtime_remaining > 0)) 4792 return; 4793 4794 if (cfs_rq->throttled) 4795 return; 4796 /* 4797 * if we're unable to extend our runtime we resched so that the active 4798 * hierarchy can be throttled 4799 */ 4800 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) 4801 resched_curr(rq_of(cfs_rq)); 4802 } 4803 4804 static __always_inline 4805 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4806 { 4807 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 4808 return; 4809 4810 __account_cfs_rq_runtime(cfs_rq, delta_exec); 4811 } 4812 4813 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 4814 { 4815 return cfs_bandwidth_used() && cfs_rq->throttled; 4816 } 4817 4818 /* check whether cfs_rq, or any parent, is throttled */ 4819 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 4820 { 4821 return cfs_bandwidth_used() && cfs_rq->throttle_count; 4822 } 4823 4824 /* 4825 * Ensure that neither of the group entities corresponding to src_cpu or 4826 * dest_cpu are members of a throttled hierarchy when performing group 4827 * load-balance operations. 
4828 */ 4829 static inline int throttled_lb_pair(struct task_group *tg, 4830 int src_cpu, int dest_cpu) 4831 { 4832 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; 4833 4834 src_cfs_rq = tg->cfs_rq[src_cpu]; 4835 dest_cfs_rq = tg->cfs_rq[dest_cpu]; 4836 4837 return throttled_hierarchy(src_cfs_rq) || 4838 throttled_hierarchy(dest_cfs_rq); 4839 } 4840 4841 static int tg_unthrottle_up(struct task_group *tg, void *data) 4842 { 4843 struct rq *rq = data; 4844 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4845 4846 cfs_rq->throttle_count--; 4847 if (!cfs_rq->throttle_count) { 4848 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - 4849 cfs_rq->throttled_clock_task; 4850 4851 /* Add cfs_rq with load or one or more already running entities to the list */ 4852 if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running) 4853 list_add_leaf_cfs_rq(cfs_rq); 4854 } 4855 4856 return 0; 4857 } 4858 4859 static int tg_throttle_down(struct task_group *tg, void *data) 4860 { 4861 struct rq *rq = data; 4862 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4863 4864 /* group is entering throttled state, stop time */ 4865 if (!cfs_rq->throttle_count) { 4866 cfs_rq->throttled_clock_task = rq_clock_task(rq); 4867 list_del_leaf_cfs_rq(cfs_rq); 4868 } 4869 cfs_rq->throttle_count++; 4870 4871 return 0; 4872 } 4873 4874 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) 4875 { 4876 struct rq *rq = rq_of(cfs_rq); 4877 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4878 struct sched_entity *se; 4879 long task_delta, idle_task_delta, dequeue = 1; 4880 4881 raw_spin_lock(&cfs_b->lock); 4882 /* This will start the period timer if necessary */ 4883 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { 4884 /* 4885 * We have raced with bandwidth becoming available, and if we 4886 * actually throttled the timer might not unthrottle us for an 4887 * entire period. We additionally needed to make sure that any 4888 * subsequent check_cfs_rq_runtime calls agree not to throttle 4889 * us, as we may commit to do cfs put_prev+pick_next, so we ask 4890 * for 1ns of runtime rather than just check cfs_b. 4891 */ 4892 dequeue = 0; 4893 } else { 4894 list_add_tail_rcu(&cfs_rq->throttled_list, 4895 &cfs_b->throttled_cfs_rq); 4896 } 4897 raw_spin_unlock(&cfs_b->lock); 4898 4899 if (!dequeue) 4900 return false; /* Throttle no longer required. 
*/ 4901 4902 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; 4903 4904 /* freeze hierarchy runnable averages while throttled */ 4905 rcu_read_lock(); 4906 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); 4907 rcu_read_unlock(); 4908 4909 task_delta = cfs_rq->h_nr_running; 4910 idle_task_delta = cfs_rq->idle_h_nr_running; 4911 for_each_sched_entity(se) { 4912 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4913 /* throttled entity or throttle-on-deactivate */ 4914 if (!se->on_rq) 4915 goto done; 4916 4917 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); 4918 4919 if (cfs_rq_is_idle(group_cfs_rq(se))) 4920 idle_task_delta = cfs_rq->h_nr_running; 4921 4922 qcfs_rq->h_nr_running -= task_delta; 4923 qcfs_rq->idle_h_nr_running -= idle_task_delta; 4924 4925 if (qcfs_rq->load.weight) { 4926 /* Avoid re-evaluating load for this entity: */ 4927 se = parent_entity(se); 4928 break; 4929 } 4930 } 4931 4932 for_each_sched_entity(se) { 4933 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4934 /* throttled entity or throttle-on-deactivate */ 4935 if (!se->on_rq) 4936 goto done; 4937 4938 update_load_avg(qcfs_rq, se, 0); 4939 se_update_runnable(se); 4940 4941 if (cfs_rq_is_idle(group_cfs_rq(se))) 4942 idle_task_delta = cfs_rq->h_nr_running; 4943 4944 qcfs_rq->h_nr_running -= task_delta; 4945 qcfs_rq->idle_h_nr_running -= idle_task_delta; 4946 } 4947 4948 /* At this point se is NULL and we are at root level*/ 4949 sub_nr_running(rq, task_delta); 4950 4951 done: 4952 /* 4953 * Note: distribution will already see us throttled via the 4954 * throttled-list. rq->lock protects completion. 4955 */ 4956 cfs_rq->throttled = 1; 4957 cfs_rq->throttled_clock = rq_clock(rq); 4958 return true; 4959 } 4960 4961 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) 4962 { 4963 struct rq *rq = rq_of(cfs_rq); 4964 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4965 struct sched_entity *se; 4966 long task_delta, idle_task_delta; 4967 4968 se = cfs_rq->tg->se[cpu_of(rq)]; 4969 4970 cfs_rq->throttled = 0; 4971 4972 update_rq_clock(rq); 4973 4974 raw_spin_lock(&cfs_b->lock); 4975 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; 4976 list_del_rcu(&cfs_rq->throttled_list); 4977 raw_spin_unlock(&cfs_b->lock); 4978 4979 /* update hierarchical throttle state */ 4980 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); 4981 4982 /* Nothing to run but something to decay (on_list)? 
Complete the branch */ 4983 if (!cfs_rq->load.weight) { 4984 if (cfs_rq->on_list) 4985 goto unthrottle_throttle; 4986 return; 4987 } 4988 4989 task_delta = cfs_rq->h_nr_running; 4990 idle_task_delta = cfs_rq->idle_h_nr_running; 4991 for_each_sched_entity(se) { 4992 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4993 4994 if (se->on_rq) 4995 break; 4996 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); 4997 4998 if (cfs_rq_is_idle(group_cfs_rq(se))) 4999 idle_task_delta = cfs_rq->h_nr_running; 5000 5001 qcfs_rq->h_nr_running += task_delta; 5002 qcfs_rq->idle_h_nr_running += idle_task_delta; 5003 5004 /* end evaluation on encountering a throttled cfs_rq */ 5005 if (cfs_rq_throttled(qcfs_rq)) 5006 goto unthrottle_throttle; 5007 } 5008 5009 for_each_sched_entity(se) { 5010 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 5011 5012 update_load_avg(qcfs_rq, se, UPDATE_TG); 5013 se_update_runnable(se); 5014 5015 if (cfs_rq_is_idle(group_cfs_rq(se))) 5016 idle_task_delta = cfs_rq->h_nr_running; 5017 5018 qcfs_rq->h_nr_running += task_delta; 5019 qcfs_rq->idle_h_nr_running += idle_task_delta; 5020 5021 /* end evaluation on encountering a throttled cfs_rq */ 5022 if (cfs_rq_throttled(qcfs_rq)) 5023 goto unthrottle_throttle; 5024 5025 /* 5026 * One parent has been throttled and cfs_rq removed from the 5027 * list. Add it back to not break the leaf list. 5028 */ 5029 if (throttled_hierarchy(qcfs_rq)) 5030 list_add_leaf_cfs_rq(qcfs_rq); 5031 } 5032 5033 /* At this point se is NULL and we are at root level*/ 5034 add_nr_running(rq, task_delta); 5035 5036 unthrottle_throttle: 5037 /* 5038 * The cfs_rq_throttled() breaks in the above iteration can result in 5039 * incomplete leaf list maintenance, resulting in triggering the 5040 * assertion below. 5041 */ 5042 for_each_sched_entity(se) { 5043 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 5044 5045 if (list_add_leaf_cfs_rq(qcfs_rq)) 5046 break; 5047 } 5048 5049 assert_list_leaf_cfs_rq(rq); 5050 5051 /* Determine whether we need to wake up potentially idle CPU: */ 5052 if (rq->curr == rq->idle && rq->cfs.nr_running) 5053 resched_curr(rq); 5054 } 5055 5056 static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b) 5057 { 5058 struct cfs_rq *cfs_rq; 5059 u64 runtime, remaining = 1; 5060 5061 rcu_read_lock(); 5062 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, 5063 throttled_list) { 5064 struct rq *rq = rq_of(cfs_rq); 5065 struct rq_flags rf; 5066 5067 rq_lock_irqsave(rq, &rf); 5068 if (!cfs_rq_throttled(cfs_rq)) 5069 goto next; 5070 5071 /* By the above check, this should never be true */ 5072 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); 5073 5074 raw_spin_lock(&cfs_b->lock); 5075 runtime = -cfs_rq->runtime_remaining + 1; 5076 if (runtime > cfs_b->runtime) 5077 runtime = cfs_b->runtime; 5078 cfs_b->runtime -= runtime; 5079 remaining = cfs_b->runtime; 5080 raw_spin_unlock(&cfs_b->lock); 5081 5082 cfs_rq->runtime_remaining += runtime; 5083 5084 /* we check whether we're throttled above */ 5085 if (cfs_rq->runtime_remaining > 0) 5086 unthrottle_cfs_rq(cfs_rq); 5087 5088 next: 5089 rq_unlock_irqrestore(rq, &rf); 5090 5091 if (!remaining) 5092 break; 5093 } 5094 rcu_read_unlock(); 5095 } 5096 5097 /* 5098 * Responsible for refilling a task_group's bandwidth and unthrottling its 5099 * cfs_rqs as appropriate. If there has been no activity within the last 5100 * period the timer is deactivated until scheduling resumes; cfs_b->idle is 5101 * used to track this state. 
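 *
 * For illustration: with a 100ms period and 50ms quota, each period-timer
 * expiry tops the global runtime pool back up toward the quota (plus any
 * configured burst) and unthrottles waiting cfs_rqs, giving the group
 * roughly half a CPU of bandwidth on average.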
5102 */ 5103 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) 5104 { 5105 int throttled; 5106 5107 /* no need to continue the timer with no bandwidth constraint */ 5108 if (cfs_b->quota == RUNTIME_INF) 5109 goto out_deactivate; 5110 5111 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 5112 cfs_b->nr_periods += overrun; 5113 5114 /* Refill extra burst quota even if cfs_b->idle */ 5115 __refill_cfs_bandwidth_runtime(cfs_b); 5116 5117 /* 5118 * idle depends on !throttled (for the case of a large deficit), and if 5119 * we're going inactive then everything else can be deferred 5120 */ 5121 if (cfs_b->idle && !throttled) 5122 goto out_deactivate; 5123 5124 if (!throttled) { 5125 /* mark as potentially idle for the upcoming period */ 5126 cfs_b->idle = 1; 5127 return 0; 5128 } 5129 5130 /* account preceding periods in which throttling occurred */ 5131 cfs_b->nr_throttled += overrun; 5132 5133 /* 5134 * This check is repeated as we release cfs_b->lock while we unthrottle. 5135 */ 5136 while (throttled && cfs_b->runtime > 0) { 5137 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5138 /* we can't nest cfs_b->lock while distributing bandwidth */ 5139 distribute_cfs_runtime(cfs_b); 5140 raw_spin_lock_irqsave(&cfs_b->lock, flags); 5141 5142 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 5143 } 5144 5145 /* 5146 * While we are ensured activity in the period following an 5147 * unthrottle, this also covers the case in which the new bandwidth is 5148 * insufficient to cover the existing bandwidth deficit. (Forcing the 5149 * timer to remain active while there are any throttled entities.) 5150 */ 5151 cfs_b->idle = 0; 5152 5153 return 0; 5154 5155 out_deactivate: 5156 return 1; 5157 } 5158 5159 /* a cfs_rq won't donate quota below this amount */ 5160 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; 5161 /* minimum remaining period time to redistribute slack quota */ 5162 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; 5163 /* how long we wait to gather additional slack before distributing */ 5164 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; 5165 5166 /* 5167 * Are we near the end of the current quota period? 5168 * 5169 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the 5170 * hrtimer base being cleared by hrtimer_start. In the case of 5171 * migrate_hrtimers, base is never cleared, so we are fine. 5172 */ 5173 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) 5174 { 5175 struct hrtimer *refresh_timer = &cfs_b->period_timer; 5176 s64 remaining; 5177 5178 /* if the call-back is running a quota refresh is already occurring */ 5179 if (hrtimer_callback_running(refresh_timer)) 5180 return 1; 5181 5182 /* is a quota refresh about to occur? 
*/ 5183 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); 5184 if (remaining < (s64)min_expire) 5185 return 1; 5186 5187 return 0; 5188 } 5189 5190 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) 5191 { 5192 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; 5193 5194 /* if there's a quota refresh soon don't bother with slack */ 5195 if (runtime_refresh_within(cfs_b, min_left)) 5196 return; 5197 5198 /* don't push forwards an existing deferred unthrottle */ 5199 if (cfs_b->slack_started) 5200 return; 5201 cfs_b->slack_started = true; 5202 5203 hrtimer_start(&cfs_b->slack_timer, 5204 ns_to_ktime(cfs_bandwidth_slack_period), 5205 HRTIMER_MODE_REL); 5206 } 5207 5208 /* we know any runtime found here is valid as update_curr() precedes return */ 5209 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5210 { 5211 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 5212 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; 5213 5214 if (slack_runtime <= 0) 5215 return; 5216 5217 raw_spin_lock(&cfs_b->lock); 5218 if (cfs_b->quota != RUNTIME_INF) { 5219 cfs_b->runtime += slack_runtime; 5220 5221 /* we are under rq->lock, defer unthrottling using a timer */ 5222 if (cfs_b->runtime > sched_cfs_bandwidth_slice() && 5223 !list_empty(&cfs_b->throttled_cfs_rq)) 5224 start_cfs_slack_bandwidth(cfs_b); 5225 } 5226 raw_spin_unlock(&cfs_b->lock); 5227 5228 /* even if it's not valid for return we don't want to try again */ 5229 cfs_rq->runtime_remaining -= slack_runtime; 5230 } 5231 5232 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5233 { 5234 if (!cfs_bandwidth_used()) 5235 return; 5236 5237 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) 5238 return; 5239 5240 __return_cfs_rq_runtime(cfs_rq); 5241 } 5242 5243 /* 5244 * This is done with a timer (instead of inline with bandwidth return) since 5245 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. 5246 */ 5247 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) 5248 { 5249 u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); 5250 unsigned long flags; 5251 5252 /* confirm we're still not at a refresh boundary */ 5253 raw_spin_lock_irqsave(&cfs_b->lock, flags); 5254 cfs_b->slack_started = false; 5255 5256 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { 5257 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5258 return; 5259 } 5260 5261 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) 5262 runtime = cfs_b->runtime; 5263 5264 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5265 5266 if (!runtime) 5267 return; 5268 5269 distribute_cfs_runtime(cfs_b); 5270 } 5271 5272 /* 5273 * When a group wakes up we want to make sure that its quota is not already 5274 * expired/exceeded, otherwise it may be allowed to steal additional ticks of 5275 * runtime as update_curr() throttling can not trigger until it's on-rq. 
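 *
 * Hence the zero-delta account_cfs_rq_runtime(cfs_rq, 0) call below: it
 * charges nothing, it merely re-evaluates runtime_remaining so a group
 * waking up with exhausted quota is throttled right at enqueue.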
5276 */ 5277 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) 5278 { 5279 if (!cfs_bandwidth_used()) 5280 return; 5281 5282 /* an active group must be handled by the update_curr()->put() path */ 5283 if (!cfs_rq->runtime_enabled || cfs_rq->curr) 5284 return; 5285 5286 /* ensure the group is not already throttled */ 5287 if (cfs_rq_throttled(cfs_rq)) 5288 return; 5289 5290 /* update runtime allocation */ 5291 account_cfs_rq_runtime(cfs_rq, 0); 5292 if (cfs_rq->runtime_remaining <= 0) 5293 throttle_cfs_rq(cfs_rq); 5294 } 5295 5296 static void sync_throttle(struct task_group *tg, int cpu) 5297 { 5298 struct cfs_rq *pcfs_rq, *cfs_rq; 5299 5300 if (!cfs_bandwidth_used()) 5301 return; 5302 5303 if (!tg->parent) 5304 return; 5305 5306 cfs_rq = tg->cfs_rq[cpu]; 5307 pcfs_rq = tg->parent->cfs_rq[cpu]; 5308 5309 cfs_rq->throttle_count = pcfs_rq->throttle_count; 5310 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); 5311 } 5312 5313 /* conditionally throttle active cfs_rq's from put_prev_entity() */ 5314 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5315 { 5316 if (!cfs_bandwidth_used()) 5317 return false; 5318 5319 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) 5320 return false; 5321 5322 /* 5323 * it's possible for a throttled entity to be forced into a running 5324 * state (e.g. set_curr_task), in this case we're finished. 5325 */ 5326 if (cfs_rq_throttled(cfs_rq)) 5327 return true; 5328 5329 return throttle_cfs_rq(cfs_rq); 5330 } 5331 5332 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) 5333 { 5334 struct cfs_bandwidth *cfs_b = 5335 container_of(timer, struct cfs_bandwidth, slack_timer); 5336 5337 do_sched_cfs_slack_timer(cfs_b); 5338 5339 return HRTIMER_NORESTART; 5340 } 5341 5342 extern const u64 max_cfs_quota_period; 5343 5344 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) 5345 { 5346 struct cfs_bandwidth *cfs_b = 5347 container_of(timer, struct cfs_bandwidth, period_timer); 5348 unsigned long flags; 5349 int overrun; 5350 int idle = 0; 5351 int count = 0; 5352 5353 raw_spin_lock_irqsave(&cfs_b->lock, flags); 5354 for (;;) { 5355 overrun = hrtimer_forward_now(timer, cfs_b->period); 5356 if (!overrun) 5357 break; 5358 5359 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); 5360 5361 if (++count > 3) { 5362 u64 new, old = ktime_to_ns(cfs_b->period); 5363 5364 /* 5365 * Grow period by a factor of 2 to avoid losing precision. 5366 * Precision loss in the quota/period ratio can cause __cfs_schedulable 5367 * to fail. 5368 */ 5369 new = old * 2; 5370 if (new < max_cfs_quota_period) { 5371 cfs_b->period = ns_to_ktime(new); 5372 cfs_b->quota *= 2; 5373 cfs_b->burst *= 2; 5374 5375 pr_warn_ratelimited( 5376 "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", 5377 smp_processor_id(), 5378 div_u64(new, NSEC_PER_USEC), 5379 div_u64(cfs_b->quota, NSEC_PER_USEC)); 5380 } else { 5381 pr_warn_ratelimited( 5382 "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n", 5383 smp_processor_id(), 5384 div_u64(old, NSEC_PER_USEC), 5385 div_u64(cfs_b->quota, NSEC_PER_USEC)); 5386 } 5387 5388 /* reset count so we don't come right back in here */ 5389 count = 0; 5390 } 5391 } 5392 if (idle) 5393 cfs_b->period_active = 0; 5394 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5395 5396 return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; 5397 } 5398 5399 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5400 { 5401 raw_spin_lock_init(&cfs_b->lock); 5402 cfs_b->runtime = 0; 5403 cfs_b->quota = RUNTIME_INF; 5404 cfs_b->period = ns_to_ktime(default_cfs_period()); 5405 cfs_b->burst = 0; 5406 5407 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); 5408 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 5409 cfs_b->period_timer.function = sched_cfs_period_timer; 5410 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 5411 cfs_b->slack_timer.function = sched_cfs_slack_timer; 5412 cfs_b->slack_started = false; 5413 } 5414 5415 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5416 { 5417 cfs_rq->runtime_enabled = 0; 5418 INIT_LIST_HEAD(&cfs_rq->throttled_list); 5419 } 5420 5421 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5422 { 5423 lockdep_assert_held(&cfs_b->lock); 5424 5425 if (cfs_b->period_active) 5426 return; 5427 5428 cfs_b->period_active = 1; 5429 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); 5430 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); 5431 } 5432 5433 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5434 { 5435 /* init_cfs_bandwidth() was not called */ 5436 if (!cfs_b->throttled_cfs_rq.next) 5437 return; 5438 5439 hrtimer_cancel(&cfs_b->period_timer); 5440 hrtimer_cancel(&cfs_b->slack_timer); 5441 } 5442 5443 /* 5444 * Both these CPU hotplug callbacks race against unregister_fair_sched_group() 5445 * 5446 * The race is harmless, since modifying bandwidth settings of unhooked group 5447 * bits doesn't do much. 5448 */ 5449 5450 /* cpu online callback */ 5451 static void __maybe_unused update_runtime_enabled(struct rq *rq) 5452 { 5453 struct task_group *tg; 5454 5455 lockdep_assert_rq_held(rq); 5456 5457 rcu_read_lock(); 5458 list_for_each_entry_rcu(tg, &task_groups, list) { 5459 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 5460 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5461 5462 raw_spin_lock(&cfs_b->lock); 5463 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; 5464 raw_spin_unlock(&cfs_b->lock); 5465 } 5466 rcu_read_unlock(); 5467 } 5468 5469 /* cpu offline callback */ 5470 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) 5471 { 5472 struct task_group *tg; 5473 5474 lockdep_assert_rq_held(rq); 5475 5476 rcu_read_lock(); 5477 list_for_each_entry_rcu(tg, &task_groups, list) { 5478 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5479 5480 if (!cfs_rq->runtime_enabled) 5481 continue; 5482 5483 /* 5484 * clock_task is not advancing so we just need to make sure 5485 * there's some valid quota amount 5486 */ 5487 cfs_rq->runtime_remaining = 1; 5488 /* 5489 * Offline rq is schedulable till CPU is completely disabled 5490 * in take_cpu_down(), so we prevent new cfs throttling here. 
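 *
 * Setting runtime_remaining to 1 above means anything that still runs on
 * this offline rq sees positive quota, and clearing runtime_enabled below
 * keeps account_cfs_rq_runtime() from re-arming the throttle.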
5491 */ 5492 cfs_rq->runtime_enabled = 0; 5493 5494 if (cfs_rq_throttled(cfs_rq)) 5495 unthrottle_cfs_rq(cfs_rq); 5496 } 5497 rcu_read_unlock(); 5498 } 5499 5500 #else /* CONFIG_CFS_BANDWIDTH */ 5501 5502 static inline bool cfs_bandwidth_used(void) 5503 { 5504 return false; 5505 } 5506 5507 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} 5508 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } 5509 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 5510 static inline void sync_throttle(struct task_group *tg, int cpu) {} 5511 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 5512 5513 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 5514 { 5515 return 0; 5516 } 5517 5518 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 5519 { 5520 return 0; 5521 } 5522 5523 static inline int throttled_lb_pair(struct task_group *tg, 5524 int src_cpu, int dest_cpu) 5525 { 5526 return 0; 5527 } 5528 5529 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5530 5531 #ifdef CONFIG_FAIR_GROUP_SCHED 5532 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 5533 #endif 5534 5535 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 5536 { 5537 return NULL; 5538 } 5539 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5540 static inline void update_runtime_enabled(struct rq *rq) {} 5541 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} 5542 5543 #endif /* CONFIG_CFS_BANDWIDTH */ 5544 5545 /************************************************** 5546 * CFS operations on tasks: 5547 */ 5548 5549 #ifdef CONFIG_SCHED_HRTICK 5550 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) 5551 { 5552 struct sched_entity *se = &p->se; 5553 struct cfs_rq *cfs_rq = cfs_rq_of(se); 5554 5555 SCHED_WARN_ON(task_rq(p) != rq); 5556 5557 if (rq->cfs.h_nr_running > 1) { 5558 u64 slice = sched_slice(cfs_rq, se); 5559 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; 5560 s64 delta = slice - ran; 5561 5562 if (delta < 0) { 5563 if (task_current(rq, p)) 5564 resched_curr(rq); 5565 return; 5566 } 5567 hrtick_start(rq, delta); 5568 } 5569 } 5570 5571 /* 5572 * called from enqueue/dequeue and updates the hrtick when the 5573 * current task is from our class and nr_running is low enough 5574 * to matter. 
5575 */ 5576 static void hrtick_update(struct rq *rq) 5577 { 5578 struct task_struct *curr = rq->curr; 5579 5580 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class) 5581 return; 5582 5583 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) 5584 hrtick_start_fair(rq, curr); 5585 } 5586 #else /* !CONFIG_SCHED_HRTICK */ 5587 static inline void 5588 hrtick_start_fair(struct rq *rq, struct task_struct *p) 5589 { 5590 } 5591 5592 static inline void hrtick_update(struct rq *rq) 5593 { 5594 } 5595 #endif 5596 5597 #ifdef CONFIG_SMP 5598 static inline bool cpu_overutilized(int cpu) 5599 { 5600 return !fits_capacity(cpu_util_cfs(cpu), capacity_of(cpu)); 5601 } 5602 5603 static inline void update_overutilized_status(struct rq *rq) 5604 { 5605 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) { 5606 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); 5607 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED); 5608 } 5609 } 5610 #else 5611 static inline void update_overutilized_status(struct rq *rq) { } 5612 #endif 5613 5614 /* Runqueue only has SCHED_IDLE tasks enqueued */ 5615 static int sched_idle_rq(struct rq *rq) 5616 { 5617 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && 5618 rq->nr_running); 5619 } 5620 5621 /* 5622 * Returns true if cfs_rq only has SCHED_IDLE entities enqueued. Note the use 5623 * of idle_nr_running, which does not consider idle descendants of normal 5624 * entities. 5625 */ 5626 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq) 5627 { 5628 return cfs_rq->nr_running && 5629 cfs_rq->nr_running == cfs_rq->idle_nr_running; 5630 } 5631 5632 #ifdef CONFIG_SMP 5633 static int sched_idle_cpu(int cpu) 5634 { 5635 return sched_idle_rq(cpu_rq(cpu)); 5636 } 5637 #endif 5638 5639 /* 5640 * The enqueue_task method is called before nr_running is 5641 * increased. Here we update the fair scheduling stats and 5642 * then put the task into the rbtree: 5643 */ 5644 static void 5645 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) 5646 { 5647 struct cfs_rq *cfs_rq; 5648 struct sched_entity *se = &p->se; 5649 int idle_h_nr_running = task_has_idle_policy(p); 5650 int task_new = !(flags & ENQUEUE_WAKEUP); 5651 5652 /* 5653 * The code below (indirectly) updates schedutil which looks at 5654 * the cfs_rq utilization to select a frequency. 5655 * Let's add the task's estimated utilization to the cfs_rq's 5656 * estimated utilization, before we update schedutil. 5657 */ 5658 util_est_enqueue(&rq->cfs, p); 5659 5660 /* 5661 * If in_iowait is set, the code below may not trigger any cpufreq 5662 * utilization updates, so do it here explicitly with the IOWAIT flag 5663 * passed. 
5664 */ 5665 if (p->in_iowait) 5666 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); 5667 5668 for_each_sched_entity(se) { 5669 if (se->on_rq) 5670 break; 5671 cfs_rq = cfs_rq_of(se); 5672 enqueue_entity(cfs_rq, se, flags); 5673 5674 cfs_rq->h_nr_running++; 5675 cfs_rq->idle_h_nr_running += idle_h_nr_running; 5676 5677 if (cfs_rq_is_idle(cfs_rq)) 5678 idle_h_nr_running = 1; 5679 5680 /* end evaluation on encountering a throttled cfs_rq */ 5681 if (cfs_rq_throttled(cfs_rq)) 5682 goto enqueue_throttle; 5683 5684 flags = ENQUEUE_WAKEUP; 5685 } 5686 5687 for_each_sched_entity(se) { 5688 cfs_rq = cfs_rq_of(se); 5689 5690 update_load_avg(cfs_rq, se, UPDATE_TG); 5691 se_update_runnable(se); 5692 update_cfs_group(se); 5693 5694 cfs_rq->h_nr_running++; 5695 cfs_rq->idle_h_nr_running += idle_h_nr_running; 5696 5697 if (cfs_rq_is_idle(cfs_rq)) 5698 idle_h_nr_running = 1; 5699 5700 /* end evaluation on encountering a throttled cfs_rq */ 5701 if (cfs_rq_throttled(cfs_rq)) 5702 goto enqueue_throttle; 5703 5704 /* 5705 * One parent has been throttled and cfs_rq removed from the 5706 * list. Add it back to not break the leaf list. 5707 */ 5708 if (throttled_hierarchy(cfs_rq)) 5709 list_add_leaf_cfs_rq(cfs_rq); 5710 } 5711 5712 /* At this point se is NULL and we are at root level */ 5713 add_nr_running(rq, 1); 5714 5715 /* 5716 * Since new tasks are assigned an initial util_avg equal to 5717 * half of the spare capacity of their CPU, tiny tasks have the 5718 * ability to cross the overutilized threshold, which will 5719 * result in the load balancer ruining all the task placement 5720 * done by EAS. As a way to mitigate that effect, do not account 5721 * for the first enqueue operation of new tasks during the 5722 * overutilized flag detection. 5723 * 5724 * A better way of solving this problem would be to wait for 5725 * the PELT signals of tasks to converge before taking them 5726 * into account, but that is not straightforward to implement, 5727 * and the following generally works well enough in practice. 5728 */ 5729 if (!task_new) 5730 update_overutilized_status(rq); 5731 5732 enqueue_throttle: 5733 if (cfs_bandwidth_used()) { 5734 /* 5735 * When bandwidth control is enabled, the cfs_rq_throttled() 5736 * breaks in the above iteration can result in incomplete 5737 * leaf list maintenance, resulting in triggering the assertion 5738 * below. 5739 */ 5740 for_each_sched_entity(se) { 5741 cfs_rq = cfs_rq_of(se); 5742 5743 if (list_add_leaf_cfs_rq(cfs_rq)) 5744 break; 5745 } 5746 } 5747 5748 assert_list_leaf_cfs_rq(rq); 5749 5750 hrtick_update(rq); 5751 } 5752 5753 static void set_next_buddy(struct sched_entity *se); 5754 5755 /* 5756 * The dequeue_task method is called before nr_running is 5757 * decreased.
We remove the task from the rbtree and 5758 * update the fair scheduling stats: 5759 */ 5760 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) 5761 { 5762 struct cfs_rq *cfs_rq; 5763 struct sched_entity *se = &p->se; 5764 int task_sleep = flags & DEQUEUE_SLEEP; 5765 int idle_h_nr_running = task_has_idle_policy(p); 5766 bool was_sched_idle = sched_idle_rq(rq); 5767 5768 util_est_dequeue(&rq->cfs, p); 5769 5770 for_each_sched_entity(se) { 5771 cfs_rq = cfs_rq_of(se); 5772 dequeue_entity(cfs_rq, se, flags); 5773 5774 cfs_rq->h_nr_running--; 5775 cfs_rq->idle_h_nr_running -= idle_h_nr_running; 5776 5777 if (cfs_rq_is_idle(cfs_rq)) 5778 idle_h_nr_running = 1; 5779 5780 /* end evaluation on encountering a throttled cfs_rq */ 5781 if (cfs_rq_throttled(cfs_rq)) 5782 goto dequeue_throttle; 5783 5784 /* Don't dequeue parent if it has other entities besides us */ 5785 if (cfs_rq->load.weight) { 5786 /* Avoid re-evaluating load for this entity: */ 5787 se = parent_entity(se); 5788 /* 5789 * Bias pick_next to pick a task from this cfs_rq, as 5790 * p is sleeping when it is within its sched_slice. 5791 */ 5792 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) 5793 set_next_buddy(se); 5794 break; 5795 } 5796 flags |= DEQUEUE_SLEEP; 5797 } 5798 5799 for_each_sched_entity(se) { 5800 cfs_rq = cfs_rq_of(se); 5801 5802 update_load_avg(cfs_rq, se, UPDATE_TG); 5803 se_update_runnable(se); 5804 update_cfs_group(se); 5805 5806 cfs_rq->h_nr_running--; 5807 cfs_rq->idle_h_nr_running -= idle_h_nr_running; 5808 5809 if (cfs_rq_is_idle(cfs_rq)) 5810 idle_h_nr_running = 1; 5811 5812 /* end evaluation on encountering a throttled cfs_rq */ 5813 if (cfs_rq_throttled(cfs_rq)) 5814 goto dequeue_throttle; 5815 5816 } 5817 5818 /* At this point se is NULL and we are at root level*/ 5819 sub_nr_running(rq, 1); 5820 5821 /* balance early to pull high priority tasks */ 5822 if (unlikely(!was_sched_idle && sched_idle_rq(rq))) 5823 rq->next_balance = jiffies; 5824 5825 dequeue_throttle: 5826 util_est_update(&rq->cfs, p, task_sleep); 5827 hrtick_update(rq); 5828 } 5829 5830 #ifdef CONFIG_SMP 5831 5832 /* Working cpumask for: load_balance, load_balance_newidle. */ 5833 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); 5834 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask); 5835 5836 #ifdef CONFIG_NO_HZ_COMMON 5837 5838 static struct { 5839 cpumask_var_t idle_cpus_mask; 5840 atomic_t nr_cpus; 5841 int has_blocked; /* Idle CPUS has blocked load */ 5842 int needs_update; /* Newly idle CPUs need their next_balance collated */ 5843 unsigned long next_balance; /* in jiffy units */ 5844 unsigned long next_blocked; /* Next update of blocked load in jiffies */ 5845 } nohz ____cacheline_aligned; 5846 5847 #endif /* CONFIG_NO_HZ_COMMON */ 5848 5849 static unsigned long cpu_load(struct rq *rq) 5850 { 5851 return cfs_rq_load_avg(&rq->cfs); 5852 } 5853 5854 /* 5855 * cpu_load_without - compute CPU load without any contributions from *p 5856 * @cpu: the CPU which load is requested 5857 * @p: the task which load should be discounted 5858 * 5859 * The load of a CPU is defined by the load of tasks currently enqueued on that 5860 * CPU as well as tasks which are currently sleeping after an execution on that 5861 * CPU. 5862 * 5863 * This method returns the load of the specified CPU by discounting the load of 5864 * the specified task, whenever the task is currently contributing to the CPU 5865 * load. 
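 *
 * For illustration: with a root load_avg of 800 and task_h_load(p) == 300,
 * this returns 500 while p is still accounted on this CPU; if p belongs to
 * another CPU (or is brand new) the full 800 is returned.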
5866 */ 5867 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) 5868 { 5869 struct cfs_rq *cfs_rq; 5870 unsigned int load; 5871 5872 /* Task has no contribution or is new */ 5873 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 5874 return cpu_load(rq); 5875 5876 cfs_rq = &rq->cfs; 5877 load = READ_ONCE(cfs_rq->avg.load_avg); 5878 5879 /* Discount task's load from CPU's load */ 5880 lsub_positive(&load, task_h_load(p)); 5881 5882 return load; 5883 } 5884 5885 static unsigned long cpu_runnable(struct rq *rq) 5886 { 5887 return cfs_rq_runnable_avg(&rq->cfs); 5888 } 5889 5890 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p) 5891 { 5892 struct cfs_rq *cfs_rq; 5893 unsigned int runnable; 5894 5895 /* Task has no contribution or is new */ 5896 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 5897 return cpu_runnable(rq); 5898 5899 cfs_rq = &rq->cfs; 5900 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); 5901 5902 /* Discount task's runnable from CPU's runnable */ 5903 lsub_positive(&runnable, p->se.avg.runnable_avg); 5904 5905 return runnable; 5906 } 5907 5908 static unsigned long capacity_of(int cpu) 5909 { 5910 return cpu_rq(cpu)->cpu_capacity; 5911 } 5912 5913 static void record_wakee(struct task_struct *p) 5914 { 5915 /* 5916 * Only decay a single time; tasks that have less than 1 wakeup per 5917 * jiffy will not have built up many flips. 5918 */ 5919 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { 5920 current->wakee_flips >>= 1; 5921 current->wakee_flip_decay_ts = jiffies; 5922 } 5923 5924 if (current->last_wakee != p) { 5925 current->last_wakee = p; 5926 current->wakee_flips++; 5927 } 5928 } 5929 5930 /* 5931 * Detect M:N waker/wakee relationships via a switching-frequency heuristic. 5932 * 5933 * A waker of many should wake a different task than the one last awakened 5934 * at a frequency roughly N times higher than one of its wakees. 5935 * 5936 * In order to determine whether we should let the load spread vs consolidating 5937 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one 5938 * partner, and a factor of llc_size higher frequency in the other. 5939 * 5940 * With both conditions met, we can be relatively sure that the relationship is 5941 * non-monogamous, with partner count exceeding socket size. 5942 * 5943 * Waker/wakee being client/server, worker/dispatcher, interrupt source or 5944 * whatever is irrelevant; the spread criterion is that the apparent partner 5945 * count exceeds socket size. 5946 */ 5947 static int wake_wide(struct task_struct *p) 5948 { 5949 unsigned int master = current->wakee_flips; 5950 unsigned int slave = p->wakee_flips; 5951 int factor = __this_cpu_read(sd_llc_size); 5952 5953 if (master < slave) 5954 swap(master, slave); 5955 if (slave < factor || master < slave * factor) 5956 return 0; 5957 return 1; 5958 } 5959 5960 /* 5961 * The purpose of wake_affine() is to quickly determine on which CPU we can run 5962 * soonest. For the purpose of speed we only consider the waking and previous 5963 * CPU. 5964 * 5965 * wake_affine_idle() - only considers 'now', it checks whether the waking CPU 5966 * is cache-affine and is (or will be) idle. 5967 * 5968 * wake_affine_weight() - considers the weight to reflect the average 5969 * scheduling latency of the CPUs. This seems to work 5970 * for the overloaded case.
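 *
 * A rough worked example of the weight path (illustrative numbers only):
 * with sd->imbalance_pct == 117 and WA_BIAS set, this_eff_load is scaled
 * by 100 while prev_eff_load is scaled by 100 + (117 - 100) / 2 == 108,
 * so after the capacity scaling the previous CPU keeps the task unless
 * the waking CPU is roughly 8% "cheaper".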
5971 */ 5972 static int 5973 wake_affine_idle(int this_cpu, int prev_cpu, int sync) 5974 { 5975 /* 5976 * If this_cpu is idle, it implies the wakeup is from interrupt 5977 * context. Only allow the move if cache is shared. Otherwise an 5978 * interrupt intensive workload could force all tasks onto one 5979 * node depending on the IO topology or IRQ affinity settings. 5980 * 5981 * If the prev_cpu is idle and cache affine then avoid a migration. 5982 * There is no guarantee that the cache hot data from an interrupt 5983 * is more important than cache hot data on the prev_cpu; and from 5984 * a cpufreq perspective, it's better to have higher utilisation 5985 * on one CPU. 5986 */ 5987 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) 5988 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; 5989 5990 if (sync && cpu_rq(this_cpu)->nr_running == 1) 5991 return this_cpu; 5992 5993 if (available_idle_cpu(prev_cpu)) 5994 return prev_cpu; 5995 5996 return nr_cpumask_bits; 5997 } 5998 5999 static int 6000 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, 6001 int this_cpu, int prev_cpu, int sync) 6002 { 6003 s64 this_eff_load, prev_eff_load; 6004 unsigned long task_load; 6005 6006 this_eff_load = cpu_load(cpu_rq(this_cpu)); 6007 6008 if (sync) { 6009 unsigned long current_load = task_h_load(current); 6010 6011 if (current_load > this_eff_load) 6012 return this_cpu; 6013 6014 this_eff_load -= current_load; 6015 } 6016 6017 task_load = task_h_load(p); 6018 6019 this_eff_load += task_load; 6020 if (sched_feat(WA_BIAS)) 6021 this_eff_load *= 100; 6022 this_eff_load *= capacity_of(prev_cpu); 6023 6024 prev_eff_load = cpu_load(cpu_rq(prev_cpu)); 6025 prev_eff_load -= task_load; 6026 if (sched_feat(WA_BIAS)) 6027 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; 6028 prev_eff_load *= capacity_of(this_cpu); 6029 6030 /* 6031 * If sync, adjust the weight of prev_eff_load such that if 6032 * prev_eff == this_eff, select_idle_sibling() will consider 6033 * stacking the wakee on top of the waker if no other CPU is 6034 * idle. 6035 */ 6036 if (sync) 6037 prev_eff_load += 1; 6038 6039 return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; 6040 } 6041 6042 static int wake_affine(struct sched_domain *sd, struct task_struct *p, 6043 int this_cpu, int prev_cpu, int sync) 6044 { 6045 int target = nr_cpumask_bits; 6046 6047 if (sched_feat(WA_IDLE)) 6048 target = wake_affine_idle(this_cpu, prev_cpu, sync); 6049 6050 if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits) 6051 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); 6052 6053 schedstat_inc(p->stats.nr_wakeups_affine_attempts); 6054 if (target == nr_cpumask_bits) 6055 return prev_cpu; 6056 6057 schedstat_inc(sd->ttwu_move_affine); 6058 schedstat_inc(p->stats.nr_wakeups_affine); 6059 return target; 6060 } 6061 6062 static struct sched_group * 6063 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu); 6064 6065 /* 6066 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
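 *
 * A sketch of the preference order implemented below (descriptive, not a
 * contract): a SCHED_IDLE CPU wins outright; otherwise the idle CPU whose
 * idle state has the smallest exit latency, ties broken by the most
 * recent idle_stamp; failing any idle CPU, the least loaded one.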
6067 */ 6068 static int 6069 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) 6070 { 6071 unsigned long load, min_load = ULONG_MAX; 6072 unsigned int min_exit_latency = UINT_MAX; 6073 u64 latest_idle_timestamp = 0; 6074 int least_loaded_cpu = this_cpu; 6075 int shallowest_idle_cpu = -1; 6076 int i; 6077 6078 /* Check if we have any choice: */ 6079 if (group->group_weight == 1) 6080 return cpumask_first(sched_group_span(group)); 6081 6082 /* Traverse only the allowed CPUs */ 6083 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { 6084 struct rq *rq = cpu_rq(i); 6085 6086 if (!sched_core_cookie_match(rq, p)) 6087 continue; 6088 6089 if (sched_idle_cpu(i)) 6090 return i; 6091 6092 if (available_idle_cpu(i)) { 6093 struct cpuidle_state *idle = idle_get_state(rq); 6094 if (idle && idle->exit_latency < min_exit_latency) { 6095 /* 6096 * We give priority to a CPU whose idle state 6097 * has the smallest exit latency irrespective 6098 * of any idle timestamp. 6099 */ 6100 min_exit_latency = idle->exit_latency; 6101 latest_idle_timestamp = rq->idle_stamp; 6102 shallowest_idle_cpu = i; 6103 } else if ((!idle || idle->exit_latency == min_exit_latency) && 6104 rq->idle_stamp > latest_idle_timestamp) { 6105 /* 6106 * If equal or no active idle state, then 6107 * the most recently idled CPU might have 6108 * a warmer cache. 6109 */ 6110 latest_idle_timestamp = rq->idle_stamp; 6111 shallowest_idle_cpu = i; 6112 } 6113 } else if (shallowest_idle_cpu == -1) { 6114 load = cpu_load(cpu_rq(i)); 6115 if (load < min_load) { 6116 min_load = load; 6117 least_loaded_cpu = i; 6118 } 6119 } 6120 } 6121 6122 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; 6123 } 6124 6125 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, 6126 int cpu, int prev_cpu, int sd_flag) 6127 { 6128 int new_cpu = cpu; 6129 6130 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) 6131 return prev_cpu; 6132 6133 /* 6134 * We need task's util for cpu_util_without, sync it up to 6135 * prev_cpu's last_update_time. 
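 *
 * (SD_BALANCE_FORK is excluded from the sync below because a forkee has
 * no PELT history to sync yet; see also the forkee discussion in the
 * find_energy_efficient_cpu() comment further down.)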
6136 */ 6137 if (!(sd_flag & SD_BALANCE_FORK)) 6138 sync_entity_load_avg(&p->se); 6139 6140 while (sd) { 6141 struct sched_group *group; 6142 struct sched_domain *tmp; 6143 int weight; 6144 6145 if (!(sd->flags & sd_flag)) { 6146 sd = sd->child; 6147 continue; 6148 } 6149 6150 group = find_idlest_group(sd, p, cpu); 6151 if (!group) { 6152 sd = sd->child; 6153 continue; 6154 } 6155 6156 new_cpu = find_idlest_group_cpu(group, p, cpu); 6157 if (new_cpu == cpu) { 6158 /* Now try balancing at a lower domain level of 'cpu': */ 6159 sd = sd->child; 6160 continue; 6161 } 6162 6163 /* Now try balancing at a lower domain level of 'new_cpu': */ 6164 cpu = new_cpu; 6165 weight = sd->span_weight; 6166 sd = NULL; 6167 for_each_domain(cpu, tmp) { 6168 if (weight <= tmp->span_weight) 6169 break; 6170 if (tmp->flags & sd_flag) 6171 sd = tmp; 6172 } 6173 } 6174 6175 return new_cpu; 6176 } 6177 6178 static inline int __select_idle_cpu(int cpu, struct task_struct *p) 6179 { 6180 if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) && 6181 sched_cpu_cookie_match(cpu_rq(cpu), p)) 6182 return cpu; 6183 6184 return -1; 6185 } 6186 6187 #ifdef CONFIG_SCHED_SMT 6188 DEFINE_STATIC_KEY_FALSE(sched_smt_present); 6189 EXPORT_SYMBOL_GPL(sched_smt_present); 6190 6191 static inline void set_idle_cores(int cpu, int val) 6192 { 6193 struct sched_domain_shared *sds; 6194 6195 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 6196 if (sds) 6197 WRITE_ONCE(sds->has_idle_cores, val); 6198 } 6199 6200 static inline bool test_idle_cores(int cpu, bool def) 6201 { 6202 struct sched_domain_shared *sds; 6203 6204 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 6205 if (sds) 6206 return READ_ONCE(sds->has_idle_cores); 6207 6208 return def; 6209 } 6210 6211 /* 6212 * Scans the local SMT mask to see if the entire core is idle, and records this 6213 * information in sd_llc_shared->has_idle_cores. 6214 * 6215 * Since SMT siblings share all cache levels, inspecting this limited remote 6216 * state should be fairly cheap. 6217 */ 6218 void __update_idle_core(struct rq *rq) 6219 { 6220 int core = cpu_of(rq); 6221 int cpu; 6222 6223 rcu_read_lock(); 6224 if (test_idle_cores(core, true)) 6225 goto unlock; 6226 6227 for_each_cpu(cpu, cpu_smt_mask(core)) { 6228 if (cpu == core) 6229 continue; 6230 6231 if (!available_idle_cpu(cpu)) 6232 goto unlock; 6233 } 6234 6235 set_idle_cores(core, 1); 6236 unlock: 6237 rcu_read_unlock(); 6238 } 6239 6240 /* 6241 * Scan the entire LLC domain for idle cores; this dynamically switches off if 6242 * there are no idle cores left in the system; tracked through 6243 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. 6244 */ 6245 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) 6246 { 6247 bool idle = true; 6248 int cpu; 6249 6250 if (!static_branch_likely(&sched_smt_present)) 6251 return __select_idle_cpu(core, p); 6252 6253 for_each_cpu(cpu, cpu_smt_mask(core)) { 6254 if (!available_idle_cpu(cpu)) { 6255 idle = false; 6256 if (*idle_cpu == -1) { 6257 if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) { 6258 *idle_cpu = cpu; 6259 break; 6260 } 6261 continue; 6262 } 6263 break; 6264 } 6265 if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr)) 6266 *idle_cpu = cpu; 6267 } 6268 6269 if (idle) 6270 return core; 6271 6272 cpumask_andnot(cpus, cpus, cpu_smt_mask(core)); 6273 return -1; 6274 } 6275 6276 /* 6277 * Scan the local SMT mask for idle CPUs. 
6278 */ 6279 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 6280 { 6281 int cpu; 6282 6283 for_each_cpu(cpu, cpu_smt_mask(target)) { 6284 if (!cpumask_test_cpu(cpu, p->cpus_ptr) || 6285 !cpumask_test_cpu(cpu, sched_domain_span(sd))) 6286 continue; 6287 if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) 6288 return cpu; 6289 } 6290 6291 return -1; 6292 } 6293 6294 #else /* CONFIG_SCHED_SMT */ 6295 6296 static inline void set_idle_cores(int cpu, int val) 6297 { 6298 } 6299 6300 static inline bool test_idle_cores(int cpu, bool def) 6301 { 6302 return def; 6303 } 6304 6305 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) 6306 { 6307 return __select_idle_cpu(core, p); 6308 } 6309 6310 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 6311 { 6312 return -1; 6313 } 6314 6315 #endif /* CONFIG_SCHED_SMT */ 6316 6317 /* 6318 * Scan the LLC domain for idle CPUs; this is dynamically regulated by 6319 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the 6320 * average idle time for this rq (as found in rq->avg_idle). 6321 */ 6322 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target) 6323 { 6324 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); 6325 int i, cpu, idle_cpu = -1, nr = INT_MAX; 6326 struct rq *this_rq = this_rq(); 6327 int this = smp_processor_id(); 6328 struct sched_domain *this_sd; 6329 u64 time = 0; 6330 6331 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); 6332 if (!this_sd) 6333 return -1; 6334 6335 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 6336 6337 if (sched_feat(SIS_PROP) && !has_idle_core) { 6338 u64 avg_cost, avg_idle, span_avg; 6339 unsigned long now = jiffies; 6340 6341 /* 6342 * If we're busy, the assumption that the last idle period 6343 * predicts the future is flawed; age away the remaining 6344 * predicted idle time. 6345 */ 6346 if (unlikely(this_rq->wake_stamp < now)) { 6347 while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) { 6348 this_rq->wake_stamp++; 6349 this_rq->wake_avg_idle >>= 1; 6350 } 6351 } 6352 6353 avg_idle = this_rq->wake_avg_idle; 6354 avg_cost = this_sd->avg_scan_cost + 1; 6355 6356 span_avg = sd->span_weight * avg_idle; 6357 if (span_avg > 4*avg_cost) 6358 nr = div_u64(span_avg, avg_cost); 6359 else 6360 nr = 4; 6361 6362 time = cpu_clock(this); 6363 } 6364 6365 for_each_cpu_wrap(cpu, cpus, target + 1) { 6366 if (has_idle_core) { 6367 i = select_idle_core(p, cpu, cpus, &idle_cpu); 6368 if ((unsigned int)i < nr_cpumask_bits) 6369 return i; 6370 6371 } else { 6372 if (!--nr) 6373 return -1; 6374 idle_cpu = __select_idle_cpu(cpu, p); 6375 if ((unsigned int)idle_cpu < nr_cpumask_bits) 6376 break; 6377 } 6378 } 6379 6380 if (has_idle_core) 6381 set_idle_cores(target, false); 6382 6383 if (sched_feat(SIS_PROP) && !has_idle_core) { 6384 time = cpu_clock(this) - time; 6385 6386 /* 6387 * Account for the scan cost of wakeups against the average 6388 * idle time. 6389 */ 6390 this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time); 6391 6392 update_avg(&this_sd->avg_scan_cost, time); 6393 } 6394 6395 return idle_cpu; 6396 } 6397 6398 /* 6399 * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which 6400 * the task fits. If no CPU is big enough, but there are idle ones, try to 6401 * maximize capacity. 
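 *
 * As a reminder, fits_capacity(util, cap) tests util * 1280 < cap * 1024,
 * i.e. it demands ~20% headroom. Illustrative arithmetic: util 300 fits
 * capacity 512 (384000 < 524288), while util 420 does not
 * (537600 >= 524288).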
6402 */ 6403 static int 6404 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) 6405 { 6406 unsigned long task_util, best_cap = 0; 6407 int cpu, best_cpu = -1; 6408 struct cpumask *cpus; 6409 6410 cpus = this_cpu_cpumask_var_ptr(select_idle_mask); 6411 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 6412 6413 task_util = uclamp_task_util(p); 6414 6415 for_each_cpu_wrap(cpu, cpus, target) { 6416 unsigned long cpu_cap = capacity_of(cpu); 6417 6418 if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) 6419 continue; 6420 if (fits_capacity(task_util, cpu_cap)) 6421 return cpu; 6422 6423 if (cpu_cap > best_cap) { 6424 best_cap = cpu_cap; 6425 best_cpu = cpu; 6426 } 6427 } 6428 6429 return best_cpu; 6430 } 6431 6432 static inline bool asym_fits_capacity(unsigned long task_util, int cpu) 6433 { 6434 if (static_branch_unlikely(&sched_asym_cpucapacity)) 6435 return fits_capacity(task_util, capacity_of(cpu)); 6436 6437 return true; 6438 } 6439 6440 /* 6441 * Try and locate an idle core/thread in the LLC cache domain. 6442 */ 6443 static int select_idle_sibling(struct task_struct *p, int prev, int target) 6444 { 6445 bool has_idle_core = false; 6446 struct sched_domain *sd; 6447 unsigned long task_util; 6448 int i, recent_used_cpu; 6449 6450 /* 6451 * On asymmetric systems, update the task utilization because we will 6452 * check that the task fits the CPU's capacity. 6453 */ 6454 if (static_branch_unlikely(&sched_asym_cpucapacity)) { 6455 sync_entity_load_avg(&p->se); 6456 task_util = uclamp_task_util(p); 6457 } 6458 6459 /* 6460 * per-cpu select_idle_mask usage 6461 */ 6462 lockdep_assert_irqs_disabled(); 6463 6464 if ((available_idle_cpu(target) || sched_idle_cpu(target)) && 6465 asym_fits_capacity(task_util, target)) 6466 return target; 6467 6468 /* 6469 * If the previous CPU is cache affine and idle, don't be stupid: 6470 */ 6471 if (prev != target && cpus_share_cache(prev, target) && 6472 (available_idle_cpu(prev) || sched_idle_cpu(prev)) && 6473 asym_fits_capacity(task_util, prev)) 6474 return prev; 6475 6476 /* 6477 * Allow a per-cpu kthread to stack with the wakee if the 6478 * kworker thread and the task's previous CPU are the same. 6479 * The assumption is that the wakee queued work for the 6480 * per-cpu kthread that is now complete and the wakeup is 6481 * essentially a sync wakeup. An obvious example of this 6482 * pattern is IO completions. 6483 */ 6484 if (is_per_cpu_kthread(current) && 6485 in_task() && 6486 prev == smp_processor_id() && 6487 this_rq()->nr_running <= 1 && 6488 asym_fits_capacity(task_util, prev)) { 6489 return prev; 6490 } 6491 6492 /* Check a recently used CPU as a potential idle candidate: */ 6493 recent_used_cpu = p->recent_used_cpu; 6494 p->recent_used_cpu = prev; 6495 if (recent_used_cpu != prev && 6496 recent_used_cpu != target && 6497 cpus_share_cache(recent_used_cpu, target) && 6498 (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && 6499 cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) && 6500 asym_fits_capacity(task_util, recent_used_cpu)) { 6501 return recent_used_cpu; 6502 } 6503 6504 /* 6505 * For asymmetric CPU capacity systems, our domain of interest is 6506 * sd_asym_cpucapacity rather than sd_llc. 6507 */ 6508 if (static_branch_unlikely(&sched_asym_cpucapacity)) { 6509 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target)); 6510 /* 6511 * On an asymmetric CPU capacity system where an exclusive 6512 * cpuset defines a symmetric island (i.e.
one unique 6513 * capacity_orig value through the cpuset), the key will be set 6514 * but the CPUs within that cpuset will not have a domain with 6515 * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric 6516 * capacity path. 6517 */ 6518 if (sd) { 6519 i = select_idle_capacity(p, sd, target); 6520 return ((unsigned)i < nr_cpumask_bits) ? i : target; 6521 } 6522 } 6523 6524 sd = rcu_dereference(per_cpu(sd_llc, target)); 6525 if (!sd) 6526 return target; 6527 6528 if (sched_smt_active()) { 6529 has_idle_core = test_idle_cores(target, false); 6530 6531 if (!has_idle_core && cpus_share_cache(prev, target)) { 6532 i = select_idle_smt(p, sd, prev); 6533 if ((unsigned int)i < nr_cpumask_bits) 6534 return i; 6535 } 6536 } 6537 6538 i = select_idle_cpu(p, sd, has_idle_core, target); 6539 if ((unsigned)i < nr_cpumask_bits) 6540 return i; 6541 6542 return target; 6543 } 6544 6545 /* 6546 * cpu_util_without: compute cpu utilization without any contributions from *p 6547 * @cpu: the CPU for which utilization is requested 6548 * @p: the task whose utilization should be discounted 6549 * 6550 * The utilization of a CPU is defined by the utilization of tasks currently 6551 * enqueued on that CPU as well as tasks which are currently sleeping after an 6552 * execution on that CPU. 6553 * 6554 * This method returns the utilization of the specified CPU by discounting the 6555 * utilization of the specified task, whenever the task is currently 6556 * contributing to the CPU utilization. 6557 */ 6558 static unsigned long cpu_util_without(int cpu, struct task_struct *p) 6559 { 6560 struct cfs_rq *cfs_rq; 6561 unsigned int util; 6562 6563 /* Task has no contribution or is new */ 6564 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 6565 return cpu_util_cfs(cpu); 6566 6567 cfs_rq = &cpu_rq(cpu)->cfs; 6568 util = READ_ONCE(cfs_rq->avg.util_avg); 6569 6570 /* Discount task's util from CPU's util */ 6571 lsub_positive(&util, task_util(p)); 6572 6573 /* 6574 * Covered cases: 6575 * 6576 * a) if *p is the only task sleeping on this CPU, then: 6577 * cpu_util (== task_util) > util_est (== 0) 6578 * and thus we return: 6579 * cpu_util_without = (cpu_util - task_util) = 0 6580 * 6581 * b) if other tasks are SLEEPING on this CPU, which is now exiting 6582 * IDLE, then: 6583 * cpu_util >= task_util 6584 * cpu_util > util_est (== 0) 6585 * and thus we discount *p's blocked utilization to return: 6586 * cpu_util_without = (cpu_util - task_util) >= 0 6587 * 6588 * c) if other tasks are RUNNABLE on that CPU and 6589 * util_est > cpu_util 6590 * then we use util_est since it returns a more restrictive 6591 * estimation of the spare capacity on that CPU, by just 6592 * considering the expected utilization of tasks already 6593 * runnable on that CPU. 6594 * 6595 * Cases a) and b) are covered by the above code, while case c) is 6596 * covered by the following code when estimated utilization is 6597 * enabled.
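 *
 * Worked instance of case c) (illustrative numbers): util_avg == 400
 * with *p contributing 100, util_est.enqueued == 600 and *p neither
 * queued nor current: the code below computes max(400 - 100, 600) == 600,
 * later clamped to capacity_orig_of(cpu).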
6598 */ 6599 if (sched_feat(UTIL_EST)) { 6600 unsigned int estimated = 6601 READ_ONCE(cfs_rq->avg.util_est.enqueued); 6602 6603 /* 6604 * Despite the following checks we still have a small window 6605 * for a possible race, when an execl's select_task_rq_fair() 6606 * races with LB's detach_task(): 6607 * 6608 * detach_task() 6609 * p->on_rq = TASK_ON_RQ_MIGRATING; 6610 * ---------------------------------- A 6611 * deactivate_task() \ 6612 * dequeue_task() + RaceTime 6613 * util_est_dequeue() / 6614 * ---------------------------------- B 6615 * 6616 * The additional check on "current == p" is required to 6617 * properly fix the execl regression and it helps in further 6618 * reducing the chances of the above race. 6619 */ 6620 if (unlikely(task_on_rq_queued(p) || current == p)) 6621 lsub_positive(&estimated, _task_util_est(p)); 6622 6623 util = max(util, estimated); 6624 } 6625 6626 /* 6627 * Utilization (estimated) can exceed the CPU capacity, thus let's 6628 * clamp to the maximum CPU capacity to ensure consistency with 6629 * cpu_util. 6630 */ 6631 return min_t(unsigned long, util, capacity_orig_of(cpu)); 6632 } 6633 6634 /* 6635 * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued) 6636 * to @dst_cpu. 6637 */ 6638 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) 6639 { 6640 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; 6641 unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); 6642 6643 /* 6644 * If @p migrates from @cpu to another, remove its contribution. Or, 6645 * if @p migrates from another CPU to @cpu, add its contribution. In 6646 * the other cases, @cpu is not impacted by the migration, so the 6647 * util_avg should already be correct. 6648 */ 6649 if (task_cpu(p) == cpu && dst_cpu != cpu) 6650 lsub_positive(&util, task_util(p)); 6651 else if (task_cpu(p) != cpu && dst_cpu == cpu) 6652 util += task_util(p); 6653 6654 if (sched_feat(UTIL_EST)) { 6655 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); 6656 6657 /* 6658 * During wake-up, the task isn't enqueued yet and doesn't 6659 * appear in the cfs_rq->avg.util_est.enqueued of any rq, 6660 * so just add it (if needed) to "simulate" what cpu_util 6661 * will be after the task has been enqueued. 6662 */ 6663 if (dst_cpu == cpu) 6664 util_est += _task_util_est(p); 6665 6666 util = max(util, util_est); 6667 } 6668 6669 return min(util, capacity_orig_of(cpu)); 6670 } 6671 6672 /* 6673 * compute_energy(): Estimates the energy that @pd would consume if @p was 6674 * migrated to @dst_cpu. compute_energy() predicts what will be the utilization 6675 * landscape of @pd's CPUs after the task migration, and uses the Energy Model 6676 * to compute what would be the energy if we decided to actually migrate that 6677 * task. 6678 */ 6679 static long 6680 compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) 6681 { 6682 struct cpumask *pd_mask = perf_domain_span(pd); 6683 unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask)); 6684 unsigned long max_util = 0, sum_util = 0; 6685 unsigned long _cpu_cap = cpu_cap; 6686 int cpu; 6687 6688 _cpu_cap -= arch_scale_thermal_pressure(cpumask_first(pd_mask)); 6689 6690 /* 6691 * The capacity state of CPUs of the current rd can be driven by CPUs 6692 * of another rd if they belong to the same pd. So, account for the 6693 * utilization of these CPUs too by masking pd with cpu_online_mask 6694 * instead of the rd span.
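 *
 * (Illustrative: if a pd spans CPUs 4-7 but the current rd, because of
 * an exclusive cpuset, only contains CPUs 4-5, the utilization of CPUs
 * 6-7 still drives the shared frequency and is summed here as well.)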
* 6695 * 6696 * If an entire pd is outside of the current rd, it will not appear in 6697 * its pd list and will not be accounted by compute_energy(). 6698 */ 6699 for_each_cpu_and(cpu, pd_mask, cpu_online_mask) { 6700 unsigned long util_freq = cpu_util_next(cpu, p, dst_cpu); 6701 unsigned long cpu_util, util_running = util_freq; 6702 struct task_struct *tsk = NULL; 6703 6704 /* 6705 * When @p is placed on @cpu: 6706 * 6707 * util_running = max(cpu_util, cpu_util_est) + 6708 * max(task_util, _task_util_est) 6709 * 6710 * while cpu_util_next is: max(cpu_util + task_util, 6711 * cpu_util_est + _task_util_est) 6712 */ 6713 if (cpu == dst_cpu) { 6714 tsk = p; 6715 util_running = 6716 cpu_util_next(cpu, p, -1) + task_util_est(p); 6717 } 6718 6719 /* 6720 * Busy time computation: utilization clamping is not 6721 * required since the ratio (sum_util / cpu_capacity) 6722 * is already enough to scale the EM reported power 6723 * consumption at the (eventually clamped) cpu_capacity. 6724 */ 6725 cpu_util = effective_cpu_util(cpu, util_running, cpu_cap, 6726 ENERGY_UTIL, NULL); 6727 6728 sum_util += min(cpu_util, _cpu_cap); 6729 6730 /* 6731 * Performance domain frequency: utilization clamping 6732 * must be considered since it affects the selection 6733 * of the performance domain frequency. 6734 * NOTE: in case RT tasks are running, by default the 6735 * FREQUENCY_UTIL's utilization can be the max OPP. 6736 */ 6737 cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap, 6738 FREQUENCY_UTIL, tsk); 6739 max_util = max(max_util, min(cpu_util, _cpu_cap)); 6740 } 6741 6742 return em_cpu_energy(pd->em_pd, max_util, sum_util, _cpu_cap); 6743 } 6744 6745 /* 6746 * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the 6747 * waking task. find_energy_efficient_cpu() looks for the CPU with maximum 6748 * spare capacity in each performance domain and uses it as a potential 6749 * candidate to execute the task. Then, it uses the Energy Model to figure 6750 * out which of the CPU candidates is the most energy-efficient. 6751 * 6752 * The rationale for this heuristic is as follows. In a performance domain, 6753 * all the most energy efficient CPU candidates (according to the Energy 6754 * Model) are those for which we'll request a low frequency. When there are 6755 * several CPUs for which the frequency request will be the same, we don't 6756 * have enough data to break the tie between them, because the Energy Model 6757 * only includes active power costs. With this model, if we assume that 6758 * frequency requests follow utilization (e.g. using schedutil), the CPU with 6759 * the maximum spare capacity in a performance domain is guaranteed to be among 6760 * the best candidates of the performance domain. 6761 * 6762 * In practice, it could be preferable from an energy standpoint to pack 6763 * small tasks on a CPU in order to let other CPUs go into deeper idle states, 6764 * but that could also hurt our chances to go cluster idle, and we have no 6765 * way to tell with the current Energy Model if this is actually a good 6766 * idea or not. So, find_energy_efficient_cpu() basically favors 6767 * cluster-packing, and spreading inside a cluster. That should at least be 6768 * a good thing for latency, and this is consistent with the idea that most 6769 * of the energy savings of EAS come from the asymmetry of the system, and 6770 * not so much from breaking the tie between identical CPUs.
That's also the 6771 * reason why EAS is enabled in the topology code only for systems where 6772 * SD_ASYM_CPUCAPACITY is set. 6773 * 6774 * NOTE: Forkees are not accepted in the energy-aware wake-up path because 6775 * they don't have any useful utilization data yet and it's not possible to 6776 * forecast their impact on energy consumption. Consequently, they will be 6777 * placed by find_idlest_cpu() on the least loaded CPU, which might turn out 6778 * to be energy-inefficient in some use-cases. The alternative would be to 6779 * bias new tasks towards specific types of CPUs first, or to try to infer 6780 * their util_avg from the parent task, but those heuristics could hurt 6781 * other use-cases too. So, until someone finds a better way to solve this, 6782 * let's keep things simple by re-using the existing slow path. 6783 */ 6784 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) 6785 { 6786 unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX; 6787 struct root_domain *rd = cpu_rq(smp_processor_id())->rd; 6788 int cpu, best_energy_cpu = prev_cpu, target = -1; 6789 unsigned long cpu_cap, util, base_energy = 0; 6790 struct sched_domain *sd; 6791 struct perf_domain *pd; 6792 6793 rcu_read_lock(); 6794 pd = rcu_dereference(rd->pd); 6795 if (!pd || READ_ONCE(rd->overutilized)) 6796 goto unlock; 6797 6798 /* 6799 * Energy-aware wake-up happens on the lowest sched_domain starting 6800 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu. 6801 */ 6802 sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity)); 6803 while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) 6804 sd = sd->parent; 6805 if (!sd) 6806 goto unlock; 6807 6808 target = prev_cpu; 6809 6810 sync_entity_load_avg(&p->se); 6811 if (!task_util_est(p)) 6812 goto unlock; 6813 6814 for (; pd; pd = pd->next) { 6815 unsigned long cur_delta, spare_cap, max_spare_cap = 0; 6816 bool compute_prev_delta = false; 6817 unsigned long base_energy_pd; 6818 int max_spare_cap_cpu = -1; 6819 6820 for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { 6821 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 6822 continue; 6823 6824 util = cpu_util_next(cpu, p, cpu); 6825 cpu_cap = capacity_of(cpu); 6826 spare_cap = cpu_cap; 6827 lsub_positive(&spare_cap, util); 6828 6829 /* 6830 * Skip CPUs that cannot satisfy the capacity request. 6831 * IOW, placing the task there would make the CPU 6832 * overutilized. Take uclamp into account to see how 6833 * much capacity we can get out of the CPU; this is 6834 * aligned with sched_cpu_util(). 6835 */ 6836 util = uclamp_rq_util_with(cpu_rq(cpu), util, p); 6837 if (!fits_capacity(util, cpu_cap)) 6838 continue; 6839 6840 if (cpu == prev_cpu) { 6841 /* Always use prev_cpu as a candidate. */ 6842 compute_prev_delta = true; 6843 } else if (spare_cap > max_spare_cap) { 6844 /* 6845 * Find the CPU with the maximum spare capacity 6846 * in the performance domain. 6847 */ 6848 max_spare_cap = spare_cap; 6849 max_spare_cap_cpu = cpu; 6850 } 6851 } 6852 6853 if (max_spare_cap_cpu < 0 && !compute_prev_delta) 6854 continue; 6855 6856 /* Compute the 'base' energy of the pd, without @p */ 6857 base_energy_pd = compute_energy(p, -1, pd); 6858 base_energy += base_energy_pd; 6859 6860 /* Evaluate the energy impact of using prev_cpu. 
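 * (prev_delta below is the pd's energy with @p placed on prev_cpu minus
 * base_energy_pd, its energy without @p; a negative delta would indicate
 * an inconsistent estimate, hence the bail-out.)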
*/ 6861 if (compute_prev_delta) { 6862 prev_delta = compute_energy(p, prev_cpu, pd); 6863 if (prev_delta < base_energy_pd) 6864 goto unlock; 6865 prev_delta -= base_energy_pd; 6866 best_delta = min(best_delta, prev_delta); 6867 } 6868 6869 /* Evaluate the energy impact of using max_spare_cap_cpu. */ 6870 if (max_spare_cap_cpu >= 0) { 6871 cur_delta = compute_energy(p, max_spare_cap_cpu, pd); 6872 if (cur_delta < base_energy_pd) 6873 goto unlock; 6874 cur_delta -= base_energy_pd; 6875 if (cur_delta < best_delta) { 6876 best_delta = cur_delta; 6877 best_energy_cpu = max_spare_cap_cpu; 6878 } 6879 } 6880 } 6881 rcu_read_unlock(); 6882 6883 /* 6884 * Pick the best CPU if prev_cpu cannot be used, or if it saves at 6885 * least 6% of the energy used by prev_cpu. 6886 */ 6887 if ((prev_delta == ULONG_MAX) || 6888 (prev_delta - best_delta) > ((prev_delta + base_energy) >> 4)) 6889 target = best_energy_cpu; 6890 6891 return target; 6892 6893 unlock: 6894 rcu_read_unlock(); 6895 6896 return target; 6897 } 6898 6899 /* 6900 * select_task_rq_fair: Select target runqueue for the waking task in domains 6901 * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE, 6902 * SD_BALANCE_FORK, or SD_BALANCE_EXEC. 6903 * 6904 * Balances load by selecting the idlest CPU in the idlest group, or under 6905 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. 6906 * 6907 * Returns the target CPU number. 6908 */ 6909 static int 6910 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) 6911 { 6912 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); 6913 struct sched_domain *tmp, *sd = NULL; 6914 int cpu = smp_processor_id(); 6915 int new_cpu = prev_cpu; 6916 int want_affine = 0; 6917 /* SD_flags and WF_flags share the first nibble */ 6918 int sd_flag = wake_flags & 0xF; 6919 6920 /* 6921 * required for stable ->cpus_allowed 6922 */ 6923 lockdep_assert_held(&p->pi_lock); 6924 if (wake_flags & WF_TTWU) { 6925 record_wakee(p); 6926 6927 if (sched_energy_enabled()) { 6928 new_cpu = find_energy_efficient_cpu(p, prev_cpu); 6929 if (new_cpu >= 0) 6930 return new_cpu; 6931 new_cpu = prev_cpu; 6932 } 6933 6934 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr); 6935 } 6936 6937 rcu_read_lock(); 6938 for_each_domain(cpu, tmp) { 6939 /* 6940 * If both 'cpu' and 'prev_cpu' are part of this domain, 6941 * cpu is a valid SD_WAKE_AFFINE target. 6942 */ 6943 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && 6944 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { 6945 if (cpu != prev_cpu) 6946 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); 6947 6948 sd = NULL; /* Prefer wake_affine over balance flags */ 6949 break; 6950 } 6951 6952 /* 6953 * Usually only true for WF_EXEC and WF_FORK, as sched_domains 6954 * usually do not have SD_BALANCE_WAKE set. That means wakeup 6955 * will usually go to the fast path. 6956 */ 6957 if (tmp->flags & sd_flag) 6958 sd = tmp; 6959 else if (!want_affine) 6960 break; 6961 } 6962 6963 if (unlikely(sd)) { 6964 /* Slow path */ 6965 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); 6966 } else if (wake_flags & WF_TTWU) { /* XXX always ? 
*/ 6967 /* Fast path */ 6968 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); 6969 } 6970 rcu_read_unlock(); 6971 6972 return new_cpu; 6973 } 6974 6975 static void detach_entity_cfs_rq(struct sched_entity *se); 6976 6977 /* 6978 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and 6979 * cfs_rq_of(p) references at time of call are still valid and identify the 6980 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. 6981 */ 6982 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) 6983 { 6984 /* 6985 * As blocked tasks retain absolute vruntime the migration needs to 6986 * deal with this by subtracting the old and adding the new 6987 * min_vruntime -- the latter is done by enqueue_entity() when placing 6988 * the task on the new runqueue. 6989 */ 6990 if (READ_ONCE(p->__state) == TASK_WAKING) { 6991 struct sched_entity *se = &p->se; 6992 struct cfs_rq *cfs_rq = cfs_rq_of(se); 6993 u64 min_vruntime; 6994 6995 #ifndef CONFIG_64BIT 6996 u64 min_vruntime_copy; 6997 6998 do { 6999 min_vruntime_copy = cfs_rq->min_vruntime_copy; 7000 smp_rmb(); 7001 min_vruntime = cfs_rq->min_vruntime; 7002 } while (min_vruntime != min_vruntime_copy); 7003 #else 7004 min_vruntime = cfs_rq->min_vruntime; 7005 #endif 7006 7007 se->vruntime -= min_vruntime; 7008 } 7009 7010 if (p->on_rq == TASK_ON_RQ_MIGRATING) { 7011 /* 7012 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' 7013 * rq->lock and can modify state directly. 7014 */ 7015 lockdep_assert_rq_held(task_rq(p)); 7016 detach_entity_cfs_rq(&p->se); 7017 7018 } else { 7019 /* 7020 * We are supposed to update the task to "current" time, so 7021 * that it is up to date and ready to go to a new CPU/cfs_rq. 7022 * But we have difficulty getting what the current time is, so 7023 * simply throw away the out-of-date time. This results in the 7024 * wakee task being less decayed, but giving the wakee more 7025 * load does not sound bad. 7026 */ 7027 remove_entity_load_avg(&p->se); 7028 } 7029 7030 /* Tell new CPU we are migrated */ 7031 p->se.avg.last_update_time = 0; 7032 7033 /* We have migrated, no longer consider this task hot */ 7034 p->se.exec_start = 0; 7035 7036 update_scan_period(p, new_cpu); 7037 } 7038 7039 static void task_dead_fair(struct task_struct *p) 7040 { 7041 remove_entity_load_avg(&p->se); 7042 } 7043 7044 static int 7045 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 7046 { 7047 if (rq->nr_running) 7048 return 1; 7049 7050 return newidle_balance(rq, rf) != 0; 7051 } 7052 #endif /* CONFIG_SMP */ 7053 7054 static unsigned long wakeup_gran(struct sched_entity *se) 7055 { 7056 unsigned long gran = sysctl_sched_wakeup_granularity; 7057 7058 /* 7059 * Since it is curr that is running now, convert the gran from 7060 * real-time to virtual-time in its units. 7061 * 7062 * By using 'se' instead of 'curr' we penalize light tasks, so 7063 * they get preempted more easily. That is, if 'se' < 'curr' then 7064 * the resulting gran will be larger, therefore penalizing the 7065 * lighter, if otoh 'se' > 'curr' then the resulting gran will 7066 * be smaller, again penalizing the lighter task. 7067 * 7068 * This is especially important for buddies when the leftmost 7069 * task is higher priority than the buddy. 7070 */ 7071 return calc_delta_fair(gran, se); 7072 } 7073 7074 /* 7075 * Should 'se' preempt 'curr'.
7076 * 7077 * With vdiff = curr->vruntime - se->vruntime and g = wakeup_gran(se): 7078 * s1 runs ahead of c (vdiff <= 0), 7079 * s2 lags c by less than g, 7080 * s3 lags c by more than g, 7081 * giving: 7082 * 7083 * w(c, s1) = -1 7084 * w(c, s2) = 0 7085 * w(c, s3) = 1 7086 * 7087 */ 7088 static int 7089 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) 7090 { 7091 s64 gran, vdiff = curr->vruntime - se->vruntime; 7092 7093 if (vdiff <= 0) 7094 return -1; 7095 7096 gran = wakeup_gran(se); 7097 if (vdiff > gran) 7098 return 1; 7099 7100 return 0; 7101 } 7102 7103 static void set_last_buddy(struct sched_entity *se) 7104 { 7105 for_each_sched_entity(se) { 7106 if (SCHED_WARN_ON(!se->on_rq)) 7107 return; 7108 if (se_is_idle(se)) 7109 return; 7110 cfs_rq_of(se)->last = se; 7111 } 7112 } 7113 7114 static void set_next_buddy(struct sched_entity *se) 7115 { 7116 for_each_sched_entity(se) { 7117 if (SCHED_WARN_ON(!se->on_rq)) 7118 return; 7119 if (se_is_idle(se)) 7120 return; 7121 cfs_rq_of(se)->next = se; 7122 } 7123 } 7124 7125 static void set_skip_buddy(struct sched_entity *se) 7126 { 7127 for_each_sched_entity(se) 7128 cfs_rq_of(se)->skip = se; 7129 } 7130 7131 /* 7132 * Preempt the current task with a newly woken task if needed: 7133 */ 7134 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 7135 { 7136 struct task_struct *curr = rq->curr; 7137 struct sched_entity *se = &curr->se, *pse = &p->se; 7138 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 7139 int scale = cfs_rq->nr_running >= sched_nr_latency; 7140 int next_buddy_marked = 0; 7141 int cse_is_idle, pse_is_idle; 7142 7143 if (unlikely(se == pse)) 7144 return; 7145 7146 /* 7147 * This is possible from callers such as attach_tasks(), in which we 7148 * unconditionally check_preempt_curr() after an enqueue (which may have 7149 * led to a throttle). This both saves work and prevents false 7150 * next-buddy nomination below. 7151 */ 7152 if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) 7153 return; 7154 7155 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { 7156 set_next_buddy(pse); 7157 next_buddy_marked = 1; 7158 } 7159 7160 /* 7161 * We can come here with TIF_NEED_RESCHED already set from the new task 7162 * wake up path. 7163 * 7164 * Note: this also catches the edge-case of curr being in a throttled 7165 * group (e.g. via set_curr_task), since update_curr() (in the 7166 * enqueue of curr) will have resulted in resched being set. This 7167 * prevents us from potentially nominating it as a false LAST_BUDDY 7168 * below. 7169 */ 7170 if (test_tsk_need_resched(curr)) 7171 return; 7172 7173 /* Idle tasks are by definition preempted by non-idle tasks. */ 7174 if (unlikely(task_has_idle_policy(curr)) && 7175 likely(!task_has_idle_policy(p))) 7176 goto preempt; 7177 7178 /* 7179 * Batch and idle tasks do not preempt non-idle tasks (their preemption 7180 * is driven by the tick): 7181 */ 7182 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) 7183 return; 7184 7185 find_matching_se(&se, &pse); 7186 BUG_ON(!pse); 7187 7188 cse_is_idle = se_is_idle(se); 7189 pse_is_idle = se_is_idle(pse); 7190 7191 /* 7192 * Preempt an idle group in favor of a non-idle group (and don't preempt 7193 * in the inverse case). 7194 */ 7195 if (cse_is_idle && !pse_is_idle) 7196 goto preempt; 7197 if (cse_is_idle != pse_is_idle) 7198 return; 7199 7200 update_curr(cfs_rq_of(se)); 7201 if (wakeup_preempt_entity(se, pse) == 1) { 7202 /* 7203 * Bias pick_next to pick the sched entity that is 7204 * triggering this preemption.
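 *
 * (Reached only when wakeup_preempt_entity() returned 1 above, i.e.
 * curr's vruntime exceeds the wakee's by more than wakeup_gran(), one
 * wakeup-granularity's worth of virtual time for a nice-0 wakee.)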
*/ 7206 if (!next_buddy_marked) 7207 set_next_buddy(pse); 7208 goto preempt; 7209 } 7210 7211 return; 7212 7213 preempt: 7214 resched_curr(rq); 7215 /* 7216 * Only set the backward buddy when the current task is still 7217 * on the rq. This can happen when a wakeup gets interleaved 7218 * with schedule on the ->pre_schedule() or idle_balance() 7219 * point, either of which can drop the rq lock. 7220 * 7221 * Also, during early boot the idle thread is in the fair class, 7222 * for obvious reasons it's a bad idea to schedule back to it. 7223 */ 7224 if (unlikely(!se->on_rq || curr == rq->idle)) 7225 return; 7226 7227 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) 7228 set_last_buddy(se); 7229 } 7230 7231 #ifdef CONFIG_SMP 7232 static struct task_struct *pick_task_fair(struct rq *rq) 7233 { 7234 struct sched_entity *se; 7235 struct cfs_rq *cfs_rq; 7236 7237 again: 7238 cfs_rq = &rq->cfs; 7239 if (!cfs_rq->nr_running) 7240 return NULL; 7241 7242 do { 7243 struct sched_entity *curr = cfs_rq->curr; 7244 7245 /* When we pick for a remote RQ, we'll not have done put_prev_entity() */ 7246 if (curr) { 7247 if (curr->on_rq) 7248 update_curr(cfs_rq); 7249 else 7250 curr = NULL; 7251 7252 if (unlikely(check_cfs_rq_runtime(cfs_rq))) 7253 goto again; 7254 } 7255 7256 se = pick_next_entity(cfs_rq, curr); 7257 cfs_rq = group_cfs_rq(se); 7258 } while (cfs_rq); 7259 7260 return task_of(se); 7261 } 7262 #endif 7263 7264 struct task_struct * 7265 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 7266 { 7267 struct cfs_rq *cfs_rq = &rq->cfs; 7268 struct sched_entity *se; 7269 struct task_struct *p; 7270 int new_tasks; 7271 7272 again: 7273 if (!sched_fair_runnable(rq)) 7274 goto idle; 7275 7276 #ifdef CONFIG_FAIR_GROUP_SCHED 7277 if (!prev || prev->sched_class != &fair_sched_class) 7278 goto simple; 7279 7280 /* 7281 * Because of the set_next_buddy() in dequeue_task_fair() it is rather 7282 * likely that the next task is from the same cgroup as the current one. 7283 * 7284 * Therefore attempt to avoid putting and setting the entire cgroup 7285 * hierarchy, only change the part that actually changes. 7286 */ 7287 7288 do { 7289 struct sched_entity *curr = cfs_rq->curr; 7290 7291 /* 7292 * Since we got here without doing put_prev_entity() we also 7293 * have to consider cfs_rq->curr. If it is still a runnable 7294 * entity, update_curr() will update its vruntime, otherwise 7295 * forget we've ever seen it. 7296 */ 7297 if (curr) { 7298 if (curr->on_rq) 7299 update_curr(cfs_rq); 7300 else 7301 curr = NULL; 7302 7303 /* 7304 * This call to check_cfs_rq_runtime() will do the 7305 * throttle and dequeue its entity in the parent(s). 7306 * Therefore the nr_running test will indeed 7307 * be correct. 7308 */ 7309 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { 7310 cfs_rq = &rq->cfs; 7311 7312 if (!cfs_rq->nr_running) 7313 goto idle; 7314 7315 goto simple; 7316 } 7317 } 7318 7319 se = pick_next_entity(cfs_rq, curr); 7320 cfs_rq = group_cfs_rq(se); 7321 } while (cfs_rq); 7322 7323 p = task_of(se); 7324 7325 /* 7326 * Since we haven't yet done put_prev_entity() and if the selected task 7327 * is a different task than the one we started out with, try and touch 7328 * the least number of cfs_rqs.
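 *
 * Illustrative walk through the loop below (hypothetical hierarchy):
 * with prev at depth 2 under /A/B and p at depth 1 under /A, pse is
 * put back one level first; once both entities sit in the same
 * cfs_rq, a final put_prev_entity()/set_next_entity() pair completes
 * the switch, leaving levels above the divergence point untouched.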
*/ 7330 if (prev != p) { 7331 struct sched_entity *pse = &prev->se; 7332 7333 while (!(cfs_rq = is_same_group(se, pse))) { 7334 int se_depth = se->depth; 7335 int pse_depth = pse->depth; 7336 7337 if (se_depth <= pse_depth) { 7338 put_prev_entity(cfs_rq_of(pse), pse); 7339 pse = parent_entity(pse); 7340 } 7341 if (se_depth >= pse_depth) { 7342 set_next_entity(cfs_rq_of(se), se); 7343 se = parent_entity(se); 7344 } 7345 } 7346 7347 put_prev_entity(cfs_rq, pse); 7348 set_next_entity(cfs_rq, se); 7349 } 7350 7351 goto done; 7352 simple: 7353 #endif 7354 if (prev) 7355 put_prev_task(rq, prev); 7356 7357 do { 7358 se = pick_next_entity(cfs_rq, NULL); 7359 set_next_entity(cfs_rq, se); 7360 cfs_rq = group_cfs_rq(se); 7361 } while (cfs_rq); 7362 7363 p = task_of(se); 7364 7365 done: __maybe_unused; 7366 #ifdef CONFIG_SMP 7367 /* 7368 * Move the next running task to the front of 7369 * the list, so our cfs_tasks list becomes an 7370 * MRU one. 7371 */ 7372 list_move(&p->se.group_node, &rq->cfs_tasks); 7373 #endif 7374 7375 if (hrtick_enabled_fair(rq)) 7376 hrtick_start_fair(rq, p); 7377 7378 update_misfit_status(p, rq); 7379 7380 return p; 7381 7382 idle: 7383 if (!rf) 7384 return NULL; 7385 7386 new_tasks = newidle_balance(rq, rf); 7387 7388 /* 7389 * Because newidle_balance() releases (and re-acquires) rq->lock, it is 7390 * possible for any higher priority task to appear. In that case we 7391 * must re-start the pick_next_entity() loop. 7392 */ 7393 if (new_tasks < 0) 7394 return RETRY_TASK; 7395 7396 if (new_tasks > 0) 7397 goto again; 7398 7399 /* 7400 * rq is about to be idle; check if we need to update the 7401 * lost_idle_time of clock_pelt 7402 */ 7403 update_idle_rq_clock_pelt(rq); 7404 7405 return NULL; 7406 } 7407 7408 static struct task_struct *__pick_next_task_fair(struct rq *rq) 7409 { 7410 return pick_next_task_fair(rq, NULL, NULL); 7411 } 7412 7413 /* 7414 * Account for a descheduled task: 7415 */ 7416 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) 7417 { 7418 struct sched_entity *se = &prev->se; 7419 struct cfs_rq *cfs_rq; 7420 7421 for_each_sched_entity(se) { 7422 cfs_rq = cfs_rq_of(se); 7423 put_prev_entity(cfs_rq, se); 7424 } 7425 } 7426 7427 /* 7428 * sched_yield() is very simple 7429 * 7430 * The magic of dealing with the ->skip buddy is in pick_next_entity. 7431 */ 7432 static void yield_task_fair(struct rq *rq) 7433 { 7434 struct task_struct *curr = rq->curr; 7435 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 7436 struct sched_entity *se = &curr->se; 7437 7438 /* 7439 * Are we the only task in the tree? 7440 */ 7441 if (unlikely(rq->nr_running == 1)) 7442 return; 7443 7444 clear_buddies(cfs_rq, se); 7445 7446 if (curr->policy != SCHED_BATCH) { 7447 update_rq_clock(rq); 7448 /* 7449 * Update run-time statistics of the 'current'. 7450 */ 7451 update_curr(cfs_rq); 7452 /* 7453 * Tell update_rq_clock() that we've just updated, 7454 * so we don't do a microscopic update in schedule() 7455 * and double the fastpath cost. 7456 */ 7457 rq_clock_skip_update(rq); 7458 } 7459 7460 set_skip_buddy(se); 7461 } 7462 7463 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) 7464 { 7465 struct sched_entity *se = &p->se; 7466 7467 /* throttled hierarchies are not runnable */ 7468 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) 7469 return false; 7470 7471 /* Tell the scheduler that we'd really like se to run next.
*/ 7472 set_next_buddy(se); 7473 7474 yield_task_fair(rq); 7475 7476 return true; 7477 } 7478 7479 #ifdef CONFIG_SMP 7480 /************************************************** 7481 * Fair scheduling class load-balancing methods. 7482 * 7483 * BASICS 7484 * 7485 * The purpose of load-balancing is to achieve the same basic fairness the 7486 * per-CPU scheduler provides, namely provide a proportional amount of compute 7487 * time to each task. This is expressed in the following equation: 7488 * 7489 * W_i,n/C_i == W_j,n/C_j for all i,j (1) 7490 * 7491 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight 7492 * W_i,0 is defined as: 7493 * 7494 * W_i,0 = \Sum_j w_i,j (2) 7495 * 7496 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight 7497 * is derived from the nice value as per sched_prio_to_weight[]. 7498 * 7499 * The weight average is an exponential decay average of the instantaneous 7500 * weight: 7501 * 7502 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) 7503 * 7504 * C_i is the compute capacity of CPU i; typically it is the 7505 * fraction of 'recent' time available for SCHED_OTHER task execution. But it 7506 * can also include other factors [XXX]. 7507 * 7508 * To achieve this balance we define a measure of imbalance which follows 7509 * directly from (1): 7510 * 7511 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) 7512 * 7513 * We then move tasks around to minimize the imbalance. In the continuous 7514 * function space it is obvious this converges; in the discrete case we get 7515 * a few fun cases generally called infeasible weight scenarios. 7516 * 7517 * [XXX expand on: 7518 * - infeasible weights; 7519 * - local vs global optima in the discrete case. ] 7520 * 7521 * 7522 * SCHED DOMAINS 7523 * 7524 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) 7525 * for all i,j solution, we create a tree of CPUs that follows the hardware 7526 * topology where each level pairs two lower groups (or better). This results 7527 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the 7528 * tree to only the first of the previous level and we decrease the frequency 7529 * of load-balance at each level inversely proportional to the number of CPUs in 7530 * the groups. 7531 * 7532 * This yields: 7533 * 7534 * \Sum_{i = 0}^{log_2 n} { 1/2^i * n/2^i * 2^i } = O(n) (5) 7535 * 7536 * where the sum runs over all levels, 1/2^i is the load-balance frequency 7537 * at level i, n/2^i is the number of CPUs doing load-balance there, and 7538 * 2^i is the size of each group at that level. 7541 * 7542 * Coupled with a limit on how many tasks we can migrate every balance pass, 7543 * this makes (5) the runtime complexity of the balancer. 7544 * 7545 * An important property here is that each CPU is still (indirectly) connected 7546 * to every other CPU in at most O(log n) steps: 7547 * 7548 * The adjacency matrix of the resulting graph is given by: 7549 * 7550 * A_i,j = \Union_{k = 0}^{log_2 n} (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6) 7553 * 7554 * And you'll find that: 7555 * 7556 * A^(log_2 n)_i,j != 0 for all i,j (7) 7557 * 7558 * Showing there's indeed a path between every CPU in at most O(log n) steps.
7559 * The task movement gives a factor of O(m), giving a convergence complexity 7560 * of: 7561 * 7562 * O(nm log n), n := nr_cpus, m := nr_tasks (8) 7563 * 7564 * 7565 * WORK CONSERVING 7566 * 7567 * In order to avoid CPUs going idle while there's still work to do, new idle 7568 * balancing is more aggressive and has the newly idle CPU iterate up the domain 7569 * tree itself instead of relying on other CPUs to bring it work. 7570 * 7571 * This adds some complexity to both (5) and (8) but it reduces the total idle 7572 * time. 7573 * 7574 * [XXX more?] 7575 * 7576 * 7577 * CGROUPS 7578 * 7579 * Cgroups make a horror show out of (2); instead of a simple sum we get: 7580 * 7581 * W_i,0 = \Sum_j \Prod_k w_k * (s_k,i / S_k) (9) 7584 * 7585 * Where 7586 * 7587 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) 7588 * 7589 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i. 7590 * 7591 * The big problem is S_k, it's a global sum needed to compute a local (W_i) 7592 * property. 7593 * 7594 * [XXX write more on how we solve this.. _after_ merging pjt's patches that 7595 * rewrite all of this once again.] 7596 */ 7597 7598 static unsigned long __read_mostly max_load_balance_interval = HZ/10; 7599 7600 enum fbq_type { regular, remote, all }; 7601 7602 /* 7603 * 'group_type' describes the group of CPUs at the moment of load balancing. 7604 * 7605 * The enum is ordered by pulling priority, with the group with lowest priority 7606 * first so the group_type can simply be compared when selecting the busiest 7607 * group. See update_sd_pick_busiest(). 7608 */ 7609 enum group_type { 7610 /* The group has spare capacity that can be used to run more tasks. */ 7611 group_has_spare = 0, 7612 /* 7613 * The group is fully used and the tasks don't compete for more CPU 7614 * cycles. Nevertheless, some tasks might wait before running. 7615 */ 7616 group_fully_busy, 7617 /* 7618 * SD_ASYM_CPUCAPACITY only: One task doesn't fit the CPU's capacity 7619 * and must be migrated to a more powerful CPU. 7620 */ 7621 group_misfit_task, 7622 /* 7623 * SD_ASYM_PACKING only: One local CPU with higher capacity is available, 7624 * and the task should be migrated to it instead of running on the 7625 * current CPU. 7626 */ 7627 group_asym_packing, 7628 /* 7629 * The tasks' affinity constraints previously prevented the scheduler 7630 * from balancing the load across the system. 7631 */ 7632 group_imbalanced, 7633 /* 7634 * The CPU is overloaded and can't provide expected CPU cycles to all 7635 * tasks.
*/ 7637 group_overloaded 7638 }; 7639 7640 enum migration_type { 7641 migrate_load = 0, 7642 migrate_util, 7643 migrate_task, 7644 migrate_misfit 7645 }; 7646 7647 #define LBF_ALL_PINNED 0x01 7648 #define LBF_NEED_BREAK 0x02 7649 #define LBF_DST_PINNED 0x04 7650 #define LBF_SOME_PINNED 0x08 7651 #define LBF_ACTIVE_LB 0x10 7652 7653 struct lb_env { 7654 struct sched_domain *sd; 7655 7656 struct rq *src_rq; 7657 int src_cpu; 7658 7659 int dst_cpu; 7660 struct rq *dst_rq; 7661 7662 struct cpumask *dst_grpmask; 7663 int new_dst_cpu; 7664 enum cpu_idle_type idle; 7665 long imbalance; 7666 /* The set of CPUs under consideration for load-balancing */ 7667 struct cpumask *cpus; 7668 7669 unsigned int flags; 7670 7671 unsigned int loop; 7672 unsigned int loop_break; 7673 unsigned int loop_max; 7674 7675 enum fbq_type fbq_type; 7676 enum migration_type migration_type; 7677 struct list_head tasks; 7678 }; 7679 7680 /* 7681 * Is this task likely cache-hot: 7682 */ 7683 static int task_hot(struct task_struct *p, struct lb_env *env) 7684 { 7685 s64 delta; 7686 7687 lockdep_assert_rq_held(env->src_rq); 7688 7689 if (p->sched_class != &fair_sched_class) 7690 return 0; 7691 7692 if (unlikely(task_has_idle_policy(p))) 7693 return 0; 7694 7695 /* SMT siblings share cache */ 7696 if (env->sd->flags & SD_SHARE_CPUCAPACITY) 7697 return 0; 7698 7699 /* 7700 * Buddy candidates are cache hot: 7701 */ 7702 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && 7703 (&p->se == cfs_rq_of(&p->se)->next || 7704 &p->se == cfs_rq_of(&p->se)->last)) 7705 return 1; 7706 7707 if (sysctl_sched_migration_cost == -1) 7708 return 1; 7709 7710 /* 7711 * Don't migrate the task if its cookie does not match 7712 * the destination CPU's core cookie. 7713 */ 7714 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) 7715 return 1; 7716 7717 if (sysctl_sched_migration_cost == 0) 7718 return 0; 7719 7720 delta = rq_clock_task(env->src_rq) - p->se.exec_start; 7721 7722 return delta < (s64)sysctl_sched_migration_cost; 7723 } 7724 7725 #ifdef CONFIG_NUMA_BALANCING 7726 /* 7727 * Returns 1 if task migration degrades locality. 7728 * Returns 0 if task migration improves locality, i.e. migration is preferred. 7729 * Returns -1 if task migration is not affected by locality. 7730 */ 7731 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) 7732 { 7733 struct numa_group *numa_group = rcu_dereference(p->numa_group); 7734 unsigned long src_weight, dst_weight; 7735 int src_nid, dst_nid, dist; 7736 7737 if (!static_branch_likely(&sched_numa_balancing)) 7738 return -1; 7739 7740 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) 7741 return -1; 7742 7743 src_nid = cpu_to_node(env->src_cpu); 7744 dst_nid = cpu_to_node(env->dst_cpu); 7745 7746 if (src_nid == dst_nid) 7747 return -1; 7748 7749 /* Migrating away from the preferred node is always bad. */ 7750 if (src_nid == p->numa_preferred_nid) { 7751 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) 7752 return 1; 7753 else 7754 return -1; 7755 } 7756 7757 /* Encourage migration to the preferred node. */ 7758 if (dst_nid == p->numa_preferred_nid) 7759 return 0; 7760 7761 /* Leaving a core idle is often worse than degrading locality.
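 * (Hence the check below: with an idle destination we return -1, so
 * locality neither encourages nor vetoes the migration and the generic
 * task_hot() test decides instead.)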
*/
7762 if (env->idle == CPU_IDLE)
7763 return -1;
7764
7765 dist = node_distance(src_nid, dst_nid);
7766 if (numa_group) {
7767 src_weight = group_weight(p, src_nid, dist);
7768 dst_weight = group_weight(p, dst_nid, dist);
7769 } else {
7770 src_weight = task_weight(p, src_nid, dist);
7771 dst_weight = task_weight(p, dst_nid, dist);
7772 }
7773
7774 return dst_weight < src_weight;
7775 }
7776
7777 #else
7778 static inline int migrate_degrades_locality(struct task_struct *p,
7779 struct lb_env *env)
7780 {
7781 return -1;
7782 }
7783 #endif
7784
7785 /*
7786 * can_migrate_task - can task p on runqueue rq be migrated to this_cpu?
7787 */
7788 static
7789 int can_migrate_task(struct task_struct *p, struct lb_env *env)
7790 {
7791 int tsk_cache_hot;
7792
7793 lockdep_assert_rq_held(env->src_rq);
7794
7795 /*
7796 * We do not migrate tasks that are:
7797 * 1) throttled_lb_pair, or
7798 * 2) cannot be migrated to this CPU due to cpus_ptr, or
7799 * 3) running (obviously), or
7800 * 4) are cache-hot on their current CPU.
7801 */
7802 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
7803 return 0;
7804
7805 /* Disregard pcpu kthreads; they are where they need to be. */
7806 if (kthread_is_per_cpu(p))
7807 return 0;
7808
7809 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
7810 int cpu;
7811
7812 schedstat_inc(p->stats.nr_failed_migrations_affine);
7813
7814 env->flags |= LBF_SOME_PINNED;
7815
7816 /*
7817 * Remember if this task can be migrated to any other CPU in
7818 * our sched_group. We may want to revisit it if we couldn't
7819 * meet load balance goals by pulling other tasks on src_cpu.
7820 *
7821 * Avoid computing new_dst_cpu
7822 * - for NEWLY_IDLE
7823 * - if we have already computed one in current iteration
7824 * - if it's an active balance
7825 */
7826 if (env->idle == CPU_NEWLY_IDLE ||
7827 env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
7828 return 0;
7829
7830 /* Prevent dst_cpu from being re-selected via env's CPUs: */
7831 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
7832 if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
7833 env->flags |= LBF_DST_PINNED;
7834 env->new_dst_cpu = cpu;
7835 break;
7836 }
7837 }
7838
7839 return 0;
7840 }
7841
7842 /* Record that we found at least one task that could run on dst_cpu */
7843 env->flags &= ~LBF_ALL_PINNED;
7844
7845 if (task_running(env->src_rq, p)) {
7846 schedstat_inc(p->stats.nr_failed_migrations_running);
7847 return 0;
7848 }
7849
7850 /*
7851 * Aggressive migration if:
7852 * 1) active balance
7853 * 2) destination numa is preferred
7854 * 3) task is cache cold, or
7855 * 4) too many balance attempts have failed.
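 *
 * (As a worked example: with the default 0.5ms
 * sysctl_sched_migration_cost, a task that last ran 0.2ms ago is
 * still "hot", so it is only migrated once nr_balance_failed exceeds
 * the domain's cache_nice_tries, case 4 above.)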
7856 */
7857 if (env->flags & LBF_ACTIVE_LB)
7858 return 1;
7859
7860 tsk_cache_hot = migrate_degrades_locality(p, env);
7861 if (tsk_cache_hot == -1)
7862 tsk_cache_hot = task_hot(p, env);
7863
7864 if (tsk_cache_hot <= 0 ||
7865 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
7866 if (tsk_cache_hot == 1) {
7867 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
7868 schedstat_inc(p->stats.nr_forced_migrations);
7869 }
7870 return 1;
7871 }
7872
7873 schedstat_inc(p->stats.nr_failed_migrations_hot);
7874 return 0;
7875 }
7876
7877 /*
7878 * detach_task() -- detach the task for the migration specified in env
7879 */
7880 static void detach_task(struct task_struct *p, struct lb_env *env)
7881 {
7882 lockdep_assert_rq_held(env->src_rq);
7883
7884 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
7885 set_task_cpu(p, env->dst_cpu);
7886 }
7887
7888 /*
7889 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
7890 * part of active balancing operations within "domain".
7891 *
7892 * Returns a task if successful and NULL otherwise.
7893 */
7894 static struct task_struct *detach_one_task(struct lb_env *env)
7895 {
7896 struct task_struct *p;
7897
7898 lockdep_assert_rq_held(env->src_rq);
7899
7900 list_for_each_entry_reverse(p,
7901 &env->src_rq->cfs_tasks, se.group_node) {
7902 if (!can_migrate_task(p, env))
7903 continue;
7904
7905 detach_task(p, env);
7906
7907 /*
7908 * Right now, this is only the second place where
7909 * lb_gained[env->idle] is updated (other is detach_tasks)
7910 * so we can safely collect stats here rather than
7911 * inside detach_tasks().
7912 */
7913 schedstat_inc(env->sd->lb_gained[env->idle]);
7914 return p;
7915 }
7916 return NULL;
7917 }
7918
7919 static const unsigned int sched_nr_migrate_break = 32;
7920
7921 /*
7922 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
7923 * busiest_rq, as part of a balancing operation within domain "sd".
7924 *
7925 * Returns number of detached tasks if successful and 0 otherwise.
7926 */
7927 static int detach_tasks(struct lb_env *env)
7928 {
7929 struct list_head *tasks = &env->src_rq->cfs_tasks;
7930 unsigned long util, load;
7931 struct task_struct *p;
7932 int detached = 0;
7933
7934 lockdep_assert_rq_held(env->src_rq);
7935
7936 /*
7937 * Source run queue has been emptied by another CPU, clear
7938 * LBF_ALL_PINNED flag as we will not test any task.
7939 */
7940 if (env->src_rq->nr_running <= 1) {
7941 env->flags &= ~LBF_ALL_PINNED;
7942 return 0;
7943 }
7944
7945 if (env->imbalance <= 0)
7946 return 0;
7947
7948 while (!list_empty(tasks)) {
7949 /*
7950 * We don't want to steal all, otherwise we may be treated likewise,
7951 * which could at worst lead to a livelock crash.
7952 */
7953 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
7954 break;
7955
7956 p = list_last_entry(tasks, struct task_struct, se.group_node);
7957
7958 env->loop++;
7959 /* We've more or less seen every task there is, call it quits */
7960 if (env->loop > env->loop_max)
7961 break;
7962
7963 /* take a breather every nr_migrate tasks */
7964 if (env->loop > env->loop_break) {
7965 env->loop_break += sched_nr_migrate_break;
7966 env->flags |= LBF_NEED_BREAK;
7967 break;
7968 }
7969
7970 if (!can_migrate_task(p, env))
7971 goto next;
7972
7973 switch (env->migration_type) {
7974 case migrate_load:
7975 /*
7976 * Depending on the number of CPUs and tasks and the
7977 * cgroup hierarchy, task_h_load() can return a null
7978 * value.
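 * (For instance, a task in a deeply nested, low-weight cgroup can
 * see its hierarchical share round down to zero in task_h_load().)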
Make sure that env->imbalance decreases,
7979 * otherwise detach_tasks() will stop only after
7980 * detaching up to loop_max tasks.
7981 */
7982 load = max_t(unsigned long, task_h_load(p), 1);
7983
7984 if (sched_feat(LB_MIN) &&
7985 load < 16 && !env->sd->nr_balance_failed)
7986 goto next;
7987
7988 /*
7989 * Make sure that we don't migrate too much load.
7990 * Nevertheless, let's relax the constraint if the
7991 * scheduler fails to find a good waiting task to
7992 * migrate.
7993 */
7994 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
7995 goto next;
7996
7997 env->imbalance -= load;
7998 break;
7999
8000 case migrate_util:
8001 util = task_util_est(p);
8002
8003 if (util > env->imbalance)
8004 goto next;
8005
8006 env->imbalance -= util;
8007 break;
8008
8009 case migrate_task:
8010 env->imbalance--;
8011 break;
8012
8013 case migrate_misfit:
8014 /* This is not a misfit task */
8015 if (task_fits_capacity(p, capacity_of(env->src_cpu)))
8016 goto next;
8017
8018 env->imbalance = 0;
8019 break;
8020 }
8021
8022 detach_task(p, env);
8023 list_add(&p->se.group_node, &env->tasks);
8024
8025 detached++;
8026
8027 #ifdef CONFIG_PREEMPTION
8028 /*
8029 * NEWIDLE balancing is a source of latency, so preemptible
8030 * kernels will stop after the first task is detached to minimize
8031 * the critical section.
8032 */
8033 if (env->idle == CPU_NEWLY_IDLE)
8034 break;
8035 #endif
8036
8037 /*
8038 * We only want to steal up to the prescribed amount of
8039 * load/util/tasks.
8040 */
8041 if (env->imbalance <= 0)
8042 break;
8043
8044 continue;
8045 next:
8046 list_move(&p->se.group_node, tasks);
8047 }
8048
8049 /*
8050 * Right now, this is one of only two places we collect this stat
8051 * so we can safely collect detach_one_task() stats here rather
8052 * than inside detach_one_task().
8053 */
8054 schedstat_add(env->sd->lb_gained[env->idle], detached);
8055
8056 return detached;
8057 }
8058
8059 /*
8060 * attach_task() -- attach the task detached by detach_task() to its new rq.
8061 */
8062 static void attach_task(struct rq *rq, struct task_struct *p)
8063 {
8064 lockdep_assert_rq_held(rq);
8065
8066 BUG_ON(task_rq(p) != rq);
8067 activate_task(rq, p, ENQUEUE_NOCLOCK);
8068 check_preempt_curr(rq, p, 0);
8069 }
8070
8071 /*
8072 * attach_one_task() -- attaches the task returned from detach_one_task() to
8073 * its new rq.
8074 */
8075 static void attach_one_task(struct rq *rq, struct task_struct *p)
8076 {
8077 struct rq_flags rf;
8078
8079 rq_lock(rq, &rf);
8080 update_rq_clock(rq);
8081 attach_task(rq, p);
8082 rq_unlock(rq, &rf);
8083 }
8084
8085 /*
8086 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
8087 * new rq.
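 *
 * (env->tasks was filled under src_rq's lock in detach_tasks(); here
 * dst_rq's lock is taken once and the whole list is drained.)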
8088 */ 8089 static void attach_tasks(struct lb_env *env) 8090 { 8091 struct list_head *tasks = &env->tasks; 8092 struct task_struct *p; 8093 struct rq_flags rf; 8094 8095 rq_lock(env->dst_rq, &rf); 8096 update_rq_clock(env->dst_rq); 8097 8098 while (!list_empty(tasks)) { 8099 p = list_first_entry(tasks, struct task_struct, se.group_node); 8100 list_del_init(&p->se.group_node); 8101 8102 attach_task(env->dst_rq, p); 8103 } 8104 8105 rq_unlock(env->dst_rq, &rf); 8106 } 8107 8108 #ifdef CONFIG_NO_HZ_COMMON 8109 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) 8110 { 8111 if (cfs_rq->avg.load_avg) 8112 return true; 8113 8114 if (cfs_rq->avg.util_avg) 8115 return true; 8116 8117 return false; 8118 } 8119 8120 static inline bool others_have_blocked(struct rq *rq) 8121 { 8122 if (READ_ONCE(rq->avg_rt.util_avg)) 8123 return true; 8124 8125 if (READ_ONCE(rq->avg_dl.util_avg)) 8126 return true; 8127 8128 if (thermal_load_avg(rq)) 8129 return true; 8130 8131 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 8132 if (READ_ONCE(rq->avg_irq.util_avg)) 8133 return true; 8134 #endif 8135 8136 return false; 8137 } 8138 8139 static inline void update_blocked_load_tick(struct rq *rq) 8140 { 8141 WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies); 8142 } 8143 8144 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) 8145 { 8146 if (!has_blocked) 8147 rq->has_blocked_load = 0; 8148 } 8149 #else 8150 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } 8151 static inline bool others_have_blocked(struct rq *rq) { return false; } 8152 static inline void update_blocked_load_tick(struct rq *rq) {} 8153 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} 8154 #endif 8155 8156 static bool __update_blocked_others(struct rq *rq, bool *done) 8157 { 8158 const struct sched_class *curr_class; 8159 u64 now = rq_clock_pelt(rq); 8160 unsigned long thermal_pressure; 8161 bool decayed; 8162 8163 /* 8164 * update_load_avg() can call cpufreq_update_util(). Make sure that RT, 8165 * DL and IRQ signals have been updated before updating CFS. 8166 */ 8167 curr_class = rq->curr->sched_class; 8168 8169 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 8170 8171 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | 8172 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | 8173 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) | 8174 update_irq_load_avg(rq, 0); 8175 8176 if (others_have_blocked(rq)) 8177 *done = false; 8178 8179 return decayed; 8180 } 8181 8182 #ifdef CONFIG_FAIR_GROUP_SCHED 8183 8184 static bool __update_blocked_fair(struct rq *rq, bool *done) 8185 { 8186 struct cfs_rq *cfs_rq, *pos; 8187 bool decayed = false; 8188 int cpu = cpu_of(rq); 8189 8190 /* 8191 * Iterates the task_group tree in a bottom up fashion, see 8192 * list_add_leaf_cfs_rq() for details. 8193 */ 8194 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { 8195 struct sched_entity *se; 8196 8197 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { 8198 update_tg_load_avg(cfs_rq); 8199 8200 if (cfs_rq == &rq->cfs) 8201 decayed = true; 8202 } 8203 8204 /* Propagate pending load changes to the parent, if any: */ 8205 se = cfs_rq->tg->se[cpu]; 8206 if (se && !skip_blocked_update(se)) 8207 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); 8208 8209 /* 8210 * There can be a lot of idle CPU cgroups. Don't let fully 8211 * decayed cfs_rqs linger on the list. 
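 *
 * (Such a cfs_rq is put back on the list by list_add_leaf_cfs_rq()
 * the next time one of its entities is enqueued.)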
8212 */
8213 if (cfs_rq_is_decayed(cfs_rq))
8214 list_del_leaf_cfs_rq(cfs_rq);
8215
8216 /* Don't need periodic decay once load/util_avg are null */
8217 if (cfs_rq_has_blocked(cfs_rq))
8218 *done = false;
8219 }
8220
8221 return decayed;
8222 }
8223
8224 /*
8225 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
8226 * This needs to be done in a top-down fashion because the load of a child
8227 * group is a fraction of its parent's load.
8228 */
8229 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
8230 {
8231 struct rq *rq = rq_of(cfs_rq);
8232 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
8233 unsigned long now = jiffies;
8234 unsigned long load;
8235
8236 if (cfs_rq->last_h_load_update == now)
8237 return;
8238
8239 WRITE_ONCE(cfs_rq->h_load_next, NULL);
8240 for_each_sched_entity(se) {
8241 cfs_rq = cfs_rq_of(se);
8242 WRITE_ONCE(cfs_rq->h_load_next, se);
8243 if (cfs_rq->last_h_load_update == now)
8244 break;
8245 }
8246
8247 if (!se) {
8248 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
8249 cfs_rq->last_h_load_update = now;
8250 }
8251
8252 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
8253 load = cfs_rq->h_load;
8254 load = div64_ul(load * se->avg.load_avg,
8255 cfs_rq_load_avg(cfs_rq) + 1);
8256 cfs_rq = group_cfs_rq(se);
8257 cfs_rq->h_load = load;
8258 cfs_rq->last_h_load_update = now;
8259 }
8260 }
8261
8262 static unsigned long task_h_load(struct task_struct *p)
8263 {
8264 struct cfs_rq *cfs_rq = task_cfs_rq(p);
8265
8266 update_cfs_rq_h_load(cfs_rq);
8267 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
8268 cfs_rq_load_avg(cfs_rq) + 1);
8269 }
8270 #else
8271 static bool __update_blocked_fair(struct rq *rq, bool *done)
8272 {
8273 struct cfs_rq *cfs_rq = &rq->cfs;
8274 bool decayed;
8275
8276 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
8277 if (cfs_rq_has_blocked(cfs_rq))
8278 *done = false;
8279
8280 return decayed;
8281 }
8282
8283 static unsigned long task_h_load(struct task_struct *p)
8284 {
8285 return p->se.avg.load_avg;
8286 }
8287 #endif
8288
8289 static void update_blocked_averages(int cpu)
8290 {
8291 bool decayed = false, done = true;
8292 struct rq *rq = cpu_rq(cpu);
8293 struct rq_flags rf;
8294
8295 rq_lock_irqsave(rq, &rf);
8296 update_blocked_load_tick(rq);
8297 update_rq_clock(rq);
8298
8299 decayed |= __update_blocked_others(rq, &done);
8300 decayed |= __update_blocked_fair(rq, &done);
8301
8302 update_blocked_load_status(rq, !done);
8303 if (decayed)
8304 cpufreq_update_util(rq, 0);
8305 rq_unlock_irqrestore(rq, &rf);
8306 }
8307
8308 /********** Helpers for find_busiest_group ************************/
8309
8310 /*
8311 * sg_lb_stats - stats of a sched_group required for load_balancing
8312 */
8313 struct sg_lb_stats {
8314 unsigned long avg_load; /* Avg load across the CPUs of the group */
8315 unsigned long group_load; /* Total load over the CPUs of the group */
8316 unsigned long group_capacity;
8317 unsigned long group_util; /* Total utilization over the CPUs of the group */
8318 unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
8319 unsigned int sum_nr_running; /* Nr of tasks running in the group */
8320 unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
8321 unsigned int idle_cpus;
8322 unsigned int group_weight;
8323 enum group_type group_type;
8324 unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
8325 unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
8326
#ifdef CONFIG_NUMA_BALANCING 8327 unsigned int nr_numa_running; 8328 unsigned int nr_preferred_running; 8329 #endif 8330 }; 8331 8332 /* 8333 * sd_lb_stats - Structure to store the statistics of a sched_domain 8334 * during load balancing. 8335 */ 8336 struct sd_lb_stats { 8337 struct sched_group *busiest; /* Busiest group in this sd */ 8338 struct sched_group *local; /* Local group in this sd */ 8339 unsigned long total_load; /* Total load of all groups in sd */ 8340 unsigned long total_capacity; /* Total capacity of all groups in sd */ 8341 unsigned long avg_load; /* Average load across all groups in sd */ 8342 unsigned int prefer_sibling; /* tasks should go to sibling first */ 8343 8344 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ 8345 struct sg_lb_stats local_stat; /* Statistics of the local group */ 8346 }; 8347 8348 static inline void init_sd_lb_stats(struct sd_lb_stats *sds) 8349 { 8350 /* 8351 * Skimp on the clearing to avoid duplicate work. We can avoid clearing 8352 * local_stat because update_sg_lb_stats() does a full clear/assignment. 8353 * We must however set busiest_stat::group_type and 8354 * busiest_stat::idle_cpus to the worst busiest group because 8355 * update_sd_pick_busiest() reads these before assignment. 8356 */ 8357 *sds = (struct sd_lb_stats){ 8358 .busiest = NULL, 8359 .local = NULL, 8360 .total_load = 0UL, 8361 .total_capacity = 0UL, 8362 .busiest_stat = { 8363 .idle_cpus = UINT_MAX, 8364 .group_type = group_has_spare, 8365 }, 8366 }; 8367 } 8368 8369 static unsigned long scale_rt_capacity(int cpu) 8370 { 8371 struct rq *rq = cpu_rq(cpu); 8372 unsigned long max = arch_scale_cpu_capacity(cpu); 8373 unsigned long used, free; 8374 unsigned long irq; 8375 8376 irq = cpu_util_irq(rq); 8377 8378 if (unlikely(irq >= max)) 8379 return 1; 8380 8381 /* 8382 * avg_rt.util_avg and avg_dl.util_avg track binary signals 8383 * (running and not running) with weights 0 and 1024 respectively. 8384 * avg_thermal.load_avg tracks thermal pressure and the weighted 8385 * average uses the actual delta max capacity(load). 
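 *
 * (A worked example, assuming max = 1024: with rt + dl + thermal
 * "used" summing to 256 and no IRQ pressure, the capacity left for
 * CFS below is scale_irq_capacity(1024 - 256, 0, 1024) == 768.)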
8386 */
8387 used = READ_ONCE(rq->avg_rt.util_avg);
8388 used += READ_ONCE(rq->avg_dl.util_avg);
8389 used += thermal_load_avg(rq);
8390
8391 if (unlikely(used >= max))
8392 return 1;
8393
8394 free = max - used;
8395
8396 return scale_irq_capacity(free, irq, max);
8397 }
8398
8399 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
8400 {
8401 unsigned long capacity = scale_rt_capacity(cpu);
8402 struct sched_group *sdg = sd->groups;
8403
8404 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
8405
8406 if (!capacity)
8407 capacity = 1;
8408
8409 cpu_rq(cpu)->cpu_capacity = capacity;
8410 trace_sched_cpu_capacity_tp(cpu_rq(cpu));
8411
8412 sdg->sgc->capacity = capacity;
8413 sdg->sgc->min_capacity = capacity;
8414 sdg->sgc->max_capacity = capacity;
8415 }
8416
8417 void update_group_capacity(struct sched_domain *sd, int cpu)
8418 {
8419 struct sched_domain *child = sd->child;
8420 struct sched_group *group, *sdg = sd->groups;
8421 unsigned long capacity, min_capacity, max_capacity;
8422 unsigned long interval;
8423
8424 interval = msecs_to_jiffies(sd->balance_interval);
8425 interval = clamp(interval, 1UL, max_load_balance_interval);
8426 sdg->sgc->next_update = jiffies + interval;
8427
8428 if (!child) {
8429 update_cpu_capacity(sd, cpu);
8430 return;
8431 }
8432
8433 capacity = 0;
8434 min_capacity = ULONG_MAX;
8435 max_capacity = 0;
8436
8437 if (child->flags & SD_OVERLAP) {
8438 /*
8439 * SD_OVERLAP domains cannot assume that child groups
8440 * span the current group.
8441 */
8442
8443 for_each_cpu(cpu, sched_group_span(sdg)) {
8444 unsigned long cpu_cap = capacity_of(cpu);
8445
8446 capacity += cpu_cap;
8447 min_capacity = min(cpu_cap, min_capacity);
8448 max_capacity = max(cpu_cap, max_capacity);
8449 }
8450 } else {
8451 /*
8452 * !SD_OVERLAP domains can assume that child groups
8453 * span the current group.
8454 */
8455
8456 group = child->groups;
8457 do {
8458 struct sched_group_capacity *sgc = group->sgc;
8459
8460 capacity += sgc->capacity;
8461 min_capacity = min(sgc->min_capacity, min_capacity);
8462 max_capacity = max(sgc->max_capacity, max_capacity);
8463 group = group->next;
8464 } while (group != child->groups);
8465 }
8466
8467 sdg->sgc->capacity = capacity;
8468 sdg->sgc->min_capacity = min_capacity;
8469 sdg->sgc->max_capacity = max_capacity;
8470 }
8471
8472 /*
8473 * Check whether the capacity of the rq has been noticeably reduced by side
8474 * activity. The imbalance_pct is used for the threshold.
8475 * Return true if the capacity is reduced.
8476 */
8477 static inline int
8478 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
8479 {
8480 return ((rq->cpu_capacity * sd->imbalance_pct) <
8481 (rq->cpu_capacity_orig * 100));
8482 }
8483
8484 /*
8485 * Check whether a rq has a misfit task and if it looks like we can actually
8486 * help that task: we can migrate the task to a CPU of higher capacity, or
8487 * the task's current CPU is heavily pressured.
8488 */
8489 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
8490 {
8491 return rq->misfit_task_load &&
8492 (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
8493 check_cpu_capacity(rq, sd));
8494 }
8495
8496 /*
8497 * Group imbalance indicates (and tries to solve) the problem where balancing
8498 * groups is inadequate due to ->cpus_ptr constraints.
8499 *
8500 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
8501 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
8502 * Something like:
8503 *
8504 * { 0 1 2 3 } { 4 5 6 7 }
8505 * * * * *
8506 *
8507 * If we were to balance group-wise we'd place two tasks in the first group and
8508 * two tasks in the second group. Clearly this is undesired as it will overload
8509 * cpu 3 and leave one of the CPUs in the second group unused.
8510 *
8511 * The current solution to this issue is detecting the skew in the first group
8512 * by noticing the lower domain failed to reach balance and had difficulty
8513 * moving tasks due to affinity constraints.
8514 *
8515 * When this is detected, the group becomes a candidate for busiest; see
8516 * update_sd_pick_busiest(). calculate_imbalance() and
8517 * find_busiest_group() then avoid some of the usual balance conditions to allow it
8518 * to create an effective group imbalance.
8519 *
8520 * This is a somewhat tricky proposition since the next run might not find the
8521 * group imbalance and decide the groups need to be balanced again. A most
8522 * subtle and fragile situation.
8523 */
8524
8525 static inline int sg_imbalanced(struct sched_group *group)
8526 {
8527 return group->sgc->imbalance;
8528 }
8529
8530 /*
8531 * group_has_capacity returns true if the group has spare capacity that could
8532 * be used by some tasks.
8533 * We consider that a group has spare capacity if the number of tasks is
8534 * smaller than the number of CPUs or if the utilization is lower than the
8535 * available capacity for CFS tasks.
8536 * For the latter, we use a threshold to stabilize the state, to take into
8537 * account the variance of the tasks' load and to return true if the available
8538 * capacity is meaningful for the load balancer.
8539 * As an example, an available capacity of 1% can appear but it doesn't provide
8540 * any benefit for load balancing.
8541 */
8542 static inline bool
8543 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
8544 {
8545 if (sgs->sum_nr_running < sgs->group_weight)
8546 return true;
8547
8548 if ((sgs->group_capacity * imbalance_pct) <
8549 (sgs->group_runnable * 100))
8550 return false;
8551
8552 if ((sgs->group_capacity * 100) >
8553 (sgs->group_util * imbalance_pct))
8554 return true;
8555
8556 return false;
8557 }
8558
8559 /*
8560 * group_is_overloaded returns true if the group has more tasks than it can
8561 * handle.
8562 * group_is_overloaded is not equal to !group_has_capacity because a group
8563 * with exactly the right number of tasks has no spare capacity left but is not
8564 * overloaded, so both group_has_capacity and group_is_overloaded return
8565 * false.
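 *
 * (A rough numeric sketch, assuming imbalance_pct = 117: a group of 4
 * CPUs with group_capacity = 4096 running 5 tasks with group_util =
 * 3800 is overloaded, since 4096 * 100 < 3800 * 117.)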
8566 */ 8567 static inline bool 8568 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs) 8569 { 8570 if (sgs->sum_nr_running <= sgs->group_weight) 8571 return false; 8572 8573 if ((sgs->group_capacity * 100) < 8574 (sgs->group_util * imbalance_pct)) 8575 return true; 8576 8577 if ((sgs->group_capacity * imbalance_pct) < 8578 (sgs->group_runnable * 100)) 8579 return true; 8580 8581 return false; 8582 } 8583 8584 static inline enum 8585 group_type group_classify(unsigned int imbalance_pct, 8586 struct sched_group *group, 8587 struct sg_lb_stats *sgs) 8588 { 8589 if (group_is_overloaded(imbalance_pct, sgs)) 8590 return group_overloaded; 8591 8592 if (sg_imbalanced(group)) 8593 return group_imbalanced; 8594 8595 if (sgs->group_asym_packing) 8596 return group_asym_packing; 8597 8598 if (sgs->group_misfit_task_load) 8599 return group_misfit_task; 8600 8601 if (!group_has_capacity(imbalance_pct, sgs)) 8602 return group_fully_busy; 8603 8604 return group_has_spare; 8605 } 8606 8607 /** 8608 * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks 8609 * @dst_cpu: Destination CPU of the load balancing 8610 * @sds: Load-balancing data with statistics of the local group 8611 * @sgs: Load-balancing statistics of the candidate busiest group 8612 * @sg: The candidate busiest group 8613 * 8614 * Check the state of the SMT siblings of both @sds::local and @sg and decide 8615 * if @dst_cpu can pull tasks. 8616 * 8617 * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of 8618 * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks 8619 * only if @dst_cpu has higher priority. 8620 * 8621 * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more 8622 * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority. 8623 * Bigger imbalances in the number of busy CPUs will be dealt with in 8624 * update_sd_pick_busiest(). 8625 * 8626 * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings 8627 * of @dst_cpu are idle and @sg has lower priority. 8628 * 8629 * Return: true if @dst_cpu can pull tasks, false otherwise. 8630 */ 8631 static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds, 8632 struct sg_lb_stats *sgs, 8633 struct sched_group *sg) 8634 { 8635 #ifdef CONFIG_SCHED_SMT 8636 bool local_is_smt, sg_is_smt; 8637 int sg_busy_cpus; 8638 8639 local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY; 8640 sg_is_smt = sg->flags & SD_SHARE_CPUCAPACITY; 8641 8642 sg_busy_cpus = sgs->group_weight - sgs->idle_cpus; 8643 8644 if (!local_is_smt) { 8645 /* 8646 * If we are here, @dst_cpu is idle and does not have SMT 8647 * siblings. Pull tasks if candidate group has two or more 8648 * busy CPUs. 8649 */ 8650 if (sg_busy_cpus >= 2) /* implies sg_is_smt */ 8651 return true; 8652 8653 /* 8654 * @dst_cpu does not have SMT siblings. @sg may have SMT 8655 * siblings and only one is busy. In such case, @dst_cpu 8656 * can help if it has higher priority and is idle (i.e., 8657 * it has no running tasks). 8658 */ 8659 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu); 8660 } 8661 8662 /* @dst_cpu has SMT siblings. */ 8663 8664 if (sg_is_smt) { 8665 int local_busy_cpus = sds->local->group_weight - 8666 sds->local_stat.idle_cpus; 8667 int busy_cpus_delta = sg_busy_cpus - local_busy_cpus; 8668 8669 if (busy_cpus_delta == 1) 8670 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu); 8671 8672 return false; 8673 } 8674 8675 /* 8676 * @sg does not have SMT siblings. 
Ensure that @sds::local does not end
8677 * up with more than one busy SMT sibling and only pull tasks if there
8678 * are no busy CPUs (i.e., no CPU has running tasks).
8679 */
8680 if (!sds->local_stat.sum_nr_running)
8681 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
8682
8683 return false;
8684 #else
8685 /* Always return false so that callers deal with non-SMT cases. */
8686 return false;
8687 #endif
8688 }
8689
8690 static inline bool
8691 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
8692 struct sched_group *group)
8693 {
8694 /* Only do SMT checks if either local or candidate has SMT siblings */
8695 if ((sds->local->flags & SD_SHARE_CPUCAPACITY) ||
8696 (group->flags & SD_SHARE_CPUCAPACITY))
8697 return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group);
8698
8699 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
8700 }
8701
8702 /**
8703 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
8704 * @env: The load balancing environment.
8705 * @sds: Load-balancing data with statistics of the local group.
8706 * @group: sched_group whose statistics are to be updated.
8707 * @sgs: variable to hold the statistics for this group.
8708 * @sg_status: Holds flag indicating the status of the sched_group
8709 */
8710 static inline void update_sg_lb_stats(struct lb_env *env,
8711 struct sd_lb_stats *sds,
8712 struct sched_group *group,
8713 struct sg_lb_stats *sgs,
8714 int *sg_status)
8715 {
8716 int i, nr_running, local_group;
8717
8718 memset(sgs, 0, sizeof(*sgs));
8719
8720 local_group = group == sds->local;
8721
8722 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
8723 struct rq *rq = cpu_rq(i);
8724
8725 sgs->group_load += cpu_load(rq);
8726 sgs->group_util += cpu_util_cfs(i);
8727 sgs->group_runnable += cpu_runnable(rq);
8728 sgs->sum_h_nr_running += rq->cfs.h_nr_running;
8729
8730 nr_running = rq->nr_running;
8731 sgs->sum_nr_running += nr_running;
8732
8733 if (nr_running > 1)
8734 *sg_status |= SG_OVERLOAD;
8735
8736 if (cpu_overutilized(i))
8737 *sg_status |= SG_OVERUTILIZED;
8738
8739 #ifdef CONFIG_NUMA_BALANCING
8740 sgs->nr_numa_running += rq->nr_numa_running;
8741 sgs->nr_preferred_running += rq->nr_preferred_running;
8742 #endif
8743 /*
8744 * No need to call idle_cpu() if nr_running is not 0
8745 */
8746 if (!nr_running && idle_cpu(i)) {
8747 sgs->idle_cpus++;
8748 /* Idle cpu can't have misfit task */
8749 continue;
8750 }
8751
8752 if (local_group)
8753 continue;
8754
8755 /* Check for a misfit task on the cpu */
8756 if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
8757 sgs->group_misfit_task_load < rq->misfit_task_load) {
8758 sgs->group_misfit_task_load = rq->misfit_task_load;
8759 *sg_status |= SG_OVERLOAD;
8760 }
8761 }
8762
8763 sgs->group_capacity = group->sgc->capacity;
8764
8765 sgs->group_weight = group->group_weight;
8766
8767 /* Check if dst CPU is idle and preferred to this group */
8768 if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
8769 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
8770 sched_asym(env, sds, sgs, group)) {
8771 sgs->group_asym_packing = 1;
8772 }
8773
8774 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
8775
8776 /* Computing avg_load makes sense only when group is overloaded */
8777 if (sgs->group_type == group_overloaded)
8778 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
8779 sgs->group_capacity;
8780 }
8781
8782 /**
8783 * update_sd_pick_busiest - return 1 on busiest group
8784 * @env: The load
balancing environment.
8785 * @sds: sched_domain statistics
8786 * @sg: sched_group candidate to be checked for being the busiest
8787 * @sgs: sched_group statistics
8788 *
8789 * Determine if @sg is a busier group than the previously selected
8790 * busiest group.
8791 *
8792 * Return: %true if @sg is a busier group than the previously selected
8793 * busiest group. %false otherwise.
8794 */
8795 static bool update_sd_pick_busiest(struct lb_env *env,
8796 struct sd_lb_stats *sds,
8797 struct sched_group *sg,
8798 struct sg_lb_stats *sgs)
8799 {
8800 struct sg_lb_stats *busiest = &sds->busiest_stat;
8801
8802 /* Make sure that there is at least one task to pull */
8803 if (!sgs->sum_h_nr_running)
8804 return false;
8805
8806 /*
8807 * Don't try to pull misfit tasks we can't help.
8808 * We can use max_capacity here as reduction in capacity on some
8809 * CPUs in the group should either be possible to resolve
8810 * internally or be covered by avg_load imbalance (eventually).
8811 */
8812 if (sgs->group_type == group_misfit_task &&
8813 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
8814 sds->local_stat.group_type != group_has_spare))
8815 return false;
8816
8817 if (sgs->group_type > busiest->group_type)
8818 return true;
8819
8820 if (sgs->group_type < busiest->group_type)
8821 return false;
8822
8823 /*
8824 * The candidate and the current busiest group are the same type of
8825 * group. Let's check which one is the busiest according to the type.
8826 */
8827
8828 switch (sgs->group_type) {
8829 case group_overloaded:
8830 /* Select the overloaded group with highest avg_load. */
8831 if (sgs->avg_load <= busiest->avg_load)
8832 return false;
8833 break;
8834
8835 case group_imbalanced:
8836 /*
8837 * Select the 1st imbalanced group as we don't have any way to
8838 * choose one over another.
8839 */
8840 return false;
8841
8842 case group_asym_packing:
8843 /* Prefer to move work away from the lowest priority CPU */
8844 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
8845 return false;
8846 break;
8847
8848 case group_misfit_task:
8849 /*
8850 * If we have more than one misfit sg, go with the biggest
8851 * misfit.
8852 */
8853 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
8854 return false;
8855 break;
8856
8857 case group_fully_busy:
8858 /*
8859 * Select the fully busy group with highest avg_load. In
8860 * theory, there is no need to pull tasks from such a
8861 * group because tasks have all the compute capacity they need,
8862 * but we can still improve the overall throughput by reducing
8863 * contention when accessing shared HW resources.
8864 *
8865 * XXX for now avg_load is not computed and always 0 so we
8866 * select the 1st one.
8867 */
8868 if (sgs->avg_load <= busiest->avg_load)
8869 return false;
8870 break;
8871
8872 case group_has_spare:
8873 /*
8874 * Select the non-overloaded group with the lowest number of idle
8875 * CPUs and the highest number of running tasks. We could also
8876 * compare the spare capacity, which is more stable, but the
8877 * group may end up with less spare capacity yet more idle
8878 * CPUs, which means fewer opportunities to pull tasks.
8879 */
8880 if (sgs->idle_cpus > busiest->idle_cpus)
8881 return false;
8882 else if ((sgs->idle_cpus == busiest->idle_cpus) &&
8883 (sgs->sum_nr_running <= busiest->sum_nr_running))
8884 return false;
8885
8886 break;
8887 }
8888
8889 /*
8890 * Candidate sg has no more than one task per CPU and has higher
8891 * per-CPU capacity.
Migrating tasks to less capable CPUs may harm
8892 * throughput. Maximize throughput; power/energy consequences are not
8893 * considered.
8894 */
8895 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
8896 (sgs->group_type <= group_fully_busy) &&
8897 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
8898 return false;
8899
8900 return true;
8901 }
8902
8903 #ifdef CONFIG_NUMA_BALANCING
8904 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
8905 {
8906 if (sgs->sum_h_nr_running > sgs->nr_numa_running)
8907 return regular;
8908 if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
8909 return remote;
8910 return all;
8911 }
8912
8913 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
8914 {
8915 if (rq->nr_running > rq->nr_numa_running)
8916 return regular;
8917 if (rq->nr_running > rq->nr_preferred_running)
8918 return remote;
8919 return all;
8920 }
8921 #else
8922 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
8923 {
8924 return all;
8925 }
8926
8927 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
8928 {
8929 return regular;
8930 }
8931 #endif /* CONFIG_NUMA_BALANCING */
8932
8933
8934 struct sg_lb_stats;
8935
8936 /*
8937 * task_running_on_cpu - return 1 if @p is running on @cpu.
8938 */
8939
8940 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
8941 {
8942 /* Task has no contribution or is new */
8943 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
8944 return 0;
8945
8946 if (task_on_rq_queued(p))
8947 return 1;
8948
8949 return 0;
8950 }
8951
8952 /**
8953 * idle_cpu_without - would a given CPU be idle without p?
8954 * @cpu: the processor on which idleness is tested.
8955 * @p: task which should be ignored.
8956 *
8957 * Return: 1 if the CPU would be idle. 0 otherwise.
8958 */
8959 static int idle_cpu_without(int cpu, struct task_struct *p)
8960 {
8961 struct rq *rq = cpu_rq(cpu);
8962
8963 if (rq->curr != rq->idle && rq->curr != p)
8964 return 0;
8965
8966 /*
8967 * rq->nr_running can't be used but an updated version without the
8968 * impact of p on cpu must be used instead. The updated nr_running
8969 * must be computed and tested before calling idle_cpu_without().
8970 */
8971
8972 #ifdef CONFIG_SMP
8973 if (rq->ttwu_pending)
8974 return 0;
8975 #endif
8976
8977 return 1;
8978 }
8979
8980 /*
8981 * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
8982 * @sd: The sched_domain level to look for idlest group.
8983 * @group: sched_group whose statistics are to be updated.
8984 * @sgs: variable to hold the statistics for this group.
8985 * @p: The task for which we look for the idlest group/CPU.
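 *
 * (The *_without() accessors below back out @p's own contribution,
 * e.g. cpu_util_without(), so the group is evaluated as if @p had
 * already left its current CPU.)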
8986 */
8987 static inline void update_sg_wakeup_stats(struct sched_domain *sd,
8988 struct sched_group *group,
8989 struct sg_lb_stats *sgs,
8990 struct task_struct *p)
8991 {
8992 int i, nr_running;
8993
8994 memset(sgs, 0, sizeof(*sgs));
8995
8996 for_each_cpu(i, sched_group_span(group)) {
8997 struct rq *rq = cpu_rq(i);
8998 unsigned int local;
8999
9000 sgs->group_load += cpu_load_without(rq, p);
9001 sgs->group_util += cpu_util_without(i, p);
9002 sgs->group_runnable += cpu_runnable_without(rq, p);
9003 local = task_running_on_cpu(i, p);
9004 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
9005
9006 nr_running = rq->nr_running - local;
9007 sgs->sum_nr_running += nr_running;
9008
9009 /*
9010 * No need to call idle_cpu_without() if nr_running is not 0
9011 */
9012 if (!nr_running && idle_cpu_without(i, p))
9013 sgs->idle_cpus++;
9014
9015 }
9016
9017 /* Check if task fits in the group */
9018 if (sd->flags & SD_ASYM_CPUCAPACITY &&
9019 !task_fits_capacity(p, group->sgc->max_capacity)) {
9020 sgs->group_misfit_task_load = 1;
9021 }
9022
9023 sgs->group_capacity = group->sgc->capacity;
9024
9025 sgs->group_weight = group->group_weight;
9026
9027 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
9028
9029 /*
9030 * Computing avg_load makes sense only when group is fully busy or
9031 * overloaded
9032 */
9033 if (sgs->group_type == group_fully_busy ||
9034 sgs->group_type == group_overloaded)
9035 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
9036 sgs->group_capacity;
9037 }
9038
9039 static bool update_pick_idlest(struct sched_group *idlest,
9040 struct sg_lb_stats *idlest_sgs,
9041 struct sched_group *group,
9042 struct sg_lb_stats *sgs)
9043 {
9044 if (sgs->group_type < idlest_sgs->group_type)
9045 return true;
9046
9047 if (sgs->group_type > idlest_sgs->group_type)
9048 return false;
9049
9050 /*
9051 * The candidate and the current idlest group are the same type of
9052 * group. Let's check which one is the idlest according to the type.
9053 */
9054
9055 switch (sgs->group_type) {
9056 case group_overloaded:
9057 case group_fully_busy:
9058 /* Select the group with lowest avg_load. */
9059 if (idlest_sgs->avg_load <= sgs->avg_load)
9060 return false;
9061 break;
9062
9063 case group_imbalanced:
9064 case group_asym_packing:
9065 /* Those types are not used in the slow wakeup path */
9066 return false;
9067
9068 case group_misfit_task:
9069 /* Select group with the highest max capacity */
9070 if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
9071 return false;
9072 break;
9073
9074 case group_has_spare:
9075 /* Select group with most idle CPUs */
9076 if (idlest_sgs->idle_cpus > sgs->idle_cpus)
9077 return false;
9078
9079 /* Select group with lowest group_util */
9080 if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
9081 idlest_sgs->group_util <= sgs->group_util)
9082 return false;
9083
9084 break;
9085 }
9086
9087 return true;
9088 }
9089
9090 /*
9091 * Allow a NUMA imbalance if the number of busy CPUs is below the domain's
9092 * imbalance threshold (imb_numa_nr). This is an approximation as the number
9093 * of running tasks may not be related to the number of busy CPUs due to sched_setaffinity.
9094 */
9095 static inline bool allow_numa_imbalance(int running, int imb_numa_nr)
9096 {
9097 return running <= imb_numa_nr;
9098 }
9099
9100 /*
9101 * find_idlest_group() finds and returns the least busy CPU group within the
9102 * domain.
9103 *
9104 * Assumes p is allowed on at least one CPU in sd.
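 *
 * (Callers such as find_idlest_cpu() walk down the domain tree,
 * picking the idlest group at each level until a single CPU is left.)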
9105 */ 9106 static struct sched_group * 9107 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) 9108 { 9109 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups; 9110 struct sg_lb_stats local_sgs, tmp_sgs; 9111 struct sg_lb_stats *sgs; 9112 unsigned long imbalance; 9113 struct sg_lb_stats idlest_sgs = { 9114 .avg_load = UINT_MAX, 9115 .group_type = group_overloaded, 9116 }; 9117 9118 do { 9119 int local_group; 9120 9121 /* Skip over this group if it has no CPUs allowed */ 9122 if (!cpumask_intersects(sched_group_span(group), 9123 p->cpus_ptr)) 9124 continue; 9125 9126 /* Skip over this group if no cookie matched */ 9127 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) 9128 continue; 9129 9130 local_group = cpumask_test_cpu(this_cpu, 9131 sched_group_span(group)); 9132 9133 if (local_group) { 9134 sgs = &local_sgs; 9135 local = group; 9136 } else { 9137 sgs = &tmp_sgs; 9138 } 9139 9140 update_sg_wakeup_stats(sd, group, sgs, p); 9141 9142 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { 9143 idlest = group; 9144 idlest_sgs = *sgs; 9145 } 9146 9147 } while (group = group->next, group != sd->groups); 9148 9149 9150 /* There is no idlest group to push tasks to */ 9151 if (!idlest) 9152 return NULL; 9153 9154 /* The local group has been skipped because of CPU affinity */ 9155 if (!local) 9156 return idlest; 9157 9158 /* 9159 * If the local group is idler than the selected idlest group 9160 * don't try and push the task. 9161 */ 9162 if (local_sgs.group_type < idlest_sgs.group_type) 9163 return NULL; 9164 9165 /* 9166 * If the local group is busier than the selected idlest group 9167 * try and push the task. 9168 */ 9169 if (local_sgs.group_type > idlest_sgs.group_type) 9170 return idlest; 9171 9172 switch (local_sgs.group_type) { 9173 case group_overloaded: 9174 case group_fully_busy: 9175 9176 /* Calculate allowed imbalance based on load */ 9177 imbalance = scale_load_down(NICE_0_LOAD) * 9178 (sd->imbalance_pct-100) / 100; 9179 9180 /* 9181 * When comparing groups across NUMA domains, it's possible for 9182 * the local domain to be very lightly loaded relative to the 9183 * remote domains but "imbalance" skews the comparison making 9184 * remote CPUs look much more favourable. When considering 9185 * cross-domain, add imbalance to the load on the remote node 9186 * and consider staying local. 9187 */ 9188 9189 if ((sd->flags & SD_NUMA) && 9190 ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load)) 9191 return NULL; 9192 9193 /* 9194 * If the local group is less loaded than the selected 9195 * idlest group don't try and push any tasks. 
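 *
 * (With the common imbalance_pct of 117, the "imbalance" slack
 * computed above is 1024 * 17 / 100 = 174 load units.)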
9196 */
9197 if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
9198 return NULL;
9199
9200 if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
9201 return NULL;
9202 break;
9203
9204 case group_imbalanced:
9205 case group_asym_packing:
9206 /* Those types are not used in the slow wakeup path */
9207 return NULL;
9208
9209 case group_misfit_task:
9210 /* Select group with the highest max capacity */
9211 if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
9212 return NULL;
9213 break;
9214
9215 case group_has_spare:
9216 if (sd->flags & SD_NUMA) {
9217 #ifdef CONFIG_NUMA_BALANCING
9218 int idlest_cpu;
9219 /*
9220 * If there is spare capacity at NUMA, try to select
9221 * the preferred node
9222 */
9223 if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
9224 return NULL;
9225
9226 idlest_cpu = cpumask_first(sched_group_span(idlest));
9227 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
9228 return idlest;
9229 #endif
9230 /*
9231 * Otherwise, keep the task close to the wakeup source
9232 * and improve locality if the number of running tasks
9233 * would remain below the threshold where an imbalance is
9234 * allowed. If there is a real need for migration,
9235 * periodic load balance will take care of it.
9236 */
9237 if (allow_numa_imbalance(local_sgs.sum_nr_running + 1, sd->imb_numa_nr))
9238 return NULL;
9239 }
9240
9241 /*
9242 * Select the group with the highest number of idle CPUs. We could
9243 * also compare the utilization, which is more stable, but the
9244 * group may end up with less spare capacity but more
9245 * idle CPUs, which means more opportunities to run tasks.
9246 */
9247 if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
9248 return NULL;
9249 break;
9250 }
9251
9252 return idlest;
9253 }
9254
9255 /**
9256 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
9257 * @env: The load balancing environment.
9258 * @sds: variable to hold the statistics for this sched_domain.
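 *
 * One pass over env->sd->groups: the local group's stats land in
 * sds->local_stat, while every other group is run through
 * update_sd_pick_busiest() to track the busiest.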
9259 */ 9260 9261 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) 9262 { 9263 struct sched_domain *child = env->sd->child; 9264 struct sched_group *sg = env->sd->groups; 9265 struct sg_lb_stats *local = &sds->local_stat; 9266 struct sg_lb_stats tmp_sgs; 9267 int sg_status = 0; 9268 9269 do { 9270 struct sg_lb_stats *sgs = &tmp_sgs; 9271 int local_group; 9272 9273 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); 9274 if (local_group) { 9275 sds->local = sg; 9276 sgs = local; 9277 9278 if (env->idle != CPU_NEWLY_IDLE || 9279 time_after_eq(jiffies, sg->sgc->next_update)) 9280 update_group_capacity(env->sd, env->dst_cpu); 9281 } 9282 9283 update_sg_lb_stats(env, sds, sg, sgs, &sg_status); 9284 9285 if (local_group) 9286 goto next_group; 9287 9288 9289 if (update_sd_pick_busiest(env, sds, sg, sgs)) { 9290 sds->busiest = sg; 9291 sds->busiest_stat = *sgs; 9292 } 9293 9294 next_group: 9295 /* Now, start updating sd_lb_stats */ 9296 sds->total_load += sgs->group_load; 9297 sds->total_capacity += sgs->group_capacity; 9298 9299 sg = sg->next; 9300 } while (sg != env->sd->groups); 9301 9302 /* Tag domain that child domain prefers tasks go to siblings first */ 9303 sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING; 9304 9305 9306 if (env->sd->flags & SD_NUMA) 9307 env->fbq_type = fbq_classify_group(&sds->busiest_stat); 9308 9309 if (!env->sd->parent) { 9310 struct root_domain *rd = env->dst_rq->rd; 9311 9312 /* update overload indicator if we are at root domain */ 9313 WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD); 9314 9315 /* Update over-utilization (tipping point, U >= 0) indicator */ 9316 WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED); 9317 trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED); 9318 } else if (sg_status & SG_OVERUTILIZED) { 9319 struct root_domain *rd = env->dst_rq->rd; 9320 9321 WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED); 9322 trace_sched_overutilized_tp(rd, SG_OVERUTILIZED); 9323 } 9324 } 9325 9326 #define NUMA_IMBALANCE_MIN 2 9327 9328 static inline long adjust_numa_imbalance(int imbalance, 9329 int dst_running, int imb_numa_nr) 9330 { 9331 if (!allow_numa_imbalance(dst_running, imb_numa_nr)) 9332 return imbalance; 9333 9334 /* 9335 * Allow a small imbalance based on a simple pair of communicating 9336 * tasks that remain local when the destination is lightly loaded. 9337 */ 9338 if (imbalance <= NUMA_IMBALANCE_MIN) 9339 return 0; 9340 9341 return imbalance; 9342 } 9343 9344 /** 9345 * calculate_imbalance - Calculate the amount of imbalance present within the 9346 * groups of a given sched_domain during load balance. 9347 * @env: load balance environment 9348 * @sds: statistics of the sched_domain whose imbalance is to be calculated. 9349 */ 9350 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 9351 { 9352 struct sg_lb_stats *local, *busiest; 9353 9354 local = &sds->local_stat; 9355 busiest = &sds->busiest_stat; 9356 9357 if (busiest->group_type == group_misfit_task) { 9358 /* Set imbalance to allow misfit tasks to be balanced. */ 9359 env->migration_type = migrate_misfit; 9360 env->imbalance = 1; 9361 return; 9362 } 9363 9364 if (busiest->group_type == group_asym_packing) { 9365 /* 9366 * In case of asym capacity, we will try to migrate all load to 9367 * the preferred CPU. 
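 * The imbalance is therefore expressed in number of tasks
 * (migrate_task, busiest->sum_h_nr_running below), not in load units.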
9368 */
9369 env->migration_type = migrate_task;
9370 env->imbalance = busiest->sum_h_nr_running;
9371 return;
9372 }
9373
9374 if (busiest->group_type == group_imbalanced) {
9375 /*
9376 * In the group_imb case we cannot rely on group-wide averages
9377 * to ensure CPU-load equilibrium, try to move any task to fix
9378 * the imbalance. The next load balance will take care of
9379 * balancing back the system.
9380 */
9381 env->migration_type = migrate_task;
9382 env->imbalance = 1;
9383 return;
9384 }
9385
9386 /*
9387 * Try to use spare capacity of local group without overloading it or
9388 * emptying busiest.
9389 */
9390 if (local->group_type == group_has_spare) {
9391 if ((busiest->group_type > group_fully_busy) &&
9392 !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
9393 /*
9394 * If busiest is overloaded, try to fill spare
9395 * capacity. This might end up creating spare capacity
9396 * in busiest or busiest still being overloaded but
9397 * there is no simple way to directly compute the
9398 * amount of load to migrate in order to balance the
9399 * system.
9400 */
9401 env->migration_type = migrate_util;
9402 env->imbalance = max(local->group_capacity, local->group_util) -
9403 local->group_util;
9404
9405 /*
9406 * In some cases, the group's utilization is max or even
9407 * higher than capacity because of migrations but the
9408 * local CPU is (newly) idle. There is at least one
9409 * waiting task in this overloaded busiest group. Let's
9410 * try to pull it.
9411 */
9412 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
9413 env->migration_type = migrate_task;
9414 env->imbalance = 1;
9415 }
9416
9417 return;
9418 }
9419
9420 if (busiest->group_weight == 1 || sds->prefer_sibling) {
9421 unsigned int nr_diff = busiest->sum_nr_running;
9422 /*
9423 * When the prefer-sibling flag is set, evenly spread
9424 * running tasks between groups.
9425 */
9426 env->migration_type = migrate_task;
9427 lsub_positive(&nr_diff, local->sum_nr_running);
9428 env->imbalance = nr_diff >> 1;
9429 } else {
9430
9431 /*
9432 * If there is no overload, we just want to even out the
9433 * number of idle CPUs.
9434 */
9435 env->migration_type = migrate_task;
9436 env->imbalance = max_t(long, 0, (local->idle_cpus -
9437 busiest->idle_cpus) >> 1);
9438 }
9439
9440 /* Consider allowing a small imbalance between NUMA groups */
9441 if (env->sd->flags & SD_NUMA) {
9442 env->imbalance = adjust_numa_imbalance(env->imbalance,
9443 local->sum_nr_running + 1, env->sd->imb_numa_nr);
9444 }
9445
9446 return;
9447 }
9448
9449 /*
9450 * Local is fully busy but has to take more load to relieve the
9451 * busiest group
9452 */
9453 if (local->group_type < group_overloaded) {
9454 /*
9455 * Local will become overloaded so the avg_load metrics are
9456 * finally needed.
9457 */
9458
9459 local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
9460 local->group_capacity;
9461
9462 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
9463 sds->total_capacity;
9464 /*
9465 * If the local group is more loaded than the selected
9466 * busiest group don't try to pull any tasks.
9467 */
9468 if (local->avg_load >= busiest->avg_load) {
9469 env->imbalance = 0;
9470 return;
9471 }
9472 }
9473
9474 /*
9475 * Both groups are or will become overloaded and we're trying to get all
9476 * the CPUs to the average_load, so we don't want to push ourselves
9477 * above the average load, nor do we wish to reduce the max loaded CPU
9478 * below the average load. At the same time, we also don't want to
9479 *
At the same time, we also don't want to 9479 * reduce the group load below the group capacity. Thus we look for 9480 * the minimum possible imbalance. 9481 */ 9482 env->migration_type = migrate_load; 9483 env->imbalance = min( 9484 (busiest->avg_load - sds->avg_load) * busiest->group_capacity, 9485 (sds->avg_load - local->avg_load) * local->group_capacity 9486 ) / SCHED_CAPACITY_SCALE; 9487 } 9488 9489 /******* find_busiest_group() helpers end here *********************/ 9490 9491 /* 9492 * Decision matrix according to the local and busiest group type: 9493 * 9494 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded 9495 * has_spare nr_idle balanced N/A N/A balanced balanced 9496 * fully_busy nr_idle nr_idle N/A N/A balanced balanced 9497 * misfit_task force N/A N/A N/A force force 9498 * asym_packing force force N/A N/A force force 9499 * imbalanced force force N/A N/A force force 9500 * overloaded force force N/A N/A force avg_load 9501 * 9502 * N/A : Not Applicable because already filtered while updating 9503 * statistics. 9504 * balanced : The system is balanced for these 2 groups. 9505 * force : Calculate the imbalance as load migration is probably needed. 9506 * avg_load : Only if imbalance is significant enough. 9507 * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite 9508 * different in groups. 9509 */ 9510 9511 /** 9512 * find_busiest_group - Returns the busiest group within the sched_domain 9513 * if there is an imbalance. 9514 * @env: The load balancing environment. 9515 * 9516 * Also calculates the amount of runnable load which should be moved 9517 * to restore balance. 9518 * 9519 * Return: - The busiest group if imbalance exists. 9520 */ 9521 static struct sched_group *find_busiest_group(struct lb_env *env) 9522 { 9523 struct sg_lb_stats *local, *busiest; 9524 struct sd_lb_stats sds; 9525 9526 init_sd_lb_stats(&sds); 9527 9528 /* 9529 * Compute the various statistics relevant for load balancing at 9530 * this level. 9531 */ 9532 update_sd_lb_stats(env, &sds); 9533 9534 if (sched_energy_enabled()) { 9535 struct root_domain *rd = env->dst_rq->rd; 9536 9537 if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) 9538 goto out_balanced; 9539 } 9540 9541 local = &sds.local_stat; 9542 busiest = &sds.busiest_stat; 9543 9544 /* There is no busy sibling group to pull tasks from */ 9545 if (!sds.busiest) 9546 goto out_balanced; 9547 9548 /* Misfit tasks should be dealt with regardless of the avg load */ 9549 if (busiest->group_type == group_misfit_task) 9550 goto force_balance; 9551 9552 /* ASYM feature bypasses nice load balance check */ 9553 if (busiest->group_type == group_asym_packing) 9554 goto force_balance; 9555 9556 /* 9557 * If the busiest group is imbalanced the below checks don't 9558 * work because they assume all things are equal, which typically 9559 * isn't true due to cpus_ptr constraints and the like. 9560 */ 9561 if (busiest->group_type == group_imbalanced) 9562 goto force_balance; 9563 9564 /* 9565 * If the local group is busier than the selected busiest group 9566 * don't try and pull any tasks. 9567 */ 9568 if (local->group_type > busiest->group_type) 9569 goto out_balanced; 9570 9571 /* 9572 * When groups are overloaded, use the avg_load to ensure fairness 9573 * between tasks. 9574 */ 9575 if (local->group_type == group_overloaded) { 9576 /* 9577 * If the local group is more loaded than the selected 9578 * busiest group don't try to pull any tasks. 
9579 */
9580 if (local->avg_load >= busiest->avg_load)
9581 goto out_balanced;
9582
9583 /* XXX broken for overlapping NUMA groups */
9584 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
9585 sds.total_capacity;
9586
9587 /*
9588 * Don't pull any tasks if this group is already above the
9589 * domain average load.
9590 */
9591 if (local->avg_load >= sds.avg_load)
9592 goto out_balanced;
9593
9594 /*
9595 * If the busiest group is more loaded, use imbalance_pct to be
9596 * conservative.
9597 */
9598 if (100 * busiest->avg_load <=
9599 env->sd->imbalance_pct * local->avg_load)
9600 goto out_balanced;
9601 }
9602
9603 /* Try to move all excess tasks to child's sibling domain */
9604 if (sds.prefer_sibling && local->group_type == group_has_spare &&
9605 busiest->sum_nr_running > local->sum_nr_running + 1)
9606 goto force_balance;
9607
9608 if (busiest->group_type != group_overloaded) {
9609 if (env->idle == CPU_NOT_IDLE)
9610 /*
9611 * If the busiest group is not overloaded (and as a
9612 * result the local one too) but this CPU is already
9613 * busy, let another idle CPU try to pull tasks.
9614 */
9615 goto out_balanced;
9616
9617 if (busiest->group_weight > 1 &&
9618 local->idle_cpus <= (busiest->idle_cpus + 1))
9619 /*
9620 * If the busiest group is not overloaded
9621 * and there is no imbalance between this and busiest
9622 * group wrt idle CPUs, it is balanced. The imbalance
9623 * becomes significant if the diff is greater than 1;
9624 * otherwise we might end up just moving the imbalance
9625 * to another group. Of course this applies only if
9626 * there is more than 1 CPU per group.
9627 */
9628 goto out_balanced;
9629
9630 if (busiest->sum_h_nr_running == 1)
9631 /*
9632 * busiest doesn't have any tasks waiting to run
9633 */
9634 goto out_balanced;
9635 }
9636
9637 force_balance:
9638 /* Looks like there is an imbalance. Compute it */
9639 calculate_imbalance(env, &sds);
9640 return env->imbalance ? sds.busiest : NULL;
9641
9642 out_balanced:
9643 env->imbalance = 0;
9644 return NULL;
9645 }
9646
9647 /*
9648 * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
9649 */
9650 static struct rq *find_busiest_queue(struct lb_env *env,
9651 struct sched_group *group)
9652 {
9653 struct rq *busiest = NULL, *rq;
9654 unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
9655 unsigned int busiest_nr = 0;
9656 int i;
9657
9658 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
9659 unsigned long capacity, load, util;
9660 unsigned int nr_running;
9661 enum fbq_type rt;
9662
9663 rq = cpu_rq(i);
9664 rt = fbq_classify_rq(rq);
9665
9666 /*
9667 * We classify groups/runqueues into three groups:
9668 * - regular: there are !numa tasks
9669 * - remote: there are numa tasks that run on the 'wrong' node
9670 * - all: there is no distinction
9671 *
9672 * In order to avoid migrating ideally placed numa tasks,
9673 * ignore those when there are better options.
9674 *
9675 * If we ignore the actual busiest queue to migrate another
9676 * task, the next balance pass can still reduce the busiest
9677 * queue by moving tasks around inside the node.
9678 *
9679 * If we cannot move enough load due to this classification
9680 * the next pass will adjust the group classification and
9681 * allow migration of more tasks.
9682 *
9683 * Both cases only affect the total convergence complexity.
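 *
 * (E.g. with env->fbq_type == regular, both 'remote' and 'all' rqs
 * are skipped by the rt > env->fbq_type test below.)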
9684 */ 9685 if (rt > env->fbq_type) 9686 continue; 9687 9688 nr_running = rq->cfs.h_nr_running; 9689 if (!nr_running) 9690 continue; 9691 9692 capacity = capacity_of(i); 9693 9694 /* 9695 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could 9696 * eventually lead to active_balancing high->low capacity. 9697 * Higher per-CPU capacity is considered better than balancing 9698 * average load. 9699 */ 9700 if (env->sd->flags & SD_ASYM_CPUCAPACITY && 9701 !capacity_greater(capacity_of(env->dst_cpu), capacity) && 9702 nr_running == 1) 9703 continue; 9704 9705 /* Make sure we only pull tasks from a CPU of lower priority */ 9706 if ((env->sd->flags & SD_ASYM_PACKING) && 9707 sched_asym_prefer(i, env->dst_cpu) && 9708 nr_running == 1) 9709 continue; 9710 9711 switch (env->migration_type) { 9712 case migrate_load: 9713 /* 9714 * When comparing with load imbalance, use cpu_load() 9715 * which is not scaled with the CPU capacity. 9716 */ 9717 load = cpu_load(rq); 9718 9719 if (nr_running == 1 && load > env->imbalance && 9720 !check_cpu_capacity(rq, env->sd)) 9721 break; 9722 9723 /* 9724 * For the load comparisons with the other CPUs, 9725 * consider the cpu_load() scaled with the CPU 9726 * capacity, so that the load can be moved away 9727 * from the CPU that is potentially running at a 9728 * lower capacity. 9729 * 9730 * Thus we're looking for max(load_i / capacity_i); 9731 * crosswise multiplication to rid ourselves of the 9732 * division works out to: 9733 * load_i * capacity_j > load_j * capacity_i; 9734 * where j is our previous maximum. 9735 */ 9736 if (load * busiest_capacity > busiest_load * capacity) { 9737 busiest_load = load; 9738 busiest_capacity = capacity; 9739 busiest = rq; 9740 } 9741 break; 9742 9743 case migrate_util: 9744 util = cpu_util_cfs(i); 9745 9746 /* 9747 * Don't try to pull utilization from a CPU with one 9748 * running task. Whatever its utilization, we will fail 9749 * to detach the task. 9750 */ 9751 if (nr_running <= 1) 9752 continue; 9753 9754 if (busiest_util < util) { 9755 busiest_util = util; 9756 busiest = rq; 9757 } 9758 break; 9759 9760 case migrate_task: 9761 if (busiest_nr < nr_running) { 9762 busiest_nr = nr_running; 9763 busiest = rq; 9764 } 9765 break; 9766 9767 case migrate_misfit: 9768 /* 9769 * For ASYM_CPUCAPACITY domains with misfit tasks we 9770 * simply seek the "biggest" misfit task. 9771 */ 9772 if (rq->misfit_task_load > busiest_load) { 9773 busiest_load = rq->misfit_task_load; 9774 busiest = rq; 9775 } 9776 9777 break; 9778 9779 } 9780 } 9781 9782 return busiest; 9783 } 9784 9785 /* 9786 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but 9787 * it works so long as it is large enough. 9788 */ 9789 #define MAX_PINNED_INTERVAL 512 9790 9791 static inline bool 9792 asym_active_balance(struct lb_env *env) 9793 { 9794 /* 9795 * ASYM_PACKING needs to force migrate tasks from busy but 9796 * lower priority CPUs in order to pack all tasks in the 9797 * highest priority CPUs.
9798 */ 9799 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && 9800 sched_asym_prefer(env->dst_cpu, env->src_cpu); 9801 } 9802 9803 static inline bool 9804 imbalanced_active_balance(struct lb_env *env) 9805 { 9806 struct sched_domain *sd = env->sd; 9807 9808 /* 9809 * The imbalanced case includes the case of pinned tasks preventing a fair 9810 * distribution of the load on the system, but also the even distribution of 9811 * threads on a system with spare capacity. 9812 */ 9813 if ((env->migration_type == migrate_task) && 9814 (sd->nr_balance_failed > sd->cache_nice_tries+2)) 9815 return 1; 9816 9817 return 0; 9818 } 9819 9820 static int need_active_balance(struct lb_env *env) 9821 { 9822 struct sched_domain *sd = env->sd; 9823 9824 if (asym_active_balance(env)) 9825 return 1; 9826 9827 if (imbalanced_active_balance(env)) 9828 return 1; 9829 9830 /* 9831 * The dst_cpu is idle and the src_cpu has only 1 CFS task. 9832 * It's worth migrating the task if the src_cpu's capacity is reduced 9833 * because of other sched_class activity or IRQs, provided more capacity 9834 * stays available on dst_cpu. 9835 */ 9836 if ((env->idle != CPU_NOT_IDLE) && 9837 (env->src_rq->cfs.h_nr_running == 1)) { 9838 if ((check_cpu_capacity(env->src_rq, sd)) && 9839 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) 9840 return 1; 9841 } 9842 9843 if (env->migration_type == migrate_misfit) 9844 return 1; 9845 9846 return 0; 9847 } 9848 9849 static int active_load_balance_cpu_stop(void *data); 9850 9851 static int should_we_balance(struct lb_env *env) 9852 { 9853 struct sched_group *sg = env->sd->groups; 9854 int cpu; 9855 9856 /* 9857 * Ensure the balancing environment is consistent; this can happen 9858 * when the softirq triggers 'during' hotplug. 9859 */ 9860 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) 9861 return 0; 9862 9863 /* 9864 * In the newly idle case, we will allow all the CPUs 9865 * to do the newly idle load balance. 9866 */ 9867 if (env->idle == CPU_NEWLY_IDLE) 9868 return 1; 9869 9870 /* Try to find the first idle CPU */ 9871 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { 9872 if (!idle_cpu(cpu)) 9873 continue; 9874 9875 /* Are we the first idle CPU? */ 9876 return cpu == env->dst_cpu; 9877 } 9878 9879 /* Are we the first CPU of this group? */ 9880 return group_balance_cpu(sg) == env->dst_cpu; 9881 } 9882 9883 /* 9884 * Check this_cpu to ensure it is balanced within domain. Attempt to move 9885 * tasks if there is an imbalance.
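*
* Returns ld_moved, the amount of load moved; 0 means the domain was
* balanced or nothing could be pulled. *continue_balancing is cleared
* when should_we_balance() decides that another CPU of the group should
* run the balancing pass instead.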
9886 */ 9887 static int load_balance(int this_cpu, struct rq *this_rq, 9888 struct sched_domain *sd, enum cpu_idle_type idle, 9889 int *continue_balancing) 9890 { 9891 int ld_moved, cur_ld_moved, active_balance = 0; 9892 struct sched_domain *sd_parent = sd->parent; 9893 struct sched_group *group; 9894 struct rq *busiest; 9895 struct rq_flags rf; 9896 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); 9897 9898 struct lb_env env = { 9899 .sd = sd, 9900 .dst_cpu = this_cpu, 9901 .dst_rq = this_rq, 9902 .dst_grpmask = sched_group_span(sd->groups), 9903 .idle = idle, 9904 .loop_break = sched_nr_migrate_break, 9905 .cpus = cpus, 9906 .fbq_type = all, 9907 .tasks = LIST_HEAD_INIT(env.tasks), 9908 }; 9909 9910 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); 9911 9912 schedstat_inc(sd->lb_count[idle]); 9913 9914 redo: 9915 if (!should_we_balance(&env)) { 9916 *continue_balancing = 0; 9917 goto out_balanced; 9918 } 9919 9920 group = find_busiest_group(&env); 9921 if (!group) { 9922 schedstat_inc(sd->lb_nobusyg[idle]); 9923 goto out_balanced; 9924 } 9925 9926 busiest = find_busiest_queue(&env, group); 9927 if (!busiest) { 9928 schedstat_inc(sd->lb_nobusyq[idle]); 9929 goto out_balanced; 9930 } 9931 9932 BUG_ON(busiest == env.dst_rq); 9933 9934 schedstat_add(sd->lb_imbalance[idle], env.imbalance); 9935 9936 env.src_cpu = busiest->cpu; 9937 env.src_rq = busiest; 9938 9939 ld_moved = 0; 9940 /* Clear this flag as soon as we find a pullable task */ 9941 env.flags |= LBF_ALL_PINNED; 9942 if (busiest->nr_running > 1) { 9943 /* 9944 * Attempt to move tasks. If find_busiest_group has found 9945 * an imbalance but busiest->nr_running <= 1, the group is 9946 * still unbalanced. ld_moved simply stays zero, so it is 9947 * correctly treated as an imbalance. 9948 */ 9949 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); 9950 9951 more_balance: 9952 rq_lock_irqsave(busiest, &rf); 9953 update_rq_clock(busiest); 9954 9955 /* 9956 * cur_ld_moved - load moved in current iteration 9957 * ld_moved - cumulative load moved across iterations 9958 */ 9959 cur_ld_moved = detach_tasks(&env); 9960 9961 /* 9962 * We've detached some tasks from busiest_rq. Every 9963 * task is marked "TASK_ON_RQ_MIGRATING", so we can safely 9964 * unlock busiest->lock and be sure 9965 * that nobody can manipulate the tasks in parallel. 9966 * See task_rq_lock() family for the details. 9967 */ 9968 9969 rq_unlock(busiest, &rf); 9970 9971 if (cur_ld_moved) { 9972 attach_tasks(&env); 9973 ld_moved += cur_ld_moved; 9974 } 9975 9976 local_irq_restore(rf.flags); 9977 9978 if (env.flags & LBF_NEED_BREAK) { 9979 env.flags &= ~LBF_NEED_BREAK; 9980 goto more_balance; 9981 } 9982 9983 /* 9984 * Revisit (affine) tasks on src_cpu that couldn't be moved to 9985 * us and move them to an alternate dst_cpu in our sched_group 9986 * where they can run. The upper limit on how many times we 9987 * iterate on the same src_cpu depends on the number of CPUs in our 9988 * sched_group. 9989 * 9990 * This changes load balance semantics a bit on who can move 9991 * load to a given_cpu. In addition to the given_cpu itself 9992 * (or an ilb_cpu acting on its behalf where given_cpu is 9993 * nohz-idle), we now have balance_cpu in a position to move 9994 * load to given_cpu. In rare situations, this may cause 9995 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding 9996 * _independently_ and at the _same_ time to move some load to 9997 * given_cpu) causing excess load to be moved to given_cpu
9998 * This, however, should rarely happen in practice, and 9999 * subsequent load balance cycles should correct any 10000 * excess load moved. 10001 */ 10002 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { 10003 10004 /* Prevent re-selecting dst_cpu via env's CPUs */ 10005 __cpumask_clear_cpu(env.dst_cpu, env.cpus); 10006 10007 env.dst_rq = cpu_rq(env.new_dst_cpu); 10008 env.dst_cpu = env.new_dst_cpu; 10009 env.flags &= ~LBF_DST_PINNED; 10010 env.loop = 0; 10011 env.loop_break = sched_nr_migrate_break; 10012 10013 /* 10014 * Go back to "more_balance" rather than "redo" since we 10015 * need to continue with the same src_cpu. 10016 */ 10017 goto more_balance; 10018 } 10019 10020 /* 10021 * We failed to reach balance because of affinity. 10022 */ 10023 if (sd_parent) { 10024 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 10025 10026 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) 10027 *group_imbalance = 1; 10028 } 10029 10030 /* All tasks on this runqueue were pinned by CPU affinity */ 10031 if (unlikely(env.flags & LBF_ALL_PINNED)) { 10032 __cpumask_clear_cpu(cpu_of(busiest), cpus); 10033 /* 10034 * Attempting to continue load balancing at the current 10035 * sched_domain level only makes sense if there are 10036 * active CPUs remaining as possible busiest CPUs to 10037 * pull load from, which are not contained within the 10038 * destination group that is receiving any migrated 10039 * load. 10040 */ 10041 if (!cpumask_subset(cpus, env.dst_grpmask)) { 10042 env.loop = 0; 10043 env.loop_break = sched_nr_migrate_break; 10044 goto redo; 10045 } 10046 goto out_all_pinned; 10047 } 10048 } 10049 10050 if (!ld_moved) { 10051 schedstat_inc(sd->lb_failed[idle]); 10052 /* 10053 * Increment the failure counter only on periodic balance. 10054 * We do not want newidle balance, which can be very 10055 * frequent, to pollute the failure counter, causing 10056 * excessive cache_hot migrations and active balances. 10057 */ 10058 if (idle != CPU_NEWLY_IDLE) 10059 sd->nr_balance_failed++; 10060 10061 if (need_active_balance(&env)) { 10062 unsigned long flags; 10063 10064 raw_spin_rq_lock_irqsave(busiest, flags); 10065 10066 /* 10067 * Don't kick the active_load_balance_cpu_stop, 10068 * if the curr task on busiest CPU can't be 10069 * moved to this_cpu: 10070 */ 10071 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { 10072 raw_spin_rq_unlock_irqrestore(busiest, flags); 10073 goto out_one_pinned; 10074 } 10075 10076 /* Record that we found at least one task that could run on this_cpu */ 10077 env.flags &= ~LBF_ALL_PINNED; 10078 10079 /* 10080 * ->active_balance synchronizes accesses to 10081 * ->active_balance_work. Once set, it's cleared 10082 * only after active load balance is finished. 10083 */ 10084 if (!busiest->active_balance) { 10085 busiest->active_balance = 1; 10086 busiest->push_cpu = this_cpu; 10087 active_balance = 1; 10088 } 10089 raw_spin_rq_unlock_irqrestore(busiest, flags); 10090 10091 if (active_balance) { 10092 stop_one_cpu_nowait(cpu_of(busiest), 10093 active_load_balance_cpu_stop, busiest, 10094 &busiest->active_balance_work); 10095 } 10096 } 10097 } else { 10098 sd->nr_balance_failed = 0; 10099 } 10100 10101 if (likely(!active_balance) || need_active_balance(&env)) { 10102 /* We were unbalanced, so reset the balancing interval */ 10103 sd->balance_interval = sd->min_interval; 10104 } 10105 10106 goto out; 10107 10108 out_balanced: 10109 /* 10110 * We reach balance although we may have faced some affinity 10111 * constraints.
Clear the imbalance flag only if other tasks got 10112 * a chance to move and fix the imbalance. 10113 */ 10114 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { 10115 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 10116 10117 if (*group_imbalance) 10118 *group_imbalance = 0; 10119 } 10120 10121 out_all_pinned: 10122 /* 10123 * We reach balance because all tasks are pinned at this level, so 10124 * we can't migrate them. Leave the imbalance flag set so the parent level 10125 * can try to migrate them. 10126 */ 10127 schedstat_inc(sd->lb_balanced[idle]); 10128 10129 sd->nr_balance_failed = 0; 10130 10131 out_one_pinned: 10132 ld_moved = 0; 10133 10134 /* 10135 * newidle_balance() disregards balance intervals, so we could 10136 * repeatedly reach this code, which would lead to balance_interval 10137 * skyrocketing in a short amount of time. Skip the balance_interval 10138 * increase logic to avoid that. 10139 */ 10140 if (env.idle == CPU_NEWLY_IDLE) 10141 goto out; 10142 10143 /* tune up the balancing interval */ 10144 if ((env.flags & LBF_ALL_PINNED && 10145 sd->balance_interval < MAX_PINNED_INTERVAL) || 10146 sd->balance_interval < sd->max_interval) 10147 sd->balance_interval *= 2; 10148 out: 10149 return ld_moved; 10150 } 10151 10152 static inline unsigned long 10153 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) 10154 { 10155 unsigned long interval = sd->balance_interval; 10156 10157 if (cpu_busy) 10158 interval *= sd->busy_factor; 10159 10160 /* scale ms to jiffies */ 10161 interval = msecs_to_jiffies(interval); 10162 10163 /* 10164 * Reduce likelihood of busy balancing at higher domains racing with 10165 * balancing at lower domains by preventing their balancing periods 10166 * from being multiples of each other. 10167 */ 10168 if (cpu_busy) 10169 interval -= 1; 10170 10171 interval = clamp(interval, 1UL, max_load_balance_interval); 10172 10173 return interval; 10174 } 10175 10176 static inline void 10177 update_next_balance(struct sched_domain *sd, unsigned long *next_balance) 10178 { 10179 unsigned long interval, next; 10180 10181 /* used by idle balance, so cpu_busy = 0 */ 10182 interval = get_sd_balance_interval(sd, 0); 10183 next = sd->last_balance + interval; 10184 10185 if (time_after(*next_balance, next)) 10186 *next_balance = next; 10187 } 10188 10189 /* 10190 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes 10191 * running tasks off the busiest CPU onto idle CPUs. It requires at 10192 * least 1 task to be running on each physical CPU where possible, and 10193 * avoids physical / logical imbalances. 10194 */ 10195 static int active_load_balance_cpu_stop(void *data) 10196 { 10197 struct rq *busiest_rq = data; 10198 int busiest_cpu = cpu_of(busiest_rq); 10199 int target_cpu = busiest_rq->push_cpu; 10200 struct rq *target_rq = cpu_rq(target_cpu); 10201 struct sched_domain *sd; 10202 struct task_struct *p = NULL; 10203 struct rq_flags rf; 10204 10205 rq_lock_irq(busiest_rq, &rf); 10206 /* 10207 * Between queueing the stop-work and running it is a hole in which 10208 * CPUs can become inactive. We should not move tasks from or to 10209 * inactive CPUs. 10210 */ 10211 if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) 10212 goto out_unlock; 10213 10214 /* Make sure the requested CPU hasn't gone down in the meantime: */ 10215 if (unlikely(busiest_cpu != smp_processor_id() || 10216 !busiest_rq->active_balance)) 10217 goto out_unlock; 10218 10219 /* Is there any task to move?
*/ 10220 if (busiest_rq->nr_running <= 1) 10221 goto out_unlock; 10222 10223 /* 10224 * This condition is "impossible", if it occurs 10225 * we need to fix it. Originally reported by 10226 * Bjorn Helgaas on a 128-CPU setup. 10227 */ 10228 BUG_ON(busiest_rq == target_rq); 10229 10230 /* Search for an sd spanning us and the target CPU. */ 10231 rcu_read_lock(); 10232 for_each_domain(target_cpu, sd) { 10233 if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) 10234 break; 10235 } 10236 10237 if (likely(sd)) { 10238 struct lb_env env = { 10239 .sd = sd, 10240 .dst_cpu = target_cpu, 10241 .dst_rq = target_rq, 10242 .src_cpu = busiest_rq->cpu, 10243 .src_rq = busiest_rq, 10244 .idle = CPU_IDLE, 10245 .flags = LBF_ACTIVE_LB, 10246 }; 10247 10248 schedstat_inc(sd->alb_count); 10249 update_rq_clock(busiest_rq); 10250 10251 p = detach_one_task(&env); 10252 if (p) { 10253 schedstat_inc(sd->alb_pushed); 10254 /* Active balancing done, reset the failure counter. */ 10255 sd->nr_balance_failed = 0; 10256 } else { 10257 schedstat_inc(sd->alb_failed); 10258 } 10259 } 10260 rcu_read_unlock(); 10261 out_unlock: 10262 busiest_rq->active_balance = 0; 10263 rq_unlock(busiest_rq, &rf); 10264 10265 if (p) 10266 attach_one_task(target_rq, p); 10267 10268 local_irq_enable(); 10269 10270 return 0; 10271 } 10272 10273 static DEFINE_SPINLOCK(balancing); 10274 10275 /* 10276 * Scale the max load_balance interval with the number of CPUs in the system. 10277 * This trades load-balance latency on larger machines for less cross talk. 10278 */ 10279 void update_max_interval(void) 10280 { 10281 max_load_balance_interval = HZ*num_online_cpus()/10; 10282 } 10283 10284 static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost) 10285 { 10286 if (cost > sd->max_newidle_lb_cost) { 10287 /* 10288 * Track max cost of a domain to make sure to not delay the 10289 * next wakeup on the CPU. 10290 */ 10291 sd->max_newidle_lb_cost = cost; 10292 sd->last_decay_max_lb_cost = jiffies; 10293 } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) { 10294 /* 10295 * Decay the newidle max times by ~1% per second to ensure that 10296 * it is not outdated and the current max cost is actually 10297 * shorter. 10298 */ 10299 sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256; 10300 sd->last_decay_max_lb_cost = jiffies; 10301 10302 return true; 10303 } 10304 10305 return false; 10306 } 10307 10308 /* 10309 * It checks each scheduling domain to see if it is due to be balanced, 10310 * and initiates a balancing operation if so. 10311 * 10312 * Balancing parameters are set up in init_sched_domains. 10313 */ 10314 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) 10315 { 10316 int continue_balancing = 1; 10317 int cpu = rq->cpu; 10318 int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu); 10319 unsigned long interval; 10320 struct sched_domain *sd; 10321 /* Earliest time when we have to do rebalance again */ 10322 unsigned long next_balance = jiffies + 60*HZ; 10323 int update_next_balance = 0; 10324 int need_serialize, need_decay = 0; 10325 u64 max_cost = 0; 10326 10327 rcu_read_lock(); 10328 for_each_domain(cpu, sd) { 10329 /* 10330 * Decay the newidle max times here because this is a regular 10331 * visit to all the domains. 10332 */ 10333 need_decay = update_newidle_cost(sd, 0); 10334 max_cost += sd->max_newidle_lb_cost; 10335 10336 /* 10337 * Stop the load balance at this level. There is another 10338 * CPU in our sched group which is doing load balancing more 10339 * actively. 
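* (continue_balancing was cleared by load_balance() when
* should_we_balance() saw that another CPU of the group, e.g. its first
* idle CPU, should run the balancing pass instead.)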
10340 */ 10341 if (!continue_balancing) { 10342 if (need_decay) 10343 continue; 10344 break; 10345 } 10346 10347 interval = get_sd_balance_interval(sd, busy); 10348 10349 need_serialize = sd->flags & SD_SERIALIZE; 10350 if (need_serialize) { 10351 if (!spin_trylock(&balancing)) 10352 goto out; 10353 } 10354 10355 if (time_after_eq(jiffies, sd->last_balance + interval)) { 10356 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { 10357 /* 10358 * The LBF_DST_PINNED logic could have changed 10359 * env->dst_cpu, so we can't know our idle 10360 * state even if we migrated tasks. Update it. 10361 */ 10362 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; 10363 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu); 10364 } 10365 sd->last_balance = jiffies; 10366 interval = get_sd_balance_interval(sd, busy); 10367 } 10368 if (need_serialize) 10369 spin_unlock(&balancing); 10370 out: 10371 if (time_after(next_balance, sd->last_balance + interval)) { 10372 next_balance = sd->last_balance + interval; 10373 update_next_balance = 1; 10374 } 10375 } 10376 if (need_decay) { 10377 /* 10378 * Ensure the rq-wide value also decays but keep it at a 10379 * reasonable floor to avoid funnies with rq->avg_idle. 10380 */ 10381 rq->max_idle_balance_cost = 10382 max((u64)sysctl_sched_migration_cost, max_cost); 10383 } 10384 rcu_read_unlock(); 10385 10386 /* 10387 * next_balance will be updated only when there is a need. 10388 * For example, when the CPU is attached to the NULL domain it will not 10389 * be updated. 10390 */ 10391 if (likely(update_next_balance)) 10392 rq->next_balance = next_balance; 10393 10394 } 10395 10396 static inline int on_null_domain(struct rq *rq) 10397 { 10398 return unlikely(!rcu_dereference_sched(rq->sd)); 10399 } 10400 10401 #ifdef CONFIG_NO_HZ_COMMON 10402 /* 10403 * idle load balancing details 10404 * - When one of the busy CPUs notices that there may be idle rebalancing 10405 * needed, it will kick the idle load balancer, which then does idle 10406 * load balancing for all the idle CPUs. 10407 * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED is not 10408 * set anywhere yet. 10409 */ 10410 10411 static inline int find_new_ilb(void) 10412 { 10413 int ilb; 10414 const struct cpumask *hk_mask; 10415 10416 hk_mask = housekeeping_cpumask(HK_TYPE_MISC); 10417 10418 for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) { 10419 10420 if (ilb == smp_processor_id()) 10421 continue; 10422 10423 if (idle_cpu(ilb)) 10424 return ilb; 10425 } 10426 10427 return nr_cpu_ids; 10428 } 10429 10430 /* 10431 * Kick a CPU to do the nohz balancing, if it is time for it. We pick any 10432 * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one). 10433 */ 10434 static void kick_ilb(unsigned int flags) 10435 { 10436 int ilb_cpu; 10437 10438 /* 10439 * Increase nohz.next_balance only if a full ilb is triggered, but 10440 * not if we only update stats. 10441 */ 10442 if (flags & NOHZ_BALANCE_KICK) 10443 nohz.next_balance = jiffies+1; 10444 10445 ilb_cpu = find_new_ilb(); 10446 10447 if (ilb_cpu >= nr_cpu_ids) 10448 return; 10449 10450 /* 10451 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets 10452 * the first flag owns it; cleared by nohz_csd_func(). 10453 */ 10454 flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu)); 10455 if (flags & NOHZ_KICK_MASK) 10456 return; 10457 10458 /* 10459 * This way we generate an IPI on the target CPU which 10460 * is idle. And the softirq performing nohz idle load balance 10461 * will be run before returning from the IPI.
10462 */ 10463 smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd); 10464 } 10465 10466 /* 10467 * Current decision point for kicking the idle load balancer in the presence 10468 * of idle CPUs in the system. 10469 */ 10470 static void nohz_balancer_kick(struct rq *rq) 10471 { 10472 unsigned long now = jiffies; 10473 struct sched_domain_shared *sds; 10474 struct sched_domain *sd; 10475 int nr_busy, i, cpu = rq->cpu; 10476 unsigned int flags = 0; 10477 10478 if (unlikely(rq->idle_balance)) 10479 return; 10480 10481 /* 10482 * We may have recently been in ticked or tickless idle mode. At the 10483 * first busy tick after returning from idle, we will update the busy stats. 10484 */ 10485 nohz_balance_exit_idle(rq); 10486 10487 /* 10488 * None are in tickless mode and hence no need for NOHZ idle load 10489 * balancing. 10490 */ 10491 if (likely(!atomic_read(&nohz.nr_cpus))) 10492 return; 10493 10494 if (READ_ONCE(nohz.has_blocked) && 10495 time_after(now, READ_ONCE(nohz.next_blocked))) 10496 flags = NOHZ_STATS_KICK; 10497 10498 if (time_before(now, nohz.next_balance)) 10499 goto out; 10500 10501 if (rq->nr_running >= 2) { 10502 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10503 goto out; 10504 } 10505 10506 rcu_read_lock(); 10507 10508 sd = rcu_dereference(rq->sd); 10509 if (sd) { 10510 /* 10511 * If there's a CFS task and the current CPU has reduced 10512 * capacity, kick the ILB to see if there's a better CPU to run 10513 * on. 10514 */ 10515 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { 10516 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10517 goto unlock; 10518 } 10519 } 10520 10521 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); 10522 if (sd) { 10523 /* 10524 * With ASYM_PACKING, see if there's a more preferred CPU 10525 * currently idle; in which case, kick the ILB to move tasks 10526 * around. 10527 */ 10528 for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { 10529 if (sched_asym_prefer(i, cpu)) { 10530 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10531 goto unlock; 10532 } 10533 } 10534 } 10535 10536 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu)); 10537 if (sd) { 10538 /* 10539 * With ASYM_CPUCAPACITY, see if there's a higher capacity CPU 10540 * to run the misfit task on. 10541 */ 10542 if (check_misfit_status(rq, sd)) { 10543 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10544 goto unlock; 10545 } 10546 10547 /* 10548 * For asymmetric systems, we do not want to nicely balance 10549 * cache use; instead we want to embrace asymmetry and only 10550 * ensure tasks have enough CPU capacity. 10551 * 10552 * Skip the LLC logic because it's not relevant in that case. 10553 */ 10554 goto unlock; 10555 } 10556 10557 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 10558 if (sds) { 10559 /* 10560 * If there is an imbalance between LLC domains (IOW we could 10561 * increase the overall cache use), we need some less-loaded LLC 10562 * domain to pull some load. Likewise, we may need to spread 10563 * load within the current LLC domain (e.g. packed SMT cores but 10564 * other CPUs are idle). We can't really know from here how busy 10565 * the others are - so just get a nohz balance going if it looks 10566 * like this LLC domain has tasks we could move.
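*
* (nr_busy_cpus is kept up to date by set_cpu_sd_state_busy() and
* set_cpu_sd_state_idle() below.)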
10567 */ 10568 nr_busy = atomic_read(&sds->nr_busy_cpus); 10569 if (nr_busy > 1) { 10570 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10571 goto unlock; 10572 } 10573 } 10574 unlock: 10575 rcu_read_unlock(); 10576 out: 10577 if (READ_ONCE(nohz.needs_update)) 10578 flags |= NOHZ_NEXT_KICK; 10579 10580 if (flags) 10581 kick_ilb(flags); 10582 } 10583 10584 static void set_cpu_sd_state_busy(int cpu) 10585 { 10586 struct sched_domain *sd; 10587 10588 rcu_read_lock(); 10589 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 10590 10591 if (!sd || !sd->nohz_idle) 10592 goto unlock; 10593 sd->nohz_idle = 0; 10594 10595 atomic_inc(&sd->shared->nr_busy_cpus); 10596 unlock: 10597 rcu_read_unlock(); 10598 } 10599 10600 void nohz_balance_exit_idle(struct rq *rq) 10601 { 10602 SCHED_WARN_ON(rq != this_rq()); 10603 10604 if (likely(!rq->nohz_tick_stopped)) 10605 return; 10606 10607 rq->nohz_tick_stopped = 0; 10608 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); 10609 atomic_dec(&nohz.nr_cpus); 10610 10611 set_cpu_sd_state_busy(rq->cpu); 10612 } 10613 10614 static void set_cpu_sd_state_idle(int cpu) 10615 { 10616 struct sched_domain *sd; 10617 10618 rcu_read_lock(); 10619 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 10620 10621 if (!sd || sd->nohz_idle) 10622 goto unlock; 10623 sd->nohz_idle = 1; 10624 10625 atomic_dec(&sd->shared->nr_busy_cpus); 10626 unlock: 10627 rcu_read_unlock(); 10628 } 10629 10630 /* 10631 * This routine will record that the CPU is going idle with tick stopped. 10632 * This info will be used in performing idle load balancing in the future. 10633 */ 10634 void nohz_balance_enter_idle(int cpu) 10635 { 10636 struct rq *rq = cpu_rq(cpu); 10637 10638 SCHED_WARN_ON(cpu != smp_processor_id()); 10639 10640 /* If this CPU is going down, then nothing needs to be done: */ 10641 if (!cpu_active(cpu)) 10642 return; 10643 10644 /* Spare idle load balancing on CPUs that don't want to be disturbed: */ 10645 if (!housekeeping_cpu(cpu, HK_TYPE_SCHED)) 10646 return; 10647 10648 /* 10649 * Can be set safely without rq->lock held. 10650 * If a clear happens, it will have evaluated the last additions, because 10651 * rq->lock is held during both the check and the clear. 10652 */ 10653 rq->has_blocked_load = 1; 10654 10655 /* 10656 * The tick is still stopped but load could have been added in the 10657 * meantime. We set the nohz.has_blocked flag to trigger a check of the 10658 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear 10659 * of nohz.has_blocked can only happen after checking the new load. 10660 */ 10661 if (rq->nohz_tick_stopped) 10662 goto out; 10663 10664 /* If we're a completely isolated CPU, we don't play: */ 10665 if (on_null_domain(rq)) 10666 return; 10667 10668 rq->nohz_tick_stopped = 1; 10669 10670 cpumask_set_cpu(cpu, nohz.idle_cpus_mask); 10671 atomic_inc(&nohz.nr_cpus); 10672 10673 /* 10674 * Ensures that if nohz_idle_balance() fails to observe our 10675 * @idle_cpus_mask store, it must observe the @has_blocked 10676 * and @needs_update stores.
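* This pairs with the smp_mb() in _nohz_idle_balance() below.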
10677 */ 10678 smp_mb__after_atomic(); 10679 10680 set_cpu_sd_state_idle(cpu); 10681 10682 WRITE_ONCE(nohz.needs_update, 1); 10683 out: 10684 /* 10685 * Each time a CPU enters idle, we assume that it has blocked load and 10686 * enable the periodic update of the load of idle CPUs. 10687 */ 10688 WRITE_ONCE(nohz.has_blocked, 1); 10689 } 10690 10691 static bool update_nohz_stats(struct rq *rq) 10692 { 10693 unsigned int cpu = rq->cpu; 10694 10695 if (!rq->has_blocked_load) 10696 return false; 10697 10698 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) 10699 return false; 10700 10701 if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick))) 10702 return true; 10703 10704 update_blocked_averages(cpu); 10705 10706 return rq->has_blocked_load; 10707 } 10708 10709 /* 10710 * Internal function that runs load balance for all idle CPUs. The load balance 10711 * can be a simple update of blocked load or a complete load balance with 10712 * task movement, depending on the flags. 10713 */ 10714 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags, 10715 enum cpu_idle_type idle) 10716 { 10717 /* Earliest time when we have to do rebalance again */ 10718 unsigned long now = jiffies; 10719 unsigned long next_balance = now + 60*HZ; 10720 bool has_blocked_load = false; 10721 int update_next_balance = 0; 10722 int this_cpu = this_rq->cpu; 10723 int balance_cpu; 10724 struct rq *rq; 10725 10726 SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); 10727 10728 /* 10729 * We assume there will be no idle load after this update and clear 10730 * the has_blocked flag. If a CPU enters idle in the meantime, it will 10731 * set the has_blocked flag and trigger another update of idle load. 10732 * Because a CPU that becomes idle is added to idle_cpus_mask before 10733 * setting the flag, we are sure to not clear the state and not 10734 * check the load of an idle CPU. 10735 * 10736 * The same applies to idle_cpus_mask vs needs_update. 10737 */ 10738 if (flags & NOHZ_STATS_KICK) 10739 WRITE_ONCE(nohz.has_blocked, 0); 10740 if (flags & NOHZ_NEXT_KICK) 10741 WRITE_ONCE(nohz.needs_update, 0); 10742 10743 /* 10744 * Ensures that if we miss the CPU, we must see the has_blocked 10745 * store from nohz_balance_enter_idle(). 10746 */ 10747 smp_mb(); 10748 10749 /* 10750 * Start with the next CPU after this_cpu so we will end with this_cpu and 10751 * give other idle CPUs a chance to pull load. 10752 */ 10753 for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) { 10754 if (!idle_cpu(balance_cpu)) 10755 continue; 10756 10757 /* 10758 * If this CPU gets work to do, stop the load balancing 10759 * work being done for other CPUs. The next load 10760 * balancing owner will pick it up. 10761 */ 10762 if (need_resched()) { 10763 if (flags & NOHZ_STATS_KICK) 10764 has_blocked_load = true; 10765 if (flags & NOHZ_NEXT_KICK) 10766 WRITE_ONCE(nohz.needs_update, 1); 10767 goto abort; 10768 } 10769 10770 rq = cpu_rq(balance_cpu); 10771 10772 if (flags & NOHZ_STATS_KICK) 10773 has_blocked_load |= update_nohz_stats(rq); 10774 10775 /* 10776 * If the time for the next balance is due, 10777 * do the balance.
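* (rq->next_balance is maintained by rebalance_domains() and
* newidle_balance() on that CPU.)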
10778 */ 10779 if (time_after_eq(jiffies, rq->next_balance)) { 10780 struct rq_flags rf; 10781 10782 rq_lock_irqsave(rq, &rf); 10783 update_rq_clock(rq); 10784 rq_unlock_irqrestore(rq, &rf); 10785 10786 if (flags & NOHZ_BALANCE_KICK) 10787 rebalance_domains(rq, CPU_IDLE); 10788 } 10789 10790 if (time_after(next_balance, rq->next_balance)) { 10791 next_balance = rq->next_balance; 10792 update_next_balance = 1; 10793 } 10794 } 10795 10796 /* 10797 * next_balance will be updated only when there is a need. 10798 * For example, when the CPU is attached to the NULL domain it will not 10799 * be updated. 10800 */ 10801 if (likely(update_next_balance)) 10802 nohz.next_balance = next_balance; 10803 10804 if (flags & NOHZ_STATS_KICK) 10805 WRITE_ONCE(nohz.next_blocked, 10806 now + msecs_to_jiffies(LOAD_AVG_PERIOD)); 10807 10808 abort: 10809 /* There is still blocked load, enable periodic update */ 10810 if (has_blocked_load) 10811 WRITE_ONCE(nohz.has_blocked, 1); 10812 } 10813 10814 /* 10815 * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the 10816 * rebalancing for all the CPUs whose scheduler ticks are stopped. 10817 */ 10818 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) 10819 { 10820 unsigned int flags = this_rq->nohz_idle_balance; 10821 10822 if (!flags) 10823 return false; 10824 10825 this_rq->nohz_idle_balance = 0; 10826 10827 if (idle != CPU_IDLE) 10828 return false; 10829 10830 _nohz_idle_balance(this_rq, flags, idle); 10831 10832 return true; 10833 } 10834 10835 /* 10836 * Check if we need to run the ILB for updating blocked load before entering 10837 * idle state. 10838 */ 10839 void nohz_run_idle_balance(int cpu) 10840 { 10841 unsigned int flags; 10842 10843 flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu)); 10844 10845 /* 10846 * Update the blocked load only if no SCHED_SOFTIRQ is about to happen 10847 * (i.e. one with NOHZ_STATS_KICK set) that would do the same update. 10848 */ 10849 if ((flags == NOHZ_NEWILB_KICK) && !need_resched()) 10850 _nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK, CPU_IDLE); 10851 } 10852 10853 static void nohz_newidle_balance(struct rq *this_rq) 10854 { 10855 int this_cpu = this_rq->cpu; 10856 10857 /* 10858 * This CPU doesn't want to be disturbed by scheduler 10859 * housekeeping. 10860 */ 10861 if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED)) 10862 return; 10863 10864 /* Will wake up very soon. No time to do anything else */ 10865 if (this_rq->avg_idle < sysctl_sched_migration_cost) 10866 return; 10867 10868 /* No need to update the blocked load of idle CPUs */ 10869 if (!READ_ONCE(nohz.has_blocked) || 10870 time_before(jiffies, READ_ONCE(nohz.next_blocked))) 10871 return; 10872 10873 /* 10874 * Set the need to trigger ILB in order to update blocked load 10875 * before entering idle state. 10876 */ 10877 atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu)); 10878 } 10879 10880 #else /* !CONFIG_NO_HZ_COMMON */ 10881 static inline void nohz_balancer_kick(struct rq *rq) { } 10882 10883 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) 10884 { 10885 return false; 10886 } 10887 10888 static inline void nohz_newidle_balance(struct rq *this_rq) { } 10889 #endif /* CONFIG_NO_HZ_COMMON */ 10890 10891 /* 10892 * newidle_balance is called by schedule() if this_cpu is about to become 10893 * idle. Attempts to pull tasks from other CPUs.
10894 * 10895 * Returns: 10896 * < 0 - we released the lock and there are !fair tasks present 10897 * 0 - failed, no new tasks 10898 * > 0 - success, new (fair) tasks present 10899 */ 10900 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) 10901 { 10902 unsigned long next_balance = jiffies + HZ; 10903 int this_cpu = this_rq->cpu; 10904 u64 t0, t1, curr_cost = 0; 10905 struct sched_domain *sd; 10906 int pulled_task = 0; 10907 10908 update_misfit_status(NULL, this_rq); 10909 10910 /* 10911 * There is a task waiting to run. No need to search for one. 10912 * Return 0; the task will be enqueued when switching to idle. 10913 */ 10914 if (this_rq->ttwu_pending) 10915 return 0; 10916 10917 /* 10918 * We must set idle_stamp _before_ calling idle_balance(), such that we 10919 * measure the duration of idle_balance() as idle time. 10920 */ 10921 this_rq->idle_stamp = rq_clock(this_rq); 10922 10923 /* 10924 * Do not pull tasks towards !active CPUs... 10925 */ 10926 if (!cpu_active(this_cpu)) 10927 return 0; 10928 10929 /* 10930 * This is OK, because current is on_cpu, which avoids it being picked 10931 * for load-balance and preemption/IRQs are still disabled avoiding 10932 * further scheduler activity on it and we're being very careful to 10933 * re-start the picking loop. 10934 */ 10935 rq_unpin_lock(this_rq, rf); 10936 10937 rcu_read_lock(); 10938 sd = rcu_dereference_check_sched_domain(this_rq->sd); 10939 10940 if (!READ_ONCE(this_rq->rd->overload) || 10941 (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) { 10942 10943 if (sd) 10944 update_next_balance(sd, &next_balance); 10945 rcu_read_unlock(); 10946 10947 goto out; 10948 } 10949 rcu_read_unlock(); 10950 10951 raw_spin_rq_unlock(this_rq); 10952 10953 t0 = sched_clock_cpu(this_cpu); 10954 update_blocked_averages(this_cpu); 10955 10956 rcu_read_lock(); 10957 for_each_domain(this_cpu, sd) { 10958 int continue_balancing = 1; 10959 u64 domain_cost; 10960 10961 update_next_balance(sd, &next_balance); 10962 10963 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) 10964 break; 10965 10966 if (sd->flags & SD_BALANCE_NEWIDLE) { 10967 10968 pulled_task = load_balance(this_cpu, this_rq, 10969 sd, CPU_NEWLY_IDLE, 10970 &continue_balancing); 10971 10972 t1 = sched_clock_cpu(this_cpu); 10973 domain_cost = t1 - t0; 10974 update_newidle_cost(sd, domain_cost); 10975 10976 curr_cost += domain_cost; 10977 t0 = t1; 10978 } 10979 10980 /* 10981 * Stop searching for tasks to pull if there are 10982 * now runnable tasks on this rq. 10983 */ 10984 if (pulled_task || this_rq->nr_running > 0 || 10985 this_rq->ttwu_pending) 10986 break; 10987 } 10988 rcu_read_unlock(); 10989 10990 raw_spin_rq_lock(this_rq); 10991 10992 if (curr_cost > this_rq->max_idle_balance_cost) 10993 this_rq->max_idle_balance_cost = curr_cost; 10994 10995 /* 10996 * While browsing the domains, we released the rq lock, a task could 10997 * have been enqueued in the meantime. Since we're not going idle, 10998 * pretend we pulled a task. 10999 */ 11000 if (this_rq->cfs.h_nr_running && !pulled_task) 11001 pulled_task = 1; 11002 11003 /* Is there a task of a high priority class? 
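* (rq->nr_running counts tasks of all scheduling classes while
* cfs.h_nr_running counts only the fair ones, so a difference means a
* !fair task, e.g. RT or deadline, arrived while the lock was dropped.)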
*/ 11004 if (this_rq->nr_running != this_rq->cfs.h_nr_running) 11005 pulled_task = -1; 11006 11007 out: 11008 /* Move the next balance forward */ 11009 if (time_after(this_rq->next_balance, next_balance)) 11010 this_rq->next_balance = next_balance; 11011 11012 if (pulled_task) 11013 this_rq->idle_stamp = 0; 11014 else 11015 nohz_newidle_balance(this_rq); 11016 11017 rq_repin_lock(this_rq, rf); 11018 11019 return pulled_task; 11020 } 11021 11022 /* 11023 * run_rebalance_domains is triggered when needed from the scheduler tick. 11024 * Also triggered for nohz idle balancing (with nohz_balancing_kick set). 11025 */ 11026 static __latent_entropy void run_rebalance_domains(struct softirq_action *h) 11027 { 11028 struct rq *this_rq = this_rq(); 11029 enum cpu_idle_type idle = this_rq->idle_balance ? 11030 CPU_IDLE : CPU_NOT_IDLE; 11031 11032 /* 11033 * If this CPU has a pending nohz_balance_kick, then do the 11034 * balancing on behalf of the other idle CPUs whose ticks are 11035 * stopped. Do nohz_idle_balance *before* rebalance_domains to 11036 * give the idle CPUs a chance to load balance. Else we may 11037 * load balance only within the local sched_domain hierarchy 11038 * and abort nohz_idle_balance altogether if we pull some load. 11039 */ 11040 if (nohz_idle_balance(this_rq, idle)) 11041 return; 11042 11043 /* normal load balance */ 11044 update_blocked_averages(this_rq->cpu); 11045 rebalance_domains(this_rq, idle); 11046 } 11047 11048 /* 11049 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 11050 */ 11051 void trigger_load_balance(struct rq *rq) 11052 { 11053 /* 11054 * Don't need to rebalance while attached to NULL domain or 11055 * runqueue CPU is not active 11056 */ 11057 if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq)))) 11058 return; 11059 11060 if (time_after_eq(jiffies, rq->next_balance)) 11061 raise_softirq(SCHED_SOFTIRQ); 11062 11063 nohz_balancer_kick(rq); 11064 } 11065 11066 static void rq_online_fair(struct rq *rq) 11067 { 11068 update_sysctl(); 11069 11070 update_runtime_enabled(rq); 11071 } 11072 11073 static void rq_offline_fair(struct rq *rq) 11074 { 11075 update_sysctl(); 11076 11077 /* Ensure any throttled groups are reachable by pick_next_task */ 11078 unthrottle_offline_cfs_rqs(rq); 11079 } 11080 11081 #endif /* CONFIG_SMP */ 11082 11083 #ifdef CONFIG_SCHED_CORE 11084 static inline bool 11085 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) 11086 { 11087 u64 slice = sched_slice(cfs_rq_of(se), se); 11088 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; 11089 11090 return (rtime * min_nr_tasks > slice); 11091 } 11092 11093 #define MIN_NR_TASKS_DURING_FORCEIDLE 2 11094 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) 11095 { 11096 if (!sched_core_enabled(rq)) 11097 return; 11098 11099 /* 11100 * If runqueue has only one task which used up its slice and 11101 * if the sibling is forced idle, then trigger schedule to 11102 * give forced idle task a chance. 11103 * 11104 * sched_slice() considers only this active rq and it gets the 11105 * whole slice. But during force idle, we have siblings acting 11106 * like a single runqueue and hence we need to consider runnable 11107 * tasks on this CPU and the forced idle CPU. Ideally, we should 11108 * go through the forced idle rq, but that would be a perf hit. 11109 * We can assume that the forced idle CPU has at least 11110 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check 11111 * if we need to give up the CPU. 
11112 */ 11113 if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 && 11114 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) 11115 resched_curr(rq); 11116 } 11117 11118 /* 11119 * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed. 11120 */ 11121 static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle) 11122 { 11123 for_each_sched_entity(se) { 11124 struct cfs_rq *cfs_rq = cfs_rq_of(se); 11125 11126 if (forceidle) { 11127 if (cfs_rq->forceidle_seq == fi_seq) 11128 break; 11129 cfs_rq->forceidle_seq = fi_seq; 11130 } 11131 11132 cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime; 11133 } 11134 } 11135 11136 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) 11137 { 11138 struct sched_entity *se = &p->se; 11139 11140 if (p->sched_class != &fair_sched_class) 11141 return; 11142 11143 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); 11144 } 11145 11146 bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi) 11147 { 11148 struct rq *rq = task_rq(a); 11149 struct sched_entity *sea = &a->se; 11150 struct sched_entity *seb = &b->se; 11151 struct cfs_rq *cfs_rqa; 11152 struct cfs_rq *cfs_rqb; 11153 s64 delta; 11154 11155 SCHED_WARN_ON(task_rq(b)->core != rq->core); 11156 11157 #ifdef CONFIG_FAIR_GROUP_SCHED 11158 /* 11159 * Find an se in the hierarchy for tasks a and b, such that the se's 11160 * are immediate siblings. 11161 */ 11162 while (sea->cfs_rq->tg != seb->cfs_rq->tg) { 11163 int sea_depth = sea->depth; 11164 int seb_depth = seb->depth; 11165 11166 if (sea_depth >= seb_depth) 11167 sea = parent_entity(sea); 11168 if (sea_depth <= seb_depth) 11169 seb = parent_entity(seb); 11170 } 11171 11172 se_fi_update(sea, rq->core->core_forceidle_seq, in_fi); 11173 se_fi_update(seb, rq->core->core_forceidle_seq, in_fi); 11174 11175 cfs_rqa = sea->cfs_rq; 11176 cfs_rqb = seb->cfs_rq; 11177 #else 11178 cfs_rqa = &task_rq(a)->cfs; 11179 cfs_rqb = &task_rq(b)->cfs; 11180 #endif 11181 11182 /* 11183 * Find delta after normalizing se's vruntime with its cfs_rq's 11184 * min_vruntime_fi, which would have been updated in prior calls 11185 * to se_fi_update(). 11186 */ 11187 delta = (s64)(sea->vruntime - seb->vruntime) + 11188 (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi); 11189 11190 return delta > 0; 11191 } 11192 #else 11193 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {} 11194 #endif 11195 11196 /* 11197 * scheduler tick hitting a task of our scheduling class. 11198 * 11199 * NOTE: This function can be called remotely by the tick offload that 11200 * goes along full dynticks. Therefore no local assumption can be made 11201 * and everything must be accessed through the @rq and @curr passed in 11202 * parameters. 
11203 */ 11204 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) 11205 { 11206 struct cfs_rq *cfs_rq; 11207 struct sched_entity *se = &curr->se; 11208 11209 for_each_sched_entity(se) { 11210 cfs_rq = cfs_rq_of(se); 11211 entity_tick(cfs_rq, se, queued); 11212 } 11213 11214 if (static_branch_unlikely(&sched_numa_balancing)) 11215 task_tick_numa(rq, curr); 11216 11217 update_misfit_status(curr, rq); 11218 update_overutilized_status(task_rq(curr)); 11219 11220 task_tick_core(rq, curr); 11221 } 11222 11223 /* 11224 * called on fork with the child task as argument from the parent's context 11225 * - child not yet on the tasklist 11226 * - preemption disabled 11227 */ 11228 static void task_fork_fair(struct task_struct *p) 11229 { 11230 struct cfs_rq *cfs_rq; 11231 struct sched_entity *se = &p->se, *curr; 11232 struct rq *rq = this_rq(); 11233 struct rq_flags rf; 11234 11235 rq_lock(rq, &rf); 11236 update_rq_clock(rq); 11237 11238 cfs_rq = task_cfs_rq(current); 11239 curr = cfs_rq->curr; 11240 if (curr) { 11241 update_curr(cfs_rq); 11242 se->vruntime = curr->vruntime; 11243 } 11244 place_entity(cfs_rq, se, 1); 11245 11246 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { 11247 /* 11248 * Upon rescheduling, sched_class::put_prev_task() will place 11249 * 'current' within the tree based on its new key value. 11250 */ 11251 swap(curr->vruntime, se->vruntime); 11252 resched_curr(rq); 11253 } 11254 11255 se->vruntime -= cfs_rq->min_vruntime; 11256 rq_unlock(rq, &rf); 11257 } 11258 11259 /* 11260 * Priority of the task has changed. Check to see if we preempt 11261 * the current task. 11262 */ 11263 static void 11264 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) 11265 { 11266 if (!task_on_rq_queued(p)) 11267 return; 11268 11269 if (rq->cfs.nr_running == 1) 11270 return; 11271 11272 /* 11273 * Reschedule if we are currently running on this runqueue and 11274 * our priority decreased, or if we are not currently running on 11275 * this runqueue and our priority is higher than the current's 11276 */ 11277 if (task_current(rq, p)) { 11278 if (p->prio > oldprio) 11279 resched_curr(rq); 11280 } else 11281 check_preempt_curr(rq, p, 0); 11282 } 11283 11284 static inline bool vruntime_normalized(struct task_struct *p) 11285 { 11286 struct sched_entity *se = &p->se; 11287 11288 /* 11289 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases, 11290 * the dequeue_entity(.flags=0) will already have normalized the 11291 * vruntime. 11292 */ 11293 if (p->on_rq) 11294 return true; 11295 11296 /* 11297 * When !on_rq, vruntime of the task has usually NOT been normalized. 11298 * But there are some cases where it has already been normalized: 11299 * 11300 * - A forked child which is waiting for being woken up by 11301 * wake_up_new_task(). 11302 * - A task which has been woken up by try_to_wake_up() and 11303 * waiting for actually being woken up by sched_ttwu_pending(). 
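*
* Both cases are keyed off below: a zero se->sum_exec_runtime identifies
* the fresh fork, and TASK_WAKING plus p->sched_remote_wakeup identifies
* the remote wakeup.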
11304 */ 11305 if (!se->sum_exec_runtime || 11306 (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup)) 11307 return true; 11308 11309 return false; 11310 } 11311 11312 #ifdef CONFIG_FAIR_GROUP_SCHED 11313 /* 11314 * Propagate the changes to the sched_entity across the tg tree to make them 11315 * visible to the root. 11316 */ 11317 static void propagate_entity_cfs_rq(struct sched_entity *se) 11318 { 11319 struct cfs_rq *cfs_rq; 11320 11321 list_add_leaf_cfs_rq(cfs_rq_of(se)); 11322 11323 /* Start to propagate at parent */ 11324 se = se->parent; 11325 11326 for_each_sched_entity(se) { 11327 cfs_rq = cfs_rq_of(se); 11328 11329 if (!cfs_rq_throttled(cfs_rq)) { 11330 update_load_avg(cfs_rq, se, UPDATE_TG); 11331 list_add_leaf_cfs_rq(cfs_rq); 11332 continue; 11333 } 11334 11335 if (list_add_leaf_cfs_rq(cfs_rq)) 11336 break; 11337 } 11338 } 11339 #else 11340 static void propagate_entity_cfs_rq(struct sched_entity *se) { } 11341 #endif 11342 11343 static void detach_entity_cfs_rq(struct sched_entity *se) 11344 { 11345 struct cfs_rq *cfs_rq = cfs_rq_of(se); 11346 11347 /* Catch up with the cfs_rq and remove our load when we leave */ 11348 update_load_avg(cfs_rq, se, 0); 11349 detach_entity_load_avg(cfs_rq, se); 11350 update_tg_load_avg(cfs_rq); 11351 propagate_entity_cfs_rq(se); 11352 } 11353 11354 static void attach_entity_cfs_rq(struct sched_entity *se) 11355 { 11356 struct cfs_rq *cfs_rq = cfs_rq_of(se); 11357 11358 #ifdef CONFIG_FAIR_GROUP_SCHED 11359 /* 11360 * Since the real depth could have been changed (only the FAIR 11361 * class maintains the depth value), reset it properly. 11362 */ 11363 se->depth = se->parent ? se->parent->depth + 1 : 0; 11364 #endif 11365 11366 /* Synchronize entity with its cfs_rq */ 11367 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); 11368 attach_entity_load_avg(cfs_rq, se); 11369 update_tg_load_avg(cfs_rq); 11370 propagate_entity_cfs_rq(se); 11371 } 11372 11373 static void detach_task_cfs_rq(struct task_struct *p) 11374 { 11375 struct sched_entity *se = &p->se; 11376 struct cfs_rq *cfs_rq = cfs_rq_of(se); 11377 11378 if (!vruntime_normalized(p)) { 11379 /* 11380 * Fix up our vruntime so that the current sleep doesn't 11381 * cause an 'unlimited' sleep bonus. 11382 */ 11383 place_entity(cfs_rq, se, 0); 11384 se->vruntime -= cfs_rq->min_vruntime; 11385 } 11386 11387 detach_entity_cfs_rq(se); 11388 } 11389 11390 static void attach_task_cfs_rq(struct task_struct *p) 11391 { 11392 struct sched_entity *se = &p->se; 11393 struct cfs_rq *cfs_rq = cfs_rq_of(se); 11394 11395 attach_entity_cfs_rq(se); 11396 11397 if (!vruntime_normalized(p)) 11398 se->vruntime += cfs_rq->min_vruntime; 11399 } 11400 11401 static void switched_from_fair(struct rq *rq, struct task_struct *p) 11402 { 11403 detach_task_cfs_rq(p); 11404 } 11405 11406 static void switched_to_fair(struct rq *rq, struct task_struct *p) 11407 { 11408 attach_task_cfs_rq(p); 11409 11410 if (task_on_rq_queued(p)) { 11411 /* 11412 * We were most likely switched from sched_rt, so 11413 * kick off the schedule if running, otherwise just see 11414 * if we can still preempt the current task. 11415 */ 11416 if (task_current(rq, p)) 11417 resched_curr(rq); 11418 else 11419 check_preempt_curr(rq, p, 0); 11420 } 11421 } 11422 11423 /* Account for a task changing its policy or group. 11424 * 11425 * This routine is mostly called to set cfs_rq->curr field when a task 11426 * migrates between groups/classes.
11427 */ 11428 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) 11429 { 11430 struct sched_entity *se = &p->se; 11431 11432 #ifdef CONFIG_SMP 11433 if (task_on_rq_queued(p)) { 11434 /* 11435 * Move the next running task to the front of the list, so our 11436 * cfs_tasks list becomes an MRU list. 11437 */ 11438 list_move(&se->group_node, &rq->cfs_tasks); 11439 } 11440 #endif 11441 11442 for_each_sched_entity(se) { 11443 struct cfs_rq *cfs_rq = cfs_rq_of(se); 11444 11445 set_next_entity(cfs_rq, se); 11446 /* ensure bandwidth has been allocated on our new cfs_rq */ 11447 account_cfs_rq_runtime(cfs_rq, 0); 11448 } 11449 } 11450 11451 void init_cfs_rq(struct cfs_rq *cfs_rq) 11452 { 11453 cfs_rq->tasks_timeline = RB_ROOT_CACHED; 11454 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); 11455 #ifndef CONFIG_64BIT 11456 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; 11457 #endif 11458 #ifdef CONFIG_SMP 11459 raw_spin_lock_init(&cfs_rq->removed.lock); 11460 #endif 11461 } 11462 11463 #ifdef CONFIG_FAIR_GROUP_SCHED 11464 static void task_set_group_fair(struct task_struct *p) 11465 { 11466 struct sched_entity *se = &p->se; 11467 11468 set_task_rq(p, task_cpu(p)); 11469 se->depth = se->parent ? se->parent->depth + 1 : 0; 11470 } 11471 11472 static void task_move_group_fair(struct task_struct *p) 11473 { 11474 detach_task_cfs_rq(p); 11475 set_task_rq(p, task_cpu(p)); 11476 11477 #ifdef CONFIG_SMP 11478 /* Tell load tracking that se's cfs_rq has changed -- the task migrated */ 11479 p->se.avg.last_update_time = 0; 11480 #endif 11481 attach_task_cfs_rq(p); 11482 } 11483 11484 static void task_change_group_fair(struct task_struct *p, int type) 11485 { 11486 switch (type) { 11487 case TASK_SET_GROUP: 11488 task_set_group_fair(p); 11489 break; 11490 11491 case TASK_MOVE_GROUP: 11492 task_move_group_fair(p); 11493 break; 11494 } 11495 } 11496 11497 void free_fair_sched_group(struct task_group *tg) 11498 { 11499 int i; 11500 11501 for_each_possible_cpu(i) { 11502 if (tg->cfs_rq) 11503 kfree(tg->cfs_rq[i]); 11504 if (tg->se) 11505 kfree(tg->se[i]); 11506 } 11507 11508 kfree(tg->cfs_rq); 11509 kfree(tg->se); 11510 } 11511 11512 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 11513 { 11514 struct sched_entity *se; 11515 struct cfs_rq *cfs_rq; 11516 int i; 11517 11518 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); 11519 if (!tg->cfs_rq) 11520 goto err; 11521 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); 11522 if (!tg->se) 11523 goto err; 11524 11525 tg->shares = NICE_0_LOAD; 11526 11527 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); 11528 11529 for_each_possible_cpu(i) { 11530 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), 11531 GFP_KERNEL, cpu_to_node(i)); 11532 if (!cfs_rq) 11533 goto err; 11534 11535 se = kzalloc_node(sizeof(struct sched_entity_stats), 11536 GFP_KERNEL, cpu_to_node(i)); 11537 if (!se) 11538 goto err_free_rq; 11539 11540 init_cfs_rq(cfs_rq); 11541 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); 11542 init_entity_runnable_average(se); 11543 } 11544 11545 return 1; 11546 11547 err_free_rq: 11548 kfree(cfs_rq); 11549 err: 11550 return 0; 11551 } 11552 11553 void online_fair_sched_group(struct task_group *tg) 11554 { 11555 struct sched_entity *se; 11556 struct rq_flags rf; 11557 struct rq *rq; 11558 int i; 11559 11560 for_each_possible_cpu(i) { 11561 rq = cpu_rq(i); 11562 se = tg->se[i]; 11563 rq_lock_irq(rq, &rf); 11564 update_rq_clock(rq); 11565 attach_entity_cfs_rq(se); 11566 sync_throttle(tg, i); 11567 rq_unlock_irq(rq, &rf); 11568 } 11569 }
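/*
 * Illustrative sketch (not kernel code to copy verbatim): the group
 * scheduling helpers above and below are driven from the core
 * task_group lifecycle, roughly as:
 *
 *	tg = sched_create_group(parent);	// calls alloc_fair_sched_group()
 *	sched_online_group(tg, parent);		// calls online_fair_sched_group()
 *	...
 *	sched_destroy_group(tg);		// unregister_fair_sched_group(),
 *						// later free_fair_sched_group()
 */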

void unregister_fair_sched_group(struct task_group *tg)
{
        unsigned long flags;
        struct rq *rq;
        int cpu;

        destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

        for_each_possible_cpu(cpu) {
                if (tg->se[cpu])
                        remove_entity_load_avg(tg->se[cpu]);

                /*
                 * Only empty task groups can be destroyed; so we can
                 * speculatively check on_list without danger of it being
                 * re-added.
                 */
                if (!tg->cfs_rq[cpu]->on_list)
                        continue;

                rq = cpu_rq(cpu);

                raw_spin_rq_lock_irqsave(rq, flags);
                list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
                raw_spin_rq_unlock_irqrestore(rq, flags);
        }
}

void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                       struct sched_entity *se, int cpu,
                       struct sched_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        cfs_rq->tg = tg;
        cfs_rq->rq = rq;
        init_cfs_rq_runtime(cfs_rq);

        tg->cfs_rq[cpu] = cfs_rq;
        tg->se[cpu] = se;

        /* se could be NULL for root_task_group */
        if (!se)
                return;

        if (!parent) {
                se->cfs_rq = &rq->cfs;
                se->depth = 0;
        } else {
                se->cfs_rq = parent->my_q;
                se->depth = parent->depth + 1;
        }

        se->my_q = cfs_rq;
        /* guarantee group entities always have weight */
        update_load_set(&se->load, NICE_0_LOAD);
        se->parent = parent;
}

static DEFINE_MUTEX(shares_mutex);

static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
        int i;

        lockdep_assert_held(&shares_mutex);

        /*
         * We can't change the weight of the root cgroup.
         */
        if (!tg->se[0])
                return -EINVAL;

        shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

        if (tg->shares == shares)
                return 0;

        tg->shares = shares;
        for_each_possible_cpu(i) {
                struct rq *rq = cpu_rq(i);
                struct sched_entity *se = tg->se[i];
                struct rq_flags rf;

                /* Propagate contribution to hierarchy */
                rq_lock_irqsave(rq, &rf);
                update_rq_clock(rq);
                for_each_sched_entity(se) {
                        update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
                        update_cfs_group(se);
                }
                rq_unlock_irqrestore(rq, &rf);
        }

        return 0;
}

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
        int ret;

        mutex_lock(&shares_mutex);
        if (tg_is_idle(tg))
                ret = -EINVAL;
        else
                ret = __sched_group_set_shares(tg, shares);
        mutex_unlock(&shares_mutex);

        return ret;
}

int sched_group_set_idle(struct task_group *tg, long idle)
{
        int i;

        if (tg == &root_task_group)
                return -EINVAL;

        if (idle < 0 || idle > 1)
                return -EINVAL;

        mutex_lock(&shares_mutex);

        if (tg->idle == idle) {
                mutex_unlock(&shares_mutex);
                return 0;
        }

        tg->idle = idle;

        for_each_possible_cpu(i) {
                struct rq *rq = cpu_rq(i);
                struct sched_entity *se = tg->se[i];
                struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
                bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
                long idle_task_delta;
                struct rq_flags rf;

                rq_lock_irqsave(rq, &rf);

                grp_cfs_rq->idle = idle;
                if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
                        goto next_cpu;

                if (se->on_rq) {
                        parent_cfs_rq = cfs_rq_of(se);
                        if (cfs_rq_is_idle(grp_cfs_rq))
                                parent_cfs_rq->idle_nr_running++;
                        else
                                parent_cfs_rq->idle_nr_running--;
                }

                idle_task_delta = grp_cfs_rq->h_nr_running -
                                  grp_cfs_rq->idle_h_nr_running;
                if (!cfs_rq_is_idle(grp_cfs_rq))
                        idle_task_delta *= -1;

                for_each_sched_entity(se) {
                        struct cfs_rq *cfs_rq = cfs_rq_of(se);

                        if (!se->on_rq)
                                break;

                        cfs_rq->idle_h_nr_running += idle_task_delta;

                        /* Already accounted at parent level and above. */
                        if (cfs_rq_is_idle(cfs_rq))
                                break;
                }

next_cpu:
                rq_unlock_irqrestore(rq, &rf);
        }

        /* Idle groups have minimum weight. */
        if (tg_is_idle(tg))
                __sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
        else
                __sched_group_set_shares(tg, NICE_0_LOAD);

        mutex_unlock(&shares_mutex);
        return 0;
}

#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}

void online_fair_sched_group(struct task_group *tg) { }

void unregister_fair_sched_group(struct task_group *tg) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */

static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
        struct sched_entity *se = &task->se;
        unsigned int rr_interval = 0;

        /*
         * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
         * idle runqueue:
         */
        if (rq->cfs.load.weight)
                rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));

        return rr_interval;
}

/*
 * All the scheduling class methods:
 */
DEFINE_SCHED_CLASS(fair) = {

        .enqueue_task           = enqueue_task_fair,
        .dequeue_task           = dequeue_task_fair,
        .yield_task             = yield_task_fair,
        .yield_to_task          = yield_to_task_fair,

        .check_preempt_curr     = check_preempt_wakeup,

        .pick_next_task         = __pick_next_task_fair,
        .put_prev_task          = put_prev_task_fair,
        .set_next_task          = set_next_task_fair,

#ifdef CONFIG_SMP
        .balance                = balance_fair,
        .pick_task              = pick_task_fair,
        .select_task_rq         = select_task_rq_fair,
        .migrate_task_rq        = migrate_task_rq_fair,

        .rq_online              = rq_online_fair,
        .rq_offline             = rq_offline_fair,

        .task_dead              = task_dead_fair,
        .set_cpus_allowed       = set_cpus_allowed_common,
#endif

        .task_tick              = task_tick_fair,
        .task_fork              = task_fork_fair,

        .prio_changed           = prio_changed_fair,
        .switched_from          = switched_from_fair,
        .switched_to            = switched_to_fair,

        .get_rr_interval        = get_rr_interval_fair,

        .update_curr            = update_curr_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
        .task_change_group      = task_change_group_fair,
#endif

#ifdef CONFIG_UCLAMP_TASK
        .uclamp_enabled         = 1,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
        struct cfs_rq *cfs_rq, *pos;

        rcu_read_lock();
        for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
                print_cfs_rq(m, cpu, cfs_rq);
        rcu_read_unlock();
}
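
/*
 * Illustrative dispatch sketch (not part of the build): core code never
 * calls the fair-class methods directly; it indirects through the
 * task's class pointer, roughly:
 *
 *	void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 *	{
 *		...
 *		p->sched_class->enqueue_task(rq, p, flags);
 *	}
 *
 * so for SCHED_NORMAL/SCHED_BATCH tasks this resolves to
 * enqueue_task_fair() via the fair_sched_class table defined above.
 */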

#ifdef CONFIG_NUMA_BALANCING
void show_numa_stats(struct task_struct *p, struct seq_file *m)
{
        int node;
        unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
        struct numa_group *ng;

        rcu_read_lock();
        ng = rcu_dereference(p->numa_group);
        for_each_online_node(node) {
                if (p->numa_faults) {
                        tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
                        tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
                }
                if (ng) {
                        gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
                        gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
                }
                print_numa_stats(m, node, tsf, tpf, gsf, gpf);
        }
        rcu_read_unlock();
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
        open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
        nohz.next_balance = jiffies;
        nohz.next_blocked = jiffies;
        zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
#endif
#endif /* SMP */
}

/*
 * Helper functions to facilitate extracting info from tracepoints.
 */

const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq)
{
#ifdef CONFIG_SMP
        return cfs_rq ? &cfs_rq->avg : NULL;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg);

char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len)
{
        if (!cfs_rq) {
                if (str)
                        strlcpy(str, "(null)", len);
                else
                        return NULL;

                return str;
        }

        cfs_rq_tg_path(cfs_rq, str, len);
        return str;
}
EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path);

int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq)
{
        return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq ? &rq->avg_rt : NULL;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt);

const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq ? &rq->avg_dl : NULL;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl);

const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
{
#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ)
        return rq ? &rq->avg_irq : NULL;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq);

int sched_trace_rq_cpu(struct rq *rq)
{
        return rq ? cpu_of(rq) : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_rq_cpu);

int sched_trace_rq_cpu_capacity(struct rq *rq)
{
        return rq ?
#ifdef CONFIG_SMP
                rq->cpu_capacity
#else
                SCHED_CAPACITY_SCALE
#endif
                : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_rq_cpu_capacity);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
{
#ifdef CONFIG_SMP
        return rd ? rd->span : NULL;
#else
        return NULL;
#endif
}
EXPORT_SYMBOL_GPL(sched_trace_rd_span);

int sched_trace_rq_nr_running(struct rq *rq)
{
        return rq ? rq->nr_running : -1;
}
EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);
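
/*
 * Illustrative probe sketch (not part of the build; assumes the bare
 * pelt_cfs_tp tracepoint declared in <trace/events/sched.h>): an
 * out-of-tree module can combine that tracepoint with the helpers
 * above to inspect otherwise-opaque scheduler state:
 *
 *	static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);
 *		int cpu = sched_trace_cfs_rq_cpu(cfs_rq);
 *
 *		if (avg)
 *			trace_printk("cpu=%d util_avg=%lu\n", cpu, avg->util_avg);
 *	}
 *
 *	// module init: register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
 *	// module exit: unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
 */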