// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
#include <linux/hugetlb_inline.h>
#include <linux/jiffies.h>
#include <linux/mm_api.h>
#include <linux/highmem.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/isolation.h>
#include <linux/sched/nohz.h>

#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/memory-tiers.h>
#include <linux/mempolicy.h>
#include <linux/mutex_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>

#include <asm/switch_to.h>

#include <linux/sched/cond_resched.h>

#include "sched.h"
#include "stats.h"
#include "autogroup.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
 * Applies only when SCHED_IDLE tasks compete with normal tasks.
99 * 100 * (default: 0.75 msec) 101 */ 102 unsigned int sysctl_sched_idle_min_granularity = 750000ULL; 103 104 /* 105 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity 106 */ 107 static unsigned int sched_nr_latency = 8; 108 109 /* 110 * After fork, child runs first. If set to 0 (default) then 111 * parent will (try to) run first. 112 */ 113 unsigned int sysctl_sched_child_runs_first __read_mostly; 114 115 /* 116 * SCHED_OTHER wake-up granularity. 117 * 118 * This option delays the preemption effects of decoupled workloads 119 * and reduces their over-scheduling. Synchronous workloads will still 120 * have immediate wakeup/sleep latencies. 121 * 122 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) 123 */ 124 unsigned int sysctl_sched_wakeup_granularity = 1000000UL; 125 static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; 126 127 const_debug unsigned int sysctl_sched_migration_cost = 500000UL; 128 129 int sched_thermal_decay_shift; 130 static int __init setup_sched_thermal_decay_shift(char *str) 131 { 132 int _shift = 0; 133 134 if (kstrtoint(str, 0, &_shift)) 135 pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n"); 136 137 sched_thermal_decay_shift = clamp(_shift, 0, 10); 138 return 1; 139 } 140 __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift); 141 142 #ifdef CONFIG_SMP 143 /* 144 * For asym packing, by default the lower numbered CPU has higher priority. 145 */ 146 int __weak arch_asym_cpu_priority(int cpu) 147 { 148 return -cpu; 149 } 150 151 /* 152 * The margin used when comparing utilization with CPU capacity. 153 * 154 * (default: ~20%) 155 */ 156 #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024) 157 158 /* 159 * The margin used when comparing CPU capacities. 160 * is 'cap1' noticeably greater than 'cap2' 161 * 162 * (default: ~5%) 163 */ 164 #define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078) 165 #endif 166 167 #ifdef CONFIG_CFS_BANDWIDTH 168 /* 169 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool 170 * each time a cfs_rq requests quota. 171 * 172 * Note: in the case that the slice exceeds the runtime remaining (either due 173 * to consumption or the quota being specified to be smaller than the slice) 174 * we will always only issue the remaining available time. 
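 *
 * As an illustrative example (quota and period assumed, not taken from this
 * file): with a group quota of 20ms per 100ms period and the default 5ms
 * slice, a busy cfs_rq refills its local pool in up-to-5ms chunks, so it
 * returns to the global pool roughly four times per period, and the last
 * refill of a period may be shorter once less than a full slice of quota
 * remains.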
175 * 176 * (default: 5 msec, units: microseconds) 177 */ 178 static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; 179 #endif 180 181 #ifdef CONFIG_SYSCTL 182 static struct ctl_table sched_fair_sysctls[] = { 183 { 184 .procname = "sched_child_runs_first", 185 .data = &sysctl_sched_child_runs_first, 186 .maxlen = sizeof(unsigned int), 187 .mode = 0644, 188 .proc_handler = proc_dointvec, 189 }, 190 #ifdef CONFIG_CFS_BANDWIDTH 191 { 192 .procname = "sched_cfs_bandwidth_slice_us", 193 .data = &sysctl_sched_cfs_bandwidth_slice, 194 .maxlen = sizeof(unsigned int), 195 .mode = 0644, 196 .proc_handler = proc_dointvec_minmax, 197 .extra1 = SYSCTL_ONE, 198 }, 199 #endif 200 {} 201 }; 202 203 static int __init sched_fair_sysctl_init(void) 204 { 205 register_sysctl_init("kernel", sched_fair_sysctls); 206 return 0; 207 } 208 late_initcall(sched_fair_sysctl_init); 209 #endif 210 211 static inline void update_load_add(struct load_weight *lw, unsigned long inc) 212 { 213 lw->weight += inc; 214 lw->inv_weight = 0; 215 } 216 217 static inline void update_load_sub(struct load_weight *lw, unsigned long dec) 218 { 219 lw->weight -= dec; 220 lw->inv_weight = 0; 221 } 222 223 static inline void update_load_set(struct load_weight *lw, unsigned long w) 224 { 225 lw->weight = w; 226 lw->inv_weight = 0; 227 } 228 229 /* 230 * Increase the granularity value when there are more CPUs, 231 * because with more CPUs the 'effective latency' as visible 232 * to users decreases. But the relationship is not linear, 233 * so pick a second-best guess by going with the log2 of the 234 * number of CPUs. 235 * 236 * This idea comes from the SD scheduler of Con Kolivas: 237 */ 238 static unsigned int get_update_sysctl_factor(void) 239 { 240 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8); 241 unsigned int factor; 242 243 switch (sysctl_sched_tunable_scaling) { 244 case SCHED_TUNABLESCALING_NONE: 245 factor = 1; 246 break; 247 case SCHED_TUNABLESCALING_LINEAR: 248 factor = cpus; 249 break; 250 case SCHED_TUNABLESCALING_LOG: 251 default: 252 factor = 1 + ilog2(cpus); 253 break; 254 } 255 256 return factor; 257 } 258 259 static void update_sysctl(void) 260 { 261 unsigned int factor = get_update_sysctl_factor(); 262 263 #define SET_SYSCTL(name) \ 264 (sysctl_##name = (factor) * normalized_sysctl_##name) 265 SET_SYSCTL(sched_min_granularity); 266 SET_SYSCTL(sched_latency); 267 SET_SYSCTL(sched_wakeup_granularity); 268 #undef SET_SYSCTL 269 } 270 271 void __init sched_init_granularity(void) 272 { 273 update_sysctl(); 274 } 275 276 #define WMULT_CONST (~0U) 277 #define WMULT_SHIFT 32 278 279 static void __update_inv_weight(struct load_weight *lw) 280 { 281 unsigned long w; 282 283 if (likely(lw->inv_weight)) 284 return; 285 286 w = scale_load_down(lw->weight); 287 288 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) 289 lw->inv_weight = 1; 290 else if (unlikely(!w)) 291 lw->inv_weight = WMULT_CONST; 292 else 293 lw->inv_weight = WMULT_CONST / w; 294 } 295 296 /* 297 * delta_exec * weight / lw.weight 298 * OR 299 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT 300 * 301 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case 302 * we're guaranteed shift stays positive because inv_weight is guaranteed to 303 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22. 304 * 305 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus 306 * weight/lw.weight <= 1, and therefore our shift will also be positive. 
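 *
 * A worked example with illustrative values: take a weight for which
 * scale_load_down(weight) == 1024 (a nice-0 entity) and an lw whose
 * scaled-down weight is 2048.  __update_inv_weight() computes
 * lw->inv_weight = WMULT_CONST / 2048 ~= 2097151; fact becomes
 * 1024 * 2097151 ~= 2^31, which still fits in 32 bits so shift stays at 32,
 * and mul_u64_u32_shr(delta_exec, fact, 32) returns roughly delta_exec / 2,
 * matching delta_exec * weight / lw.weight.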
307 */ 308 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw) 309 { 310 u64 fact = scale_load_down(weight); 311 u32 fact_hi = (u32)(fact >> 32); 312 int shift = WMULT_SHIFT; 313 int fs; 314 315 __update_inv_weight(lw); 316 317 if (unlikely(fact_hi)) { 318 fs = fls(fact_hi); 319 shift -= fs; 320 fact >>= fs; 321 } 322 323 fact = mul_u32_u32(fact, lw->inv_weight); 324 325 fact_hi = (u32)(fact >> 32); 326 if (fact_hi) { 327 fs = fls(fact_hi); 328 shift -= fs; 329 fact >>= fs; 330 } 331 332 return mul_u64_u32_shr(delta_exec, fact, shift); 333 } 334 335 336 const struct sched_class fair_sched_class; 337 338 /************************************************************** 339 * CFS operations on generic schedulable entities: 340 */ 341 342 #ifdef CONFIG_FAIR_GROUP_SCHED 343 344 /* Walk up scheduling entities hierarchy */ 345 #define for_each_sched_entity(se) \ 346 for (; se; se = se->parent) 347 348 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) 349 { 350 struct rq *rq = rq_of(cfs_rq); 351 int cpu = cpu_of(rq); 352 353 if (cfs_rq->on_list) 354 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list; 355 356 cfs_rq->on_list = 1; 357 358 /* 359 * Ensure we either appear before our parent (if already 360 * enqueued) or force our parent to appear after us when it is 361 * enqueued. The fact that we always enqueue bottom-up 362 * reduces this to two cases and a special case for the root 363 * cfs_rq. Furthermore, it also means that we will always reset 364 * tmp_alone_branch either when the branch is connected 365 * to a tree or when we reach the top of the tree 366 */ 367 if (cfs_rq->tg->parent && 368 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { 369 /* 370 * If parent is already on the list, we add the child 371 * just before. Thanks to circular linked property of 372 * the list, this means to put the child at the tail 373 * of the list that starts by parent. 374 */ 375 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, 376 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); 377 /* 378 * The branch is now connected to its tree so we can 379 * reset tmp_alone_branch to the beginning of the 380 * list. 381 */ 382 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 383 return true; 384 } 385 386 if (!cfs_rq->tg->parent) { 387 /* 388 * cfs rq without parent should be put 389 * at the tail of the list. 390 */ 391 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, 392 &rq->leaf_cfs_rq_list); 393 /* 394 * We have reach the top of a tree so we can reset 395 * tmp_alone_branch to the beginning of the list. 396 */ 397 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 398 return true; 399 } 400 401 /* 402 * The parent has not already been added so we want to 403 * make sure that it will be put after us. 404 * tmp_alone_branch points to the begin of the branch 405 * where we will add parent. 406 */ 407 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); 408 /* 409 * update tmp_alone_branch to points to the new begin 410 * of the branch 411 */ 412 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; 413 return false; 414 } 415 416 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) 417 { 418 if (cfs_rq->on_list) { 419 struct rq *rq = rq_of(cfs_rq); 420 421 /* 422 * With cfs_rq being unthrottled/throttled during an enqueue, 423 * it can happen the tmp_alone_branch points the a leaf that 424 * we finally want to del. In this case, tmp_alone_branch moves 425 * to the prev element but it will point to rq->leaf_cfs_rq_list 426 * at the end of the enqueue. 
427 */ 428 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) 429 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; 430 431 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); 432 cfs_rq->on_list = 0; 433 } 434 } 435 436 static inline void assert_list_leaf_cfs_rq(struct rq *rq) 437 { 438 SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list); 439 } 440 441 /* Iterate thr' all leaf cfs_rq's on a runqueue */ 442 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ 443 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \ 444 leaf_cfs_rq_list) 445 446 /* Do the two (enqueued) entities belong to the same group ? */ 447 static inline struct cfs_rq * 448 is_same_group(struct sched_entity *se, struct sched_entity *pse) 449 { 450 if (se->cfs_rq == pse->cfs_rq) 451 return se->cfs_rq; 452 453 return NULL; 454 } 455 456 static inline struct sched_entity *parent_entity(struct sched_entity *se) 457 { 458 return se->parent; 459 } 460 461 static void 462 find_matching_se(struct sched_entity **se, struct sched_entity **pse) 463 { 464 int se_depth, pse_depth; 465 466 /* 467 * preemption test can be made between sibling entities who are in the 468 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of 469 * both tasks until we find their ancestors who are siblings of common 470 * parent. 471 */ 472 473 /* First walk up until both entities are at same depth */ 474 se_depth = (*se)->depth; 475 pse_depth = (*pse)->depth; 476 477 while (se_depth > pse_depth) { 478 se_depth--; 479 *se = parent_entity(*se); 480 } 481 482 while (pse_depth > se_depth) { 483 pse_depth--; 484 *pse = parent_entity(*pse); 485 } 486 487 while (!is_same_group(*se, *pse)) { 488 *se = parent_entity(*se); 489 *pse = parent_entity(*pse); 490 } 491 } 492 493 static int tg_is_idle(struct task_group *tg) 494 { 495 return tg->idle > 0; 496 } 497 498 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq) 499 { 500 return cfs_rq->idle > 0; 501 } 502 503 static int se_is_idle(struct sched_entity *se) 504 { 505 if (entity_is_task(se)) 506 return task_has_idle_policy(task_of(se)); 507 return cfs_rq_is_idle(group_cfs_rq(se)); 508 } 509 510 #else /* !CONFIG_FAIR_GROUP_SCHED */ 511 512 #define for_each_sched_entity(se) \ 513 for (; se; se = NULL) 514 515 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) 516 { 517 return true; 518 } 519 520 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) 521 { 522 } 523 524 static inline void assert_list_leaf_cfs_rq(struct rq *rq) 525 { 526 } 527 528 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ 529 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos) 530 531 static inline struct sched_entity *parent_entity(struct sched_entity *se) 532 { 533 return NULL; 534 } 535 536 static inline void 537 find_matching_se(struct sched_entity **se, struct sched_entity **pse) 538 { 539 } 540 541 static inline int tg_is_idle(struct task_group *tg) 542 { 543 return 0; 544 } 545 546 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq) 547 { 548 return 0; 549 } 550 551 static int se_is_idle(struct sched_entity *se) 552 { 553 return 0; 554 } 555 556 #endif /* CONFIG_FAIR_GROUP_SCHED */ 557 558 static __always_inline 559 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); 560 561 /************************************************************** 562 * Scheduling class tree data structure manipulation methods: 563 */ 564 565 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime) 566 { 567 s64 delta = (s64)(vruntime - max_vruntime); 568 if (delta > 0) 569 max_vruntime = 
vruntime; 570 571 return max_vruntime; 572 } 573 574 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) 575 { 576 s64 delta = (s64)(vruntime - min_vruntime); 577 if (delta < 0) 578 min_vruntime = vruntime; 579 580 return min_vruntime; 581 } 582 583 static inline bool entity_before(struct sched_entity *a, 584 struct sched_entity *b) 585 { 586 return (s64)(a->vruntime - b->vruntime) < 0; 587 } 588 589 #define __node_2_se(node) \ 590 rb_entry((node), struct sched_entity, run_node) 591 592 static void update_min_vruntime(struct cfs_rq *cfs_rq) 593 { 594 struct sched_entity *curr = cfs_rq->curr; 595 struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline); 596 597 u64 vruntime = cfs_rq->min_vruntime; 598 599 if (curr) { 600 if (curr->on_rq) 601 vruntime = curr->vruntime; 602 else 603 curr = NULL; 604 } 605 606 if (leftmost) { /* non-empty tree */ 607 struct sched_entity *se = __node_2_se(leftmost); 608 609 if (!curr) 610 vruntime = se->vruntime; 611 else 612 vruntime = min_vruntime(vruntime, se->vruntime); 613 } 614 615 /* ensure we never gain time by being placed backwards. */ 616 u64_u32_store(cfs_rq->min_vruntime, 617 max_vruntime(cfs_rq->min_vruntime, vruntime)); 618 } 619 620 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b) 621 { 622 return entity_before(__node_2_se(a), __node_2_se(b)); 623 } 624 625 /* 626 * Enqueue an entity into the rb-tree: 627 */ 628 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 629 { 630 rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less); 631 } 632 633 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 634 { 635 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); 636 } 637 638 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) 639 { 640 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); 641 642 if (!left) 643 return NULL; 644 645 return __node_2_se(left); 646 } 647 648 static struct sched_entity *__pick_next_entity(struct sched_entity *se) 649 { 650 struct rb_node *next = rb_next(&se->run_node); 651 652 if (!next) 653 return NULL; 654 655 return __node_2_se(next); 656 } 657 658 #ifdef CONFIG_SCHED_DEBUG 659 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) 660 { 661 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); 662 663 if (!last) 664 return NULL; 665 666 return __node_2_se(last); 667 } 668 669 /************************************************************** 670 * Scheduling class statistics methods: 671 */ 672 673 int sched_update_scaling(void) 674 { 675 unsigned int factor = get_update_sysctl_factor(); 676 677 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency, 678 sysctl_sched_min_granularity); 679 680 #define WRT_SYSCTL(name) \ 681 (normalized_sysctl_##name = sysctl_##name / (factor)) 682 WRT_SYSCTL(sched_min_granularity); 683 WRT_SYSCTL(sched_latency); 684 WRT_SYSCTL(sched_wakeup_granularity); 685 #undef WRT_SYSCTL 686 687 return 0; 688 } 689 #endif 690 691 /* 692 * delta /= w 693 */ 694 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) 695 { 696 if (unlikely(se->load.weight != NICE_0_LOAD)) 697 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); 698 699 return delta; 700 } 701 702 /* 703 * The idea is to set a period in which each task runs once. 704 * 705 * When there are too many tasks (sched_nr_latency) we have to stretch 706 * this period because otherwise the slices get too small. 707 * 708 * p = (nr <= nl) ? 
l : l*nr/nl 709 */ 710 static u64 __sched_period(unsigned long nr_running) 711 { 712 if (unlikely(nr_running > sched_nr_latency)) 713 return nr_running * sysctl_sched_min_granularity; 714 else 715 return sysctl_sched_latency; 716 } 717 718 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq); 719 720 /* 721 * We calculate the wall-time slice from the period by taking a part 722 * proportional to the weight. 723 * 724 * s = p*P[w/rw] 725 */ 726 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) 727 { 728 unsigned int nr_running = cfs_rq->nr_running; 729 struct sched_entity *init_se = se; 730 unsigned int min_gran; 731 u64 slice; 732 733 if (sched_feat(ALT_PERIOD)) 734 nr_running = rq_of(cfs_rq)->cfs.h_nr_running; 735 736 slice = __sched_period(nr_running + !se->on_rq); 737 738 for_each_sched_entity(se) { 739 struct load_weight *load; 740 struct load_weight lw; 741 struct cfs_rq *qcfs_rq; 742 743 qcfs_rq = cfs_rq_of(se); 744 load = &qcfs_rq->load; 745 746 if (unlikely(!se->on_rq)) { 747 lw = qcfs_rq->load; 748 749 update_load_add(&lw, se->load.weight); 750 load = &lw; 751 } 752 slice = __calc_delta(slice, se->load.weight, load); 753 } 754 755 if (sched_feat(BASE_SLICE)) { 756 if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq)) 757 min_gran = sysctl_sched_idle_min_granularity; 758 else 759 min_gran = sysctl_sched_min_granularity; 760 761 slice = max_t(u64, slice, min_gran); 762 } 763 764 return slice; 765 } 766 767 /* 768 * We calculate the vruntime slice of a to-be-inserted task. 769 * 770 * vs = s/w 771 */ 772 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) 773 { 774 return calc_delta_fair(sched_slice(cfs_rq, se), se); 775 } 776 777 #include "pelt.h" 778 #ifdef CONFIG_SMP 779 780 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu); 781 static unsigned long task_h_load(struct task_struct *p); 782 static unsigned long capacity_of(int cpu); 783 784 /* Give new sched_entity start runnable values to heavy its load in infant time */ 785 void init_entity_runnable_average(struct sched_entity *se) 786 { 787 struct sched_avg *sa = &se->avg; 788 789 memset(sa, 0, sizeof(*sa)); 790 791 /* 792 * Tasks are initialized with full load to be seen as heavy tasks until 793 * they get a chance to stabilize to their real load level. 794 * Group entities are initialized with zero load to reflect the fact that 795 * nothing has been attached to the task group yet. 796 */ 797 if (entity_is_task(se)) 798 sa->load_avg = scale_load_down(se->load.weight); 799 800 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */ 801 } 802 803 static void attach_entity_cfs_rq(struct sched_entity *se); 804 805 /* 806 * With new tasks being created, their initial util_avgs are extrapolated 807 * based on the cfs_rq's current util_avg: 808 * 809 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight 810 * 811 * However, in many cases, the above util_avg does not give a desired 812 * value. Moreover, the sum of the util_avgs may be divergent, such 813 * as when the series is a harmonic series. 814 * 815 * To solve this problem, we also cap the util_avg of successive tasks to 816 * only 1/2 of the left utilization budget: 817 * 818 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n 819 * 820 * where n denotes the nth task and cpu_scale the CPU capacity. 
821 * 822 * For example, for a CPU with 1024 of capacity, a simplest series from 823 * the beginning would be like: 824 * 825 * task util_avg: 512, 256, 128, 64, 32, 16, 8, ... 826 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ... 827 * 828 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap) 829 * if util_avg > util_avg_cap. 830 */ 831 void post_init_entity_util_avg(struct task_struct *p) 832 { 833 struct sched_entity *se = &p->se; 834 struct cfs_rq *cfs_rq = cfs_rq_of(se); 835 struct sched_avg *sa = &se->avg; 836 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); 837 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; 838 839 if (cap > 0) { 840 if (cfs_rq->avg.util_avg != 0) { 841 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; 842 sa->util_avg /= (cfs_rq->avg.load_avg + 1); 843 844 if (sa->util_avg > cap) 845 sa->util_avg = cap; 846 } else { 847 sa->util_avg = cap; 848 } 849 } 850 851 sa->runnable_avg = sa->util_avg; 852 853 if (p->sched_class != &fair_sched_class) { 854 /* 855 * For !fair tasks do: 856 * 857 update_cfs_rq_load_avg(now, cfs_rq); 858 attach_entity_load_avg(cfs_rq, se); 859 switched_from_fair(rq, p); 860 * 861 * such that the next switched_to_fair() has the 862 * expected state. 863 */ 864 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); 865 return; 866 } 867 868 attach_entity_cfs_rq(se); 869 } 870 871 #else /* !CONFIG_SMP */ 872 void init_entity_runnable_average(struct sched_entity *se) 873 { 874 } 875 void post_init_entity_util_avg(struct task_struct *p) 876 { 877 } 878 static void update_tg_load_avg(struct cfs_rq *cfs_rq) 879 { 880 } 881 #endif /* CONFIG_SMP */ 882 883 /* 884 * Update the current task's runtime statistics. 885 */ 886 static void update_curr(struct cfs_rq *cfs_rq) 887 { 888 struct sched_entity *curr = cfs_rq->curr; 889 u64 now = rq_clock_task(rq_of(cfs_rq)); 890 u64 delta_exec; 891 892 if (unlikely(!curr)) 893 return; 894 895 delta_exec = now - curr->exec_start; 896 if (unlikely((s64)delta_exec <= 0)) 897 return; 898 899 curr->exec_start = now; 900 901 if (schedstat_enabled()) { 902 struct sched_statistics *stats; 903 904 stats = __schedstats_from_se(curr); 905 __schedstat_set(stats->exec_max, 906 max(delta_exec, stats->exec_max)); 907 } 908 909 curr->sum_exec_runtime += delta_exec; 910 schedstat_add(cfs_rq->exec_clock, delta_exec); 911 912 curr->vruntime += calc_delta_fair(delta_exec, curr); 913 update_min_vruntime(cfs_rq); 914 915 if (entity_is_task(curr)) { 916 struct task_struct *curtask = task_of(curr); 917 918 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); 919 cgroup_account_cputime(curtask, delta_exec); 920 account_group_exec_runtime(curtask, delta_exec); 921 } 922 923 account_cfs_rq_runtime(cfs_rq, delta_exec); 924 } 925 926 static void update_curr_fair(struct rq *rq) 927 { 928 update_curr(cfs_rq_of(&rq->curr->se)); 929 } 930 931 static inline void 932 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) 933 { 934 struct sched_statistics *stats; 935 struct task_struct *p = NULL; 936 937 if (!schedstat_enabled()) 938 return; 939 940 stats = __schedstats_from_se(se); 941 942 if (entity_is_task(se)) 943 p = task_of(se); 944 945 __update_stats_wait_start(rq_of(cfs_rq), p, stats); 946 } 947 948 static inline void 949 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) 950 { 951 struct sched_statistics *stats; 952 struct task_struct *p = NULL; 953 954 if (!schedstat_enabled()) 955 return; 956 957 stats = 
__schedstats_from_se(se); 958 959 /* 960 * When the sched_schedstat changes from 0 to 1, some sched se 961 * maybe already in the runqueue, the se->statistics.wait_start 962 * will be 0.So it will let the delta wrong. We need to avoid this 963 * scenario. 964 */ 965 if (unlikely(!schedstat_val(stats->wait_start))) 966 return; 967 968 if (entity_is_task(se)) 969 p = task_of(se); 970 971 __update_stats_wait_end(rq_of(cfs_rq), p, stats); 972 } 973 974 static inline void 975 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) 976 { 977 struct sched_statistics *stats; 978 struct task_struct *tsk = NULL; 979 980 if (!schedstat_enabled()) 981 return; 982 983 stats = __schedstats_from_se(se); 984 985 if (entity_is_task(se)) 986 tsk = task_of(se); 987 988 __update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats); 989 } 990 991 /* 992 * Task is being enqueued - update stats: 993 */ 994 static inline void 995 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 996 { 997 if (!schedstat_enabled()) 998 return; 999 1000 /* 1001 * Are we enqueueing a waiting task? (for current tasks 1002 * a dequeue/enqueue event is a NOP) 1003 */ 1004 if (se != cfs_rq->curr) 1005 update_stats_wait_start_fair(cfs_rq, se); 1006 1007 if (flags & ENQUEUE_WAKEUP) 1008 update_stats_enqueue_sleeper_fair(cfs_rq, se); 1009 } 1010 1011 static inline void 1012 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 1013 { 1014 1015 if (!schedstat_enabled()) 1016 return; 1017 1018 /* 1019 * Mark the end of the wait period if dequeueing a 1020 * waiting task: 1021 */ 1022 if (se != cfs_rq->curr) 1023 update_stats_wait_end_fair(cfs_rq, se); 1024 1025 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { 1026 struct task_struct *tsk = task_of(se); 1027 unsigned int state; 1028 1029 /* XXX racy against TTWU */ 1030 state = READ_ONCE(tsk->__state); 1031 if (state & TASK_INTERRUPTIBLE) 1032 __schedstat_set(tsk->stats.sleep_start, 1033 rq_clock(rq_of(cfs_rq))); 1034 if (state & TASK_UNINTERRUPTIBLE) 1035 __schedstat_set(tsk->stats.block_start, 1036 rq_clock(rq_of(cfs_rq))); 1037 } 1038 } 1039 1040 /* 1041 * We are picking a new current task - update its stats: 1042 */ 1043 static inline void 1044 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) 1045 { 1046 /* 1047 * We are starting a new run period: 1048 */ 1049 se->exec_start = rq_clock_task(rq_of(cfs_rq)); 1050 } 1051 1052 /************************************************** 1053 * Scheduling class queueing methods: 1054 */ 1055 1056 #ifdef CONFIG_NUMA 1057 #define NUMA_IMBALANCE_MIN 2 1058 1059 static inline long 1060 adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr) 1061 { 1062 /* 1063 * Allow a NUMA imbalance if busy CPUs is less than the maximum 1064 * threshold. Above this threshold, individual tasks may be contending 1065 * for both memory bandwidth and any shared HT resources. This is an 1066 * approximation as the number of running tasks may not be related to 1067 * the number of busy CPUs due to sched_setaffinity. 1068 */ 1069 if (dst_running > imb_numa_nr) 1070 return imbalance; 1071 1072 /* 1073 * Allow a small imbalance based on a simple pair of communicating 1074 * tasks that remain local when the destination is lightly loaded. 
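 *
 * For example, given the checks below: with NUMA_IMBALANCE_MIN == 2 and a
 * destination node whose busy CPU count is still within imb_numa_nr, an
 * imbalance of one or two tasks is reported as no imbalance at all, so one
 * half of a communicating pair may stay local to the other.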
1075 */ 1076 if (imbalance <= NUMA_IMBALANCE_MIN) 1077 return 0; 1078 1079 return imbalance; 1080 } 1081 #endif /* CONFIG_NUMA */ 1082 1083 #ifdef CONFIG_NUMA_BALANCING 1084 /* 1085 * Approximate time to scan a full NUMA task in ms. The task scan period is 1086 * calculated based on the tasks virtual memory size and 1087 * numa_balancing_scan_size. 1088 */ 1089 unsigned int sysctl_numa_balancing_scan_period_min = 1000; 1090 unsigned int sysctl_numa_balancing_scan_period_max = 60000; 1091 1092 /* Portion of address space to scan in MB */ 1093 unsigned int sysctl_numa_balancing_scan_size = 256; 1094 1095 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ 1096 unsigned int sysctl_numa_balancing_scan_delay = 1000; 1097 1098 /* The page with hint page fault latency < threshold in ms is considered hot */ 1099 unsigned int sysctl_numa_balancing_hot_threshold = MSEC_PER_SEC; 1100 1101 /* Restrict the NUMA promotion throughput (MB/s) for each target node. */ 1102 unsigned int sysctl_numa_balancing_promote_rate_limit = 65536; 1103 1104 struct numa_group { 1105 refcount_t refcount; 1106 1107 spinlock_t lock; /* nr_tasks, tasks */ 1108 int nr_tasks; 1109 pid_t gid; 1110 int active_nodes; 1111 1112 struct rcu_head rcu; 1113 unsigned long total_faults; 1114 unsigned long max_faults_cpu; 1115 /* 1116 * faults[] array is split into two regions: faults_mem and faults_cpu. 1117 * 1118 * Faults_cpu is used to decide whether memory should move 1119 * towards the CPU. As a consequence, these stats are weighted 1120 * more by CPU use than by memory faults. 1121 */ 1122 unsigned long faults[]; 1123 }; 1124 1125 /* 1126 * For functions that can be called in multiple contexts that permit reading 1127 * ->numa_group (see struct task_struct for locking rules). 1128 */ 1129 static struct numa_group *deref_task_numa_group(struct task_struct *p) 1130 { 1131 return rcu_dereference_check(p->numa_group, p == current || 1132 (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu))); 1133 } 1134 1135 static struct numa_group *deref_curr_numa_group(struct task_struct *p) 1136 { 1137 return rcu_dereference_protected(p->numa_group, p == current); 1138 } 1139 1140 static inline unsigned long group_faults_priv(struct numa_group *ng); 1141 static inline unsigned long group_faults_shared(struct numa_group *ng); 1142 1143 static unsigned int task_nr_scan_windows(struct task_struct *p) 1144 { 1145 unsigned long rss = 0; 1146 unsigned long nr_scan_pages; 1147 1148 /* 1149 * Calculations based on RSS as non-present and empty pages are skipped 1150 * by the PTE scanner and NUMA hinting faults should be trapped based 1151 * on resident pages 1152 */ 1153 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT); 1154 rss = get_mm_rss(p->mm); 1155 if (!rss) 1156 rss = nr_scan_pages; 1157 1158 rss = round_up(rss, nr_scan_pages); 1159 return rss / nr_scan_pages; 1160 } 1161 1162 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. 
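 *
 * An illustrative calculation (task size assumed): with the default 256MB
 * scan size, a task with 1GB of RSS spans 4 scan windows, so task_scan_min()
 * below computes scan = 1000ms / 4 = 250ms against a floor of
 * 1000 / (2560 / 256) = 100ms and returns 250ms as the minimum scan period.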
*/ 1163 #define MAX_SCAN_WINDOW 2560 1164 1165 static unsigned int task_scan_min(struct task_struct *p) 1166 { 1167 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size); 1168 unsigned int scan, floor; 1169 unsigned int windows = 1; 1170 1171 if (scan_size < MAX_SCAN_WINDOW) 1172 windows = MAX_SCAN_WINDOW / scan_size; 1173 floor = 1000 / windows; 1174 1175 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); 1176 return max_t(unsigned int, floor, scan); 1177 } 1178 1179 static unsigned int task_scan_start(struct task_struct *p) 1180 { 1181 unsigned long smin = task_scan_min(p); 1182 unsigned long period = smin; 1183 struct numa_group *ng; 1184 1185 /* Scale the maximum scan period with the amount of shared memory. */ 1186 rcu_read_lock(); 1187 ng = rcu_dereference(p->numa_group); 1188 if (ng) { 1189 unsigned long shared = group_faults_shared(ng); 1190 unsigned long private = group_faults_priv(ng); 1191 1192 period *= refcount_read(&ng->refcount); 1193 period *= shared + 1; 1194 period /= private + shared + 1; 1195 } 1196 rcu_read_unlock(); 1197 1198 return max(smin, period); 1199 } 1200 1201 static unsigned int task_scan_max(struct task_struct *p) 1202 { 1203 unsigned long smin = task_scan_min(p); 1204 unsigned long smax; 1205 struct numa_group *ng; 1206 1207 /* Watch for min being lower than max due to floor calculations */ 1208 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); 1209 1210 /* Scale the maximum scan period with the amount of shared memory. */ 1211 ng = deref_curr_numa_group(p); 1212 if (ng) { 1213 unsigned long shared = group_faults_shared(ng); 1214 unsigned long private = group_faults_priv(ng); 1215 unsigned long period = smax; 1216 1217 period *= refcount_read(&ng->refcount); 1218 period *= shared + 1; 1219 period /= private + shared + 1; 1220 1221 smax = max(smax, period); 1222 } 1223 1224 return max(smin, smax); 1225 } 1226 1227 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) 1228 { 1229 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); 1230 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); 1231 } 1232 1233 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) 1234 { 1235 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); 1236 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); 1237 } 1238 1239 /* Shared or private faults. */ 1240 #define NR_NUMA_HINT_FAULT_TYPES 2 1241 1242 /* Memory and CPU locality */ 1243 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2) 1244 1245 /* Averaged statistics, and temporary buffers. */ 1246 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2) 1247 1248 pid_t task_numa_group_id(struct task_struct *p) 1249 { 1250 struct numa_group *ng; 1251 pid_t gid = 0; 1252 1253 rcu_read_lock(); 1254 ng = rcu_dereference(p->numa_group); 1255 if (ng) 1256 gid = ng->gid; 1257 rcu_read_unlock(); 1258 1259 return gid; 1260 } 1261 1262 /* 1263 * The averaged statistics, shared & private, memory & CPU, 1264 * occupy the first half of the array. The second half of the 1265 * array is for current counters, which are averaged into the 1266 * first set by task_numa_placement. 
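 *
 * For example, on a 2-node system (nr_node_ids == 2, assumed here) each
 * statistics set occupies 2 nodes * 2 (priv/shared) = 4 slots, so
 * task_faults_idx(NUMA_MEM, 1, 1) == 2 * (0 * 2 + 1) + 1 == 3 lands in the
 * averaged first half, while the matching scratch counter is indexed with
 * NUMA_MEMBUF and lands in the second half.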
1267 */ 1268 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv) 1269 { 1270 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv; 1271 } 1272 1273 static inline unsigned long task_faults(struct task_struct *p, int nid) 1274 { 1275 if (!p->numa_faults) 1276 return 0; 1277 1278 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + 1279 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; 1280 } 1281 1282 static inline unsigned long group_faults(struct task_struct *p, int nid) 1283 { 1284 struct numa_group *ng = deref_task_numa_group(p); 1285 1286 if (!ng) 1287 return 0; 1288 1289 return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] + 1290 ng->faults[task_faults_idx(NUMA_MEM, nid, 1)]; 1291 } 1292 1293 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) 1294 { 1295 return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] + 1296 group->faults[task_faults_idx(NUMA_CPU, nid, 1)]; 1297 } 1298 1299 static inline unsigned long group_faults_priv(struct numa_group *ng) 1300 { 1301 unsigned long faults = 0; 1302 int node; 1303 1304 for_each_online_node(node) { 1305 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; 1306 } 1307 1308 return faults; 1309 } 1310 1311 static inline unsigned long group_faults_shared(struct numa_group *ng) 1312 { 1313 unsigned long faults = 0; 1314 int node; 1315 1316 for_each_online_node(node) { 1317 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)]; 1318 } 1319 1320 return faults; 1321 } 1322 1323 /* 1324 * A node triggering more than 1/3 as many NUMA faults as the maximum is 1325 * considered part of a numa group's pseudo-interleaving set. Migrations 1326 * between these nodes are slowed down, to allow things to settle down. 1327 */ 1328 #define ACTIVE_NODE_FRACTION 3 1329 1330 static bool numa_is_active_node(int nid, struct numa_group *ng) 1331 { 1332 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu; 1333 } 1334 1335 /* Handle placement on systems where not all nodes are directly connected. */ 1336 static unsigned long score_nearby_nodes(struct task_struct *p, int nid, 1337 int lim_dist, bool task) 1338 { 1339 unsigned long score = 0; 1340 int node, max_dist; 1341 1342 /* 1343 * All nodes are directly connected, and the same distance 1344 * from each other. No need for fancy placement algorithms. 1345 */ 1346 if (sched_numa_topology_type == NUMA_DIRECT) 1347 return 0; 1348 1349 /* sched_max_numa_distance may be changed in parallel. */ 1350 max_dist = READ_ONCE(sched_max_numa_distance); 1351 /* 1352 * This code is called for each node, introducing N^2 complexity, 1353 * which should be ok given the number of nodes rarely exceeds 8. 1354 */ 1355 for_each_online_node(node) { 1356 unsigned long faults; 1357 int dist = node_distance(nid, node); 1358 1359 /* 1360 * The furthest away nodes in the system are not interesting 1361 * for placement; nid was already counted. 1362 */ 1363 if (dist >= max_dist || node == nid) 1364 continue; 1365 1366 /* 1367 * On systems with a backplane NUMA topology, compare groups 1368 * of nodes, and move tasks towards the group with the most 1369 * memory accesses. When comparing two nodes at distance 1370 * "hoplimit", only nodes closer by than "hoplimit" are part 1371 * of each group. Skip other nodes. 1372 */ 1373 if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= lim_dist) 1374 continue; 1375 1376 /* Add up the faults from nearby nodes. 
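 * For the glueless-mesh case handled just below, each contribution is also
 * scaled down with distance: e.g. with max_dist == 40 and LOCAL_DISTANCE ==
 * 10 (both illustrative values), a node at distance 20 contributes
 * (40 - 20) / (40 - 10) = 2/3 of its faults to the score.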
*/ 1377 if (task) 1378 faults = task_faults(p, node); 1379 else 1380 faults = group_faults(p, node); 1381 1382 /* 1383 * On systems with a glueless mesh NUMA topology, there are 1384 * no fixed "groups of nodes". Instead, nodes that are not 1385 * directly connected bounce traffic through intermediate 1386 * nodes; a numa_group can occupy any set of nodes. 1387 * The further away a node is, the less the faults count. 1388 * This seems to result in good task placement. 1389 */ 1390 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 1391 faults *= (max_dist - dist); 1392 faults /= (max_dist - LOCAL_DISTANCE); 1393 } 1394 1395 score += faults; 1396 } 1397 1398 return score; 1399 } 1400 1401 /* 1402 * These return the fraction of accesses done by a particular task, or 1403 * task group, on a particular numa node. The group weight is given a 1404 * larger multiplier, in order to group tasks together that are almost 1405 * evenly spread out between numa nodes. 1406 */ 1407 static inline unsigned long task_weight(struct task_struct *p, int nid, 1408 int dist) 1409 { 1410 unsigned long faults, total_faults; 1411 1412 if (!p->numa_faults) 1413 return 0; 1414 1415 total_faults = p->total_numa_faults; 1416 1417 if (!total_faults) 1418 return 0; 1419 1420 faults = task_faults(p, nid); 1421 faults += score_nearby_nodes(p, nid, dist, true); 1422 1423 return 1000 * faults / total_faults; 1424 } 1425 1426 static inline unsigned long group_weight(struct task_struct *p, int nid, 1427 int dist) 1428 { 1429 struct numa_group *ng = deref_task_numa_group(p); 1430 unsigned long faults, total_faults; 1431 1432 if (!ng) 1433 return 0; 1434 1435 total_faults = ng->total_faults; 1436 1437 if (!total_faults) 1438 return 0; 1439 1440 faults = group_faults(p, nid); 1441 faults += score_nearby_nodes(p, nid, dist, false); 1442 1443 return 1000 * faults / total_faults; 1444 } 1445 1446 /* 1447 * If memory tiering mode is enabled, cpupid of slow memory page is 1448 * used to record scan time instead of CPU and PID. When tiering mode 1449 * is disabled at run time, the scan time (in cpupid) will be 1450 * interpreted as CPU and PID. So CPU needs to be checked to avoid to 1451 * access out of array bound. 1452 */ 1453 static inline bool cpupid_valid(int cpupid) 1454 { 1455 return cpupid_to_cpu(cpupid) < nr_cpu_ids; 1456 } 1457 1458 /* 1459 * For memory tiering mode, if there are enough free pages (more than 1460 * enough watermark defined here) in fast memory node, to take full 1461 * advantage of fast memory capacity, all recently accessed slow 1462 * memory pages will be migrated to fast memory node without 1463 * considering hot threshold. 1464 */ 1465 static bool pgdat_free_space_enough(struct pglist_data *pgdat) 1466 { 1467 int z; 1468 unsigned long enough_wmark; 1469 1470 enough_wmark = max(1UL * 1024 * 1024 * 1024 >> PAGE_SHIFT, 1471 pgdat->node_present_pages >> 4); 1472 for (z = pgdat->nr_zones - 1; z >= 0; z--) { 1473 struct zone *zone = pgdat->node_zones + z; 1474 1475 if (!populated_zone(zone)) 1476 continue; 1477 1478 if (zone_watermark_ok(zone, 0, 1479 wmark_pages(zone, WMARK_PROMO) + enough_wmark, 1480 ZONE_MOVABLE, 0)) 1481 return true; 1482 } 1483 return false; 1484 } 1485 1486 /* 1487 * For memory tiering mode, when page tables are scanned, the scan 1488 * time will be recorded in struct page in addition to make page 1489 * PROT_NONE for slow memory page. 
So when the page is accessed, in 1490 * hint page fault handler, the hint page fault latency is calculated 1491 * via, 1492 * 1493 * hint page fault latency = hint page fault time - scan time 1494 * 1495 * The smaller the hint page fault latency, the higher the possibility 1496 * for the page to be hot. 1497 */ 1498 static int numa_hint_fault_latency(struct page *page) 1499 { 1500 int last_time, time; 1501 1502 time = jiffies_to_msecs(jiffies); 1503 last_time = xchg_page_access_time(page, time); 1504 1505 return (time - last_time) & PAGE_ACCESS_TIME_MASK; 1506 } 1507 1508 /* 1509 * For memory tiering mode, too high promotion/demotion throughput may 1510 * hurt application latency. So we provide a mechanism to rate limit 1511 * the number of pages that are tried to be promoted. 1512 */ 1513 static bool numa_promotion_rate_limit(struct pglist_data *pgdat, 1514 unsigned long rate_limit, int nr) 1515 { 1516 unsigned long nr_cand; 1517 unsigned int now, start; 1518 1519 now = jiffies_to_msecs(jiffies); 1520 mod_node_page_state(pgdat, PGPROMOTE_CANDIDATE, nr); 1521 nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); 1522 start = pgdat->nbp_rl_start; 1523 if (now - start > MSEC_PER_SEC && 1524 cmpxchg(&pgdat->nbp_rl_start, start, now) == start) 1525 pgdat->nbp_rl_nr_cand = nr_cand; 1526 if (nr_cand - pgdat->nbp_rl_nr_cand >= rate_limit) 1527 return true; 1528 return false; 1529 } 1530 1531 #define NUMA_MIGRATION_ADJUST_STEPS 16 1532 1533 static void numa_promotion_adjust_threshold(struct pglist_data *pgdat, 1534 unsigned long rate_limit, 1535 unsigned int ref_th) 1536 { 1537 unsigned int now, start, th_period, unit_th, th; 1538 unsigned long nr_cand, ref_cand, diff_cand; 1539 1540 now = jiffies_to_msecs(jiffies); 1541 th_period = sysctl_numa_balancing_scan_period_max; 1542 start = pgdat->nbp_th_start; 1543 if (now - start > th_period && 1544 cmpxchg(&pgdat->nbp_th_start, start, now) == start) { 1545 ref_cand = rate_limit * 1546 sysctl_numa_balancing_scan_period_max / MSEC_PER_SEC; 1547 nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); 1548 diff_cand = nr_cand - pgdat->nbp_th_nr_cand; 1549 unit_th = ref_th * 2 / NUMA_MIGRATION_ADJUST_STEPS; 1550 th = pgdat->nbp_threshold ? : ref_th; 1551 if (diff_cand > ref_cand * 11 / 10) 1552 th = max(th - unit_th, unit_th); 1553 else if (diff_cand < ref_cand * 9 / 10) 1554 th = min(th + unit_th, ref_th * 2); 1555 pgdat->nbp_th_nr_cand = nr_cand; 1556 pgdat->nbp_threshold = th; 1557 } 1558 } 1559 1560 bool should_numa_migrate_memory(struct task_struct *p, struct page * page, 1561 int src_nid, int dst_cpu) 1562 { 1563 struct numa_group *ng = deref_curr_numa_group(p); 1564 int dst_nid = cpu_to_node(dst_cpu); 1565 int last_cpupid, this_cpupid; 1566 1567 /* 1568 * The pages in slow memory node should be migrated according 1569 * to hot/cold instead of private/shared. 1570 */ 1571 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && 1572 !node_is_toptier(src_nid)) { 1573 struct pglist_data *pgdat; 1574 unsigned long rate_limit; 1575 unsigned int latency, th, def_th; 1576 1577 pgdat = NODE_DATA(dst_nid); 1578 if (pgdat_free_space_enough(pgdat)) { 1579 /* workload changed, reset hot threshold */ 1580 pgdat->nbp_threshold = 0; 1581 return true; 1582 } 1583 1584 def_th = sysctl_numa_balancing_hot_threshold; 1585 rate_limit = sysctl_numa_balancing_promote_rate_limit << \ 1586 (20 - PAGE_SHIFT); 1587 numa_promotion_adjust_threshold(pgdat, rate_limit, def_th); 1588 1589 th = pgdat->nbp_threshold ? 
: def_th; 1590 latency = numa_hint_fault_latency(page); 1591 if (latency >= th) 1592 return false; 1593 1594 return !numa_promotion_rate_limit(pgdat, rate_limit, 1595 thp_nr_pages(page)); 1596 } 1597 1598 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); 1599 last_cpupid = page_cpupid_xchg_last(page, this_cpupid); 1600 1601 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 1602 !node_is_toptier(src_nid) && !cpupid_valid(last_cpupid)) 1603 return false; 1604 1605 /* 1606 * Allow first faults or private faults to migrate immediately early in 1607 * the lifetime of a task. The magic number 4 is based on waiting for 1608 * two full passes of the "multi-stage node selection" test that is 1609 * executed below. 1610 */ 1611 if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) && 1612 (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) 1613 return true; 1614 1615 /* 1616 * Multi-stage node selection is used in conjunction with a periodic 1617 * migration fault to build a temporal task<->page relation. By using 1618 * a two-stage filter we remove short/unlikely relations. 1619 * 1620 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate 1621 * a task's usage of a particular page (n_p) per total usage of this 1622 * page (n_t) (in a given time-span) to a probability. 1623 * 1624 * Our periodic faults will sample this probability and getting the 1625 * same result twice in a row, given these samples are fully 1626 * independent, is then given by P(n)^2, provided our sample period 1627 * is sufficiently short compared to the usage pattern. 1628 * 1629 * This quadric squishes small probabilities, making it less likely we 1630 * act on an unlikely task<->page relation. 1631 */ 1632 if (!cpupid_pid_unset(last_cpupid) && 1633 cpupid_to_nid(last_cpupid) != dst_nid) 1634 return false; 1635 1636 /* Always allow migrate on private faults */ 1637 if (cpupid_match_pid(p, last_cpupid)) 1638 return true; 1639 1640 /* A shared fault, but p->numa_group has not been set up yet. */ 1641 if (!ng) 1642 return true; 1643 1644 /* 1645 * Destination node is much more heavily used than the source 1646 * node? Allow migration. 1647 */ 1648 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) * 1649 ACTIVE_NODE_FRACTION) 1650 return true; 1651 1652 /* 1653 * Distribute memory according to CPU & memory use on each node, 1654 * with 3/4 hysteresis to avoid unnecessary memory migrations: 1655 * 1656 * faults_cpu(dst) 3 faults_cpu(src) 1657 * --------------- * - > --------------- 1658 * faults_mem(dst) 4 faults_mem(src) 1659 */ 1660 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 > 1661 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; 1662 } 1663 1664 /* 1665 * 'numa_type' describes the node at the moment of load balancing. 1666 */ 1667 enum numa_type { 1668 /* The node has spare capacity that can be used to run more tasks. */ 1669 node_has_spare = 0, 1670 /* 1671 * The node is fully used and the tasks don't compete for more CPU 1672 * cycles. Nevertheless, some tasks might wait before running. 1673 */ 1674 node_fully_busy, 1675 /* 1676 * The node is overloaded and can't provide expected CPU cycles to all 1677 * tasks. 
1678 */ 1679 node_overloaded 1680 }; 1681 1682 /* Cached statistics for all CPUs within a node */ 1683 struct numa_stats { 1684 unsigned long load; 1685 unsigned long runnable; 1686 unsigned long util; 1687 /* Total compute capacity of CPUs on a node */ 1688 unsigned long compute_capacity; 1689 unsigned int nr_running; 1690 unsigned int weight; 1691 enum numa_type node_type; 1692 int idle_cpu; 1693 }; 1694 1695 static inline bool is_core_idle(int cpu) 1696 { 1697 #ifdef CONFIG_SCHED_SMT 1698 int sibling; 1699 1700 for_each_cpu(sibling, cpu_smt_mask(cpu)) { 1701 if (cpu == sibling) 1702 continue; 1703 1704 if (!idle_cpu(sibling)) 1705 return false; 1706 } 1707 #endif 1708 1709 return true; 1710 } 1711 1712 struct task_numa_env { 1713 struct task_struct *p; 1714 1715 int src_cpu, src_nid; 1716 int dst_cpu, dst_nid; 1717 int imb_numa_nr; 1718 1719 struct numa_stats src_stats, dst_stats; 1720 1721 int imbalance_pct; 1722 int dist; 1723 1724 struct task_struct *best_task; 1725 long best_imp; 1726 int best_cpu; 1727 }; 1728 1729 static unsigned long cpu_load(struct rq *rq); 1730 static unsigned long cpu_runnable(struct rq *rq); 1731 1732 static inline enum 1733 numa_type numa_classify(unsigned int imbalance_pct, 1734 struct numa_stats *ns) 1735 { 1736 if ((ns->nr_running > ns->weight) && 1737 (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) || 1738 ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100)))) 1739 return node_overloaded; 1740 1741 if ((ns->nr_running < ns->weight) || 1742 (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) && 1743 ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100)))) 1744 return node_has_spare; 1745 1746 return node_fully_busy; 1747 } 1748 1749 #ifdef CONFIG_SCHED_SMT 1750 /* Forward declarations of select_idle_sibling helpers */ 1751 static inline bool test_idle_cores(int cpu, bool def); 1752 static inline int numa_idle_core(int idle_core, int cpu) 1753 { 1754 if (!static_branch_likely(&sched_smt_present) || 1755 idle_core >= 0 || !test_idle_cores(cpu, false)) 1756 return idle_core; 1757 1758 /* 1759 * Prefer cores instead of packing HT siblings 1760 * and triggering future load balancing. 1761 */ 1762 if (is_core_idle(cpu)) 1763 idle_core = cpu; 1764 1765 return idle_core; 1766 } 1767 #else 1768 static inline int numa_idle_core(int idle_core, int cpu) 1769 { 1770 return idle_core; 1771 } 1772 #endif 1773 1774 /* 1775 * Gather all necessary information to make NUMA balancing placement 1776 * decisions that are compatible with standard load balancer. This 1777 * borrows code and logic from update_sg_lb_stats but sharing a 1778 * common implementation is impractical. 
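 *
 * As a rough illustration of how numa_classify() above consumes these sums:
 * with an imbalance_pct of 112 (the value task_numa_migrate() starts from),
 * a node with more runnable tasks than CPUs is reported node_overloaded once
 * util * 112 exceeds compute_capacity * 100, i.e. at roughly 89% utilization,
 * while a node keeping nr_running below its CPU count is node_has_spare.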
1779 */ 1780 static void update_numa_stats(struct task_numa_env *env, 1781 struct numa_stats *ns, int nid, 1782 bool find_idle) 1783 { 1784 int cpu, idle_core = -1; 1785 1786 memset(ns, 0, sizeof(*ns)); 1787 ns->idle_cpu = -1; 1788 1789 rcu_read_lock(); 1790 for_each_cpu(cpu, cpumask_of_node(nid)) { 1791 struct rq *rq = cpu_rq(cpu); 1792 1793 ns->load += cpu_load(rq); 1794 ns->runnable += cpu_runnable(rq); 1795 ns->util += cpu_util_cfs(cpu); 1796 ns->nr_running += rq->cfs.h_nr_running; 1797 ns->compute_capacity += capacity_of(cpu); 1798 1799 if (find_idle && !rq->nr_running && idle_cpu(cpu)) { 1800 if (READ_ONCE(rq->numa_migrate_on) || 1801 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) 1802 continue; 1803 1804 if (ns->idle_cpu == -1) 1805 ns->idle_cpu = cpu; 1806 1807 idle_core = numa_idle_core(idle_core, cpu); 1808 } 1809 } 1810 rcu_read_unlock(); 1811 1812 ns->weight = cpumask_weight(cpumask_of_node(nid)); 1813 1814 ns->node_type = numa_classify(env->imbalance_pct, ns); 1815 1816 if (idle_core >= 0) 1817 ns->idle_cpu = idle_core; 1818 } 1819 1820 static void task_numa_assign(struct task_numa_env *env, 1821 struct task_struct *p, long imp) 1822 { 1823 struct rq *rq = cpu_rq(env->dst_cpu); 1824 1825 /* Check if run-queue part of active NUMA balance. */ 1826 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) { 1827 int cpu; 1828 int start = env->dst_cpu; 1829 1830 /* Find alternative idle CPU. */ 1831 for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) { 1832 if (cpu == env->best_cpu || !idle_cpu(cpu) || 1833 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { 1834 continue; 1835 } 1836 1837 env->dst_cpu = cpu; 1838 rq = cpu_rq(env->dst_cpu); 1839 if (!xchg(&rq->numa_migrate_on, 1)) 1840 goto assign; 1841 } 1842 1843 /* Failed to find an alternative idle CPU */ 1844 return; 1845 } 1846 1847 assign: 1848 /* 1849 * Clear previous best_cpu/rq numa-migrate flag, since task now 1850 * found a better CPU to move/swap. 1851 */ 1852 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) { 1853 rq = cpu_rq(env->best_cpu); 1854 WRITE_ONCE(rq->numa_migrate_on, 0); 1855 } 1856 1857 if (env->best_task) 1858 put_task_struct(env->best_task); 1859 if (p) 1860 get_task_struct(p); 1861 1862 env->best_task = p; 1863 env->best_imp = imp; 1864 env->best_cpu = env->dst_cpu; 1865 } 1866 1867 static bool load_too_imbalanced(long src_load, long dst_load, 1868 struct task_numa_env *env) 1869 { 1870 long imb, old_imb; 1871 long orig_src_load, orig_dst_load; 1872 long src_capacity, dst_capacity; 1873 1874 /* 1875 * The load is corrected for the CPU capacity available on each node. 1876 * 1877 * src_load dst_load 1878 * ------------ vs --------- 1879 * src_capacity dst_capacity 1880 */ 1881 src_capacity = env->src_stats.compute_capacity; 1882 dst_capacity = env->dst_stats.compute_capacity; 1883 1884 imb = abs(dst_load * src_capacity - src_load * dst_capacity); 1885 1886 orig_src_load = env->src_stats.load; 1887 orig_dst_load = env->dst_stats.load; 1888 1889 old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity); 1890 1891 /* Would this change make things worse? */ 1892 return (imb > old_imb); 1893 } 1894 1895 /* 1896 * Maximum NUMA importance can be 1998 (2*999); 1897 * SMALLIMP @ 30 would be close to 1998/64. 1898 * Used to deter task migration. 
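 *
 * (The 999 comes from task_weight()/group_weight() scaling a node's share of
 * faults into the 0..1000 range, so a single source/destination differential
 * is at most ~999; task_numa_compare() adds at most two such differentials,
 * which bounds imp by roughly 2 * 999.)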
1899 */ 1900 #define SMALLIMP 30 1901 1902 /* 1903 * This checks if the overall compute and NUMA accesses of the system would 1904 * be improved if the source tasks was migrated to the target dst_cpu taking 1905 * into account that it might be best if task running on the dst_cpu should 1906 * be exchanged with the source task 1907 */ 1908 static bool task_numa_compare(struct task_numa_env *env, 1909 long taskimp, long groupimp, bool maymove) 1910 { 1911 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); 1912 struct rq *dst_rq = cpu_rq(env->dst_cpu); 1913 long imp = p_ng ? groupimp : taskimp; 1914 struct task_struct *cur; 1915 long src_load, dst_load; 1916 int dist = env->dist; 1917 long moveimp = imp; 1918 long load; 1919 bool stopsearch = false; 1920 1921 if (READ_ONCE(dst_rq->numa_migrate_on)) 1922 return false; 1923 1924 rcu_read_lock(); 1925 cur = rcu_dereference(dst_rq->curr); 1926 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) 1927 cur = NULL; 1928 1929 /* 1930 * Because we have preemption enabled we can get migrated around and 1931 * end try selecting ourselves (current == env->p) as a swap candidate. 1932 */ 1933 if (cur == env->p) { 1934 stopsearch = true; 1935 goto unlock; 1936 } 1937 1938 if (!cur) { 1939 if (maymove && moveimp >= env->best_imp) 1940 goto assign; 1941 else 1942 goto unlock; 1943 } 1944 1945 /* Skip this swap candidate if cannot move to the source cpu. */ 1946 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) 1947 goto unlock; 1948 1949 /* 1950 * Skip this swap candidate if it is not moving to its preferred 1951 * node and the best task is. 1952 */ 1953 if (env->best_task && 1954 env->best_task->numa_preferred_nid == env->src_nid && 1955 cur->numa_preferred_nid != env->src_nid) { 1956 goto unlock; 1957 } 1958 1959 /* 1960 * "imp" is the fault differential for the source task between the 1961 * source and destination node. Calculate the total differential for 1962 * the source task and potential destination task. The more negative 1963 * the value is, the more remote accesses that would be expected to 1964 * be incurred if the tasks were swapped. 1965 * 1966 * If dst and source tasks are in the same NUMA group, or not 1967 * in any group then look only at task weights. 1968 */ 1969 cur_ng = rcu_dereference(cur->numa_group); 1970 if (cur_ng == p_ng) { 1971 /* 1972 * Do not swap within a group or between tasks that have 1973 * no group if there is spare capacity. Swapping does 1974 * not address the load imbalance and helps one task at 1975 * the cost of punishing another. 1976 */ 1977 if (env->dst_stats.node_type == node_has_spare) 1978 goto unlock; 1979 1980 imp = taskimp + task_weight(cur, env->src_nid, dist) - 1981 task_weight(cur, env->dst_nid, dist); 1982 /* 1983 * Add some hysteresis to prevent swapping the 1984 * tasks within a group over tiny differences. 1985 */ 1986 if (cur_ng) 1987 imp -= imp / 16; 1988 } else { 1989 /* 1990 * Compare the group weights. If a task is all by itself 1991 * (not part of a group), use the task weight instead. 1992 */ 1993 if (cur_ng && p_ng) 1994 imp += group_weight(cur, env->src_nid, dist) - 1995 group_weight(cur, env->dst_nid, dist); 1996 else 1997 imp += task_weight(cur, env->src_nid, dist) - 1998 task_weight(cur, env->dst_nid, dist); 1999 } 2000 2001 /* Discourage picking a task already on its preferred node */ 2002 if (cur->numa_preferred_nid == env->dst_nid) 2003 imp -= imp / 16; 2004 2005 /* 2006 * Encourage picking a task that moves to its preferred node. 
2007 * This potentially makes imp larger than it's maximum of 2008 * 1998 (see SMALLIMP and task_weight for why) but in this 2009 * case, it does not matter. 2010 */ 2011 if (cur->numa_preferred_nid == env->src_nid) 2012 imp += imp / 8; 2013 2014 if (maymove && moveimp > imp && moveimp > env->best_imp) { 2015 imp = moveimp; 2016 cur = NULL; 2017 goto assign; 2018 } 2019 2020 /* 2021 * Prefer swapping with a task moving to its preferred node over a 2022 * task that is not. 2023 */ 2024 if (env->best_task && cur->numa_preferred_nid == env->src_nid && 2025 env->best_task->numa_preferred_nid != env->src_nid) { 2026 goto assign; 2027 } 2028 2029 /* 2030 * If the NUMA importance is less than SMALLIMP, 2031 * task migration might only result in ping pong 2032 * of tasks and also hurt performance due to cache 2033 * misses. 2034 */ 2035 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2) 2036 goto unlock; 2037 2038 /* 2039 * In the overloaded case, try and keep the load balanced. 2040 */ 2041 load = task_h_load(env->p) - task_h_load(cur); 2042 if (!load) 2043 goto assign; 2044 2045 dst_load = env->dst_stats.load + load; 2046 src_load = env->src_stats.load - load; 2047 2048 if (load_too_imbalanced(src_load, dst_load, env)) 2049 goto unlock; 2050 2051 assign: 2052 /* Evaluate an idle CPU for a task numa move. */ 2053 if (!cur) { 2054 int cpu = env->dst_stats.idle_cpu; 2055 2056 /* Nothing cached so current CPU went idle since the search. */ 2057 if (cpu < 0) 2058 cpu = env->dst_cpu; 2059 2060 /* 2061 * If the CPU is no longer truly idle and the previous best CPU 2062 * is, keep using it. 2063 */ 2064 if (!idle_cpu(cpu) && env->best_cpu >= 0 && 2065 idle_cpu(env->best_cpu)) { 2066 cpu = env->best_cpu; 2067 } 2068 2069 env->dst_cpu = cpu; 2070 } 2071 2072 task_numa_assign(env, cur, imp); 2073 2074 /* 2075 * If a move to idle is allowed because there is capacity or load 2076 * balance improves then stop the search. While a better swap 2077 * candidate may exist, a search is not free. 2078 */ 2079 if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu)) 2080 stopsearch = true; 2081 2082 /* 2083 * If a swap candidate must be identified and the current best task 2084 * moves its preferred node then stop the search. 2085 */ 2086 if (!maymove && env->best_task && 2087 env->best_task->numa_preferred_nid == env->src_nid) { 2088 stopsearch = true; 2089 } 2090 unlock: 2091 rcu_read_unlock(); 2092 2093 return stopsearch; 2094 } 2095 2096 static void task_numa_find_cpu(struct task_numa_env *env, 2097 long taskimp, long groupimp) 2098 { 2099 bool maymove = false; 2100 int cpu; 2101 2102 /* 2103 * If dst node has spare capacity, then check if there is an 2104 * imbalance that would be overruled by the load balancer. 2105 */ 2106 if (env->dst_stats.node_type == node_has_spare) { 2107 unsigned int imbalance; 2108 int src_running, dst_running; 2109 2110 /* 2111 * Would movement cause an imbalance? Note that if src has 2112 * more running tasks that the imbalance is ignored as the 2113 * move improves the imbalance from the perspective of the 2114 * CPU load balancer. 
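 * (For example, with 5 tasks running on the source node and 3 on the
 * destination, the prospective move leaves 4 on each side, so
 * dst_running - src_running below is 0 and the move itself creates no
 * new imbalance.)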
2115 * */ 2116 src_running = env->src_stats.nr_running - 1; 2117 dst_running = env->dst_stats.nr_running + 1; 2118 imbalance = max(0, dst_running - src_running); 2119 imbalance = adjust_numa_imbalance(imbalance, dst_running, 2120 env->imb_numa_nr); 2121 2122 /* Use idle CPU if there is no imbalance */ 2123 if (!imbalance) { 2124 maymove = true; 2125 if (env->dst_stats.idle_cpu >= 0) { 2126 env->dst_cpu = env->dst_stats.idle_cpu; 2127 task_numa_assign(env, NULL, 0); 2128 return; 2129 } 2130 } 2131 } else { 2132 long src_load, dst_load, load; 2133 /* 2134 * If the improvement from just moving env->p direction is better 2135 * than swapping tasks around, check if a move is possible. 2136 */ 2137 load = task_h_load(env->p); 2138 dst_load = env->dst_stats.load + load; 2139 src_load = env->src_stats.load - load; 2140 maymove = !load_too_imbalanced(src_load, dst_load, env); 2141 } 2142 2143 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { 2144 /* Skip this CPU if the source task cannot migrate */ 2145 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) 2146 continue; 2147 2148 env->dst_cpu = cpu; 2149 if (task_numa_compare(env, taskimp, groupimp, maymove)) 2150 break; 2151 } 2152 } 2153 2154 static int task_numa_migrate(struct task_struct *p) 2155 { 2156 struct task_numa_env env = { 2157 .p = p, 2158 2159 .src_cpu = task_cpu(p), 2160 .src_nid = task_node(p), 2161 2162 .imbalance_pct = 112, 2163 2164 .best_task = NULL, 2165 .best_imp = 0, 2166 .best_cpu = -1, 2167 }; 2168 unsigned long taskweight, groupweight; 2169 struct sched_domain *sd; 2170 long taskimp, groupimp; 2171 struct numa_group *ng; 2172 struct rq *best_rq; 2173 int nid, ret, dist; 2174 2175 /* 2176 * Pick the lowest SD_NUMA domain, as that would have the smallest 2177 * imbalance and would be the first to start moving tasks about. 2178 * 2179 * And we want to avoid any moving of tasks about, as that would create 2180 * random movement of tasks -- counter the numa conditions we're trying 2181 * to satisfy here. 2182 */ 2183 rcu_read_lock(); 2184 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); 2185 if (sd) { 2186 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; 2187 env.imb_numa_nr = sd->imb_numa_nr; 2188 } 2189 rcu_read_unlock(); 2190 2191 /* 2192 * Cpusets can break the scheduler domain tree into smaller 2193 * balance domains, some of which do not cross NUMA boundaries. 2194 * Tasks that are "trapped" in such domains cannot be migrated 2195 * elsewhere, so there is no point in (re)trying. 2196 */ 2197 if (unlikely(!sd)) { 2198 sched_setnuma(p, task_node(p)); 2199 return -EINVAL; 2200 } 2201 2202 env.dst_nid = p->numa_preferred_nid; 2203 dist = env.dist = node_distance(env.src_nid, env.dst_nid); 2204 taskweight = task_weight(p, env.src_nid, dist); 2205 groupweight = group_weight(p, env.src_nid, dist); 2206 update_numa_stats(&env, &env.src_stats, env.src_nid, false); 2207 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; 2208 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; 2209 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); 2210 2211 /* Try to find a spot on the preferred nid. */ 2212 task_numa_find_cpu(&env, taskimp, groupimp); 2213 2214 /* 2215 * Look at other nodes in these cases: 2216 * - there is no space available on the preferred_nid 2217 * - the task is part of a numa_group that is interleaved across 2218 * multiple NUMA nodes; in order to better consolidate the group, 2219 * we need to check other locations. 
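 * (The loop below skips the source node and the preferred nid that was
 * already tried above.)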
2220 */ 2221 ng = deref_curr_numa_group(p); 2222 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { 2223 for_each_node_state(nid, N_CPU) { 2224 if (nid == env.src_nid || nid == p->numa_preferred_nid) 2225 continue; 2226 2227 dist = node_distance(env.src_nid, env.dst_nid); 2228 if (sched_numa_topology_type == NUMA_BACKPLANE && 2229 dist != env.dist) { 2230 taskweight = task_weight(p, env.src_nid, dist); 2231 groupweight = group_weight(p, env.src_nid, dist); 2232 } 2233 2234 /* Only consider nodes where both task and groups benefit */ 2235 taskimp = task_weight(p, nid, dist) - taskweight; 2236 groupimp = group_weight(p, nid, dist) - groupweight; 2237 if (taskimp < 0 && groupimp < 0) 2238 continue; 2239 2240 env.dist = dist; 2241 env.dst_nid = nid; 2242 update_numa_stats(&env, &env.dst_stats, env.dst_nid, true); 2243 task_numa_find_cpu(&env, taskimp, groupimp); 2244 } 2245 } 2246 2247 /* 2248 * If the task is part of a workload that spans multiple NUMA nodes, 2249 * and is migrating into one of the workload's active nodes, remember 2250 * this node as the task's preferred numa node, so the workload can 2251 * settle down. 2252 * A task that migrated to a second choice node will be better off 2253 * trying for a better one later. Do not set the preferred node here. 2254 */ 2255 if (ng) { 2256 if (env.best_cpu == -1) 2257 nid = env.src_nid; 2258 else 2259 nid = cpu_to_node(env.best_cpu); 2260 2261 if (nid != p->numa_preferred_nid) 2262 sched_setnuma(p, nid); 2263 } 2264 2265 /* No better CPU than the current one was found. */ 2266 if (env.best_cpu == -1) { 2267 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); 2268 return -EAGAIN; 2269 } 2270 2271 best_rq = cpu_rq(env.best_cpu); 2272 if (env.best_task == NULL) { 2273 ret = migrate_task_to(p, env.best_cpu); 2274 WRITE_ONCE(best_rq->numa_migrate_on, 0); 2275 if (ret != 0) 2276 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); 2277 return ret; 2278 } 2279 2280 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); 2281 WRITE_ONCE(best_rq->numa_migrate_on, 0); 2282 2283 if (ret != 0) 2284 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); 2285 put_task_struct(env.best_task); 2286 return ret; 2287 } 2288 2289 /* Attempt to migrate a task to a CPU on the preferred node. */ 2290 static void numa_migrate_preferred(struct task_struct *p) 2291 { 2292 unsigned long interval = HZ; 2293 2294 /* This task has no NUMA fault statistics yet */ 2295 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) 2296 return; 2297 2298 /* Periodically retry migrating the task to the preferred node */ 2299 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); 2300 p->numa_migrate_retry = jiffies + interval; 2301 2302 /* Success if task is already running on preferred CPU */ 2303 if (task_node(p) == p->numa_preferred_nid) 2304 return; 2305 2306 /* Otherwise, try migrate to a CPU on the preferred node */ 2307 task_numa_migrate(p); 2308 } 2309 2310 /* 2311 * Find out how many nodes the workload is actively running on. Do this by 2312 * tracking the nodes from which NUMA hinting faults are triggered. This can 2313 * be different from the set of nodes where the workload's memory is currently 2314 * located. 
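 * A node is counted as active below when it triggers more than
 * 1/ACTIVE_NODE_FRACTION of the hinting faults seen on the most faulted
 * node.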
2315 */ 2316 static void numa_group_count_active_nodes(struct numa_group *numa_group) 2317 { 2318 unsigned long faults, max_faults = 0; 2319 int nid, active_nodes = 0; 2320 2321 for_each_node_state(nid, N_CPU) { 2322 faults = group_faults_cpu(numa_group, nid); 2323 if (faults > max_faults) 2324 max_faults = faults; 2325 } 2326 2327 for_each_node_state(nid, N_CPU) { 2328 faults = group_faults_cpu(numa_group, nid); 2329 if (faults * ACTIVE_NODE_FRACTION > max_faults) 2330 active_nodes++; 2331 } 2332 2333 numa_group->max_faults_cpu = max_faults; 2334 numa_group->active_nodes = active_nodes; 2335 } 2336 2337 /* 2338 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS 2339 * increments. The more local the fault statistics are, the higher the scan 2340 * period will be for the next scan window. If local/(local+remote) ratio is 2341 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) 2342 * the scan period will decrease. Aim for 70% local accesses. 2343 */ 2344 #define NUMA_PERIOD_SLOTS 10 2345 #define NUMA_PERIOD_THRESHOLD 7 2346 2347 /* 2348 * Increase the scan period (slow down scanning) if the majority of 2349 * our memory is already on our local node, or if the majority of 2350 * the page accesses are shared with other processes. 2351 * Otherwise, decrease the scan period. 2352 */ 2353 static void update_task_scan_period(struct task_struct *p, 2354 unsigned long shared, unsigned long private) 2355 { 2356 unsigned int period_slot; 2357 int lr_ratio, ps_ratio; 2358 int diff; 2359 2360 unsigned long remote = p->numa_faults_locality[0]; 2361 unsigned long local = p->numa_faults_locality[1]; 2362 2363 /* 2364 * If there were no record hinting faults then either the task is 2365 * completely idle or all activity is in areas that are not of interest 2366 * to automatic numa balancing. Related to that, if there were failed 2367 * migration then it implies we are migrating too quickly or the local 2368 * node is overloaded. In either case, scan slower 2369 */ 2370 if (local + shared == 0 || p->numa_faults_locality[2]) { 2371 p->numa_scan_period = min(p->numa_scan_period_max, 2372 p->numa_scan_period << 1); 2373 2374 p->mm->numa_next_scan = jiffies + 2375 msecs_to_jiffies(p->numa_scan_period); 2376 2377 return; 2378 } 2379 2380 /* 2381 * Prepare to scale scan period relative to the current period. 2382 * == NUMA_PERIOD_THRESHOLD scan period stays the same 2383 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster) 2384 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower) 2385 */ 2386 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); 2387 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); 2388 ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared); 2389 2390 if (ps_ratio >= NUMA_PERIOD_THRESHOLD) { 2391 /* 2392 * Most memory accesses are local. There is no need to 2393 * do fast NUMA scanning, since memory is already local. 2394 */ 2395 int slot = ps_ratio - NUMA_PERIOD_THRESHOLD; 2396 if (!slot) 2397 slot = 1; 2398 diff = slot * period_slot; 2399 } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) { 2400 /* 2401 * Most memory accesses are shared with other tasks. 2402 * There is no point in continuing fast NUMA scanning, 2403 * since other tasks may just move the memory elsewhere. 
2404 */ 2405 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD; 2406 if (!slot) 2407 slot = 1; 2408 diff = slot * period_slot; 2409 } else { 2410 /* 2411 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS, 2412 * yet they are not on the local NUMA node. Speed up 2413 * NUMA scanning to get the memory moved over. 2414 */ 2415 int ratio = max(lr_ratio, ps_ratio); 2416 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; 2417 } 2418 2419 p->numa_scan_period = clamp(p->numa_scan_period + diff, 2420 task_scan_min(p), task_scan_max(p)); 2421 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2422 } 2423 2424 /* 2425 * Get the fraction of time the task has been running since the last 2426 * NUMA placement cycle. The scheduler keeps similar statistics, but 2427 * decays those on a 32ms period, which is orders of magnitude off 2428 * from the dozens-of-seconds NUMA balancing period. Use the scheduler 2429 * stats only if the task is so new there are no NUMA statistics yet. 2430 */ 2431 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) 2432 { 2433 u64 runtime, delta, now; 2434 /* Use the start of this time slice to avoid calculations. */ 2435 now = p->se.exec_start; 2436 runtime = p->se.sum_exec_runtime; 2437 2438 if (p->last_task_numa_placement) { 2439 delta = runtime - p->last_sum_exec_runtime; 2440 *period = now - p->last_task_numa_placement; 2441 2442 /* Avoid time going backwards, prevent potential divide error: */ 2443 if (unlikely((s64)*period < 0)) 2444 *period = 0; 2445 } else { 2446 delta = p->se.avg.load_sum; 2447 *period = LOAD_AVG_MAX; 2448 } 2449 2450 p->last_sum_exec_runtime = runtime; 2451 p->last_task_numa_placement = now; 2452 2453 return delta; 2454 } 2455 2456 /* 2457 * Determine the preferred nid for a task in a numa_group. This needs to 2458 * be done in a way that produces consistent results with group_weight, 2459 * otherwise workloads might not converge. 2460 */ 2461 static int preferred_group_nid(struct task_struct *p, int nid) 2462 { 2463 nodemask_t nodes; 2464 int dist; 2465 2466 /* Direct connections between all NUMA nodes. */ 2467 if (sched_numa_topology_type == NUMA_DIRECT) 2468 return nid; 2469 2470 /* 2471 * On a system with glueless mesh NUMA topology, group_weight 2472 * scores nodes according to the number of NUMA hinting faults on 2473 * both the node itself, and on nearby nodes. 2474 */ 2475 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 2476 unsigned long score, max_score = 0; 2477 int node, max_node = nid; 2478 2479 dist = sched_max_numa_distance; 2480 2481 for_each_node_state(node, N_CPU) { 2482 score = group_weight(p, node, dist); 2483 if (score > max_score) { 2484 max_score = score; 2485 max_node = node; 2486 } 2487 } 2488 return max_node; 2489 } 2490 2491 /* 2492 * Finding the preferred nid in a system with NUMA backplane 2493 * interconnect topology is more involved. The goal is to locate 2494 * tasks from numa_groups near each other in the system, and 2495 * untangle workloads from different sides of the system. This requires 2496 * searching down the hierarchy of node groups, recursively searching 2497 * inside the highest scoring group of nodes. The nodemask tricks 2498 * keep the complexity of the search down. 2499 */ 2500 nodes = node_states[N_CPU]; 2501 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { 2502 unsigned long max_faults = 0; 2503 nodemask_t max_group = NODE_MASK_NONE; 2504 int a, b; 2505 2506 /* Are there nodes at this distance from each other? 
*/ 2507 if (!find_numa_distance(dist)) 2508 continue; 2509 2510 for_each_node_mask(a, nodes) { 2511 unsigned long faults = 0; 2512 nodemask_t this_group; 2513 nodes_clear(this_group); 2514 2515 /* Sum group's NUMA faults; includes a==b case. */ 2516 for_each_node_mask(b, nodes) { 2517 if (node_distance(a, b) < dist) { 2518 faults += group_faults(p, b); 2519 node_set(b, this_group); 2520 node_clear(b, nodes); 2521 } 2522 } 2523 2524 /* Remember the top group. */ 2525 if (faults > max_faults) { 2526 max_faults = faults; 2527 max_group = this_group; 2528 /* 2529 * subtle: at the smallest distance there is 2530 * just one node left in each "group", the 2531 * winner is the preferred nid. 2532 */ 2533 nid = a; 2534 } 2535 } 2536 /* Next round, evaluate the nodes within max_group. */ 2537 if (!max_faults) 2538 break; 2539 nodes = max_group; 2540 } 2541 return nid; 2542 } 2543 2544 static void task_numa_placement(struct task_struct *p) 2545 { 2546 int seq, nid, max_nid = NUMA_NO_NODE; 2547 unsigned long max_faults = 0; 2548 unsigned long fault_types[2] = { 0, 0 }; 2549 unsigned long total_faults; 2550 u64 runtime, period; 2551 spinlock_t *group_lock = NULL; 2552 struct numa_group *ng; 2553 2554 /* 2555 * The p->mm->numa_scan_seq field gets updated without 2556 * exclusive access. Use READ_ONCE() here to ensure 2557 * that the field is read in a single access: 2558 */ 2559 seq = READ_ONCE(p->mm->numa_scan_seq); 2560 if (p->numa_scan_seq == seq) 2561 return; 2562 p->numa_scan_seq = seq; 2563 p->numa_scan_period_max = task_scan_max(p); 2564 2565 total_faults = p->numa_faults_locality[0] + 2566 p->numa_faults_locality[1]; 2567 runtime = numa_get_avg_runtime(p, &period); 2568 2569 /* If the task is part of a group prevent parallel updates to group stats */ 2570 ng = deref_curr_numa_group(p); 2571 if (ng) { 2572 group_lock = &ng->lock; 2573 spin_lock_irq(group_lock); 2574 } 2575 2576 /* Find the node with the highest number of faults */ 2577 for_each_online_node(nid) { 2578 /* Keep track of the offsets in numa_faults array */ 2579 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx; 2580 unsigned long faults = 0, group_faults = 0; 2581 int priv; 2582 2583 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { 2584 long diff, f_diff, f_weight; 2585 2586 mem_idx = task_faults_idx(NUMA_MEM, nid, priv); 2587 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv); 2588 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv); 2589 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv); 2590 2591 /* Decay existing window, copy faults since last scan */ 2592 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; 2593 fault_types[priv] += p->numa_faults[membuf_idx]; 2594 p->numa_faults[membuf_idx] = 0; 2595 2596 /* 2597 * Normalize the faults_from, so all tasks in a group 2598 * count according to CPU use, instead of by the raw 2599 * number of faults. Tasks with little runtime have 2600 * little over-all impact on throughput, and thus their 2601 * faults are less important. 
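 * (The f_weight computation below uses 16-bit fixed point:
 * runtime << 16 divided by the placement period approximates the
 * fraction of that period the task actually ran, which is then scaled
 * by this node's CPU faults relative to the total faults recorded.)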
2602 */ 2603 f_weight = div64_u64(runtime << 16, period + 1); 2604 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / 2605 (total_faults + 1); 2606 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; 2607 p->numa_faults[cpubuf_idx] = 0; 2608 2609 p->numa_faults[mem_idx] += diff; 2610 p->numa_faults[cpu_idx] += f_diff; 2611 faults += p->numa_faults[mem_idx]; 2612 p->total_numa_faults += diff; 2613 if (ng) { 2614 /* 2615 * safe because we can only change our own group 2616 * 2617 * mem_idx represents the offset for a given 2618 * nid and priv in a specific region because it 2619 * is at the beginning of the numa_faults array. 2620 */ 2621 ng->faults[mem_idx] += diff; 2622 ng->faults[cpu_idx] += f_diff; 2623 ng->total_faults += diff; 2624 group_faults += ng->faults[mem_idx]; 2625 } 2626 } 2627 2628 if (!ng) { 2629 if (faults > max_faults) { 2630 max_faults = faults; 2631 max_nid = nid; 2632 } 2633 } else if (group_faults > max_faults) { 2634 max_faults = group_faults; 2635 max_nid = nid; 2636 } 2637 } 2638 2639 /* Cannot migrate task to CPU-less node */ 2640 if (max_nid != NUMA_NO_NODE && !node_state(max_nid, N_CPU)) { 2641 int near_nid = max_nid; 2642 int distance, near_distance = INT_MAX; 2643 2644 for_each_node_state(nid, N_CPU) { 2645 distance = node_distance(max_nid, nid); 2646 if (distance < near_distance) { 2647 near_nid = nid; 2648 near_distance = distance; 2649 } 2650 } 2651 max_nid = near_nid; 2652 } 2653 2654 if (ng) { 2655 numa_group_count_active_nodes(ng); 2656 spin_unlock_irq(group_lock); 2657 max_nid = preferred_group_nid(p, max_nid); 2658 } 2659 2660 if (max_faults) { 2661 /* Set the new preferred node */ 2662 if (max_nid != p->numa_preferred_nid) 2663 sched_setnuma(p, max_nid); 2664 } 2665 2666 update_task_scan_period(p, fault_types[0], fault_types[1]); 2667 } 2668 2669 static inline int get_numa_group(struct numa_group *grp) 2670 { 2671 return refcount_inc_not_zero(&grp->refcount); 2672 } 2673 2674 static inline void put_numa_group(struct numa_group *grp) 2675 { 2676 if (refcount_dec_and_test(&grp->refcount)) 2677 kfree_rcu(grp, rcu); 2678 } 2679 2680 static void task_numa_group(struct task_struct *p, int cpupid, int flags, 2681 int *priv) 2682 { 2683 struct numa_group *grp, *my_grp; 2684 struct task_struct *tsk; 2685 bool join = false; 2686 int cpu = cpupid_to_cpu(cpupid); 2687 int i; 2688 2689 if (unlikely(!deref_curr_numa_group(p))) { 2690 unsigned int size = sizeof(struct numa_group) + 2691 NR_NUMA_HINT_FAULT_STATS * 2692 nr_node_ids * sizeof(unsigned long); 2693 2694 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 2695 if (!grp) 2696 return; 2697 2698 refcount_set(&grp->refcount, 1); 2699 grp->active_nodes = 1; 2700 grp->max_faults_cpu = 0; 2701 spin_lock_init(&grp->lock); 2702 grp->gid = p->pid; 2703 2704 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2705 grp->faults[i] = p->numa_faults[i]; 2706 2707 grp->total_faults = p->total_numa_faults; 2708 2709 grp->nr_tasks++; 2710 rcu_assign_pointer(p->numa_group, grp); 2711 } 2712 2713 rcu_read_lock(); 2714 tsk = READ_ONCE(cpu_rq(cpu)->curr); 2715 2716 if (!cpupid_match_pid(tsk, cpupid)) 2717 goto no_join; 2718 2719 grp = rcu_dereference(tsk->numa_group); 2720 if (!grp) 2721 goto no_join; 2722 2723 my_grp = deref_curr_numa_group(p); 2724 if (grp == my_grp) 2725 goto no_join; 2726 2727 /* 2728 * Only join the other group if its bigger; if we're the bigger group, 2729 * the other task will join us. 
2730 */ 2731 if (my_grp->nr_tasks > grp->nr_tasks) 2732 goto no_join; 2733 2734 /* 2735 * Tie-break on the grp address. 2736 */ 2737 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) 2738 goto no_join; 2739 2740 /* Always join threads in the same process. */ 2741 if (tsk->mm == current->mm) 2742 join = true; 2743 2744 /* Simple filter to avoid false positives due to PID collisions */ 2745 if (flags & TNF_SHARED) 2746 join = true; 2747 2748 /* Update priv based on whether false sharing was detected */ 2749 *priv = !join; 2750 2751 if (join && !get_numa_group(grp)) 2752 goto no_join; 2753 2754 rcu_read_unlock(); 2755 2756 if (!join) 2757 return; 2758 2759 BUG_ON(irqs_disabled()); 2760 double_lock_irq(&my_grp->lock, &grp->lock); 2761 2762 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { 2763 my_grp->faults[i] -= p->numa_faults[i]; 2764 grp->faults[i] += p->numa_faults[i]; 2765 } 2766 my_grp->total_faults -= p->total_numa_faults; 2767 grp->total_faults += p->total_numa_faults; 2768 2769 my_grp->nr_tasks--; 2770 grp->nr_tasks++; 2771 2772 spin_unlock(&my_grp->lock); 2773 spin_unlock_irq(&grp->lock); 2774 2775 rcu_assign_pointer(p->numa_group, grp); 2776 2777 put_numa_group(my_grp); 2778 return; 2779 2780 no_join: 2781 rcu_read_unlock(); 2782 return; 2783 } 2784 2785 /* 2786 * Get rid of NUMA statistics associated with a task (either current or dead). 2787 * If @final is set, the task is dead and has reached refcount zero, so we can 2788 * safely free all relevant data structures. Otherwise, there might be 2789 * concurrent reads from places like load balancing and procfs, and we should 2790 * reset the data back to default state without freeing ->numa_faults. 2791 */ 2792 void task_numa_free(struct task_struct *p, bool final) 2793 { 2794 /* safe: p either is current or is being freed by current */ 2795 struct numa_group *grp = rcu_dereference_raw(p->numa_group); 2796 unsigned long *numa_faults = p->numa_faults; 2797 unsigned long flags; 2798 int i; 2799 2800 if (!numa_faults) 2801 return; 2802 2803 if (grp) { 2804 spin_lock_irqsave(&grp->lock, flags); 2805 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2806 grp->faults[i] -= p->numa_faults[i]; 2807 grp->total_faults -= p->total_numa_faults; 2808 2809 grp->nr_tasks--; 2810 spin_unlock_irqrestore(&grp->lock, flags); 2811 RCU_INIT_POINTER(p->numa_group, NULL); 2812 put_numa_group(grp); 2813 } 2814 2815 if (final) { 2816 p->numa_faults = NULL; 2817 kfree(numa_faults); 2818 } else { 2819 p->total_numa_faults = 0; 2820 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2821 numa_faults[i] = 0; 2822 } 2823 } 2824 2825 /* 2826 * Got a PROT_NONE fault for a page on @node. 2827 */ 2828 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) 2829 { 2830 struct task_struct *p = current; 2831 bool migrated = flags & TNF_MIGRATED; 2832 int cpu_node = task_node(current); 2833 int local = !!(flags & TNF_FAULT_LOCAL); 2834 struct numa_group *ng; 2835 int priv; 2836 2837 if (!static_branch_likely(&sched_numa_balancing)) 2838 return; 2839 2840 /* for example, ksmd faulting in a user's mm */ 2841 if (!p->mm) 2842 return; 2843 2844 /* 2845 * NUMA faults statistics are unnecessary for the slow memory 2846 * node for memory tiering mode. 
2847 */ 2848 if (!node_is_toptier(mem_node) && 2849 (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING || 2850 !cpupid_valid(last_cpupid))) 2851 return; 2852 2853 /* Allocate buffer to track faults on a per-node basis */ 2854 if (unlikely(!p->numa_faults)) { 2855 int size = sizeof(*p->numa_faults) * 2856 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; 2857 2858 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); 2859 if (!p->numa_faults) 2860 return; 2861 2862 p->total_numa_faults = 0; 2863 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2864 } 2865 2866 /* 2867 * First accesses are treated as private, otherwise consider accesses 2868 * to be private if the accessing pid has not changed 2869 */ 2870 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { 2871 priv = 1; 2872 } else { 2873 priv = cpupid_match_pid(p, last_cpupid); 2874 if (!priv && !(flags & TNF_NO_GROUP)) 2875 task_numa_group(p, last_cpupid, flags, &priv); 2876 } 2877 2878 /* 2879 * If a workload spans multiple NUMA nodes, a shared fault that 2880 * occurs wholly within the set of nodes that the workload is 2881 * actively using should be counted as local. This allows the 2882 * scan rate to slow down when a workload has settled down. 2883 */ 2884 ng = deref_curr_numa_group(p); 2885 if (!priv && !local && ng && ng->active_nodes > 1 && 2886 numa_is_active_node(cpu_node, ng) && 2887 numa_is_active_node(mem_node, ng)) 2888 local = 1; 2889 2890 /* 2891 * Retry to migrate task to preferred node periodically, in case it 2892 * previously failed, or the scheduler moved us. 2893 */ 2894 if (time_after(jiffies, p->numa_migrate_retry)) { 2895 task_numa_placement(p); 2896 numa_migrate_preferred(p); 2897 } 2898 2899 if (migrated) 2900 p->numa_pages_migrated += pages; 2901 if (flags & TNF_MIGRATE_FAIL) 2902 p->numa_faults_locality[2] += pages; 2903 2904 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; 2905 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; 2906 p->numa_faults_locality[local] += pages; 2907 } 2908 2909 static void reset_ptenuma_scan(struct task_struct *p) 2910 { 2911 /* 2912 * We only did a read acquisition of the mmap sem, so 2913 * p->mm->numa_scan_seq is written to without exclusive access 2914 * and the update is not guaranteed to be atomic. That's not 2915 * much of an issue though, since this is just used for 2916 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not 2917 * expensive, to avoid any form of compiler optimizations: 2918 */ 2919 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); 2920 p->mm->numa_scan_offset = 0; 2921 } 2922 2923 /* 2924 * The expensive part of numa migration is done from task_work context. 2925 * Triggered from task_tick_numa(). 2926 */ 2927 static void task_numa_work(struct callback_head *work) 2928 { 2929 unsigned long migrate, next_scan, now = jiffies; 2930 struct task_struct *p = current; 2931 struct mm_struct *mm = p->mm; 2932 u64 runtime = p->se.sum_exec_runtime; 2933 struct vm_area_struct *vma; 2934 unsigned long start, end; 2935 unsigned long nr_pte_updates = 0; 2936 long pages, virtpages; 2937 2938 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); 2939 2940 work->next = work; 2941 /* 2942 * Who cares about NUMA placement when they're dying. 
2943 * 2944 * NOTE: make sure not to dereference p->mm before this check, 2945 * exit_task_work() happens _after_ exit_mm() so we could be called 2946 * without p->mm even though we still had it when we enqueued this 2947 * work. 2948 */ 2949 if (p->flags & PF_EXITING) 2950 return; 2951 2952 if (!mm->numa_next_scan) { 2953 mm->numa_next_scan = now + 2954 msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2955 } 2956 2957 /* 2958 * Enforce maximal scan/migration frequency.. 2959 */ 2960 migrate = mm->numa_next_scan; 2961 if (time_before(now, migrate)) 2962 return; 2963 2964 if (p->numa_scan_period == 0) { 2965 p->numa_scan_period_max = task_scan_max(p); 2966 p->numa_scan_period = task_scan_start(p); 2967 } 2968 2969 next_scan = now + msecs_to_jiffies(p->numa_scan_period); 2970 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) 2971 return; 2972 2973 /* 2974 * Delay this task enough that another task of this mm will likely win 2975 * the next time around. 2976 */ 2977 p->node_stamp += 2 * TICK_NSEC; 2978 2979 start = mm->numa_scan_offset; 2980 pages = sysctl_numa_balancing_scan_size; 2981 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ 2982 virtpages = pages * 8; /* Scan up to this much virtual space */ 2983 if (!pages) 2984 return; 2985 2986 2987 if (!mmap_read_trylock(mm)) 2988 return; 2989 vma = find_vma(mm, start); 2990 if (!vma) { 2991 reset_ptenuma_scan(p); 2992 start = 0; 2993 vma = mm->mmap; 2994 } 2995 for (; vma; vma = vma->vm_next) { 2996 if (!vma_migratable(vma) || !vma_policy_mof(vma) || 2997 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { 2998 continue; 2999 } 3000 3001 /* 3002 * Shared library pages mapped by multiple processes are not 3003 * migrated as it is expected they are cache replicated. Avoid 3004 * hinting faults in read-only file-backed mappings or the vdso 3005 * as migrating the pages will be of marginal benefit. 3006 */ 3007 if (!vma->vm_mm || 3008 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 3009 continue; 3010 3011 /* 3012 * Skip inaccessible VMAs to avoid any confusion between 3013 * PROT_NONE and NUMA hinting ptes 3014 */ 3015 if (!vma_is_accessible(vma)) 3016 continue; 3017 3018 do { 3019 start = max(start, vma->vm_start); 3020 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 3021 end = min(end, vma->vm_end); 3022 nr_pte_updates = change_prot_numa(vma, start, end); 3023 3024 /* 3025 * Try to scan sysctl_numa_balancing_size worth of 3026 * hpages that have at least one present PTE that 3027 * is not already pte-numa. If the VMA contains 3028 * areas that are unused or already full of prot_numa 3029 * PTEs, scan up to virtpages, to skip through those 3030 * areas faster. 3031 */ 3032 if (nr_pte_updates) 3033 pages -= (end - start) >> PAGE_SHIFT; 3034 virtpages -= (end - start) >> PAGE_SHIFT; 3035 3036 start = end; 3037 if (pages <= 0 || virtpages <= 0) 3038 goto out; 3039 3040 cond_resched(); 3041 } while (end != vma->vm_end); 3042 } 3043 3044 out: 3045 /* 3046 * It is possible to reach the end of the VMA list but the last few 3047 * VMAs are not guaranteed to the vma_migratable. If they are not, we 3048 * would find the !migratable VMA on the next scan but not reset the 3049 * scanner to the start so check it now. 3050 */ 3051 if (vma) 3052 mm->numa_scan_offset = start; 3053 else 3054 reset_ptenuma_scan(p); 3055 mmap_read_unlock(mm); 3056 3057 /* 3058 * Make sure tasks use at least 32x as much time to run other code 3059 * than they used here, to limit NUMA PTE scanning overhead to 3% max. 
3060 * Usually update_task_scan_period slows down scanning enough; on an 3061 * overloaded system we need to limit overhead on a per task basis. 3062 */ 3063 if (unlikely(p->se.sum_exec_runtime != runtime)) { 3064 u64 diff = p->se.sum_exec_runtime - runtime; 3065 p->node_stamp += 32 * diff; 3066 } 3067 } 3068 3069 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 3070 { 3071 int mm_users = 0; 3072 struct mm_struct *mm = p->mm; 3073 3074 if (mm) { 3075 mm_users = atomic_read(&mm->mm_users); 3076 if (mm_users == 1) { 3077 mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 3078 mm->numa_scan_seq = 0; 3079 } 3080 } 3081 p->node_stamp = 0; 3082 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; 3083 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 3084 p->numa_migrate_retry = 0; 3085 /* Protect against double add, see task_tick_numa and task_numa_work */ 3086 p->numa_work.next = &p->numa_work; 3087 p->numa_faults = NULL; 3088 p->numa_pages_migrated = 0; 3089 p->total_numa_faults = 0; 3090 RCU_INIT_POINTER(p->numa_group, NULL); 3091 p->last_task_numa_placement = 0; 3092 p->last_sum_exec_runtime = 0; 3093 3094 init_task_work(&p->numa_work, task_numa_work); 3095 3096 /* New address space, reset the preferred nid */ 3097 if (!(clone_flags & CLONE_VM)) { 3098 p->numa_preferred_nid = NUMA_NO_NODE; 3099 return; 3100 } 3101 3102 /* 3103 * New thread, keep existing numa_preferred_nid which should be copied 3104 * already by arch_dup_task_struct but stagger when scans start. 3105 */ 3106 if (mm) { 3107 unsigned int delay; 3108 3109 delay = min_t(unsigned int, task_scan_max(current), 3110 current->numa_scan_period * mm_users * NSEC_PER_MSEC); 3111 delay += 2 * TICK_NSEC; 3112 p->node_stamp = delay; 3113 } 3114 } 3115 3116 /* 3117 * Drive the periodic memory faults.. 3118 */ 3119 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 3120 { 3121 struct callback_head *work = &curr->numa_work; 3122 u64 period, now; 3123 3124 /* 3125 * We don't care about NUMA placement if we don't have memory. 3126 */ 3127 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) 3128 return; 3129 3130 /* 3131 * Using runtime rather than walltime has the dual advantage that 3132 * we (mostly) drive the selection from busy threads and that the 3133 * task needs to have done some actual work before we bother with 3134 * NUMA placement. 3135 */ 3136 now = curr->se.sum_exec_runtime; 3137 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; 3138 3139 if (now > curr->node_stamp + period) { 3140 if (!curr->node_stamp) 3141 curr->numa_scan_period = task_scan_start(curr); 3142 curr->node_stamp += period; 3143 3144 if (!time_before(jiffies, curr->mm->numa_next_scan)) 3145 task_work_add(curr, work, TWA_RESUME); 3146 } 3147 } 3148 3149 static void update_scan_period(struct task_struct *p, int new_cpu) 3150 { 3151 int src_nid = cpu_to_node(task_cpu(p)); 3152 int dst_nid = cpu_to_node(new_cpu); 3153 3154 if (!static_branch_likely(&sched_numa_balancing)) 3155 return; 3156 3157 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) 3158 return; 3159 3160 if (src_nid == dst_nid) 3161 return; 3162 3163 /* 3164 * Allow resets if faults have been trapped before one scan 3165 * has completed. This is most likely due to a new task that 3166 * is pulled cross-node due to wakeups or load balancing. 
3167 */ 3168 if (p->numa_scan_seq) { 3169 /* 3170 * Avoid scan adjustments if moving to the preferred 3171 * node or if the task was not previously running on 3172 * the preferred node. 3173 */ 3174 if (dst_nid == p->numa_preferred_nid || 3175 (p->numa_preferred_nid != NUMA_NO_NODE && 3176 src_nid != p->numa_preferred_nid)) 3177 return; 3178 } 3179 3180 p->numa_scan_period = task_scan_start(p); 3181 } 3182 3183 #else 3184 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 3185 { 3186 } 3187 3188 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) 3189 { 3190 } 3191 3192 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) 3193 { 3194 } 3195 3196 static inline void update_scan_period(struct task_struct *p, int new_cpu) 3197 { 3198 } 3199 3200 #endif /* CONFIG_NUMA_BALANCING */ 3201 3202 static void 3203 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) 3204 { 3205 update_load_add(&cfs_rq->load, se->load.weight); 3206 #ifdef CONFIG_SMP 3207 if (entity_is_task(se)) { 3208 struct rq *rq = rq_of(cfs_rq); 3209 3210 account_numa_enqueue(rq, task_of(se)); 3211 list_add(&se->group_node, &rq->cfs_tasks); 3212 } 3213 #endif 3214 cfs_rq->nr_running++; 3215 if (se_is_idle(se)) 3216 cfs_rq->idle_nr_running++; 3217 } 3218 3219 static void 3220 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) 3221 { 3222 update_load_sub(&cfs_rq->load, se->load.weight); 3223 #ifdef CONFIG_SMP 3224 if (entity_is_task(se)) { 3225 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); 3226 list_del_init(&se->group_node); 3227 } 3228 #endif 3229 cfs_rq->nr_running--; 3230 if (se_is_idle(se)) 3231 cfs_rq->idle_nr_running--; 3232 } 3233 3234 /* 3235 * Signed add and clamp on underflow. 3236 * 3237 * Explicitly do a load-store to ensure the intermediate value never hits 3238 * memory. This allows lockless observations without ever seeing the negative 3239 * values. 3240 */ 3241 #define add_positive(_ptr, _val) do { \ 3242 typeof(_ptr) ptr = (_ptr); \ 3243 typeof(_val) val = (_val); \ 3244 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 3245 \ 3246 res = var + val; \ 3247 \ 3248 if (val < 0 && res > var) \ 3249 res = 0; \ 3250 \ 3251 WRITE_ONCE(*ptr, res); \ 3252 } while (0) 3253 3254 /* 3255 * Unsigned subtract and clamp on underflow. 3256 * 3257 * Explicitly do a load-store to ensure the intermediate value never hits 3258 * memory. This allows lockless observations without ever seeing the negative 3259 * values. 3260 */ 3261 #define sub_positive(_ptr, _val) do { \ 3262 typeof(_ptr) ptr = (_ptr); \ 3263 typeof(*ptr) val = (_val); \ 3264 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 3265 res = var - val; \ 3266 if (res > var) \ 3267 res = 0; \ 3268 WRITE_ONCE(*ptr, res); \ 3269 } while (0) 3270 3271 /* 3272 * Remove and clamp on negative, from a local variable. 3273 * 3274 * A variant of sub_positive(), which does not use explicit load-store 3275 * and is thus optimized for local variable updates. 
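 * For example, lsub_positive(&avail, used) leaves 'avail' at zero rather
 * than wrapping around when 'used' is larger (the names here are purely
 * illustrative).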
3276 */ 3277 #define lsub_positive(_ptr, _val) do { \ 3278 typeof(_ptr) ptr = (_ptr); \ 3279 *ptr -= min_t(typeof(*ptr), *ptr, _val); \ 3280 } while (0) 3281 3282 #ifdef CONFIG_SMP 3283 static inline void 3284 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3285 { 3286 cfs_rq->avg.load_avg += se->avg.load_avg; 3287 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; 3288 } 3289 3290 static inline void 3291 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3292 { 3293 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); 3294 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); 3295 /* See update_cfs_rq_load_avg() */ 3296 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, 3297 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); 3298 } 3299 #else 3300 static inline void 3301 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 3302 static inline void 3303 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 3304 #endif 3305 3306 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, 3307 unsigned long weight) 3308 { 3309 if (se->on_rq) { 3310 /* commit outstanding execution time */ 3311 if (cfs_rq->curr == se) 3312 update_curr(cfs_rq); 3313 update_load_sub(&cfs_rq->load, se->load.weight); 3314 } 3315 dequeue_load_avg(cfs_rq, se); 3316 3317 update_load_set(&se->load, weight); 3318 3319 #ifdef CONFIG_SMP 3320 do { 3321 u32 divider = get_pelt_divider(&se->avg); 3322 3323 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); 3324 } while (0); 3325 #endif 3326 3327 enqueue_load_avg(cfs_rq, se); 3328 if (se->on_rq) 3329 update_load_add(&cfs_rq->load, se->load.weight); 3330 3331 } 3332 3333 void reweight_task(struct task_struct *p, int prio) 3334 { 3335 struct sched_entity *se = &p->se; 3336 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3337 struct load_weight *load = &se->load; 3338 unsigned long weight = scale_load(sched_prio_to_weight[prio]); 3339 3340 reweight_entity(cfs_rq, se, weight); 3341 load->inv_weight = sched_prio_to_wmult[prio]; 3342 } 3343 3344 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); 3345 3346 #ifdef CONFIG_FAIR_GROUP_SCHED 3347 #ifdef CONFIG_SMP 3348 /* 3349 * All this does is approximate the hierarchical proportion which includes that 3350 * global sum we all love to hate. 3351 * 3352 * That is, the weight of a group entity, is the proportional share of the 3353 * group weight based on the group runqueue weights. That is: 3354 * 3355 * tg->weight * grq->load.weight 3356 * ge->load.weight = ----------------------------- (1) 3357 * \Sum grq->load.weight 3358 * 3359 * Now, because computing that sum is prohibitively expensive to compute (been 3360 * there, done that) we approximate it with this average stuff. The average 3361 * moves slower and therefore the approximation is cheaper and more stable. 3362 * 3363 * So instead of the above, we substitute: 3364 * 3365 * grq->load.weight -> grq->avg.load_avg (2) 3366 * 3367 * which yields the following: 3368 * 3369 * tg->weight * grq->avg.load_avg 3370 * ge->load.weight = ------------------------------ (3) 3371 * tg->load_avg 3372 * 3373 * Where: tg->load_avg ~= \Sum grq->avg.load_avg 3374 * 3375 * That is shares_avg, and it is right (given the approximation (2)). 3376 * 3377 * The problem with it is that because the average is slow -- it was designed 3378 * to be exactly that of course -- this leads to transients in boundary 3379 * conditions. 
In specific, the case where the group was idle and we start the 3380 * one task. It takes time for our CPU's grq->avg.load_avg to build up, 3381 * yielding bad latency etc.. 3382 * 3383 * Now, in that special case (1) reduces to: 3384 * 3385 * tg->weight * grq->load.weight 3386 * ge->load.weight = ----------------------------- = tg->weight (4) 3387 * grp->load.weight 3388 * 3389 * That is, the sum collapses because all other CPUs are idle; the UP scenario. 3390 * 3391 * So what we do is modify our approximation (3) to approach (4) in the (near) 3392 * UP case, like: 3393 * 3394 * ge->load.weight = 3395 * 3396 * tg->weight * grq->load.weight 3397 * --------------------------------------------------- (5) 3398 * tg->load_avg - grq->avg.load_avg + grq->load.weight 3399 * 3400 * But because grq->load.weight can drop to 0, resulting in a divide by zero, 3401 * we need to use grq->avg.load_avg as its lower bound, which then gives: 3402 * 3403 * 3404 * tg->weight * grq->load.weight 3405 * ge->load.weight = ----------------------------- (6) 3406 * tg_load_avg' 3407 * 3408 * Where: 3409 * 3410 * tg_load_avg' = tg->load_avg - grq->avg.load_avg + 3411 * max(grq->load.weight, grq->avg.load_avg) 3412 * 3413 * And that is shares_weight and is icky. In the (near) UP case it approaches 3414 * (4) while in the normal case it approaches (3). It consistently 3415 * overestimates the ge->load.weight and therefore: 3416 * 3417 * \Sum ge->load.weight >= tg->weight 3418 * 3419 * hence icky! 3420 */ 3421 static long calc_group_shares(struct cfs_rq *cfs_rq) 3422 { 3423 long tg_weight, tg_shares, load, shares; 3424 struct task_group *tg = cfs_rq->tg; 3425 3426 tg_shares = READ_ONCE(tg->shares); 3427 3428 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); 3429 3430 tg_weight = atomic_long_read(&tg->load_avg); 3431 3432 /* Ensure tg_weight >= load */ 3433 tg_weight -= cfs_rq->tg_load_avg_contrib; 3434 tg_weight += load; 3435 3436 shares = (tg_shares * load); 3437 if (tg_weight) 3438 shares /= tg_weight; 3439 3440 /* 3441 * MIN_SHARES has to be unscaled here to support per-CPU partitioning 3442 * of a group with small tg->shares value. It is a floor value which is 3443 * assigned as a minimum load.weight to the sched_entity representing 3444 * the group on a CPU. 3445 * 3446 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 3447 * on an 8-core system with 8 tasks each runnable on one CPU shares has 3448 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In 3449 * case no task is runnable on a CPU MIN_SHARES=2 should be returned 3450 * instead of 0. 3451 */ 3452 return clamp_t(long, shares, MIN_SHARES, tg_shares); 3453 } 3454 #endif /* CONFIG_SMP */ 3455 3456 /* 3457 * Recomputes the group entity based on the current state of its group 3458 * runqueue. 
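 * (With CONFIG_SMP the weight comes from calc_group_shares() above;
 * without it the group entity simply uses tg->shares, see the #ifndef
 * branch below.)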
3459 */ 3460 static void update_cfs_group(struct sched_entity *se) 3461 { 3462 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3463 long shares; 3464 3465 if (!gcfs_rq) 3466 return; 3467 3468 if (throttled_hierarchy(gcfs_rq)) 3469 return; 3470 3471 #ifndef CONFIG_SMP 3472 shares = READ_ONCE(gcfs_rq->tg->shares); 3473 3474 if (likely(se->load.weight == shares)) 3475 return; 3476 #else 3477 shares = calc_group_shares(gcfs_rq); 3478 #endif 3479 3480 reweight_entity(cfs_rq_of(se), se, shares); 3481 } 3482 3483 #else /* CONFIG_FAIR_GROUP_SCHED */ 3484 static inline void update_cfs_group(struct sched_entity *se) 3485 { 3486 } 3487 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3488 3489 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) 3490 { 3491 struct rq *rq = rq_of(cfs_rq); 3492 3493 if (&rq->cfs == cfs_rq) { 3494 /* 3495 * There are a few boundary cases this might miss but it should 3496 * get called often enough that that should (hopefully) not be 3497 * a real problem. 3498 * 3499 * It will not get called when we go idle, because the idle 3500 * thread is a different class (!fair), nor will the utilization 3501 * number include things like RT tasks. 3502 * 3503 * As is, the util number is not freq-invariant (we'd have to 3504 * implement arch_scale_freq_capacity() for that). 3505 * 3506 * See cpu_util_cfs(). 3507 */ 3508 cpufreq_update_util(rq, flags); 3509 } 3510 } 3511 3512 #ifdef CONFIG_SMP 3513 static inline bool load_avg_is_decayed(struct sched_avg *sa) 3514 { 3515 if (sa->load_sum) 3516 return false; 3517 3518 if (sa->util_sum) 3519 return false; 3520 3521 if (sa->runnable_sum) 3522 return false; 3523 3524 /* 3525 * _avg must be null when _sum are null because _avg = _sum / divider 3526 * Make sure that rounding and/or propagation of PELT values never 3527 * break this. 3528 */ 3529 SCHED_WARN_ON(sa->load_avg || 3530 sa->util_avg || 3531 sa->runnable_avg); 3532 3533 return true; 3534 } 3535 3536 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3537 { 3538 return u64_u32_load_copy(cfs_rq->avg.last_update_time, 3539 cfs_rq->last_update_time_copy); 3540 } 3541 #ifdef CONFIG_FAIR_GROUP_SCHED 3542 /* 3543 * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list 3544 * immediately before a parent cfs_rq, and cfs_rqs are removed from the list 3545 * bottom-up, we only have to test whether the cfs_rq before us on the list 3546 * is our child. 3547 * If cfs_rq is not on the list, test whether a child needs its to be added to 3548 * connect a branch to the tree * (see list_add_leaf_cfs_rq() for details). 3549 */ 3550 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) 3551 { 3552 struct cfs_rq *prev_cfs_rq; 3553 struct list_head *prev; 3554 3555 if (cfs_rq->on_list) { 3556 prev = cfs_rq->leaf_cfs_rq_list.prev; 3557 } else { 3558 struct rq *rq = rq_of(cfs_rq); 3559 3560 prev = rq->tmp_alone_branch; 3561 } 3562 3563 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); 3564 3565 return (prev_cfs_rq->tg->parent == cfs_rq->tg); 3566 } 3567 3568 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 3569 { 3570 if (cfs_rq->load.weight) 3571 return false; 3572 3573 if (!load_avg_is_decayed(&cfs_rq->avg)) 3574 return false; 3575 3576 if (child_cfs_rq_on_list(cfs_rq)) 3577 return false; 3578 3579 return true; 3580 } 3581 3582 /** 3583 * update_tg_load_avg - update the tg's load avg 3584 * @cfs_rq: the cfs_rq whose avg changed 3585 * 3586 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. 
3587 * However, because tg->load_avg is a global value there are performance
3588 * considerations.
3589 *
3590 * In order to avoid having to look at the other cfs_rq's, we use a
3591 * differential update where we store the last value we propagated. This in
3592 * turn allows skipping updates if the differential is 'small'.
3593 *
3594 * Updating tg's load_avg is necessary before update_cfs_share().
3595 */
3596 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
3597 {
3598 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
3599 
3600 /*
3601 * No need to update load_avg for root_task_group as it is not used.
3602 */
3603 if (cfs_rq->tg == &root_task_group)
3604 return;
3605 
3606 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
3607 atomic_long_add(delta, &cfs_rq->tg->load_avg);
3608 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
3609 }
3610 }
3611 
3612 /*
3613 * Called within set_task_rq() right before setting a task's CPU. The
3614 * caller only guarantees p->pi_lock is held; no other assumptions,
3615 * including the state of rq->lock, should be made.
3616 */
3617 void set_task_rq_fair(struct sched_entity *se,
3618 struct cfs_rq *prev, struct cfs_rq *next)
3619 {
3620 u64 p_last_update_time;
3621 u64 n_last_update_time;
3622 
3623 if (!sched_feat(ATTACH_AGE_LOAD))
3624 return;
3625 
3626 /*
3627 * We are supposed to update the task to "current" time, so that it is up to
3628 * date and ready to go to the new CPU/cfs_rq. But we have difficulty
3629 * getting what the current time is, so simply throw away the out-of-date
3630 * time. This results in the wakee task being less decayed, but giving
3631 * the wakee more load does not sound bad.
3632 */
3633 if (!(se->avg.last_update_time && prev))
3634 return;
3635 
3636 p_last_update_time = cfs_rq_last_update_time(prev);
3637 n_last_update_time = cfs_rq_last_update_time(next);
3638 
3639 __update_load_avg_blocked_se(p_last_update_time, se);
3640 se->avg.last_update_time = n_last_update_time;
3641 }
3642 
3643 /*
3644 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
3645 * propagate its contribution. The key to this propagation is the invariant
3646 * that for each group:
3647 *
3648 * ge->avg == grq->avg (1)
3649 *
3650 * _IFF_ we look at the pure running and runnable sums. Because they
3651 * represent the very same entity, just at different points in the hierarchy.
3652 *
3653 * Per the above, update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
3654 * and simply copy the running/runnable sum over (but still wrong, because
3655 * the group entity and group rq do not have their PELT windows aligned).
3656 *
3657 * However, update_tg_cfs_load() is more complex. So we have:
3658 *
3659 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
3660 *
3661 * And since, like util, the runnable part should be directly transferable,
3662 * the following would _appear_ to be the straightforward approach:
3663 *
3664 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
3665 *
3666 * And per (1) we have:
3667 *
3668 * ge->avg.runnable_avg == grq->avg.runnable_avg
3669 *
3670 * Which gives:
3671 *
3672 * ge->load.weight * grq->avg.load_avg
3673 * ge->avg.load_avg = ----------------------------------- (4)
3674 * grq->load.weight
3675 *
3676 * Except that is wrong!
3677 *
3678 * Because while for entities historical weight is not important and we
3679 * really only care about our future and therefore can consider a pure
3680 * runnable sum, runqueues can NOT do this.
3681 * 3682 * We specifically want runqueues to have a load_avg that includes 3683 * historical weights. Those represent the blocked load, the load we expect 3684 * to (shortly) return to us. This only works by keeping the weights as 3685 * integral part of the sum. We therefore cannot decompose as per (3). 3686 * 3687 * Another reason this doesn't work is that runnable isn't a 0-sum entity. 3688 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the 3689 * rq itself is runnable anywhere between 2/3 and 1 depending on how the 3690 * runnable section of these tasks overlap (or not). If they were to perfectly 3691 * align the rq as a whole would be runnable 2/3 of the time. If however we 3692 * always have at least 1 runnable task, the rq as a whole is always runnable. 3693 * 3694 * So we'll have to approximate.. :/ 3695 * 3696 * Given the constraint: 3697 * 3698 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX 3699 * 3700 * We can construct a rule that adds runnable to a rq by assuming minimal 3701 * overlap. 3702 * 3703 * On removal, we'll assume each task is equally runnable; which yields: 3704 * 3705 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight 3706 * 3707 * XXX: only do this for the part of runnable > running ? 3708 * 3709 */ 3710 static inline void 3711 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3712 { 3713 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; 3714 u32 new_sum, divider; 3715 3716 /* Nothing to update */ 3717 if (!delta_avg) 3718 return; 3719 3720 /* 3721 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3722 * See ___update_load_avg() for details. 3723 */ 3724 divider = get_pelt_divider(&cfs_rq->avg); 3725 3726 3727 /* Set new sched_entity's utilization */ 3728 se->avg.util_avg = gcfs_rq->avg.util_avg; 3729 new_sum = se->avg.util_avg * divider; 3730 delta_sum = (long)new_sum - (long)se->avg.util_sum; 3731 se->avg.util_sum = new_sum; 3732 3733 /* Update parent cfs_rq utilization */ 3734 add_positive(&cfs_rq->avg.util_avg, delta_avg); 3735 add_positive(&cfs_rq->avg.util_sum, delta_sum); 3736 3737 /* See update_cfs_rq_load_avg() */ 3738 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, 3739 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); 3740 } 3741 3742 static inline void 3743 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3744 { 3745 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; 3746 u32 new_sum, divider; 3747 3748 /* Nothing to update */ 3749 if (!delta_avg) 3750 return; 3751 3752 /* 3753 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3754 * See ___update_load_avg() for details. 
3755 */ 3756 divider = get_pelt_divider(&cfs_rq->avg); 3757 3758 /* Set new sched_entity's runnable */ 3759 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; 3760 new_sum = se->avg.runnable_avg * divider; 3761 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; 3762 se->avg.runnable_sum = new_sum; 3763 3764 /* Update parent cfs_rq runnable */ 3765 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); 3766 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); 3767 /* See update_cfs_rq_load_avg() */ 3768 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, 3769 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); 3770 } 3771 3772 static inline void 3773 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3774 { 3775 long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; 3776 unsigned long load_avg; 3777 u64 load_sum = 0; 3778 s64 delta_sum; 3779 u32 divider; 3780 3781 if (!runnable_sum) 3782 return; 3783 3784 gcfs_rq->prop_runnable_sum = 0; 3785 3786 /* 3787 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 3788 * See ___update_load_avg() for details. 3789 */ 3790 divider = get_pelt_divider(&cfs_rq->avg); 3791 3792 if (runnable_sum >= 0) { 3793 /* 3794 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until 3795 * the CPU is saturated running == runnable. 3796 */ 3797 runnable_sum += se->avg.load_sum; 3798 runnable_sum = min_t(long, runnable_sum, divider); 3799 } else { 3800 /* 3801 * Estimate the new unweighted runnable_sum of the gcfs_rq by 3802 * assuming all tasks are equally runnable. 3803 */ 3804 if (scale_load_down(gcfs_rq->load.weight)) { 3805 load_sum = div_u64(gcfs_rq->avg.load_sum, 3806 scale_load_down(gcfs_rq->load.weight)); 3807 } 3808 3809 /* But make sure to not inflate se's runnable */ 3810 runnable_sum = min(se->avg.load_sum, load_sum); 3811 } 3812 3813 /* 3814 * runnable_sum can't be lower than running_sum 3815 * Rescale running sum to be in the same range as runnable sum 3816 * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT] 3817 * runnable_sum is in [0 : LOAD_AVG_MAX] 3818 */ 3819 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; 3820 runnable_sum = max(runnable_sum, running_sum); 3821 3822 load_sum = se_weight(se) * runnable_sum; 3823 load_avg = div_u64(load_sum, divider); 3824 3825 delta_avg = load_avg - se->avg.load_avg; 3826 if (!delta_avg) 3827 return; 3828 3829 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; 3830 3831 se->avg.load_sum = runnable_sum; 3832 se->avg.load_avg = load_avg; 3833 add_positive(&cfs_rq->avg.load_avg, delta_avg); 3834 add_positive(&cfs_rq->avg.load_sum, delta_sum); 3835 /* See update_cfs_rq_load_avg() */ 3836 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, 3837 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); 3838 } 3839 3840 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) 3841 { 3842 cfs_rq->propagate = 1; 3843 cfs_rq->prop_runnable_sum += runnable_sum; 3844 } 3845 3846 /* Update task and its cfs_rq load average */ 3847 static inline int propagate_entity_load_avg(struct sched_entity *se) 3848 { 3849 struct cfs_rq *cfs_rq, *gcfs_rq; 3850 3851 if (entity_is_task(se)) 3852 return 0; 3853 3854 gcfs_rq = group_cfs_rq(se); 3855 if (!gcfs_rq->propagate) 3856 return 0; 3857 3858 gcfs_rq->propagate = 0; 3859 3860 cfs_rq = cfs_rq_of(se); 3861 3862 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); 3863 3864 update_tg_cfs_util(cfs_rq, se, gcfs_rq); 3865 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); 3866 
update_tg_cfs_load(cfs_rq, se, gcfs_rq); 3867 3868 trace_pelt_cfs_tp(cfs_rq); 3869 trace_pelt_se_tp(se); 3870 3871 return 1; 3872 } 3873 3874 /* 3875 * Check if we need to update the load and the utilization of a blocked 3876 * group_entity: 3877 */ 3878 static inline bool skip_blocked_update(struct sched_entity *se) 3879 { 3880 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3881 3882 /* 3883 * If the sched_entity still has a non-zero load or utilization, we have to 3884 * decay it: 3885 */ 3886 if (se->avg.load_avg || se->avg.util_avg) 3887 return false; 3888 3889 /* 3890 * If there is a pending propagation, we have to update the load and 3891 * the utilization of the sched_entity: 3892 */ 3893 if (gcfs_rq->propagate) 3894 return false; 3895 3896 /* 3897 * Otherwise, the load and the utilization of the sched_entity are 3898 * already zero and there is no pending propagation, so it will be a 3899 * waste of time to try to decay it: 3900 */ 3901 return true; 3902 } 3903 3904 #else /* CONFIG_FAIR_GROUP_SCHED */ 3905 3906 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {} 3907 3908 static inline int propagate_entity_load_avg(struct sched_entity *se) 3909 { 3910 return 0; 3911 } 3912 3913 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} 3914 3915 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3916 3917 #ifdef CONFIG_NO_HZ_COMMON 3918 static inline void migrate_se_pelt_lag(struct sched_entity *se) 3919 { 3920 u64 throttled = 0, now, lut; 3921 struct cfs_rq *cfs_rq; 3922 struct rq *rq; 3923 bool is_idle; 3924 3925 if (load_avg_is_decayed(&se->avg)) 3926 return; 3927 3928 cfs_rq = cfs_rq_of(se); 3929 rq = rq_of(cfs_rq); 3930 3931 rcu_read_lock(); 3932 is_idle = is_idle_task(rcu_dereference(rq->curr)); 3933 rcu_read_unlock(); 3934 3935 /* 3936 * The lag estimation comes with a cost we don't want to pay all the 3937 * time. Hence, we limit it to the case where the source CPU is idle and 3938 * we know we are at the greatest risk of having an outdated clock. 3939 */ 3940 if (!is_idle) 3941 return; 3942 3943 /* 3944 * Estimated "now" is: last_update_time + cfs_idle_lag + rq_idle_lag, where: 3945 * 3946 * last_update_time (the cfs_rq's last_update_time) 3947 * = cfs_rq_clock_pelt()@cfs_rq_idle 3948 * = rq_clock_pelt()@cfs_rq_idle 3949 * - cfs->throttled_clock_pelt_time@cfs_rq_idle 3950 * 3951 * cfs_idle_lag (delta between rq's update and cfs_rq's update) 3952 * = rq_clock_pelt()@rq_idle - rq_clock_pelt()@cfs_rq_idle 3953 * 3954 * rq_idle_lag (delta between now and rq's update) 3955 * = sched_clock_cpu() - rq_clock()@rq_idle 3956 * 3957 * We can then write: 3958 * 3959 * now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time + 3960 * sched_clock_cpu() - rq_clock()@rq_idle 3961 * Where: 3962 * rq_clock_pelt()@rq_idle is rq->clock_pelt_idle 3963 * rq_clock()@rq_idle is rq->clock_idle 3964 * cfs->throttled_clock_pelt_time@cfs_rq_idle 3965 * is cfs_rq->throttled_pelt_idle 3966 */ 3967 3968 #ifdef CONFIG_CFS_BANDWIDTH 3969 throttled = u64_u32_load(cfs_rq->throttled_pelt_idle); 3970 /* The clock has been stopped for throttling */ 3971 if (throttled == U64_MAX) 3972 return; 3973 #endif 3974 now = u64_u32_load(rq->clock_pelt_idle); 3975 /* 3976 * Paired with _update_idle_rq_clock_pelt(). It ensures that, at worst, we 3977 * observe the old clock_pelt_idle value together with the new clock_idle, 3978 * which leads to an underestimation. The opposite would lead to an 3979 * overestimation.
3980 */ 3981 smp_rmb(); 3982 lut = cfs_rq_last_update_time(cfs_rq); 3983 3984 now -= throttled; 3985 if (now < lut) 3986 /* 3987 * cfs_rq->avg.last_update_time is more recent than our 3988 * estimation, let's use it. 3989 */ 3990 now = lut; 3991 else 3992 now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle); 3993 3994 __update_load_avg_blocked_se(now, se); 3995 } 3996 #else 3997 static void migrate_se_pelt_lag(struct sched_entity *se) {} 3998 #endif 3999 4000 /** 4001 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages 4002 * @now: current time, as per cfs_rq_clock_pelt() 4003 * @cfs_rq: cfs_rq to update 4004 * 4005 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) 4006 * avg. The immediate corollary is that all (fair) tasks must be attached, see 4007 * post_init_entity_util_avg(). 4008 * 4009 * cfs_rq->avg is used for task_h_load() and update_cfs_group() for example. 4010 * 4011 * Return: true if the load decayed or we removed load. 4012 * 4013 * Since both these conditions indicate a changed cfs_rq->avg.load we should 4014 * call update_tg_load_avg() when this function returns true. 4015 */ 4016 static inline int 4017 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 4018 { 4019 unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0; 4020 struct sched_avg *sa = &cfs_rq->avg; 4021 int decayed = 0; 4022 4023 if (cfs_rq->removed.nr) { 4024 unsigned long r; 4025 u32 divider = get_pelt_divider(&cfs_rq->avg); 4026 4027 raw_spin_lock(&cfs_rq->removed.lock); 4028 swap(cfs_rq->removed.util_avg, removed_util); 4029 swap(cfs_rq->removed.load_avg, removed_load); 4030 swap(cfs_rq->removed.runnable_avg, removed_runnable); 4031 cfs_rq->removed.nr = 0; 4032 raw_spin_unlock(&cfs_rq->removed.lock); 4033 4034 r = removed_load; 4035 sub_positive(&sa->load_avg, r); 4036 sub_positive(&sa->load_sum, r * divider); 4037 /* See sa->util_sum below */ 4038 sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER); 4039 4040 r = removed_util; 4041 sub_positive(&sa->util_avg, r); 4042 sub_positive(&sa->util_sum, r * divider); 4043 /* 4044 * Because of rounding, se->util_sum might end up being +1 more than 4045 * cfs->util_sum. Although this is not a problem by itself, detaching 4046 * a lot of tasks with the rounding problem between 2 updates of 4047 * util_avg (~1ms) can make cfs->util_sum become null whereas 4048 * cfs->util_avg is not. 4049 * Check that util_sum is still above its lower bound for the new 4050 * util_avg. Given that period_contrib might have moved since the last 4051 * sync, we are only sure that util_sum must be above or equal to 4052 * util_avg * minimum possible divider 4053 */ 4054 sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER); 4055 4056 r = removed_runnable; 4057 sub_positive(&sa->runnable_avg, r); 4058 sub_positive(&sa->runnable_sum, r * divider); 4059 /* See sa->util_sum above */ 4060 sa->runnable_sum = max_t(u32, sa->runnable_sum, 4061 sa->runnable_avg * PELT_MIN_DIVIDER); 4062 4063 /* 4064 * removed_runnable is the unweighted version of removed_load so we 4065 * can use it to estimate removed_load_sum.
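 *
 * As a rough illustration: a single always-runnable task has
 * runnable_avg ~= 1024, so removed_runnable * divider is about
 * divider << SCHED_CAPACITY_SHIFT; the shift right below turns that
 * back into ~divider, which is the (unweighted) load_sum such a task
 * would have carried.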
4066 */ 4067 add_tg_cfs_propagate(cfs_rq, 4068 -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT); 4069 4070 decayed = 1; 4071 } 4072 4073 decayed |= __update_load_avg_cfs_rq(now, cfs_rq); 4074 u64_u32_store_copy(sa->last_update_time, 4075 cfs_rq->last_update_time_copy, 4076 sa->last_update_time); 4077 return decayed; 4078 } 4079 4080 /** 4081 * attach_entity_load_avg - attach this entity to its cfs_rq load avg 4082 * @cfs_rq: cfs_rq to attach to 4083 * @se: sched_entity to attach 4084 * 4085 * Must call update_cfs_rq_load_avg() before this, since we rely on 4086 * cfs_rq->avg.last_update_time being current. 4087 */ 4088 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 4089 { 4090 /* 4091 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. 4092 * See ___update_load_avg() for details. 4093 */ 4094 u32 divider = get_pelt_divider(&cfs_rq->avg); 4095 4096 /* 4097 * When we attach the @se to the @cfs_rq, we must align the decay 4098 * window because without that, really weird and wonderful things can 4099 * happen. 4100 * 4101 * XXX illustrate 4102 */ 4103 se->avg.last_update_time = cfs_rq->avg.last_update_time; 4104 se->avg.period_contrib = cfs_rq->avg.period_contrib; 4105 4106 /* 4107 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new 4108 * period_contrib. This isn't strictly correct, but since we're 4109 * entirely outside of the PELT hierarchy, nobody cares if we truncate 4110 * _sum a little. 4111 */ 4112 se->avg.util_sum = se->avg.util_avg * divider; 4113 4114 se->avg.runnable_sum = se->avg.runnable_avg * divider; 4115 4116 se->avg.load_sum = se->avg.load_avg * divider; 4117 if (se_weight(se) < se->avg.load_sum) 4118 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); 4119 else 4120 se->avg.load_sum = 1; 4121 4122 enqueue_load_avg(cfs_rq, se); 4123 cfs_rq->avg.util_avg += se->avg.util_avg; 4124 cfs_rq->avg.util_sum += se->avg.util_sum; 4125 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; 4126 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; 4127 4128 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); 4129 4130 cfs_rq_util_change(cfs_rq, 0); 4131 4132 trace_pelt_cfs_tp(cfs_rq); 4133 } 4134 4135 /** 4136 * detach_entity_load_avg - detach this entity from its cfs_rq load avg 4137 * @cfs_rq: cfs_rq to detach from 4138 * @se: sched_entity to detach 4139 * 4140 * Must call update_cfs_rq_load_avg() before this, since we rely on 4141 * cfs_rq->avg.last_update_time being current. 
4142 */ 4143 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 4144 { 4145 dequeue_load_avg(cfs_rq, se); 4146 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); 4147 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); 4148 /* See update_cfs_rq_load_avg() */ 4149 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, 4150 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); 4151 4152 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); 4153 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); 4154 /* See update_cfs_rq_load_avg() */ 4155 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, 4156 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); 4157 4158 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); 4159 4160 cfs_rq_util_change(cfs_rq, 0); 4161 4162 trace_pelt_cfs_tp(cfs_rq); 4163 } 4164 4165 /* 4166 * Optional action to be done while updating the load average 4167 */ 4168 #define UPDATE_TG 0x1 4169 #define SKIP_AGE_LOAD 0x2 4170 #define DO_ATTACH 0x4 4171 4172 /* Update task and its cfs_rq load average */ 4173 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 4174 { 4175 u64 now = cfs_rq_clock_pelt(cfs_rq); 4176 int decayed; 4177 4178 /* 4179 * Track task load average for carrying it to new CPU after migrated, and 4180 * track group sched_entity load average for task_h_load calc in migration 4181 */ 4182 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) 4183 __update_load_avg_se(now, cfs_rq, se); 4184 4185 decayed = update_cfs_rq_load_avg(now, cfs_rq); 4186 decayed |= propagate_entity_load_avg(se); 4187 4188 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { 4189 4190 /* 4191 * DO_ATTACH means we're here from enqueue_entity(). 4192 * !last_update_time means we've passed through 4193 * migrate_task_rq_fair() indicating we migrated. 4194 * 4195 * IOW we're enqueueing a task on a new CPU. 4196 */ 4197 attach_entity_load_avg(cfs_rq, se); 4198 update_tg_load_avg(cfs_rq); 4199 4200 } else if (decayed) { 4201 cfs_rq_util_change(cfs_rq, 0); 4202 4203 if (flags & UPDATE_TG) 4204 update_tg_load_avg(cfs_rq); 4205 } 4206 } 4207 4208 /* 4209 * Synchronize entity load avg of dequeued entity without locking 4210 * the previous rq. 4211 */ 4212 static void sync_entity_load_avg(struct sched_entity *se) 4213 { 4214 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4215 u64 last_update_time; 4216 4217 last_update_time = cfs_rq_last_update_time(cfs_rq); 4218 __update_load_avg_blocked_se(last_update_time, se); 4219 } 4220 4221 /* 4222 * Task first catches up with cfs_rq, and then subtract 4223 * itself from the cfs_rq (task must be off the queue now). 4224 */ 4225 static void remove_entity_load_avg(struct sched_entity *se) 4226 { 4227 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4228 unsigned long flags; 4229 4230 /* 4231 * tasks cannot exit without having gone through wake_up_new_task() -> 4232 * post_init_entity_util_avg() which will have added things to the 4233 * cfs_rq, so we can remove unconditionally. 
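 *
 * The actual subtraction is deferred: this can run for a task that is
 * leaving for another CPU, without holding this rq's lock, so the
 * amounts are parked under cfs_rq->removed (protected by removed.lock)
 * and folded into cfs_rq->avg later by update_cfs_rq_load_avg().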
4234 */ 4235 4236 sync_entity_load_avg(se); 4237 4238 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); 4239 ++cfs_rq->removed.nr; 4240 cfs_rq->removed.util_avg += se->avg.util_avg; 4241 cfs_rq->removed.load_avg += se->avg.load_avg; 4242 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; 4243 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); 4244 } 4245 4246 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) 4247 { 4248 return cfs_rq->avg.runnable_avg; 4249 } 4250 4251 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) 4252 { 4253 return cfs_rq->avg.load_avg; 4254 } 4255 4256 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf); 4257 4258 static inline unsigned long task_util(struct task_struct *p) 4259 { 4260 return READ_ONCE(p->se.avg.util_avg); 4261 } 4262 4263 static inline unsigned long _task_util_est(struct task_struct *p) 4264 { 4265 struct util_est ue = READ_ONCE(p->se.avg.util_est); 4266 4267 return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED)); 4268 } 4269 4270 static inline unsigned long task_util_est(struct task_struct *p) 4271 { 4272 return max(task_util(p), _task_util_est(p)); 4273 } 4274 4275 #ifdef CONFIG_UCLAMP_TASK 4276 static inline unsigned long uclamp_task_util(struct task_struct *p) 4277 { 4278 return clamp(task_util_est(p), 4279 uclamp_eff_value(p, UCLAMP_MIN), 4280 uclamp_eff_value(p, UCLAMP_MAX)); 4281 } 4282 #else 4283 static inline unsigned long uclamp_task_util(struct task_struct *p) 4284 { 4285 return task_util_est(p); 4286 } 4287 #endif 4288 4289 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, 4290 struct task_struct *p) 4291 { 4292 unsigned int enqueued; 4293 4294 if (!sched_feat(UTIL_EST)) 4295 return; 4296 4297 /* Update root cfs_rq's estimated utilization */ 4298 enqueued = cfs_rq->avg.util_est.enqueued; 4299 enqueued += _task_util_est(p); 4300 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); 4301 4302 trace_sched_util_est_cfs_tp(cfs_rq); 4303 } 4304 4305 static inline void util_est_dequeue(struct cfs_rq *cfs_rq, 4306 struct task_struct *p) 4307 { 4308 unsigned int enqueued; 4309 4310 if (!sched_feat(UTIL_EST)) 4311 return; 4312 4313 /* Update root cfs_rq's estimated utilization */ 4314 enqueued = cfs_rq->avg.util_est.enqueued; 4315 enqueued -= min_t(unsigned int, enqueued, _task_util_est(p)); 4316 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); 4317 4318 trace_sched_util_est_cfs_tp(cfs_rq); 4319 } 4320 4321 #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100) 4322 4323 /* 4324 * Check if a (signed) value is within a specified (unsigned) margin, 4325 * based on the observation that: 4326 * 4327 * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1) 4328 * 4329 * NOTE: this only works when value + margin < INT_MAX. 4330 */ 4331 static inline bool within_margin(int value, int margin) 4332 { 4333 return ((unsigned int)(value + margin - 1) < (2 * margin - 1)); 4334 } 4335 4336 static inline void util_est_update(struct cfs_rq *cfs_rq, 4337 struct task_struct *p, 4338 bool task_sleep) 4339 { 4340 long last_ewma_diff, last_enqueued_diff; 4341 struct util_est ue; 4342 4343 if (!sched_feat(UTIL_EST)) 4344 return; 4345 4346 /* 4347 * Skip update of task's estimated utilization when the task has not 4348 * yet completed an activation, e.g. being migrated. 4349 */ 4350 if (!task_sleep) 4351 return; 4352 4353 /* 4354 * If the PELT values haven't changed since enqueue time, 4355 * skip the util_est update. 
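 * (UTIL_AVG_UNCHANGED lives in the LSB of util_est.enqueued: it is set
 * again at the end of this function and cleared when the task's util_avg
 * gets updated, so finding it still set here means nothing moved.)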
4356 */ 4357 ue = p->se.avg.util_est; 4358 if (ue.enqueued & UTIL_AVG_UNCHANGED) 4359 return; 4360 4361 last_enqueued_diff = ue.enqueued; 4362 4363 /* 4364 * Reset EWMA on utilization increases, the moving average is used only 4365 * to smooth utilization decreases. 4366 */ 4367 ue.enqueued = task_util(p); 4368 if (sched_feat(UTIL_EST_FASTUP)) { 4369 if (ue.ewma < ue.enqueued) { 4370 ue.ewma = ue.enqueued; 4371 goto done; 4372 } 4373 } 4374 4375 /* 4376 * Skip update of task's estimated utilization when its members are 4377 * already ~1% close to its last activation value. 4378 */ 4379 last_ewma_diff = ue.enqueued - ue.ewma; 4380 last_enqueued_diff -= ue.enqueued; 4381 if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) { 4382 if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN)) 4383 goto done; 4384 4385 return; 4386 } 4387 4388 /* 4389 * To avoid overestimation of actual task utilization, skip updates if 4390 * we cannot grant there is idle time in this CPU. 4391 */ 4392 if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) 4393 return; 4394 4395 /* 4396 * Update Task's estimated utilization 4397 * 4398 * When *p completes an activation we can consolidate another sample 4399 * of the task size. This is done by storing the current PELT value 4400 * as ue.enqueued and by using this value to update the Exponential 4401 * Weighted Moving Average (EWMA): 4402 * 4403 * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1) 4404 * = w * task_util(p) + ewma(t-1) - w * ewma(t-1) 4405 * = w * (task_util(p) - ewma(t-1)) + ewma(t-1) 4406 * = w * ( last_ewma_diff ) + ewma(t-1) 4407 * = w * (last_ewma_diff + ewma(t-1) / w) 4408 * 4409 * Where 'w' is the weight of new samples, which is configured to be 4410 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT) 4411 */ 4412 ue.ewma <<= UTIL_EST_WEIGHT_SHIFT; 4413 ue.ewma += last_ewma_diff; 4414 ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; 4415 done: 4416 ue.enqueued |= UTIL_AVG_UNCHANGED; 4417 WRITE_ONCE(p->se.avg.util_est, ue); 4418 4419 trace_sched_util_est_se_tp(&p->se); 4420 } 4421 4422 static inline int task_fits_capacity(struct task_struct *p, 4423 unsigned long capacity) 4424 { 4425 return fits_capacity(uclamp_task_util(p), capacity); 4426 } 4427 4428 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) 4429 { 4430 if (!static_branch_unlikely(&sched_asym_cpucapacity)) 4431 return; 4432 4433 if (!p || p->nr_cpus_allowed == 1) { 4434 rq->misfit_task_load = 0; 4435 return; 4436 } 4437 4438 if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { 4439 rq->misfit_task_load = 0; 4440 return; 4441 } 4442 4443 /* 4444 * Make sure that misfit_task_load will not be null even if 4445 * task_h_load() returns 0. 
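 * A zero misfit_task_load is read elsewhere as "no misfit task on this
 * rq", so clamp it to at least 1 to keep the load balancer aware of it.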
4446 */ 4447 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); 4448 } 4449 4450 #else /* CONFIG_SMP */ 4451 4452 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 4453 { 4454 return true; 4455 } 4456 4457 #define UPDATE_TG 0x0 4458 #define SKIP_AGE_LOAD 0x0 4459 #define DO_ATTACH 0x0 4460 4461 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) 4462 { 4463 cfs_rq_util_change(cfs_rq, 0); 4464 } 4465 4466 static inline void remove_entity_load_avg(struct sched_entity *se) {} 4467 4468 static inline void 4469 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 4470 static inline void 4471 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 4472 4473 static inline int newidle_balance(struct rq *rq, struct rq_flags *rf) 4474 { 4475 return 0; 4476 } 4477 4478 static inline void 4479 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 4480 4481 static inline void 4482 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 4483 4484 static inline void 4485 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, 4486 bool task_sleep) {} 4487 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} 4488 4489 #endif /* CONFIG_SMP */ 4490 4491 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) 4492 { 4493 #ifdef CONFIG_SCHED_DEBUG 4494 s64 d = se->vruntime - cfs_rq->min_vruntime; 4495 4496 if (d < 0) 4497 d = -d; 4498 4499 if (d > 3*sysctl_sched_latency) 4500 schedstat_inc(cfs_rq->nr_spread_over); 4501 #endif 4502 } 4503 4504 static void 4505 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) 4506 { 4507 u64 vruntime = cfs_rq->min_vruntime; 4508 4509 /* 4510 * The 'current' period is already promised to the current tasks, 4511 * however the extra weight of the new task will slow them down a 4512 * little, place the new task so that it fits in the slot that 4513 * stays open at the end. 4514 */ 4515 if (initial && sched_feat(START_DEBIT)) 4516 vruntime += sched_vslice(cfs_rq, se); 4517 4518 /* sleeps up to a single latency don't count. */ 4519 if (!initial) { 4520 unsigned long thresh; 4521 4522 if (se_is_idle(se)) 4523 thresh = sysctl_sched_min_granularity; 4524 else 4525 thresh = sysctl_sched_latency; 4526 4527 /* 4528 * Halve their sleep time's effect, to allow 4529 * for a gentler effect of sleepers: 4530 */ 4531 if (sched_feat(GENTLE_FAIR_SLEEPERS)) 4532 thresh >>= 1; 4533 4534 vruntime -= thresh; 4535 } 4536 4537 /* ensure we never gain time by being placed backwards. */ 4538 se->vruntime = max_vruntime(se->vruntime, vruntime); 4539 } 4540 4541 static void check_enqueue_throttle(struct cfs_rq *cfs_rq); 4542 4543 static inline bool cfs_bandwidth_used(void); 4544 4545 /* 4546 * MIGRATION 4547 * 4548 * dequeue 4549 * update_curr() 4550 * update_min_vruntime() 4551 * vruntime -= min_vruntime 4552 * 4553 * enqueue 4554 * update_curr() 4555 * update_min_vruntime() 4556 * vruntime += min_vruntime 4557 * 4558 * this way the vruntime transition between RQs is done when both 4559 * min_vruntime are up-to-date. 4560 * 4561 * WAKEUP (remote) 4562 * 4563 * ->migrate_task_rq_fair() (p->state == TASK_WAKING) 4564 * vruntime -= min_vruntime 4565 * 4566 * enqueue 4567 * update_curr() 4568 * update_min_vruntime() 4569 * vruntime += min_vruntime 4570 * 4571 * this way we don't have the most up-to-date min_vruntime on the originating 4572 * CPU and an up-to-date min_vruntime on the destination CPU. 
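 *
 * (contrast with MIGRATION above: the subtraction on the originating CPU
 * uses whatever min_vruntime was last computed there, without a fresh
 * update_curr(); only the addition on the destination CPU is guaranteed
 * to see an up-to-date min_vruntime.)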
4573 */ 4574 4575 static void 4576 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 4577 { 4578 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); 4579 bool curr = cfs_rq->curr == se; 4580 4581 /* 4582 * If we're the current task, we must renormalise before calling 4583 * update_curr(). 4584 */ 4585 if (renorm && curr) 4586 se->vruntime += cfs_rq->min_vruntime; 4587 4588 update_curr(cfs_rq); 4589 4590 /* 4591 * Otherwise, renormalise after, such that we're placed at the current 4592 * moment in time, instead of some random moment in the past. Being 4593 * placed in the past could significantly boost this task to the 4594 * fairness detriment of existing tasks. 4595 */ 4596 if (renorm && !curr) 4597 se->vruntime += cfs_rq->min_vruntime; 4598 4599 /* 4600 * When enqueuing a sched_entity, we must: 4601 * - Update loads to have both entity and cfs_rq synced with now. 4602 * - Add its load to cfs_rq->runnable_avg 4603 * - For group_entity, update its weight to reflect the new share of 4604 * its group cfs_rq 4605 * - Add its new weight to cfs_rq->load.weight 4606 */ 4607 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); 4608 se_update_runnable(se); 4609 update_cfs_group(se); 4610 account_entity_enqueue(cfs_rq, se); 4611 4612 if (flags & ENQUEUE_WAKEUP) 4613 place_entity(cfs_rq, se, 0); 4614 4615 check_schedstat_required(); 4616 update_stats_enqueue_fair(cfs_rq, se, flags); 4617 check_spread(cfs_rq, se); 4618 if (!curr) 4619 __enqueue_entity(cfs_rq, se); 4620 se->on_rq = 1; 4621 4622 if (cfs_rq->nr_running == 1) { 4623 check_enqueue_throttle(cfs_rq); 4624 if (!throttled_hierarchy(cfs_rq)) 4625 list_add_leaf_cfs_rq(cfs_rq); 4626 } 4627 } 4628 4629 static void __clear_buddies_last(struct sched_entity *se) 4630 { 4631 for_each_sched_entity(se) { 4632 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4633 if (cfs_rq->last != se) 4634 break; 4635 4636 cfs_rq->last = NULL; 4637 } 4638 } 4639 4640 static void __clear_buddies_next(struct sched_entity *se) 4641 { 4642 for_each_sched_entity(se) { 4643 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4644 if (cfs_rq->next != se) 4645 break; 4646 4647 cfs_rq->next = NULL; 4648 } 4649 } 4650 4651 static void __clear_buddies_skip(struct sched_entity *se) 4652 { 4653 for_each_sched_entity(se) { 4654 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4655 if (cfs_rq->skip != se) 4656 break; 4657 4658 cfs_rq->skip = NULL; 4659 } 4660 } 4661 4662 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) 4663 { 4664 if (cfs_rq->last == se) 4665 __clear_buddies_last(se); 4666 4667 if (cfs_rq->next == se) 4668 __clear_buddies_next(se); 4669 4670 if (cfs_rq->skip == se) 4671 __clear_buddies_skip(se); 4672 } 4673 4674 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); 4675 4676 static void 4677 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 4678 { 4679 /* 4680 * Update run-time statistics of the 'current'. 4681 */ 4682 update_curr(cfs_rq); 4683 4684 /* 4685 * When dequeuing a sched_entity, we must: 4686 * - Update loads to have both entity and cfs_rq synced with now. 4687 * - Subtract its load from the cfs_rq->runnable_avg. 4688 * - Subtract its previous weight from cfs_rq->load.weight. 4689 * - For group entity, update its weight to reflect the new share 4690 * of its group cfs_rq. 
4691 */ 4692 update_load_avg(cfs_rq, se, UPDATE_TG); 4693 se_update_runnable(se); 4694 4695 update_stats_dequeue_fair(cfs_rq, se, flags); 4696 4697 clear_buddies(cfs_rq, se); 4698 4699 if (se != cfs_rq->curr) 4700 __dequeue_entity(cfs_rq, se); 4701 se->on_rq = 0; 4702 account_entity_dequeue(cfs_rq, se); 4703 4704 /* 4705 * Normalize after update_curr(); which will also have moved 4706 * min_vruntime if @se is the one holding it back. But before doing 4707 * update_min_vruntime() again, which will discount @se's position and 4708 * can move min_vruntime forward still more. 4709 */ 4710 if (!(flags & DEQUEUE_SLEEP)) 4711 se->vruntime -= cfs_rq->min_vruntime; 4712 4713 /* return excess runtime on last dequeue */ 4714 return_cfs_rq_runtime(cfs_rq); 4715 4716 update_cfs_group(se); 4717 4718 /* 4719 * Now advance min_vruntime if @se was the entity holding it back, 4720 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be 4721 * put back on, and if we advance min_vruntime, we'll be placed back 4722 * further than we started -- ie. we'll be penalized. 4723 */ 4724 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) 4725 update_min_vruntime(cfs_rq); 4726 4727 if (cfs_rq->nr_running == 0) 4728 update_idle_cfs_rq_clock_pelt(cfs_rq); 4729 } 4730 4731 /* 4732 * Preempt the current task with a newly woken task if needed: 4733 */ 4734 static void 4735 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) 4736 { 4737 unsigned long ideal_runtime, delta_exec; 4738 struct sched_entity *se; 4739 s64 delta; 4740 4741 ideal_runtime = sched_slice(cfs_rq, curr); 4742 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; 4743 if (delta_exec > ideal_runtime) { 4744 resched_curr(rq_of(cfs_rq)); 4745 /* 4746 * The current task ran long enough, ensure it doesn't get 4747 * re-elected due to buddy favours. 4748 */ 4749 clear_buddies(cfs_rq, curr); 4750 return; 4751 } 4752 4753 /* 4754 * Ensure that a task that missed wakeup preemption by a 4755 * narrow margin doesn't have to wait for a full slice. 4756 * This also mitigates buddy induced latencies under load. 4757 */ 4758 if (delta_exec < sysctl_sched_min_granularity) 4759 return; 4760 4761 se = __pick_first_entity(cfs_rq); 4762 delta = curr->vruntime - se->vruntime; 4763 4764 if (delta < 0) 4765 return; 4766 4767 if (delta > ideal_runtime) 4768 resched_curr(rq_of(cfs_rq)); 4769 } 4770 4771 static void 4772 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 4773 { 4774 clear_buddies(cfs_rq, se); 4775 4776 /* 'current' is not kept within the tree. */ 4777 if (se->on_rq) { 4778 /* 4779 * Any task has to be enqueued before it get to execute on 4780 * a CPU. So account for the time it spent waiting on the 4781 * runqueue. 4782 */ 4783 update_stats_wait_end_fair(cfs_rq, se); 4784 __dequeue_entity(cfs_rq, se); 4785 update_load_avg(cfs_rq, se, UPDATE_TG); 4786 } 4787 4788 update_stats_curr_start(cfs_rq, se); 4789 cfs_rq->curr = se; 4790 4791 /* 4792 * Track our maximum slice length, if the CPU's load is at 4793 * least twice that of our own weight (i.e. 
dont track it 4794 * when there are only lesser-weight tasks around): 4795 */ 4796 if (schedstat_enabled() && 4797 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { 4798 struct sched_statistics *stats; 4799 4800 stats = __schedstats_from_se(se); 4801 __schedstat_set(stats->slice_max, 4802 max((u64)stats->slice_max, 4803 se->sum_exec_runtime - se->prev_sum_exec_runtime)); 4804 } 4805 4806 se->prev_sum_exec_runtime = se->sum_exec_runtime; 4807 } 4808 4809 static int 4810 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); 4811 4812 /* 4813 * Pick the next process, keeping these things in mind, in this order: 4814 * 1) keep things fair between processes/task groups 4815 * 2) pick the "next" process, since someone really wants that to run 4816 * 3) pick the "last" process, for cache locality 4817 * 4) do not run the "skip" process, if something else is available 4818 */ 4819 static struct sched_entity * 4820 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) 4821 { 4822 struct sched_entity *left = __pick_first_entity(cfs_rq); 4823 struct sched_entity *se; 4824 4825 /* 4826 * If curr is set we have to see if its left of the leftmost entity 4827 * still in the tree, provided there was anything in the tree at all. 4828 */ 4829 if (!left || (curr && entity_before(curr, left))) 4830 left = curr; 4831 4832 se = left; /* ideally we run the leftmost entity */ 4833 4834 /* 4835 * Avoid running the skip buddy, if running something else can 4836 * be done without getting too unfair. 4837 */ 4838 if (cfs_rq->skip && cfs_rq->skip == se) { 4839 struct sched_entity *second; 4840 4841 if (se == curr) { 4842 second = __pick_first_entity(cfs_rq); 4843 } else { 4844 second = __pick_next_entity(se); 4845 if (!second || (curr && entity_before(curr, second))) 4846 second = curr; 4847 } 4848 4849 if (second && wakeup_preempt_entity(second, left) < 1) 4850 se = second; 4851 } 4852 4853 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) { 4854 /* 4855 * Someone really wants this to run. If it's not unfair, run it. 4856 */ 4857 se = cfs_rq->next; 4858 } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) { 4859 /* 4860 * Prefer last buddy, try to return the CPU to a preempted task. 4861 */ 4862 se = cfs_rq->last; 4863 } 4864 4865 return se; 4866 } 4867 4868 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); 4869 4870 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) 4871 { 4872 /* 4873 * If still on the runqueue then deactivate_task() 4874 * was not called and update_curr() has to be done: 4875 */ 4876 if (prev->on_rq) 4877 update_curr(cfs_rq); 4878 4879 /* throttle cfs_rqs exceeding runtime */ 4880 check_cfs_rq_runtime(cfs_rq); 4881 4882 check_spread(cfs_rq, prev); 4883 4884 if (prev->on_rq) { 4885 update_stats_wait_start_fair(cfs_rq, prev); 4886 /* Put 'current' back into the tree. */ 4887 __enqueue_entity(cfs_rq, prev); 4888 /* in !on_rq case, update occurred at dequeue */ 4889 update_load_avg(cfs_rq, prev, 0); 4890 } 4891 cfs_rq->curr = NULL; 4892 } 4893 4894 static void 4895 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) 4896 { 4897 /* 4898 * Update run-time statistics of the 'current'. 4899 */ 4900 update_curr(cfs_rq); 4901 4902 /* 4903 * Ensure that runnable average is periodically updated. 
4904 */ 4905 update_load_avg(cfs_rq, curr, UPDATE_TG); 4906 update_cfs_group(curr); 4907 4908 #ifdef CONFIG_SCHED_HRTICK 4909 /* 4910 * queued ticks are scheduled to match the slice, so don't bother 4911 * validating it and just reschedule. 4912 */ 4913 if (queued) { 4914 resched_curr(rq_of(cfs_rq)); 4915 return; 4916 } 4917 /* 4918 * don't let the period tick interfere with the hrtick preemption 4919 */ 4920 if (!sched_feat(DOUBLE_TICK) && 4921 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) 4922 return; 4923 #endif 4924 4925 if (cfs_rq->nr_running > 1) 4926 check_preempt_tick(cfs_rq, curr); 4927 } 4928 4929 4930 /************************************************** 4931 * CFS bandwidth control machinery 4932 */ 4933 4934 #ifdef CONFIG_CFS_BANDWIDTH 4935 4936 #ifdef CONFIG_JUMP_LABEL 4937 static struct static_key __cfs_bandwidth_used; 4938 4939 static inline bool cfs_bandwidth_used(void) 4940 { 4941 return static_key_false(&__cfs_bandwidth_used); 4942 } 4943 4944 void cfs_bandwidth_usage_inc(void) 4945 { 4946 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used); 4947 } 4948 4949 void cfs_bandwidth_usage_dec(void) 4950 { 4951 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); 4952 } 4953 #else /* CONFIG_JUMP_LABEL */ 4954 static bool cfs_bandwidth_used(void) 4955 { 4956 return true; 4957 } 4958 4959 void cfs_bandwidth_usage_inc(void) {} 4960 void cfs_bandwidth_usage_dec(void) {} 4961 #endif /* CONFIG_JUMP_LABEL */ 4962 4963 /* 4964 * default period for cfs group bandwidth. 4965 * default: 0.1s, units: nanoseconds 4966 */ 4967 static inline u64 default_cfs_period(void) 4968 { 4969 return 100000000ULL; 4970 } 4971 4972 static inline u64 sched_cfs_bandwidth_slice(void) 4973 { 4974 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; 4975 } 4976 4977 /* 4978 * Replenish runtime according to assigned quota. We use sched_clock_cpu 4979 * directly instead of rq->clock to avoid adding additional synchronization 4980 * around rq->lock. 
4981 * 4982 * requires cfs_b->lock 4983 */ 4984 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) 4985 { 4986 s64 runtime; 4987 4988 if (unlikely(cfs_b->quota == RUNTIME_INF)) 4989 return; 4990 4991 cfs_b->runtime += cfs_b->quota; 4992 runtime = cfs_b->runtime_snap - cfs_b->runtime; 4993 if (runtime > 0) { 4994 cfs_b->burst_time += runtime; 4995 cfs_b->nr_burst++; 4996 } 4997 4998 cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); 4999 cfs_b->runtime_snap = cfs_b->runtime; 5000 } 5001 5002 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 5003 { 5004 return &tg->cfs_bandwidth; 5005 } 5006 5007 /* returns 0 on failure to allocate runtime */ 5008 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, 5009 struct cfs_rq *cfs_rq, u64 target_runtime) 5010 { 5011 u64 min_amount, amount = 0; 5012 5013 lockdep_assert_held(&cfs_b->lock); 5014 5015 /* note: this is a positive sum as runtime_remaining <= 0 */ 5016 min_amount = target_runtime - cfs_rq->runtime_remaining; 5017 5018 if (cfs_b->quota == RUNTIME_INF) 5019 amount = min_amount; 5020 else { 5021 start_cfs_bandwidth(cfs_b); 5022 5023 if (cfs_b->runtime > 0) { 5024 amount = min(cfs_b->runtime, min_amount); 5025 cfs_b->runtime -= amount; 5026 cfs_b->idle = 0; 5027 } 5028 } 5029 5030 cfs_rq->runtime_remaining += amount; 5031 5032 return cfs_rq->runtime_remaining > 0; 5033 } 5034 5035 /* returns 0 on failure to allocate runtime */ 5036 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5037 { 5038 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 5039 int ret; 5040 5041 raw_spin_lock(&cfs_b->lock); 5042 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); 5043 raw_spin_unlock(&cfs_b->lock); 5044 5045 return ret; 5046 } 5047 5048 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 5049 { 5050 /* dock delta_exec before expiring quota (as it could span periods) */ 5051 cfs_rq->runtime_remaining -= delta_exec; 5052 5053 if (likely(cfs_rq->runtime_remaining > 0)) 5054 return; 5055 5056 if (cfs_rq->throttled) 5057 return; 5058 /* 5059 * if we're unable to extend our runtime we resched so that the active 5060 * hierarchy can be throttled 5061 */ 5062 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) 5063 resched_curr(rq_of(cfs_rq)); 5064 } 5065 5066 static __always_inline 5067 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 5068 { 5069 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 5070 return; 5071 5072 __account_cfs_rq_runtime(cfs_rq, delta_exec); 5073 } 5074 5075 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 5076 { 5077 return cfs_bandwidth_used() && cfs_rq->throttled; 5078 } 5079 5080 /* check whether cfs_rq, or any parent, is throttled */ 5081 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 5082 { 5083 return cfs_bandwidth_used() && cfs_rq->throttle_count; 5084 } 5085 5086 /* 5087 * Ensure that neither of the group entities corresponding to src_cpu or 5088 * dest_cpu are members of a throttled hierarchy when performing group 5089 * load-balance operations. 
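 *
 * Otherwise load balancing could move tasks onto a cfs_rq that cannot
 * currently run them, or treat entities that are not actually runnable
 * as movable load on the source.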
5090 */ 5091 static inline int throttled_lb_pair(struct task_group *tg, 5092 int src_cpu, int dest_cpu) 5093 { 5094 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; 5095 5096 src_cfs_rq = tg->cfs_rq[src_cpu]; 5097 dest_cfs_rq = tg->cfs_rq[dest_cpu]; 5098 5099 return throttled_hierarchy(src_cfs_rq) || 5100 throttled_hierarchy(dest_cfs_rq); 5101 } 5102 5103 static int tg_unthrottle_up(struct task_group *tg, void *data) 5104 { 5105 struct rq *rq = data; 5106 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5107 5108 cfs_rq->throttle_count--; 5109 if (!cfs_rq->throttle_count) { 5110 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) - 5111 cfs_rq->throttled_clock_pelt; 5112 5113 /* Add cfs_rq with load or one or more already running entities to the list */ 5114 if (!cfs_rq_is_decayed(cfs_rq)) 5115 list_add_leaf_cfs_rq(cfs_rq); 5116 } 5117 5118 return 0; 5119 } 5120 5121 static int tg_throttle_down(struct task_group *tg, void *data) 5122 { 5123 struct rq *rq = data; 5124 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5125 5126 /* group is entering throttled state, stop time */ 5127 if (!cfs_rq->throttle_count) { 5128 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq); 5129 list_del_leaf_cfs_rq(cfs_rq); 5130 } 5131 cfs_rq->throttle_count++; 5132 5133 return 0; 5134 } 5135 5136 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) 5137 { 5138 struct rq *rq = rq_of(cfs_rq); 5139 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 5140 struct sched_entity *se; 5141 long task_delta, idle_task_delta, dequeue = 1; 5142 5143 raw_spin_lock(&cfs_b->lock); 5144 /* This will start the period timer if necessary */ 5145 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { 5146 /* 5147 * We have raced with bandwidth becoming available, and if we 5148 * actually throttled the timer might not unthrottle us for an 5149 * entire period. We additionally needed to make sure that any 5150 * subsequent check_cfs_rq_runtime calls agree not to throttle 5151 * us, as we may commit to do cfs put_prev+pick_next, so we ask 5152 * for 1ns of runtime rather than just check cfs_b. 5153 */ 5154 dequeue = 0; 5155 } else { 5156 list_add_tail_rcu(&cfs_rq->throttled_list, 5157 &cfs_b->throttled_cfs_rq); 5158 } 5159 raw_spin_unlock(&cfs_b->lock); 5160 5161 if (!dequeue) 5162 return false; /* Throttle no longer required. 
*/ 5163 5164 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; 5165 5166 /* freeze hierarchy runnable averages while throttled */ 5167 rcu_read_lock(); 5168 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); 5169 rcu_read_unlock(); 5170 5171 task_delta = cfs_rq->h_nr_running; 5172 idle_task_delta = cfs_rq->idle_h_nr_running; 5173 for_each_sched_entity(se) { 5174 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 5175 /* throttled entity or throttle-on-deactivate */ 5176 if (!se->on_rq) 5177 goto done; 5178 5179 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); 5180 5181 if (cfs_rq_is_idle(group_cfs_rq(se))) 5182 idle_task_delta = cfs_rq->h_nr_running; 5183 5184 qcfs_rq->h_nr_running -= task_delta; 5185 qcfs_rq->idle_h_nr_running -= idle_task_delta; 5186 5187 if (qcfs_rq->load.weight) { 5188 /* Avoid re-evaluating load for this entity: */ 5189 se = parent_entity(se); 5190 break; 5191 } 5192 } 5193 5194 for_each_sched_entity(se) { 5195 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 5196 /* throttled entity or throttle-on-deactivate */ 5197 if (!se->on_rq) 5198 goto done; 5199 5200 update_load_avg(qcfs_rq, se, 0); 5201 se_update_runnable(se); 5202 5203 if (cfs_rq_is_idle(group_cfs_rq(se))) 5204 idle_task_delta = cfs_rq->h_nr_running; 5205 5206 qcfs_rq->h_nr_running -= task_delta; 5207 qcfs_rq->idle_h_nr_running -= idle_task_delta; 5208 } 5209 5210 /* At this point se is NULL and we are at root level*/ 5211 sub_nr_running(rq, task_delta); 5212 5213 done: 5214 /* 5215 * Note: distribution will already see us throttled via the 5216 * throttled-list. rq->lock protects completion. 5217 */ 5218 cfs_rq->throttled = 1; 5219 cfs_rq->throttled_clock = rq_clock(rq); 5220 return true; 5221 } 5222 5223 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) 5224 { 5225 struct rq *rq = rq_of(cfs_rq); 5226 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 5227 struct sched_entity *se; 5228 long task_delta, idle_task_delta; 5229 5230 se = cfs_rq->tg->se[cpu_of(rq)]; 5231 5232 cfs_rq->throttled = 0; 5233 5234 update_rq_clock(rq); 5235 5236 raw_spin_lock(&cfs_b->lock); 5237 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; 5238 list_del_rcu(&cfs_rq->throttled_list); 5239 raw_spin_unlock(&cfs_b->lock); 5240 5241 /* update hierarchical throttle state */ 5242 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); 5243 5244 if (!cfs_rq->load.weight) { 5245 if (!cfs_rq->on_list) 5246 return; 5247 /* 5248 * Nothing to run but something to decay (on_list)? 5249 * Complete the branch. 
5250 */ 5251 for_each_sched_entity(se) { 5252 if (list_add_leaf_cfs_rq(cfs_rq_of(se))) 5253 break; 5254 } 5255 goto unthrottle_throttle; 5256 } 5257 5258 task_delta = cfs_rq->h_nr_running; 5259 idle_task_delta = cfs_rq->idle_h_nr_running; 5260 for_each_sched_entity(se) { 5261 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 5262 5263 if (se->on_rq) 5264 break; 5265 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); 5266 5267 if (cfs_rq_is_idle(group_cfs_rq(se))) 5268 idle_task_delta = cfs_rq->h_nr_running; 5269 5270 qcfs_rq->h_nr_running += task_delta; 5271 qcfs_rq->idle_h_nr_running += idle_task_delta; 5272 5273 /* end evaluation on encountering a throttled cfs_rq */ 5274 if (cfs_rq_throttled(qcfs_rq)) 5275 goto unthrottle_throttle; 5276 } 5277 5278 for_each_sched_entity(se) { 5279 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 5280 5281 update_load_avg(qcfs_rq, se, UPDATE_TG); 5282 se_update_runnable(se); 5283 5284 if (cfs_rq_is_idle(group_cfs_rq(se))) 5285 idle_task_delta = cfs_rq->h_nr_running; 5286 5287 qcfs_rq->h_nr_running += task_delta; 5288 qcfs_rq->idle_h_nr_running += idle_task_delta; 5289 5290 /* end evaluation on encountering a throttled cfs_rq */ 5291 if (cfs_rq_throttled(qcfs_rq)) 5292 goto unthrottle_throttle; 5293 } 5294 5295 /* At this point se is NULL and we are at root level*/ 5296 add_nr_running(rq, task_delta); 5297 5298 unthrottle_throttle: 5299 assert_list_leaf_cfs_rq(rq); 5300 5301 /* Determine whether we need to wake up potentially idle CPU: */ 5302 if (rq->curr == rq->idle && rq->cfs.nr_running) 5303 resched_curr(rq); 5304 } 5305 5306 static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b) 5307 { 5308 struct cfs_rq *cfs_rq; 5309 u64 runtime, remaining = 1; 5310 5311 rcu_read_lock(); 5312 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, 5313 throttled_list) { 5314 struct rq *rq = rq_of(cfs_rq); 5315 struct rq_flags rf; 5316 5317 rq_lock_irqsave(rq, &rf); 5318 if (!cfs_rq_throttled(cfs_rq)) 5319 goto next; 5320 5321 /* By the above check, this should never be true */ 5322 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); 5323 5324 raw_spin_lock(&cfs_b->lock); 5325 runtime = -cfs_rq->runtime_remaining + 1; 5326 if (runtime > cfs_b->runtime) 5327 runtime = cfs_b->runtime; 5328 cfs_b->runtime -= runtime; 5329 remaining = cfs_b->runtime; 5330 raw_spin_unlock(&cfs_b->lock); 5331 5332 cfs_rq->runtime_remaining += runtime; 5333 5334 /* we check whether we're throttled above */ 5335 if (cfs_rq->runtime_remaining > 0) 5336 unthrottle_cfs_rq(cfs_rq); 5337 5338 next: 5339 rq_unlock_irqrestore(rq, &rf); 5340 5341 if (!remaining) 5342 break; 5343 } 5344 rcu_read_unlock(); 5345 } 5346 5347 /* 5348 * Responsible for refilling a task_group's bandwidth and unthrottling its 5349 * cfs_rqs as appropriate. If there has been no activity within the last 5350 * period the timer is deactivated until scheduling resumes; cfs_b->idle is 5351 * used to track this state. 
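 *
 * As a rough example (illustrative numbers): with quota = 25ms per 100ms
 * period (cpu.max "25000 100000" in cgroup v2), every period-timer expiry
 * tops the global pool back up via __refill_cfs_bandwidth_runtime(),
 * capped at quota plus any configured burst, and then unthrottles the
 * cfs_rqs that ran dry during the previous period.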
5352 */ 5353 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) 5354 { 5355 int throttled; 5356 5357 /* no need to continue the timer with no bandwidth constraint */ 5358 if (cfs_b->quota == RUNTIME_INF) 5359 goto out_deactivate; 5360 5361 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 5362 cfs_b->nr_periods += overrun; 5363 5364 /* Refill extra burst quota even if cfs_b->idle */ 5365 __refill_cfs_bandwidth_runtime(cfs_b); 5366 5367 /* 5368 * idle depends on !throttled (for the case of a large deficit), and if 5369 * we're going inactive then everything else can be deferred 5370 */ 5371 if (cfs_b->idle && !throttled) 5372 goto out_deactivate; 5373 5374 if (!throttled) { 5375 /* mark as potentially idle for the upcoming period */ 5376 cfs_b->idle = 1; 5377 return 0; 5378 } 5379 5380 /* account preceding periods in which throttling occurred */ 5381 cfs_b->nr_throttled += overrun; 5382 5383 /* 5384 * This check is repeated as we release cfs_b->lock while we unthrottle. 5385 */ 5386 while (throttled && cfs_b->runtime > 0) { 5387 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5388 /* we can't nest cfs_b->lock while distributing bandwidth */ 5389 distribute_cfs_runtime(cfs_b); 5390 raw_spin_lock_irqsave(&cfs_b->lock, flags); 5391 5392 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 5393 } 5394 5395 /* 5396 * While we are ensured activity in the period following an 5397 * unthrottle, this also covers the case in which the new bandwidth is 5398 * insufficient to cover the existing bandwidth deficit. (Forcing the 5399 * timer to remain active while there are any throttled entities.) 5400 */ 5401 cfs_b->idle = 0; 5402 5403 return 0; 5404 5405 out_deactivate: 5406 return 1; 5407 } 5408 5409 /* a cfs_rq won't donate quota below this amount */ 5410 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; 5411 /* minimum remaining period time to redistribute slack quota */ 5412 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; 5413 /* how long we wait to gather additional slack before distributing */ 5414 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; 5415 5416 /* 5417 * Are we near the end of the current quota period? 5418 * 5419 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the 5420 * hrtimer base being cleared by hrtimer_start. In the case of 5421 * migrate_hrtimers, base is never cleared, so we are fine. 5422 */ 5423 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) 5424 { 5425 struct hrtimer *refresh_timer = &cfs_b->period_timer; 5426 s64 remaining; 5427 5428 /* if the call-back is running a quota refresh is already occurring */ 5429 if (hrtimer_callback_running(refresh_timer)) 5430 return 1; 5431 5432 /* is a quota refresh about to occur? 
*/ 5433 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); 5434 if (remaining < (s64)min_expire) 5435 return 1; 5436 5437 return 0; 5438 } 5439 5440 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) 5441 { 5442 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; 5443 5444 /* if there's a quota refresh soon don't bother with slack */ 5445 if (runtime_refresh_within(cfs_b, min_left)) 5446 return; 5447 5448 /* don't push forwards an existing deferred unthrottle */ 5449 if (cfs_b->slack_started) 5450 return; 5451 cfs_b->slack_started = true; 5452 5453 hrtimer_start(&cfs_b->slack_timer, 5454 ns_to_ktime(cfs_bandwidth_slack_period), 5455 HRTIMER_MODE_REL); 5456 } 5457 5458 /* we know any runtime found here is valid as update_curr() precedes return */ 5459 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5460 { 5461 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 5462 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; 5463 5464 if (slack_runtime <= 0) 5465 return; 5466 5467 raw_spin_lock(&cfs_b->lock); 5468 if (cfs_b->quota != RUNTIME_INF) { 5469 cfs_b->runtime += slack_runtime; 5470 5471 /* we are under rq->lock, defer unthrottling using a timer */ 5472 if (cfs_b->runtime > sched_cfs_bandwidth_slice() && 5473 !list_empty(&cfs_b->throttled_cfs_rq)) 5474 start_cfs_slack_bandwidth(cfs_b); 5475 } 5476 raw_spin_unlock(&cfs_b->lock); 5477 5478 /* even if it's not valid for return we don't want to try again */ 5479 cfs_rq->runtime_remaining -= slack_runtime; 5480 } 5481 5482 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5483 { 5484 if (!cfs_bandwidth_used()) 5485 return; 5486 5487 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) 5488 return; 5489 5490 __return_cfs_rq_runtime(cfs_rq); 5491 } 5492 5493 /* 5494 * This is done with a timer (instead of inline with bandwidth return) since 5495 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. 5496 */ 5497 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) 5498 { 5499 u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); 5500 unsigned long flags; 5501 5502 /* confirm we're still not at a refresh boundary */ 5503 raw_spin_lock_irqsave(&cfs_b->lock, flags); 5504 cfs_b->slack_started = false; 5505 5506 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { 5507 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5508 return; 5509 } 5510 5511 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) 5512 runtime = cfs_b->runtime; 5513 5514 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5515 5516 if (!runtime) 5517 return; 5518 5519 distribute_cfs_runtime(cfs_b); 5520 } 5521 5522 /* 5523 * When a group wakes up we want to make sure that its quota is not already 5524 * expired/exceeded, otherwise it may be allowed to steal additional ticks of 5525 * runtime as update_curr() throttling can not trigger until it's on-rq. 
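 * Hence the account_cfs_rq_runtime(cfs_rq, 0) call below: it charges
 * nothing, but re-runs the quota check so the woken group is refilled or
 * throttled before it starts running again.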
5526 */ 5527 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) 5528 { 5529 if (!cfs_bandwidth_used()) 5530 return; 5531 5532 /* an active group must be handled by the update_curr()->put() path */ 5533 if (!cfs_rq->runtime_enabled || cfs_rq->curr) 5534 return; 5535 5536 /* ensure the group is not already throttled */ 5537 if (cfs_rq_throttled(cfs_rq)) 5538 return; 5539 5540 /* update runtime allocation */ 5541 account_cfs_rq_runtime(cfs_rq, 0); 5542 if (cfs_rq->runtime_remaining <= 0) 5543 throttle_cfs_rq(cfs_rq); 5544 } 5545 5546 static void sync_throttle(struct task_group *tg, int cpu) 5547 { 5548 struct cfs_rq *pcfs_rq, *cfs_rq; 5549 5550 if (!cfs_bandwidth_used()) 5551 return; 5552 5553 if (!tg->parent) 5554 return; 5555 5556 cfs_rq = tg->cfs_rq[cpu]; 5557 pcfs_rq = tg->parent->cfs_rq[cpu]; 5558 5559 cfs_rq->throttle_count = pcfs_rq->throttle_count; 5560 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu)); 5561 } 5562 5563 /* conditionally throttle active cfs_rq's from put_prev_entity() */ 5564 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5565 { 5566 if (!cfs_bandwidth_used()) 5567 return false; 5568 5569 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) 5570 return false; 5571 5572 /* 5573 * it's possible for a throttled entity to be forced into a running 5574 * state (e.g. set_curr_task), in this case we're finished. 5575 */ 5576 if (cfs_rq_throttled(cfs_rq)) 5577 return true; 5578 5579 return throttle_cfs_rq(cfs_rq); 5580 } 5581 5582 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) 5583 { 5584 struct cfs_bandwidth *cfs_b = 5585 container_of(timer, struct cfs_bandwidth, slack_timer); 5586 5587 do_sched_cfs_slack_timer(cfs_b); 5588 5589 return HRTIMER_NORESTART; 5590 } 5591 5592 extern const u64 max_cfs_quota_period; 5593 5594 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) 5595 { 5596 struct cfs_bandwidth *cfs_b = 5597 container_of(timer, struct cfs_bandwidth, period_timer); 5598 unsigned long flags; 5599 int overrun; 5600 int idle = 0; 5601 int count = 0; 5602 5603 raw_spin_lock_irqsave(&cfs_b->lock, flags); 5604 for (;;) { 5605 overrun = hrtimer_forward_now(timer, cfs_b->period); 5606 if (!overrun) 5607 break; 5608 5609 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); 5610 5611 if (++count > 3) { 5612 u64 new, old = ktime_to_ns(cfs_b->period); 5613 5614 /* 5615 * Grow period by a factor of 2 to avoid losing precision. 5616 * Precision loss in the quota/period ratio can cause __cfs_schedulable 5617 * to fail. 5618 */ 5619 new = old * 2; 5620 if (new < max_cfs_quota_period) { 5621 cfs_b->period = ns_to_ktime(new); 5622 cfs_b->quota *= 2; 5623 cfs_b->burst *= 2; 5624 5625 pr_warn_ratelimited( 5626 "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", 5627 smp_processor_id(), 5628 div_u64(new, NSEC_PER_USEC), 5629 div_u64(cfs_b->quota, NSEC_PER_USEC)); 5630 } else { 5631 pr_warn_ratelimited( 5632 "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n", 5633 smp_processor_id(), 5634 div_u64(old, NSEC_PER_USEC), 5635 div_u64(cfs_b->quota, NSEC_PER_USEC)); 5636 } 5637 5638 /* reset count so we don't come right back in here */ 5639 count = 0; 5640 } 5641 } 5642 if (idle) 5643 cfs_b->period_active = 0; 5644 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 5645 5646 return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; 5647 } 5648 5649 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5650 { 5651 raw_spin_lock_init(&cfs_b->lock); 5652 cfs_b->runtime = 0; 5653 cfs_b->quota = RUNTIME_INF; 5654 cfs_b->period = ns_to_ktime(default_cfs_period()); 5655 cfs_b->burst = 0; 5656 5657 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); 5658 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 5659 cfs_b->period_timer.function = sched_cfs_period_timer; 5660 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 5661 cfs_b->slack_timer.function = sched_cfs_slack_timer; 5662 cfs_b->slack_started = false; 5663 } 5664 5665 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5666 { 5667 cfs_rq->runtime_enabled = 0; 5668 INIT_LIST_HEAD(&cfs_rq->throttled_list); 5669 } 5670 5671 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5672 { 5673 lockdep_assert_held(&cfs_b->lock); 5674 5675 if (cfs_b->period_active) 5676 return; 5677 5678 cfs_b->period_active = 1; 5679 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); 5680 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); 5681 } 5682 5683 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5684 { 5685 /* init_cfs_bandwidth() was not called */ 5686 if (!cfs_b->throttled_cfs_rq.next) 5687 return; 5688 5689 hrtimer_cancel(&cfs_b->period_timer); 5690 hrtimer_cancel(&cfs_b->slack_timer); 5691 } 5692 5693 /* 5694 * Both these CPU hotplug callbacks race against unregister_fair_sched_group() 5695 * 5696 * The race is harmless, since modifying bandwidth settings of unhooked group 5697 * bits doesn't do much. 5698 */ 5699 5700 /* cpu online callback */ 5701 static void __maybe_unused update_runtime_enabled(struct rq *rq) 5702 { 5703 struct task_group *tg; 5704 5705 lockdep_assert_rq_held(rq); 5706 5707 rcu_read_lock(); 5708 list_for_each_entry_rcu(tg, &task_groups, list) { 5709 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 5710 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5711 5712 raw_spin_lock(&cfs_b->lock); 5713 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; 5714 raw_spin_unlock(&cfs_b->lock); 5715 } 5716 rcu_read_unlock(); 5717 } 5718 5719 /* cpu offline callback */ 5720 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) 5721 { 5722 struct task_group *tg; 5723 5724 lockdep_assert_rq_held(rq); 5725 5726 rcu_read_lock(); 5727 list_for_each_entry_rcu(tg, &task_groups, list) { 5728 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5729 5730 if (!cfs_rq->runtime_enabled) 5731 continue; 5732 5733 /* 5734 * clock_task is not advancing so we just need to make sure 5735 * there's some valid quota amount 5736 */ 5737 cfs_rq->runtime_remaining = 1; 5738 /* 5739 * Offline rq is schedulable till CPU is completely disabled 5740 * in take_cpu_down(), so we prevent new cfs throttling here. 
5741 */ 5742 cfs_rq->runtime_enabled = 0; 5743 5744 if (cfs_rq_throttled(cfs_rq)) 5745 unthrottle_cfs_rq(cfs_rq); 5746 } 5747 rcu_read_unlock(); 5748 } 5749 5750 #else /* CONFIG_CFS_BANDWIDTH */ 5751 5752 static inline bool cfs_bandwidth_used(void) 5753 { 5754 return false; 5755 } 5756 5757 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} 5758 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } 5759 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 5760 static inline void sync_throttle(struct task_group *tg, int cpu) {} 5761 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 5762 5763 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 5764 { 5765 return 0; 5766 } 5767 5768 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 5769 { 5770 return 0; 5771 } 5772 5773 static inline int throttled_lb_pair(struct task_group *tg, 5774 int src_cpu, int dest_cpu) 5775 { 5776 return 0; 5777 } 5778 5779 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5780 5781 #ifdef CONFIG_FAIR_GROUP_SCHED 5782 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 5783 #endif 5784 5785 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 5786 { 5787 return NULL; 5788 } 5789 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5790 static inline void update_runtime_enabled(struct rq *rq) {} 5791 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} 5792 5793 #endif /* CONFIG_CFS_BANDWIDTH */ 5794 5795 /************************************************** 5796 * CFS operations on tasks: 5797 */ 5798 5799 #ifdef CONFIG_SCHED_HRTICK 5800 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) 5801 { 5802 struct sched_entity *se = &p->se; 5803 struct cfs_rq *cfs_rq = cfs_rq_of(se); 5804 5805 SCHED_WARN_ON(task_rq(p) != rq); 5806 5807 if (rq->cfs.h_nr_running > 1) { 5808 u64 slice = sched_slice(cfs_rq, se); 5809 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; 5810 s64 delta = slice - ran; 5811 5812 if (delta < 0) { 5813 if (task_current(rq, p)) 5814 resched_curr(rq); 5815 return; 5816 } 5817 hrtick_start(rq, delta); 5818 } 5819 } 5820 5821 /* 5822 * called from enqueue/dequeue and updates the hrtick when the 5823 * current task is from our class and nr_running is low enough 5824 * to matter. 
5825 */ 5826 static void hrtick_update(struct rq *rq) 5827 { 5828 struct task_struct *curr = rq->curr; 5829 5830 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class) 5831 return; 5832 5833 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) 5834 hrtick_start_fair(rq, curr); 5835 } 5836 #else /* !CONFIG_SCHED_HRTICK */ 5837 static inline void 5838 hrtick_start_fair(struct rq *rq, struct task_struct *p) 5839 { 5840 } 5841 5842 static inline void hrtick_update(struct rq *rq) 5843 { 5844 } 5845 #endif 5846 5847 #ifdef CONFIG_SMP 5848 static inline bool cpu_overutilized(int cpu) 5849 { 5850 return !fits_capacity(cpu_util_cfs(cpu), capacity_of(cpu)); 5851 } 5852 5853 static inline void update_overutilized_status(struct rq *rq) 5854 { 5855 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) { 5856 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); 5857 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED); 5858 } 5859 } 5860 #else 5861 static inline void update_overutilized_status(struct rq *rq) { } 5862 #endif 5863 5864 /* Runqueue only has SCHED_IDLE tasks enqueued */ 5865 static int sched_idle_rq(struct rq *rq) 5866 { 5867 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && 5868 rq->nr_running); 5869 } 5870 5871 /* 5872 * Returns true if cfs_rq only has SCHED_IDLE entities enqueued. Note the use 5873 * of idle_nr_running, which does not consider idle descendants of normal 5874 * entities. 5875 */ 5876 static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq) 5877 { 5878 return cfs_rq->nr_running && 5879 cfs_rq->nr_running == cfs_rq->idle_nr_running; 5880 } 5881 5882 #ifdef CONFIG_SMP 5883 static int sched_idle_cpu(int cpu) 5884 { 5885 return sched_idle_rq(cpu_rq(cpu)); 5886 } 5887 #endif 5888 5889 /* 5890 * The enqueue_task method is called before nr_running is 5891 * increased. Here we update the fair scheduling stats and 5892 * then put the task into the rbtree: 5893 */ 5894 static void 5895 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) 5896 { 5897 struct cfs_rq *cfs_rq; 5898 struct sched_entity *se = &p->se; 5899 int idle_h_nr_running = task_has_idle_policy(p); 5900 int task_new = !(flags & ENQUEUE_WAKEUP); 5901 5902 /* 5903 * The code below (indirectly) updates schedutil which looks at 5904 * the cfs_rq utilization to select a frequency. 5905 * Let's add the task's estimated utilization to the cfs_rq's 5906 * estimated utilization, before we update schedutil. 5907 */ 5908 util_est_enqueue(&rq->cfs, p); 5909 5910 /* 5911 * If in_iowait is set, the code below may not trigger any cpufreq 5912 * utilization updates, so do it here explicitly with the IOWAIT flag 5913 * passed. 
5914 */ 5915 if (p->in_iowait) 5916 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); 5917 5918 for_each_sched_entity(se) { 5919 if (se->on_rq) 5920 break; 5921 cfs_rq = cfs_rq_of(se); 5922 enqueue_entity(cfs_rq, se, flags); 5923 5924 cfs_rq->h_nr_running++; 5925 cfs_rq->idle_h_nr_running += idle_h_nr_running; 5926 5927 if (cfs_rq_is_idle(cfs_rq)) 5928 idle_h_nr_running = 1; 5929 5930 /* end evaluation on encountering a throttled cfs_rq */ 5931 if (cfs_rq_throttled(cfs_rq)) 5932 goto enqueue_throttle; 5933 5934 flags = ENQUEUE_WAKEUP; 5935 } 5936 5937 for_each_sched_entity(se) { 5938 cfs_rq = cfs_rq_of(se); 5939 5940 update_load_avg(cfs_rq, se, UPDATE_TG); 5941 se_update_runnable(se); 5942 update_cfs_group(se); 5943 5944 cfs_rq->h_nr_running++; 5945 cfs_rq->idle_h_nr_running += idle_h_nr_running; 5946 5947 if (cfs_rq_is_idle(cfs_rq)) 5948 idle_h_nr_running = 1; 5949 5950 /* end evaluation on encountering a throttled cfs_rq */ 5951 if (cfs_rq_throttled(cfs_rq)) 5952 goto enqueue_throttle; 5953 } 5954 5955 /* At this point se is NULL and we are at root level*/ 5956 add_nr_running(rq, 1); 5957 5958 /* 5959 * Since new tasks are assigned an initial util_avg equal to 5960 * half of the spare capacity of their CPU, tiny tasks have the 5961 * ability to cross the overutilized threshold, which will 5962 * result in the load balancer ruining all the task placement 5963 * done by EAS. As a way to mitigate that effect, do not account 5964 * for the first enqueue operation of new tasks during the 5965 * overutilized flag detection. 5966 * 5967 * A better way of solving this problem would be to wait for 5968 * the PELT signals of tasks to converge before taking them 5969 * into account, but that is not straightforward to implement, 5970 * and the following generally works well enough in practice. 5971 */ 5972 if (!task_new) 5973 update_overutilized_status(rq); 5974 5975 enqueue_throttle: 5976 assert_list_leaf_cfs_rq(rq); 5977 5978 hrtick_update(rq); 5979 } 5980 5981 static void set_next_buddy(struct sched_entity *se); 5982 5983 /* 5984 * The dequeue_task method is called before nr_running is 5985 * decreased. We remove the task from the rbtree and 5986 * update the fair scheduling stats: 5987 */ 5988 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) 5989 { 5990 struct cfs_rq *cfs_rq; 5991 struct sched_entity *se = &p->se; 5992 int task_sleep = flags & DEQUEUE_SLEEP; 5993 int idle_h_nr_running = task_has_idle_policy(p); 5994 bool was_sched_idle = sched_idle_rq(rq); 5995 5996 util_est_dequeue(&rq->cfs, p); 5997 5998 for_each_sched_entity(se) { 5999 cfs_rq = cfs_rq_of(se); 6000 dequeue_entity(cfs_rq, se, flags); 6001 6002 cfs_rq->h_nr_running--; 6003 cfs_rq->idle_h_nr_running -= idle_h_nr_running; 6004 6005 if (cfs_rq_is_idle(cfs_rq)) 6006 idle_h_nr_running = 1; 6007 6008 /* end evaluation on encountering a throttled cfs_rq */ 6009 if (cfs_rq_throttled(cfs_rq)) 6010 goto dequeue_throttle; 6011 6012 /* Don't dequeue parent if it has other entities besides us */ 6013 if (cfs_rq->load.weight) { 6014 /* Avoid re-evaluating load for this entity: */ 6015 se = parent_entity(se); 6016 /* 6017 * Bias pick_next to pick a task from this cfs_rq, as 6018 * p is sleeping when it is within its sched_slice. 
6019 */ 6020 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) 6021 set_next_buddy(se); 6022 break; 6023 } 6024 flags |= DEQUEUE_SLEEP; 6025 } 6026 6027 for_each_sched_entity(se) { 6028 cfs_rq = cfs_rq_of(se); 6029 6030 update_load_avg(cfs_rq, se, UPDATE_TG); 6031 se_update_runnable(se); 6032 update_cfs_group(se); 6033 6034 cfs_rq->h_nr_running--; 6035 cfs_rq->idle_h_nr_running -= idle_h_nr_running; 6036 6037 if (cfs_rq_is_idle(cfs_rq)) 6038 idle_h_nr_running = 1; 6039 6040 /* end evaluation on encountering a throttled cfs_rq */ 6041 if (cfs_rq_throttled(cfs_rq)) 6042 goto dequeue_throttle; 6043 6044 } 6045 6046 /* At this point se is NULL and we are at root level*/ 6047 sub_nr_running(rq, 1); 6048 6049 /* balance early to pull high priority tasks */ 6050 if (unlikely(!was_sched_idle && sched_idle_rq(rq))) 6051 rq->next_balance = jiffies; 6052 6053 dequeue_throttle: 6054 util_est_update(&rq->cfs, p, task_sleep); 6055 hrtick_update(rq); 6056 } 6057 6058 #ifdef CONFIG_SMP 6059 6060 /* Working cpumask for: load_balance, load_balance_newidle. */ 6061 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); 6062 DEFINE_PER_CPU(cpumask_var_t, select_rq_mask); 6063 6064 #ifdef CONFIG_NO_HZ_COMMON 6065 6066 static struct { 6067 cpumask_var_t idle_cpus_mask; 6068 atomic_t nr_cpus; 6069 int has_blocked; /* Idle CPUS has blocked load */ 6070 int needs_update; /* Newly idle CPUs need their next_balance collated */ 6071 unsigned long next_balance; /* in jiffy units */ 6072 unsigned long next_blocked; /* Next update of blocked load in jiffies */ 6073 } nohz ____cacheline_aligned; 6074 6075 #endif /* CONFIG_NO_HZ_COMMON */ 6076 6077 static unsigned long cpu_load(struct rq *rq) 6078 { 6079 return cfs_rq_load_avg(&rq->cfs); 6080 } 6081 6082 /* 6083 * cpu_load_without - compute CPU load without any contributions from *p 6084 * @cpu: the CPU which load is requested 6085 * @p: the task which load should be discounted 6086 * 6087 * The load of a CPU is defined by the load of tasks currently enqueued on that 6088 * CPU as well as tasks which are currently sleeping after an execution on that 6089 * CPU. 6090 * 6091 * This method returns the load of the specified CPU by discounting the load of 6092 * the specified task, whenever the task is currently contributing to the CPU 6093 * load. 
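* (i.e. the root cfs_rq load_avg minus task_h_load(p), but only when p is still accounted on this CPU and has a valid last_update_time; otherwise plain cpu_load() is returned.)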
6094 */ 6095 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) 6096 { 6097 struct cfs_rq *cfs_rq; 6098 unsigned int load; 6099 6100 /* Task has no contribution or is new */ 6101 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 6102 return cpu_load(rq); 6103 6104 cfs_rq = &rq->cfs; 6105 load = READ_ONCE(cfs_rq->avg.load_avg); 6106 6107 /* Discount task's util from CPU's util */ 6108 lsub_positive(&load, task_h_load(p)); 6109 6110 return load; 6111 } 6112 6113 static unsigned long cpu_runnable(struct rq *rq) 6114 { 6115 return cfs_rq_runnable_avg(&rq->cfs); 6116 } 6117 6118 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p) 6119 { 6120 struct cfs_rq *cfs_rq; 6121 unsigned int runnable; 6122 6123 /* Task has no contribution or is new */ 6124 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 6125 return cpu_runnable(rq); 6126 6127 cfs_rq = &rq->cfs; 6128 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); 6129 6130 /* Discount task's runnable from CPU's runnable */ 6131 lsub_positive(&runnable, p->se.avg.runnable_avg); 6132 6133 return runnable; 6134 } 6135 6136 static unsigned long capacity_of(int cpu) 6137 { 6138 return cpu_rq(cpu)->cpu_capacity; 6139 } 6140 6141 static void record_wakee(struct task_struct *p) 6142 { 6143 /* 6144 * Only decay a single time; tasks that have less than 1 wakeup per 6145 * jiffy will not have built up many flips. 6146 */ 6147 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { 6148 current->wakee_flips >>= 1; 6149 current->wakee_flip_decay_ts = jiffies; 6150 } 6151 6152 if (current->last_wakee != p) { 6153 current->last_wakee = p; 6154 current->wakee_flips++; 6155 } 6156 } 6157 6158 /* 6159 * Detect M:N waker/wakee relationships via a switching-frequency heuristic. 6160 * 6161 * A waker of many should wake a different task than the one last awakened 6162 * at a frequency roughly N times higher than one of its wakees. 6163 * 6164 * In order to determine whether we should let the load spread vs consolidating 6165 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one 6166 * partner, and a factor of llc_size higher frequency in the other. 6167 * 6168 * With both conditions met, we can be relatively sure that the relationship is 6169 * non-monogamous, with partner count exceeding socket size. 6170 * 6171 * Waker/wakee being client/server, worker/dispatcher, interrupt source or 6172 * whatever is irrelevant, the spread criterion is that the apparent partner 6173 * count exceeds the socket size. 6174 */ 6175 static int wake_wide(struct task_struct *p) 6176 { 6177 unsigned int master = current->wakee_flips; 6178 unsigned int slave = p->wakee_flips; 6179 int factor = __this_cpu_read(sd_llc_size); 6180 6181 if (master < slave) 6182 swap(master, slave); 6183 if (slave < factor || master < slave * factor) 6184 return 0; 6185 return 1; 6186 } 6187 6188 /* 6189 * The purpose of wake_affine() is to quickly determine on which CPU we can run 6190 * soonest. For the purpose of speed we only consider the waking and previous 6191 * CPU. 6192 * 6193 * wake_affine_idle() - only considers 'now', it checks if the waking CPU is 6194 * cache-affine and is (or will be) idle. 6195 * 6196 * wake_affine_weight() - considers the weight to reflect the average 6197 * scheduling latency of the CPUs. This seems to work 6198 * for the overloaded case.
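* Both helpers return nr_cpumask_bits when they have no preference, which wake_affine() below translates back into 'stay on prev_cpu'.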
6199 */ 6200 static int 6201 wake_affine_idle(int this_cpu, int prev_cpu, int sync) 6202 { 6203 /* 6204 * If this_cpu is idle, it implies the wakeup is from interrupt 6205 * context. Only allow the move if cache is shared. Otherwise an 6206 * interrupt intensive workload could force all tasks onto one 6207 * node depending on the IO topology or IRQ affinity settings. 6208 * 6209 * If the prev_cpu is idle and cache affine then avoid a migration. 6210 * There is no guarantee that the cache hot data from an interrupt 6211 * is more important than cache hot data on the prev_cpu and from 6212 * a cpufreq perspective, it's better to have higher utilisation 6213 * on one CPU. 6214 */ 6215 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) 6216 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; 6217 6218 if (sync && cpu_rq(this_cpu)->nr_running == 1) 6219 return this_cpu; 6220 6221 if (available_idle_cpu(prev_cpu)) 6222 return prev_cpu; 6223 6224 return nr_cpumask_bits; 6225 } 6226 6227 static int 6228 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, 6229 int this_cpu, int prev_cpu, int sync) 6230 { 6231 s64 this_eff_load, prev_eff_load; 6232 unsigned long task_load; 6233 6234 this_eff_load = cpu_load(cpu_rq(this_cpu)); 6235 6236 if (sync) { 6237 unsigned long current_load = task_h_load(current); 6238 6239 if (current_load > this_eff_load) 6240 return this_cpu; 6241 6242 this_eff_load -= current_load; 6243 } 6244 6245 task_load = task_h_load(p); 6246 6247 this_eff_load += task_load; 6248 if (sched_feat(WA_BIAS)) 6249 this_eff_load *= 100; 6250 this_eff_load *= capacity_of(prev_cpu); 6251 6252 prev_eff_load = cpu_load(cpu_rq(prev_cpu)); 6253 prev_eff_load -= task_load; 6254 if (sched_feat(WA_BIAS)) 6255 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; 6256 prev_eff_load *= capacity_of(this_cpu); 6257 6258 /* 6259 * If sync, adjust the weight of prev_eff_load such that if 6260 * prev_eff == this_eff that select_idle_sibling() will consider 6261 * stacking the wakee on top of the waker if no other CPU is 6262 * idle. 6263 */ 6264 if (sync) 6265 prev_eff_load += 1; 6266 6267 return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; 6268 } 6269 6270 static int wake_affine(struct sched_domain *sd, struct task_struct *p, 6271 int this_cpu, int prev_cpu, int sync) 6272 { 6273 int target = nr_cpumask_bits; 6274 6275 if (sched_feat(WA_IDLE)) 6276 target = wake_affine_idle(this_cpu, prev_cpu, sync); 6277 6278 if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits) 6279 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); 6280 6281 schedstat_inc(p->stats.nr_wakeups_affine_attempts); 6282 if (target == nr_cpumask_bits) 6283 return prev_cpu; 6284 6285 schedstat_inc(sd->ttwu_move_affine); 6286 schedstat_inc(p->stats.nr_wakeups_affine); 6287 return target; 6288 } 6289 6290 static struct sched_group * 6291 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu); 6292 6293 /* 6294 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group. 
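* Preference order: a SCHED_IDLE CPU, then the idle CPU with the shallowest exit latency (ties broken by the most recent idle timestamp), then the least loaded busy CPU.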
6295 */ 6296 static int 6297 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) 6298 { 6299 unsigned long load, min_load = ULONG_MAX; 6300 unsigned int min_exit_latency = UINT_MAX; 6301 u64 latest_idle_timestamp = 0; 6302 int least_loaded_cpu = this_cpu; 6303 int shallowest_idle_cpu = -1; 6304 int i; 6305 6306 /* Check if we have any choice: */ 6307 if (group->group_weight == 1) 6308 return cpumask_first(sched_group_span(group)); 6309 6310 /* Traverse only the allowed CPUs */ 6311 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { 6312 struct rq *rq = cpu_rq(i); 6313 6314 if (!sched_core_cookie_match(rq, p)) 6315 continue; 6316 6317 if (sched_idle_cpu(i)) 6318 return i; 6319 6320 if (available_idle_cpu(i)) { 6321 struct cpuidle_state *idle = idle_get_state(rq); 6322 if (idle && idle->exit_latency < min_exit_latency) { 6323 /* 6324 * We give priority to a CPU whose idle state 6325 * has the smallest exit latency irrespective 6326 * of any idle timestamp. 6327 */ 6328 min_exit_latency = idle->exit_latency; 6329 latest_idle_timestamp = rq->idle_stamp; 6330 shallowest_idle_cpu = i; 6331 } else if ((!idle || idle->exit_latency == min_exit_latency) && 6332 rq->idle_stamp > latest_idle_timestamp) { 6333 /* 6334 * If equal or no active idle state, then 6335 * the most recently idled CPU might have 6336 * a warmer cache. 6337 */ 6338 latest_idle_timestamp = rq->idle_stamp; 6339 shallowest_idle_cpu = i; 6340 } 6341 } else if (shallowest_idle_cpu == -1) { 6342 load = cpu_load(cpu_rq(i)); 6343 if (load < min_load) { 6344 min_load = load; 6345 least_loaded_cpu = i; 6346 } 6347 } 6348 } 6349 6350 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; 6351 } 6352 6353 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, 6354 int cpu, int prev_cpu, int sd_flag) 6355 { 6356 int new_cpu = cpu; 6357 6358 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) 6359 return prev_cpu; 6360 6361 /* 6362 * We need task's util for cpu_util_without, sync it up to 6363 * prev_cpu's last_update_time. 
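* (skipped below for SD_BALANCE_FORK, since a freshly forked task has no utilization history worth syncing).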
6364 */ 6365 if (!(sd_flag & SD_BALANCE_FORK)) 6366 sync_entity_load_avg(&p->se); 6367 6368 while (sd) { 6369 struct sched_group *group; 6370 struct sched_domain *tmp; 6371 int weight; 6372 6373 if (!(sd->flags & sd_flag)) { 6374 sd = sd->child; 6375 continue; 6376 } 6377 6378 group = find_idlest_group(sd, p, cpu); 6379 if (!group) { 6380 sd = sd->child; 6381 continue; 6382 } 6383 6384 new_cpu = find_idlest_group_cpu(group, p, cpu); 6385 if (new_cpu == cpu) { 6386 /* Now try balancing at a lower domain level of 'cpu': */ 6387 sd = sd->child; 6388 continue; 6389 } 6390 6391 /* Now try balancing at a lower domain level of 'new_cpu': */ 6392 cpu = new_cpu; 6393 weight = sd->span_weight; 6394 sd = NULL; 6395 for_each_domain(cpu, tmp) { 6396 if (weight <= tmp->span_weight) 6397 break; 6398 if (tmp->flags & sd_flag) 6399 sd = tmp; 6400 } 6401 } 6402 6403 return new_cpu; 6404 } 6405 6406 static inline int __select_idle_cpu(int cpu, struct task_struct *p) 6407 { 6408 if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) && 6409 sched_cpu_cookie_match(cpu_rq(cpu), p)) 6410 return cpu; 6411 6412 return -1; 6413 } 6414 6415 #ifdef CONFIG_SCHED_SMT 6416 DEFINE_STATIC_KEY_FALSE(sched_smt_present); 6417 EXPORT_SYMBOL_GPL(sched_smt_present); 6418 6419 static inline void set_idle_cores(int cpu, int val) 6420 { 6421 struct sched_domain_shared *sds; 6422 6423 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 6424 if (sds) 6425 WRITE_ONCE(sds->has_idle_cores, val); 6426 } 6427 6428 static inline bool test_idle_cores(int cpu, bool def) 6429 { 6430 struct sched_domain_shared *sds; 6431 6432 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 6433 if (sds) 6434 return READ_ONCE(sds->has_idle_cores); 6435 6436 return def; 6437 } 6438 6439 /* 6440 * Scans the local SMT mask to see if the entire core is idle, and records this 6441 * information in sd_llc_shared->has_idle_cores. 6442 * 6443 * Since SMT siblings share all cache levels, inspecting this limited remote 6444 * state should be fairly cheap. 6445 */ 6446 void __update_idle_core(struct rq *rq) 6447 { 6448 int core = cpu_of(rq); 6449 int cpu; 6450 6451 rcu_read_lock(); 6452 if (test_idle_cores(core, true)) 6453 goto unlock; 6454 6455 for_each_cpu(cpu, cpu_smt_mask(core)) { 6456 if (cpu == core) 6457 continue; 6458 6459 if (!available_idle_cpu(cpu)) 6460 goto unlock; 6461 } 6462 6463 set_idle_cores(core, 1); 6464 unlock: 6465 rcu_read_unlock(); 6466 } 6467 6468 /* 6469 * Scan the entire LLC domain for idle cores; this dynamically switches off if 6470 * there are no idle cores left in the system; tracked through 6471 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. 6472 */ 6473 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) 6474 { 6475 bool idle = true; 6476 int cpu; 6477 6478 if (!static_branch_likely(&sched_smt_present)) 6479 return __select_idle_cpu(core, p); 6480 6481 for_each_cpu(cpu, cpu_smt_mask(core)) { 6482 if (!available_idle_cpu(cpu)) { 6483 idle = false; 6484 if (*idle_cpu == -1) { 6485 if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) { 6486 *idle_cpu = cpu; 6487 break; 6488 } 6489 continue; 6490 } 6491 break; 6492 } 6493 if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr)) 6494 *idle_cpu = cpu; 6495 } 6496 6497 if (idle) 6498 return core; 6499 6500 cpumask_andnot(cpus, cpus, cpu_smt_mask(core)); 6501 return -1; 6502 } 6503 6504 /* 6505 * Scan the local SMT mask for idle CPUs. 
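* Returns an idle (or SCHED_IDLE) sibling of @target that is allowed for @p and lies within @sd's span, or -1 if none is found.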
6506 */ 6507 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 6508 { 6509 int cpu; 6510 6511 for_each_cpu(cpu, cpu_smt_mask(target)) { 6512 if (!cpumask_test_cpu(cpu, p->cpus_ptr) || 6513 !cpumask_test_cpu(cpu, sched_domain_span(sd))) 6514 continue; 6515 if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) 6516 return cpu; 6517 } 6518 6519 return -1; 6520 } 6521 6522 #else /* CONFIG_SCHED_SMT */ 6523 6524 static inline void set_idle_cores(int cpu, int val) 6525 { 6526 } 6527 6528 static inline bool test_idle_cores(int cpu, bool def) 6529 { 6530 return def; 6531 } 6532 6533 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) 6534 { 6535 return __select_idle_cpu(core, p); 6536 } 6537 6538 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 6539 { 6540 return -1; 6541 } 6542 6543 #endif /* CONFIG_SCHED_SMT */ 6544 6545 /* 6546 * Scan the LLC domain for idle CPUs; this is dynamically regulated by 6547 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the 6548 * average idle time for this rq (as found in rq->avg_idle). 6549 */ 6550 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target) 6551 { 6552 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask); 6553 int i, cpu, idle_cpu = -1, nr = INT_MAX; 6554 struct sched_domain_shared *sd_share; 6555 struct rq *this_rq = this_rq(); 6556 int this = smp_processor_id(); 6557 struct sched_domain *this_sd; 6558 u64 time = 0; 6559 6560 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); 6561 if (!this_sd) 6562 return -1; 6563 6564 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 6565 6566 if (sched_feat(SIS_PROP) && !has_idle_core) { 6567 u64 avg_cost, avg_idle, span_avg; 6568 unsigned long now = jiffies; 6569 6570 /* 6571 * If we're busy, the assumption that the last idle period 6572 * predicts the future is flawed; age away the remaining 6573 * predicted idle time. 6574 */ 6575 if (unlikely(this_rq->wake_stamp < now)) { 6576 while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) { 6577 this_rq->wake_stamp++; 6578 this_rq->wake_avg_idle >>= 1; 6579 } 6580 } 6581 6582 avg_idle = this_rq->wake_avg_idle; 6583 avg_cost = this_sd->avg_scan_cost + 1; 6584 6585 span_avg = sd->span_weight * avg_idle; 6586 if (span_avg > 4*avg_cost) 6587 nr = div_u64(span_avg, avg_cost); 6588 else 6589 nr = 4; 6590 6591 time = cpu_clock(this); 6592 } 6593 6594 if (sched_feat(SIS_UTIL)) { 6595 sd_share = rcu_dereference(per_cpu(sd_llc_shared, target)); 6596 if (sd_share) { 6597 /* because !--nr is the condition to stop scan */ 6598 nr = READ_ONCE(sd_share->nr_idle_scan) + 1; 6599 /* overloaded LLC is unlikely to have idle cpu/core */ 6600 if (nr == 1) 6601 return -1; 6602 } 6603 } 6604 6605 for_each_cpu_wrap(cpu, cpus, target + 1) { 6606 if (has_idle_core) { 6607 i = select_idle_core(p, cpu, cpus, &idle_cpu); 6608 if ((unsigned int)i < nr_cpumask_bits) 6609 return i; 6610 6611 } else { 6612 if (!--nr) 6613 return -1; 6614 idle_cpu = __select_idle_cpu(cpu, p); 6615 if ((unsigned int)idle_cpu < nr_cpumask_bits) 6616 break; 6617 } 6618 } 6619 6620 if (has_idle_core) 6621 set_idle_cores(target, false); 6622 6623 if (sched_feat(SIS_PROP) && !has_idle_core) { 6624 time = cpu_clock(this) - time; 6625 6626 /* 6627 * Account for the scan cost of wakeups against the average 6628 * idle time. 
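* e.g. a 30us scan charged against a 100us wake_avg_idle leaves 70us of idle budget, which shrinks the proportional scan depth (nr) computed above for the next wakeup.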
6629 */ 6630 this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time); 6631 6632 update_avg(&this_sd->avg_scan_cost, time); 6633 } 6634 6635 return idle_cpu; 6636 } 6637 6638 /* 6639 * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which 6640 * the task fits. If no CPU is big enough, but there are idle ones, try to 6641 * maximize capacity. 6642 */ 6643 static int 6644 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) 6645 { 6646 unsigned long task_util, best_cap = 0; 6647 int cpu, best_cpu = -1; 6648 struct cpumask *cpus; 6649 6650 cpus = this_cpu_cpumask_var_ptr(select_rq_mask); 6651 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 6652 6653 task_util = uclamp_task_util(p); 6654 6655 for_each_cpu_wrap(cpu, cpus, target) { 6656 unsigned long cpu_cap = capacity_of(cpu); 6657 6658 if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) 6659 continue; 6660 if (fits_capacity(task_util, cpu_cap)) 6661 return cpu; 6662 6663 if (cpu_cap > best_cap) { 6664 best_cap = cpu_cap; 6665 best_cpu = cpu; 6666 } 6667 } 6668 6669 return best_cpu; 6670 } 6671 6672 static inline bool asym_fits_capacity(unsigned long task_util, int cpu) 6673 { 6674 if (static_branch_unlikely(&sched_asym_cpucapacity)) 6675 return fits_capacity(task_util, capacity_of(cpu)); 6676 6677 return true; 6678 } 6679 6680 /* 6681 * Try and locate an idle core/thread in the LLC cache domain. 6682 */ 6683 static int select_idle_sibling(struct task_struct *p, int prev, int target) 6684 { 6685 bool has_idle_core = false; 6686 struct sched_domain *sd; 6687 unsigned long task_util; 6688 int i, recent_used_cpu; 6689 6690 /* 6691 * On asymmetric systems, update task utilization because we will check 6692 * that the task fits the CPU's capacity. 6693 */ 6694 if (static_branch_unlikely(&sched_asym_cpucapacity)) { 6695 sync_entity_load_avg(&p->se); 6696 task_util = uclamp_task_util(p); 6697 } 6698 6699 /* 6700 * per-cpu select_rq_mask usage 6701 */ 6702 lockdep_assert_irqs_disabled(); 6703 6704 if ((available_idle_cpu(target) || sched_idle_cpu(target)) && 6705 asym_fits_capacity(task_util, target)) 6706 return target; 6707 6708 /* 6709 * If the previous CPU is cache affine and idle, don't be stupid: 6710 */ 6711 if (prev != target && cpus_share_cache(prev, target) && 6712 (available_idle_cpu(prev) || sched_idle_cpu(prev)) && 6713 asym_fits_capacity(task_util, prev)) 6714 return prev; 6715 6716 /* 6717 * Allow a per-cpu kthread to stack with the wakee if the 6718 * kworker thread's CPU and the task's previous CPU are the same. 6719 * The assumption is that the wakee queued work for the 6720 * per-cpu kthread that is now complete and the wakeup is 6721 * essentially a sync wakeup. An obvious example of this 6722 * pattern is IO completions.
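* The nr_running <= 1 check below limits this to the case where the kworker is about to hand the CPU over to the wakee, rather than stacking onto an already busy runqueue.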
6723 */ 6724 if (is_per_cpu_kthread(current) && 6725 in_task() && 6726 prev == smp_processor_id() && 6727 this_rq()->nr_running <= 1 && 6728 asym_fits_capacity(task_util, prev)) { 6729 return prev; 6730 } 6731 6732 /* Check a recently used CPU as a potential idle candidate: */ 6733 recent_used_cpu = p->recent_used_cpu; 6734 p->recent_used_cpu = prev; 6735 if (recent_used_cpu != prev && 6736 recent_used_cpu != target && 6737 cpus_share_cache(recent_used_cpu, target) && 6738 (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && 6739 cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) && 6740 asym_fits_capacity(task_util, recent_used_cpu)) { 6741 return recent_used_cpu; 6742 } 6743 6744 /* 6745 * For asymmetric CPU capacity systems, our domain of interest is 6746 * sd_asym_cpucapacity rather than sd_llc. 6747 */ 6748 if (static_branch_unlikely(&sched_asym_cpucapacity)) { 6749 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target)); 6750 /* 6751 * On an asymmetric CPU capacity system where an exclusive 6752 * cpuset defines a symmetric island (i.e. one unique 6753 * capacity_orig value through the cpuset), the key will be set 6754 * but the CPUs within that cpuset will not have a domain with 6755 * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric 6756 * capacity path. 6757 */ 6758 if (sd) { 6759 i = select_idle_capacity(p, sd, target); 6760 return ((unsigned)i < nr_cpumask_bits) ? i : target; 6761 } 6762 } 6763 6764 sd = rcu_dereference(per_cpu(sd_llc, target)); 6765 if (!sd) 6766 return target; 6767 6768 if (sched_smt_active()) { 6769 has_idle_core = test_idle_cores(target, false); 6770 6771 if (!has_idle_core && cpus_share_cache(prev, target)) { 6772 i = select_idle_smt(p, sd, prev); 6773 if ((unsigned int)i < nr_cpumask_bits) 6774 return i; 6775 } 6776 } 6777 6778 i = select_idle_cpu(p, sd, has_idle_core, target); 6779 if ((unsigned)i < nr_cpumask_bits) 6780 return i; 6781 6782 return target; 6783 } 6784 6785 /* 6786 * Predicts what cpu_util(@cpu) would return if @p was removed from @cpu 6787 * (@dst_cpu = -1) or migrated to @dst_cpu. 6788 */ 6789 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) 6790 { 6791 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; 6792 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); 6793 6794 /* 6795 * If @dst_cpu is -1 or @p migrates from @cpu to @dst_cpu remove its 6796 * contribution. If @p migrates from another CPU to @cpu add its 6797 * contribution. In all the other cases @cpu is not impacted by the 6798 * migration so its util_avg is already correct. 6799 */ 6800 if (task_cpu(p) == cpu && dst_cpu != cpu) 6801 lsub_positive(&util, task_util(p)); 6802 else if (task_cpu(p) != cpu && dst_cpu == cpu) 6803 util += task_util(p); 6804 6805 if (sched_feat(UTIL_EST)) { 6806 unsigned long util_est; 6807 6808 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); 6809 6810 /* 6811 * During wake-up @p isn't enqueued yet and doesn't contribute 6812 * to any cpu_rq(cpu)->cfs.avg.util_est.enqueued. 6813 * If @dst_cpu == @cpu add it to "simulate" cpu_util after @p 6814 * has been enqueued. 6815 * 6816 * During exec (@dst_cpu = -1) @p is enqueued and does 6817 * contribute to cpu_rq(cpu)->cfs.util_est.enqueued. 6818 * Remove it to "simulate" cpu_util without @p's contribution. 6819 * 6820 * Despite the task_on_rq_queued(@p) check there is still a 6821 * small window for a possible race when an exec 6822 * select_task_rq_fair() races with LB's detach_task(). 
6823 * 6824 * detach_task() 6825 * deactivate_task() 6826 * p->on_rq = TASK_ON_RQ_MIGRATING; 6827 * -------------------------------- A 6828 * dequeue_task() \ 6829 * dequeue_task_fair() + Race Time 6830 * util_est_dequeue() / 6831 * -------------------------------- B 6832 * 6833 * The additional check "current == p" is required to further 6834 * reduce the race window. 6835 */ 6836 if (dst_cpu == cpu) 6837 util_est += _task_util_est(p); 6838 else if (unlikely(task_on_rq_queued(p) || current == p)) 6839 lsub_positive(&util_est, _task_util_est(p)); 6840 6841 util = max(util, util_est); 6842 } 6843 6844 return min(util, capacity_orig_of(cpu)); 6845 } 6846 6847 /* 6848 * cpu_util_without: compute cpu utilization without any contributions from *p 6849 * @cpu: the CPU which utilization is requested 6850 * @p: the task which utilization should be discounted 6851 * 6852 * The utilization of a CPU is defined by the utilization of tasks currently 6853 * enqueued on that CPU as well as tasks which are currently sleeping after an 6854 * execution on that CPU. 6855 * 6856 * This method returns the utilization of the specified CPU by discounting the 6857 * utilization of the specified task, whenever the task is currently 6858 * contributing to the CPU utilization. 6859 */ 6860 static unsigned long cpu_util_without(int cpu, struct task_struct *p) 6861 { 6862 /* Task has no contribution or is new */ 6863 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 6864 return cpu_util_cfs(cpu); 6865 6866 return cpu_util_next(cpu, p, -1); 6867 } 6868 6869 /* 6870 * energy_env - Utilization landscape for energy estimation. 6871 * @task_busy_time: Utilization contribution by the task for which we test the 6872 * placement. Given by eenv_task_busy_time(). 6873 * @pd_busy_time: Utilization of the whole perf domain without the task 6874 * contribution. Given by eenv_pd_busy_time(). 6875 * @cpu_cap: Maximum CPU capacity for the perf domain. 6876 * @pd_cap: Entire perf domain capacity. (pd->nr_cpus * cpu_cap). 6877 */ 6878 struct energy_env { 6879 unsigned long task_busy_time; 6880 unsigned long pd_busy_time; 6881 unsigned long cpu_cap; 6882 unsigned long pd_cap; 6883 }; 6884 6885 /* 6886 * Compute the task busy time for compute_energy(). This time cannot be 6887 * injected directly into effective_cpu_util() because of the IRQ scaling. 6888 * The latter only makes sense with the most recent CPUs where the task has 6889 * run. 6890 */ 6891 static inline void eenv_task_busy_time(struct energy_env *eenv, 6892 struct task_struct *p, int prev_cpu) 6893 { 6894 unsigned long busy_time, max_cap = arch_scale_cpu_capacity(prev_cpu); 6895 unsigned long irq = cpu_util_irq(cpu_rq(prev_cpu)); 6896 6897 if (unlikely(irq >= max_cap)) 6898 busy_time = max_cap; 6899 else 6900 busy_time = scale_irq_capacity(task_util_est(p), irq, max_cap); 6901 6902 eenv->task_busy_time = busy_time; 6903 } 6904 6905 /* 6906 * Compute the perf_domain (PD) busy time for compute_energy(). Based on the 6907 * utilization for each @pd_cpus, it however doesn't take into account 6908 * clamping since the ratio (utilization / cpu_capacity) is already enough to 6909 * scale the EM reported power consumption at the (eventually clamped) 6910 * cpu_capacity. 6911 * 6912 * The contribution of the task @p for which we want to estimate the 6913 * energy cost is removed (by cpu_util_next()) and must be calculated 6914 * separately (see eenv_task_busy_time). 
This ensures: 6915 * 6916 * - A stable PD utilization, no matter which CPU of that PD we want to place 6917 * the task on. 6918 * 6919 * - A fair comparison between CPUs as the task contribution (task_util()) 6920 * will always be the same no matter which CPU utilization we rely on 6921 * (util_avg or util_est). 6922 * 6923 * Set @eenv busy time for the PD that spans @pd_cpus. This busy time can't 6924 * exceed @eenv->pd_cap. 6925 */ 6926 static inline void eenv_pd_busy_time(struct energy_env *eenv, 6927 struct cpumask *pd_cpus, 6928 struct task_struct *p) 6929 { 6930 unsigned long busy_time = 0; 6931 int cpu; 6932 6933 for_each_cpu(cpu, pd_cpus) { 6934 unsigned long util = cpu_util_next(cpu, p, -1); 6935 6936 busy_time += effective_cpu_util(cpu, util, ENERGY_UTIL, NULL); 6937 } 6938 6939 eenv->pd_busy_time = min(eenv->pd_cap, busy_time); 6940 } 6941 6942 /* 6943 * Compute the maximum utilization for compute_energy() when the task @p 6944 * is placed on the cpu @dst_cpu. 6945 * 6946 * Returns the maximum utilization among @eenv->cpus. This utilization can't 6947 * exceed @eenv->cpu_cap. 6948 */ 6949 static inline unsigned long 6950 eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus, 6951 struct task_struct *p, int dst_cpu) 6952 { 6953 unsigned long max_util = 0; 6954 int cpu; 6955 6956 for_each_cpu(cpu, pd_cpus) { 6957 struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL; 6958 unsigned long util = cpu_util_next(cpu, p, dst_cpu); 6959 unsigned long cpu_util; 6960 6961 /* 6962 * Performance domain frequency: utilization clamping 6963 * must be considered since it affects the selection 6964 * of the performance domain frequency. 6965 * NOTE: in case RT tasks are running, by default the 6966 * FREQUENCY_UTIL's utilization can be max OPP. 6967 */ 6968 cpu_util = effective_cpu_util(cpu, util, FREQUENCY_UTIL, tsk); 6969 max_util = max(max_util, cpu_util); 6970 } 6971 6972 return min(max_util, eenv->cpu_cap); 6973 } 6974 6975 /* 6976 * compute_energy(): Use the Energy Model to estimate the energy that @pd would 6977 * consume for a given utilization landscape @eenv. When @dst_cpu < 0, the task 6978 * contribution is ignored. 6979 */ 6980 static inline unsigned long 6981 compute_energy(struct energy_env *eenv, struct perf_domain *pd, 6982 struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu) 6983 { 6984 unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu); 6985 unsigned long busy_time = eenv->pd_busy_time; 6986 6987 if (dst_cpu >= 0) 6988 busy_time = min(eenv->pd_cap, busy_time + eenv->task_busy_time); 6989 6990 return em_cpu_energy(pd->em_pd, max_util, busy_time, eenv->cpu_cap); 6991 } 6992 6993 /* 6994 * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the 6995 * waking task. find_energy_efficient_cpu() looks for the CPU with maximum 6996 * spare capacity in each performance domain and uses it as a potential 6997 * candidate to execute the task. Then, it uses the Energy Model to figure 6998 * out which of the CPU candidates is the most energy-efficient. 6999 * 7000 * The rationale for this heuristic is as follows. In a performance domain, 7001 * all the most energy efficient CPU candidates (according to the Energy 7002 * Model) are those for which we'll request a low frequency. When there are 7003 * several CPUs for which the frequency request will be the same, we don't 7004 * have enough data to break the tie between them, because the Energy Model 7005 * only includes active power costs. 
With this model, if we assume that 7006 * frequency requests follow utilization (e.g. using schedutil), the CPU with 7007 * the maximum spare capacity in a performance domain is guaranteed to be among 7008 * the best candidates of the performance domain. 7009 * 7010 * In practice, it could be preferable from an energy standpoint to pack 7011 * small tasks on a CPU in order to let other CPUs go in deeper idle states, 7012 * but that could also hurt our chances to go cluster idle, and we have no 7013 * ways to tell with the current Energy Model if this is actually a good 7014 * idea or not. So, find_energy_efficient_cpu() basically favors 7015 * cluster-packing, and spreading inside a cluster. That should at least be 7016 * a good thing for latency, and this is consistent with the idea that most 7017 * of the energy savings of EAS come from the asymmetry of the system, and 7018 * not so much from breaking the tie between identical CPUs. That's also the 7019 * reason why EAS is enabled in the topology code only for systems where 7020 * SD_ASYM_CPUCAPACITY is set. 7021 * 7022 * NOTE: Forkees are not accepted in the energy-aware wake-up path because 7023 * they don't have any useful utilization data yet and it's not possible to 7024 * forecast their impact on energy consumption. Consequently, they will be 7025 * placed by find_idlest_cpu() on the least loaded CPU, which might turn out 7026 * to be energy-inefficient in some use-cases. The alternative would be to 7027 * bias new tasks towards specific types of CPUs first, or to try to infer 7028 * their util_avg from the parent task, but those heuristics could hurt 7029 * other use-cases too. So, until someone finds a better way to solve this, 7030 * let's keep things simple by re-using the existing slow path. 7031 */ 7032 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) 7033 { 7034 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask); 7035 unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX; 7036 struct root_domain *rd = this_rq()->rd; 7037 int cpu, best_energy_cpu, target = -1; 7038 struct sched_domain *sd; 7039 struct perf_domain *pd; 7040 struct energy_env eenv; 7041 7042 rcu_read_lock(); 7043 pd = rcu_dereference(rd->pd); 7044 if (!pd || READ_ONCE(rd->overutilized)) 7045 goto unlock; 7046 7047 /* 7048 * Energy-aware wake-up happens on the lowest sched_domain starting 7049 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu. 
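* (found below by walking the parents of this CPU's sd_asym_cpucapacity domain until the span also covers prev_cpu).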
7050 */ 7051 sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity)); 7052 while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) 7053 sd = sd->parent; 7054 if (!sd) 7055 goto unlock; 7056 7057 target = prev_cpu; 7058 7059 sync_entity_load_avg(&p->se); 7060 if (!task_util_est(p)) 7061 goto unlock; 7062 7063 eenv_task_busy_time(&eenv, p, prev_cpu); 7064 7065 for (; pd; pd = pd->next) { 7066 unsigned long cpu_cap, cpu_thermal_cap, util; 7067 unsigned long cur_delta, max_spare_cap = 0; 7068 bool compute_prev_delta = false; 7069 int max_spare_cap_cpu = -1; 7070 unsigned long base_energy; 7071 7072 cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask); 7073 7074 if (cpumask_empty(cpus)) 7075 continue; 7076 7077 /* Account thermal pressure for the energy estimation */ 7078 cpu = cpumask_first(cpus); 7079 cpu_thermal_cap = arch_scale_cpu_capacity(cpu); 7080 cpu_thermal_cap -= arch_scale_thermal_pressure(cpu); 7081 7082 eenv.cpu_cap = cpu_thermal_cap; 7083 eenv.pd_cap = 0; 7084 7085 for_each_cpu(cpu, cpus) { 7086 eenv.pd_cap += cpu_thermal_cap; 7087 7088 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) 7089 continue; 7090 7091 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 7092 continue; 7093 7094 util = cpu_util_next(cpu, p, cpu); 7095 cpu_cap = capacity_of(cpu); 7096 7097 /* 7098 * Skip CPUs that cannot satisfy the capacity request. 7099 * IOW, placing the task there would make the CPU 7100 * overutilized. Take uclamp into account to see how 7101 * much capacity we can get out of the CPU; this is 7102 * aligned with sched_cpu_util(). 7103 */ 7104 util = uclamp_rq_util_with(cpu_rq(cpu), util, p); 7105 if (!fits_capacity(util, cpu_cap)) 7106 continue; 7107 7108 lsub_positive(&cpu_cap, util); 7109 7110 if (cpu == prev_cpu) { 7111 /* Always use prev_cpu as a candidate. */ 7112 compute_prev_delta = true; 7113 } else if (cpu_cap > max_spare_cap) { 7114 /* 7115 * Find the CPU with the maximum spare capacity 7116 * in the performance domain. 7117 */ 7118 max_spare_cap = cpu_cap; 7119 max_spare_cap_cpu = cpu; 7120 } 7121 } 7122 7123 if (max_spare_cap_cpu < 0 && !compute_prev_delta) 7124 continue; 7125 7126 eenv_pd_busy_time(&eenv, cpus, p); 7127 /* Compute the 'base' energy of the pd, without @p */ 7128 base_energy = compute_energy(&eenv, pd, cpus, p, -1); 7129 7130 /* Evaluate the energy impact of using prev_cpu. */ 7131 if (compute_prev_delta) { 7132 prev_delta = compute_energy(&eenv, pd, cpus, p, 7133 prev_cpu); 7134 /* CPU utilization has changed */ 7135 if (prev_delta < base_energy) 7136 goto unlock; 7137 prev_delta -= base_energy; 7138 best_delta = min(best_delta, prev_delta); 7139 } 7140 7141 /* Evaluate the energy impact of using max_spare_cap_cpu. */ 7142 if (max_spare_cap_cpu >= 0) { 7143 cur_delta = compute_energy(&eenv, pd, cpus, p, 7144 max_spare_cap_cpu); 7145 /* CPU utilization has changed */ 7146 if (cur_delta < base_energy) 7147 goto unlock; 7148 cur_delta -= base_energy; 7149 if (cur_delta < best_delta) { 7150 best_delta = cur_delta; 7151 best_energy_cpu = max_spare_cap_cpu; 7152 } 7153 } 7154 } 7155 rcu_read_unlock(); 7156 7157 if (best_delta < prev_delta) 7158 target = best_energy_cpu; 7159 7160 return target; 7161 7162 unlock: 7163 rcu_read_unlock(); 7164 7165 return target; 7166 } 7167 7168 /* 7169 * select_task_rq_fair: Select target runqueue for the waking task in domains 7170 * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE, 7171 * SD_BALANCE_FORK, or SD_BALANCE_EXEC. 
7172 * 7173 * Balances load by selecting the idlest CPU in the idlest group, or under 7174 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. 7175 * 7176 * Returns the target CPU number. 7177 */ 7178 static int 7179 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) 7180 { 7181 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); 7182 struct sched_domain *tmp, *sd = NULL; 7183 int cpu = smp_processor_id(); 7184 int new_cpu = prev_cpu; 7185 int want_affine = 0; 7186 /* SD_flags and WF_flags share the first nibble */ 7187 int sd_flag = wake_flags & 0xF; 7188 7189 /* 7190 * required for stable ->cpus_allowed 7191 */ 7192 lockdep_assert_held(&p->pi_lock); 7193 if (wake_flags & WF_TTWU) { 7194 record_wakee(p); 7195 7196 if (sched_energy_enabled()) { 7197 new_cpu = find_energy_efficient_cpu(p, prev_cpu); 7198 if (new_cpu >= 0) 7199 return new_cpu; 7200 new_cpu = prev_cpu; 7201 } 7202 7203 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr); 7204 } 7205 7206 rcu_read_lock(); 7207 for_each_domain(cpu, tmp) { 7208 /* 7209 * If both 'cpu' and 'prev_cpu' are part of this domain, 7210 * cpu is a valid SD_WAKE_AFFINE target. 7211 */ 7212 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && 7213 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { 7214 if (cpu != prev_cpu) 7215 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); 7216 7217 sd = NULL; /* Prefer wake_affine over balance flags */ 7218 break; 7219 } 7220 7221 /* 7222 * Usually only true for WF_EXEC and WF_FORK, as sched_domains 7223 * usually do not have SD_BALANCE_WAKE set. That means wakeup 7224 * will usually go to the fast path. 7225 */ 7226 if (tmp->flags & sd_flag) 7227 sd = tmp; 7228 else if (!want_affine) 7229 break; 7230 } 7231 7232 if (unlikely(sd)) { 7233 /* Slow path */ 7234 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); 7235 } else if (wake_flags & WF_TTWU) { /* XXX always ? */ 7236 /* Fast path */ 7237 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); 7238 } 7239 rcu_read_unlock(); 7240 7241 return new_cpu; 7242 } 7243 7244 static void detach_entity_cfs_rq(struct sched_entity *se); 7245 7246 /* 7247 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and 7248 * cfs_rq_of(p) references at time of call are still valid and identify the 7249 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. 7250 */ 7251 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) 7252 { 7253 struct sched_entity *se = &p->se; 7254 7255 /* 7256 * As blocked tasks retain absolute vruntime the migration needs to 7257 * deal with this by subtracting the old and adding the new 7258 * min_vruntime -- the latter is done by enqueue_entity() when placing 7259 * the task on the new runqueue. 7260 */ 7261 if (READ_ONCE(p->__state) == TASK_WAKING) { 7262 struct cfs_rq *cfs_rq = cfs_rq_of(se); 7263 7264 se->vruntime -= u64_u32_load(cfs_rq->min_vruntime); 7265 } 7266 7267 if (p->on_rq == TASK_ON_RQ_MIGRATING) { 7268 /* 7269 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' 7270 * rq->lock and can modify state directly. 7271 */ 7272 lockdep_assert_rq_held(task_rq(p)); 7273 detach_entity_cfs_rq(se); 7274 7275 } else { 7276 remove_entity_load_avg(se); 7277 7278 /* 7279 * Here, the task's PELT values have been updated according to 7280 * the current rq's clock. 
But if that clock hasn't been 7281 * updated in a while, a substantial idle time will be missed, 7282 * leading to an inflation after wake-up on the new rq. 7283 * 7284 * Estimate the missing time from the cfs_rq last_update_time 7285 * and update sched_avg to improve the PELT continuity after 7286 * migration. 7287 */ 7288 migrate_se_pelt_lag(se); 7289 } 7290 7291 /* Tell new CPU we are migrated */ 7292 se->avg.last_update_time = 0; 7293 7294 /* We have migrated, no longer consider this task hot */ 7295 se->exec_start = 0; 7296 7297 update_scan_period(p, new_cpu); 7298 } 7299 7300 static void task_dead_fair(struct task_struct *p) 7301 { 7302 remove_entity_load_avg(&p->se); 7303 } 7304 7305 static int 7306 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 7307 { 7308 if (rq->nr_running) 7309 return 1; 7310 7311 return newidle_balance(rq, rf) != 0; 7312 } 7313 #endif /* CONFIG_SMP */ 7314 7315 static unsigned long wakeup_gran(struct sched_entity *se) 7316 { 7317 unsigned long gran = sysctl_sched_wakeup_granularity; 7318 7319 /* 7320 * Since it's curr that is running now, convert the gran from real-time 7321 * to virtual-time in its units. 7322 * 7323 * By using 'se' instead of 'curr' we penalize light tasks, so 7324 * they get preempted more easily. That is, if 'se' < 'curr' then 7325 * the resulting gran will be larger, therefore penalizing the 7326 * lighter, if otoh 'se' > 'curr' then the resulting gran will 7327 * be smaller, again penalizing the lighter task. 7328 * 7329 * This is especially important for buddies when the leftmost 7330 * task is higher priority than the buddy. 7331 */ 7332 return calc_delta_fair(gran, se); 7333 } 7334 7335 /* 7336 * Should 'se' preempt 'curr'? 7337 * 7338 * |s1 7339 * |s2 7340 * |s3 7341 * g 7342 * |<--->|c 7343 * 7344 * w(c, s1) = -1 7345 * w(c, s2) = 0 7346 * w(c, s3) = 1 7347 * 7348 */ 7349 static int 7350 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) 7351 { 7352 s64 gran, vdiff = curr->vruntime - se->vruntime; 7353 7354 if (vdiff <= 0) 7355 return -1; 7356 7357 gran = wakeup_gran(se); 7358 if (vdiff > gran) 7359 return 1; 7360 7361 return 0; 7362 } 7363 7364 static void set_last_buddy(struct sched_entity *se) 7365 { 7366 for_each_sched_entity(se) { 7367 if (SCHED_WARN_ON(!se->on_rq)) 7368 return; 7369 if (se_is_idle(se)) 7370 return; 7371 cfs_rq_of(se)->last = se; 7372 } 7373 } 7374 7375 static void set_next_buddy(struct sched_entity *se) 7376 { 7377 for_each_sched_entity(se) { 7378 if (SCHED_WARN_ON(!se->on_rq)) 7379 return; 7380 if (se_is_idle(se)) 7381 return; 7382 cfs_rq_of(se)->next = se; 7383 } 7384 } 7385 7386 static void set_skip_buddy(struct sched_entity *se) 7387 { 7388 for_each_sched_entity(se) 7389 cfs_rq_of(se)->skip = se; 7390 } 7391 7392 /* 7393 * Preempt the current task with a newly woken task if needed: 7394 */ 7395 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 7396 { 7397 struct task_struct *curr = rq->curr; 7398 struct sched_entity *se = &curr->se, *pse = &p->se; 7399 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 7400 int scale = cfs_rq->nr_running >= sched_nr_latency; 7401 int next_buddy_marked = 0; 7402 int cse_is_idle, pse_is_idle; 7403 7404 if (unlikely(se == pse)) 7405 return; 7406 7407 /* 7408 * This is possible from callers such as attach_tasks(), in which we 7409 * unconditionally check_preempt_curr() after an enqueue (which may have 7410 * led to a throttle). This both saves work and prevents false 7411 * next-buddy nomination below.
7412 */ 7413 if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) 7414 return; 7415 7416 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { 7417 set_next_buddy(pse); 7418 next_buddy_marked = 1; 7419 } 7420 7421 /* 7422 * We can come here with TIF_NEED_RESCHED already set from the new task 7423 * wake up path. 7424 * 7425 * Note: this also catches the edge-case of curr being in a throttled 7426 * group (e.g. via set_curr_task), since update_curr() (in the 7427 * enqueue of curr) will have resulted in resched being set. This 7428 * prevents us from potentially nominating it as a false LAST_BUDDY 7429 * below. 7430 */ 7431 if (test_tsk_need_resched(curr)) 7432 return; 7433 7434 /* Idle tasks are by definition preempted by non-idle tasks. */ 7435 if (unlikely(task_has_idle_policy(curr)) && 7436 likely(!task_has_idle_policy(p))) 7437 goto preempt; 7438 7439 /* 7440 * Batch and idle tasks do not preempt non-idle tasks (their preemption 7441 * is driven by the tick): 7442 */ 7443 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) 7444 return; 7445 7446 find_matching_se(&se, &pse); 7447 BUG_ON(!pse); 7448 7449 cse_is_idle = se_is_idle(se); 7450 pse_is_idle = se_is_idle(pse); 7451 7452 /* 7453 * Preempt an idle group in favor of a non-idle group (and don't preempt 7454 * in the inverse case). 7455 */ 7456 if (cse_is_idle && !pse_is_idle) 7457 goto preempt; 7458 if (cse_is_idle != pse_is_idle) 7459 return; 7460 7461 update_curr(cfs_rq_of(se)); 7462 if (wakeup_preempt_entity(se, pse) == 1) { 7463 /* 7464 * Bias pick_next to pick the sched entity that is 7465 * triggering this preemption. 7466 */ 7467 if (!next_buddy_marked) 7468 set_next_buddy(pse); 7469 goto preempt; 7470 } 7471 7472 return; 7473 7474 preempt: 7475 resched_curr(rq); 7476 /* 7477 * Only set the backward buddy when the current task is still 7478 * on the rq. This can happen when a wakeup gets interleaved 7479 * with schedule on the ->pre_schedule() or idle_balance() 7480 * point, either of which can drop the rq lock. 7481 * 7482 * Also, during early boot the idle thread is in the fair class, 7483 * for obvious reasons it's a bad idea to schedule back to it.
7484 */ 7485 if (unlikely(!se->on_rq || curr == rq->idle)) 7486 return; 7487 7488 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) 7489 set_last_buddy(se); 7490 } 7491 7492 #ifdef CONFIG_SMP 7493 static struct task_struct *pick_task_fair(struct rq *rq) 7494 { 7495 struct sched_entity *se; 7496 struct cfs_rq *cfs_rq; 7497 7498 again: 7499 cfs_rq = &rq->cfs; 7500 if (!cfs_rq->nr_running) 7501 return NULL; 7502 7503 do { 7504 struct sched_entity *curr = cfs_rq->curr; 7505 7506 /* When we pick for a remote RQ, we'll not have done put_prev_entity() */ 7507 if (curr) { 7508 if (curr->on_rq) 7509 update_curr(cfs_rq); 7510 else 7511 curr = NULL; 7512 7513 if (unlikely(check_cfs_rq_runtime(cfs_rq))) 7514 goto again; 7515 } 7516 7517 se = pick_next_entity(cfs_rq, curr); 7518 cfs_rq = group_cfs_rq(se); 7519 } while (cfs_rq); 7520 7521 return task_of(se); 7522 } 7523 #endif 7524 7525 struct task_struct * 7526 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 7527 { 7528 struct cfs_rq *cfs_rq = &rq->cfs; 7529 struct sched_entity *se; 7530 struct task_struct *p; 7531 int new_tasks; 7532 7533 again: 7534 if (!sched_fair_runnable(rq)) 7535 goto idle; 7536 7537 #ifdef CONFIG_FAIR_GROUP_SCHED 7538 if (!prev || prev->sched_class != &fair_sched_class) 7539 goto simple; 7540 7541 /* 7542 * Because of the set_next_buddy() in dequeue_task_fair() it is rather 7543 * likely that a next task is from the same cgroup as the current. 7544 * 7545 * Therefore attempt to avoid putting and setting the entire cgroup 7546 * hierarchy, only change the part that actually changes. 7547 */ 7548 7549 do { 7550 struct sched_entity *curr = cfs_rq->curr; 7551 7552 /* 7553 * Since we got here without doing put_prev_entity() we also 7554 * have to consider cfs_rq->curr. If it is still a runnable 7555 * entity, update_curr() will update its vruntime, otherwise 7556 * forget we've ever seen it. 7557 */ 7558 if (curr) { 7559 if (curr->on_rq) 7560 update_curr(cfs_rq); 7561 else 7562 curr = NULL; 7563 7564 /* 7565 * This call to check_cfs_rq_runtime() will do the 7566 * throttle and dequeue its entity in the parent(s). 7567 * Therefore the nr_running test will indeed 7568 * be correct. 7569 */ 7570 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { 7571 cfs_rq = &rq->cfs; 7572 7573 if (!cfs_rq->nr_running) 7574 goto idle; 7575 7576 goto simple; 7577 } 7578 } 7579 7580 se = pick_next_entity(cfs_rq, curr); 7581 cfs_rq = group_cfs_rq(se); 7582 } while (cfs_rq); 7583 7584 p = task_of(se); 7585 7586 /* 7587 * Since we haven't yet done put_prev_entity and if the selected task 7588 * is a different task than we started out with, try and touch the 7589 * least amount of cfs_rqs. 
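* The loop below walks prev's and next's entity hierarchies in lock-step, doing put_prev_entity() on one side and set_next_entity() on the other, until both reach their common ancestor cfs_rq.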
7590 */ 7591 if (prev != p) { 7592 struct sched_entity *pse = &prev->se; 7593 7594 while (!(cfs_rq = is_same_group(se, pse))) { 7595 int se_depth = se->depth; 7596 int pse_depth = pse->depth; 7597 7598 if (se_depth <= pse_depth) { 7599 put_prev_entity(cfs_rq_of(pse), pse); 7600 pse = parent_entity(pse); 7601 } 7602 if (se_depth >= pse_depth) { 7603 set_next_entity(cfs_rq_of(se), se); 7604 se = parent_entity(se); 7605 } 7606 } 7607 7608 put_prev_entity(cfs_rq, pse); 7609 set_next_entity(cfs_rq, se); 7610 } 7611 7612 goto done; 7613 simple: 7614 #endif 7615 if (prev) 7616 put_prev_task(rq, prev); 7617 7618 do { 7619 se = pick_next_entity(cfs_rq, NULL); 7620 set_next_entity(cfs_rq, se); 7621 cfs_rq = group_cfs_rq(se); 7622 } while (cfs_rq); 7623 7624 p = task_of(se); 7625 7626 done: __maybe_unused; 7627 #ifdef CONFIG_SMP 7628 /* 7629 * Move the next running task to the front of 7630 * the list, so our cfs_tasks list becomes MRU 7631 * one. 7632 */ 7633 list_move(&p->se.group_node, &rq->cfs_tasks); 7634 #endif 7635 7636 if (hrtick_enabled_fair(rq)) 7637 hrtick_start_fair(rq, p); 7638 7639 update_misfit_status(p, rq); 7640 7641 return p; 7642 7643 idle: 7644 if (!rf) 7645 return NULL; 7646 7647 new_tasks = newidle_balance(rq, rf); 7648 7649 /* 7650 * Because newidle_balance() releases (and re-acquires) rq->lock, it is 7651 * possible for any higher priority task to appear. In that case we 7652 * must re-start the pick_next_entity() loop. 7653 */ 7654 if (new_tasks < 0) 7655 return RETRY_TASK; 7656 7657 if (new_tasks > 0) 7658 goto again; 7659 7660 /* 7661 * rq is about to be idle, check if we need to update the 7662 * lost_idle_time of clock_pelt 7663 */ 7664 update_idle_rq_clock_pelt(rq); 7665 7666 return NULL; 7667 } 7668 7669 static struct task_struct *__pick_next_task_fair(struct rq *rq) 7670 { 7671 return pick_next_task_fair(rq, NULL, NULL); 7672 } 7673 7674 /* 7675 * Account for a descheduled task: 7676 */ 7677 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) 7678 { 7679 struct sched_entity *se = &prev->se; 7680 struct cfs_rq *cfs_rq; 7681 7682 for_each_sched_entity(se) { 7683 cfs_rq = cfs_rq_of(se); 7684 put_prev_entity(cfs_rq, se); 7685 } 7686 } 7687 7688 /* 7689 * sched_yield() is very simple 7690 * 7691 * The magic of dealing with the ->skip buddy is in pick_next_entity. 7692 */ 7693 static void yield_task_fair(struct rq *rq) 7694 { 7695 struct task_struct *curr = rq->curr; 7696 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 7697 struct sched_entity *se = &curr->se; 7698 7699 /* 7700 * Are we the only task in the tree? 7701 */ 7702 if (unlikely(rq->nr_running == 1)) 7703 return; 7704 7705 clear_buddies(cfs_rq, se); 7706 7707 if (curr->policy != SCHED_BATCH) { 7708 update_rq_clock(rq); 7709 /* 7710 * Update run-time statistics of the 'current'. 7711 */ 7712 update_curr(cfs_rq); 7713 /* 7714 * Tell update_rq_clock() that we've just updated, 7715 * so we don't do microscopic update in schedule() 7716 * and double the fastpath cost. 7717 */ 7718 rq_clock_skip_update(rq); 7719 } 7720 7721 set_skip_buddy(se); 7722 } 7723 7724 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) 7725 { 7726 struct sched_entity *se = &p->se; 7727 7728 /* throttled hierarchies are not runnable */ 7729 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) 7730 return false; 7731 7732 /* Tell the scheduler that we'd really like pse to run next. 
*/ 7733 set_next_buddy(se); 7734 7735 yield_task_fair(rq); 7736 7737 return true; 7738 } 7739 7740 #ifdef CONFIG_SMP 7741 /************************************************** 7742 * Fair scheduling class load-balancing methods. 7743 * 7744 * BASICS 7745 * 7746 * The purpose of load-balancing is to achieve the same basic fairness the 7747 * per-CPU scheduler provides, namely provide a proportional amount of compute 7748 * time to each task. This is expressed in the following equation: 7749 * 7750 * W_i,n/P_i == W_j,n/P_j for all i,j (1) 7751 * 7752 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight 7753 * W_i,0 is defined as: 7754 * 7755 * W_i,0 = \Sum_j w_i,j (2) 7756 * 7757 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight 7758 * is derived from the nice value as per sched_prio_to_weight[]. 7759 * 7760 * The weight average is an exponential decay average of the instantaneous 7761 * weight: 7762 * 7763 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) 7764 * 7765 * C_i is the compute capacity of CPU i; typically it is the 7766 * fraction of 'recent' time available for SCHED_OTHER task execution. But it 7767 * can also include other factors [XXX]. 7768 * 7769 * To achieve this balance we define a measure of imbalance which follows 7770 * directly from (1): 7771 * 7772 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) 7773 * 7774 * We then move tasks around to minimize the imbalance. In the continuous 7775 * function space it is obvious this converges, in the discrete case we get 7776 * a few fun cases generally called infeasible weight scenarios. 7777 * 7778 * [XXX expand on: 7779 * - infeasible weights; 7780 * - local vs global optima in the discrete case. ] 7781 * 7782 * 7783 * SCHED DOMAINS 7784 * 7785 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) 7786 * for all i,j solution, we create a tree of CPUs that follows the hardware 7787 * topology where each level pairs two lower groups (or better). This results 7788 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the 7789 * tree to only the first of the previous level and we decrease the frequency 7790 * of load-balance at each level in inverse proportion to the number of CPUs in 7791 * the groups. 7792 * 7793 * This yields: 7794 * 7795 * \Sum_{i = 0}^{log_2 n} { 1/2^i * n/2^i * 2^i } = O(n) (5) 7796 * 7797 * where the sum runs over all levels and, within each term, 7798 * 1/2^i is the load-balance frequency at level i, 7799 * n/2^i is the number of CPUs doing load-balance at that level, 7800 * and 2^i is the size of each group. 7801 * 7802 * 7803 * Coupled with a limit on how many tasks we can migrate every balance pass, 7804 * this makes (5) the runtime complexity of the balancer. 7805 * 7806 * An important property here is that each CPU is still (indirectly) connected 7807 * to every other CPU in at most O(log n) steps: 7808 * 7809 * The adjacency matrix of the resulting graph is given by: 7810 * 7811 * 7812 * A_i,j = \Union_{k = 0}^{log_2 n} (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6) 7813 * 7814 * 7815 * And you'll find that: 7816 * 7817 * A^(log_2 n)_i,j != 0 for all i,j (7) 7818 * 7819 * Showing there's indeed a path between every CPU in at most O(log n) steps.
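*
* For example, with n = 4: level k = 0 links {0,1} and {2,3}, and level k = 1 links CPUs 0 and 2 to every CPU, so any two CPUs reach each other within log_2(4) = 2 steps.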
7820 * The task movement gives a factor of O(m), giving a convergence complexity 7821 * of: 7822 * 7823 * O(nm log n), n := nr_cpus, m := nr_tasks (8) 7824 * 7825 * 7826 * WORK CONSERVING 7827 * 7828 * In order to avoid CPUs going idle while there's still work to do, new idle 7829 * balancing is more aggressive and has the newly idle CPU iterate up the domain 7830 * tree itself instead of relying on other CPUs to bring it work. 7831 * 7832 * This adds some complexity to both (5) and (8) but it reduces the total idle 7833 * time. 7834 * 7835 * [XXX more?] 7836 * 7837 * 7838 * CGROUPS 7839 * 7840 * Cgroups make a horror show out of (2); instead of a simple sum we get: 7841 * 7842 * 7843 * W_i,0 = \Sum_j \Prod_k w_k * s_k,i / S_k (9) 7844 * 7845 * 7846 * Where 7847 * 7848 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) 7849 * 7850 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i. 7851 * 7852 * The big problem is S_k: it's a global sum needed to compute a local (W_i) 7853 * property. 7854 * 7855 * [XXX write more on how we solve this.. _after_ merging pjt's patches that 7856 * rewrite all of this once again.] 7857 */ 7858 7859 static unsigned long __read_mostly max_load_balance_interval = HZ/10; 7860 7861 enum fbq_type { regular, remote, all }; 7862 7863 /* 7864 * 'group_type' describes the group of CPUs at the moment of load balancing. 7865 * 7866 * The enum is ordered by pulling priority, with the group with lowest priority 7867 * first so the group_type can simply be compared when selecting the busiest 7868 * group. See update_sd_pick_busiest(). 7869 */ 7870 enum group_type { 7871 /* The group has spare capacity that can be used to run more tasks. */ 7872 group_has_spare = 0, 7873 /* 7874 * The group is fully used and the tasks don't compete for more CPU 7875 * cycles. Nevertheless, some tasks might wait before running. 7876 */ 7877 group_fully_busy, 7878 /* 7879 * One task doesn't fit within the CPU's capacity and must be migrated to a 7880 * more powerful CPU. 7881 */ 7882 group_misfit_task, 7883 /* 7884 * SD_ASYM_PACKING only: One local CPU with higher capacity is available, 7885 * and the task should be migrated to it instead of running on the 7886 * current CPU. 7887 */ 7888 group_asym_packing, 7889 /* 7890 * The tasks' affinity constraints previously prevented the scheduler 7891 * from balancing the load across the system. 7892 */ 7893 group_imbalanced, 7894 /* 7895 * The CPU is overloaded and can't provide expected CPU cycles to all 7896 * tasks.
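* Being the last value, this is the highest pulling priority of all; see the ordering note above and update_sd_pick_busiest().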
7897 */ 7898 group_overloaded 7899 }; 7900 7901 enum migration_type { 7902 migrate_load = 0, 7903 migrate_util, 7904 migrate_task, 7905 migrate_misfit 7906 }; 7907 7908 #define LBF_ALL_PINNED 0x01 7909 #define LBF_NEED_BREAK 0x02 7910 #define LBF_DST_PINNED 0x04 7911 #define LBF_SOME_PINNED 0x08 7912 #define LBF_ACTIVE_LB 0x10 7913 7914 struct lb_env { 7915 struct sched_domain *sd; 7916 7917 struct rq *src_rq; 7918 int src_cpu; 7919 7920 int dst_cpu; 7921 struct rq *dst_rq; 7922 7923 struct cpumask *dst_grpmask; 7924 int new_dst_cpu; 7925 enum cpu_idle_type idle; 7926 long imbalance; 7927 /* The set of CPUs under consideration for load-balancing */ 7928 struct cpumask *cpus; 7929 7930 unsigned int flags; 7931 7932 unsigned int loop; 7933 unsigned int loop_break; 7934 unsigned int loop_max; 7935 7936 enum fbq_type fbq_type; 7937 enum migration_type migration_type; 7938 struct list_head tasks; 7939 }; 7940 7941 /* 7942 * Is this task likely cache-hot: 7943 */ 7944 static int task_hot(struct task_struct *p, struct lb_env *env) 7945 { 7946 s64 delta; 7947 7948 lockdep_assert_rq_held(env->src_rq); 7949 7950 if (p->sched_class != &fair_sched_class) 7951 return 0; 7952 7953 if (unlikely(task_has_idle_policy(p))) 7954 return 0; 7955 7956 /* SMT siblings share cache */ 7957 if (env->sd->flags & SD_SHARE_CPUCAPACITY) 7958 return 0; 7959 7960 /* 7961 * Buddy candidates are cache hot: 7962 */ 7963 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && 7964 (&p->se == cfs_rq_of(&p->se)->next || 7965 &p->se == cfs_rq_of(&p->se)->last)) 7966 return 1; 7967 7968 if (sysctl_sched_migration_cost == -1) 7969 return 1; 7970 7971 /* 7972 * Don't migrate task if the task's cookie does not match 7973 * with the destination CPU's core cookie. 7974 */ 7975 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) 7976 return 1; 7977 7978 if (sysctl_sched_migration_cost == 0) 7979 return 0; 7980 7981 delta = rq_clock_task(env->src_rq) - p->se.exec_start; 7982 7983 return delta < (s64)sysctl_sched_migration_cost; 7984 } 7985 7986 #ifdef CONFIG_NUMA_BALANCING 7987 /* 7988 * Returns 1, if task migration degrades locality 7989 * Returns 0, if task migration improves locality i.e migration preferred. 7990 * Returns -1, if task migration is not affected by locality. 7991 */ 7992 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) 7993 { 7994 struct numa_group *numa_group = rcu_dereference(p->numa_group); 7995 unsigned long src_weight, dst_weight; 7996 int src_nid, dst_nid, dist; 7997 7998 if (!static_branch_likely(&sched_numa_balancing)) 7999 return -1; 8000 8001 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) 8002 return -1; 8003 8004 src_nid = cpu_to_node(env->src_cpu); 8005 dst_nid = cpu_to_node(env->dst_cpu); 8006 8007 if (src_nid == dst_nid) 8008 return -1; 8009 8010 /* Migrating away from the preferred node is always bad. */ 8011 if (src_nid == p->numa_preferred_nid) { 8012 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) 8013 return 1; 8014 else 8015 return -1; 8016 } 8017 8018 /* Encourage migration to the preferred node. */ 8019 if (dst_nid == p->numa_preferred_nid) 8020 return 0; 8021 8022 /* Leaving a core idle is often worse than degrading locality. 
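* When the destination CPU is idle, report the migration as unaffected by locality (-1 below) and let the cache-hotness check decide instead.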
*/ 8023 if (env->idle == CPU_IDLE) 8024 return -1; 8025 8026 dist = node_distance(src_nid, dst_nid); 8027 if (numa_group) { 8028 src_weight = group_weight(p, src_nid, dist); 8029 dst_weight = group_weight(p, dst_nid, dist); 8030 } else { 8031 src_weight = task_weight(p, src_nid, dist); 8032 dst_weight = task_weight(p, dst_nid, dist); 8033 } 8034 8035 return dst_weight < src_weight; 8036 } 8037 8038 #else 8039 static inline int migrate_degrades_locality(struct task_struct *p, 8040 struct lb_env *env) 8041 { 8042 return -1; 8043 } 8044 #endif 8045 8046 /* 8047 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 8048 */ 8049 static 8050 int can_migrate_task(struct task_struct *p, struct lb_env *env) 8051 { 8052 int tsk_cache_hot; 8053 8054 lockdep_assert_rq_held(env->src_rq); 8055 8056 /* 8057 * We do not migrate tasks that are: 8058 * 1) throttled_lb_pair, or 8059 * 2) cannot be migrated to this CPU due to cpus_ptr, or 8060 * 3) running (obviously), or 8061 * 4) are cache-hot on their current CPU. 8062 */ 8063 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) 8064 return 0; 8065 8066 /* Disregard pcpu kthreads; they are where they need to be. */ 8067 if (kthread_is_per_cpu(p)) 8068 return 0; 8069 8070 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { 8071 int cpu; 8072 8073 schedstat_inc(p->stats.nr_failed_migrations_affine); 8074 8075 env->flags |= LBF_SOME_PINNED; 8076 8077 /* 8078 * Remember if this task can be migrated to any other CPU in 8079 * our sched_group. We may want to revisit it if we couldn't 8080 * meet load balance goals by pulling other tasks on src_cpu. 8081 * 8082 * Avoid computing new_dst_cpu 8083 * - for NEWLY_IDLE 8084 * - if we have already computed one in current iteration 8085 * - if it's an active balance 8086 */ 8087 if (env->idle == CPU_NEWLY_IDLE || 8088 env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB)) 8089 return 0; 8090 8091 /* Prevent to re-select dst_cpu via env's CPUs: */ 8092 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { 8093 if (cpumask_test_cpu(cpu, p->cpus_ptr)) { 8094 env->flags |= LBF_DST_PINNED; 8095 env->new_dst_cpu = cpu; 8096 break; 8097 } 8098 } 8099 8100 return 0; 8101 } 8102 8103 /* Record that we found at least one task that could run on dst_cpu */ 8104 env->flags &= ~LBF_ALL_PINNED; 8105 8106 if (task_running(env->src_rq, p)) { 8107 schedstat_inc(p->stats.nr_failed_migrations_running); 8108 return 0; 8109 } 8110 8111 /* 8112 * Aggressive migration if: 8113 * 1) active balance 8114 * 2) destination numa is preferred 8115 * 3) task is cache cold, or 8116 * 4) too many balance attempts have failed. 
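*
* These are checked in order below: LBF_ACTIVE_LB first, then NUMA locality via migrate_degrades_locality(), falling back to task_hot() when locality gives no answer, and finally nr_balance_failed vs. cache_nice_tries.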
8117 */ 8118 if (env->flags & LBF_ACTIVE_LB) 8119 return 1; 8120 8121 tsk_cache_hot = migrate_degrades_locality(p, env); 8122 if (tsk_cache_hot == -1) 8123 tsk_cache_hot = task_hot(p, env); 8124 8125 if (tsk_cache_hot <= 0 || 8126 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { 8127 if (tsk_cache_hot == 1) { 8128 schedstat_inc(env->sd->lb_hot_gained[env->idle]); 8129 schedstat_inc(p->stats.nr_forced_migrations); 8130 } 8131 return 1; 8132 } 8133 8134 schedstat_inc(p->stats.nr_failed_migrations_hot); 8135 return 0; 8136 } 8137 8138 /* 8139 * detach_task() -- detach the task for the migration specified in env 8140 */ 8141 static void detach_task(struct task_struct *p, struct lb_env *env) 8142 { 8143 lockdep_assert_rq_held(env->src_rq); 8144 8145 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); 8146 set_task_cpu(p, env->dst_cpu); 8147 } 8148 8149 /* 8150 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as 8151 * part of active balancing operations within "domain". 8152 * 8153 * Returns a task if successful and NULL otherwise. 8154 */ 8155 static struct task_struct *detach_one_task(struct lb_env *env) 8156 { 8157 struct task_struct *p; 8158 8159 lockdep_assert_rq_held(env->src_rq); 8160 8161 list_for_each_entry_reverse(p, 8162 &env->src_rq->cfs_tasks, se.group_node) { 8163 if (!can_migrate_task(p, env)) 8164 continue; 8165 8166 detach_task(p, env); 8167 8168 /* 8169 * Right now, this is only the second place where 8170 * lb_gained[env->idle] is updated (other is detach_tasks) 8171 * so we can safely collect stats here rather than 8172 * inside detach_tasks(). 8173 */ 8174 schedstat_inc(env->sd->lb_gained[env->idle]); 8175 return p; 8176 } 8177 return NULL; 8178 } 8179 8180 static const unsigned int sched_nr_migrate_break = 32; 8181 8182 /* 8183 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from 8184 * busiest_rq, as part of a balancing operation within domain "sd". 8185 * 8186 * Returns number of detached tasks if successful and 0 otherwise. 8187 */ 8188 static int detach_tasks(struct lb_env *env) 8189 { 8190 struct list_head *tasks = &env->src_rq->cfs_tasks; 8191 unsigned long util, load; 8192 struct task_struct *p; 8193 int detached = 0; 8194 8195 lockdep_assert_rq_held(env->src_rq); 8196 8197 /* 8198 * Source run queue has been emptied by another CPU, clear 8199 * LBF_ALL_PINNED flag as we will not test any task. 8200 */ 8201 if (env->src_rq->nr_running <= 1) { 8202 env->flags &= ~LBF_ALL_PINNED; 8203 return 0; 8204 } 8205 8206 if (env->imbalance <= 0) 8207 return 0; 8208 8209 while (!list_empty(tasks)) { 8210 /* 8211 * We don't want to steal all, otherwise we may be treated likewise, 8212 * which could at worst lead to a livelock crash. 8213 */ 8214 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) 8215 break; 8216 8217 p = list_last_entry(tasks, struct task_struct, se.group_node); 8218 8219 env->loop++; 8220 /* We've more or less seen every task there is, call it quits */ 8221 if (env->loop > env->loop_max) 8222 break; 8223 8224 /* take a breather every nr_migrate tasks */ 8225 if (env->loop > env->loop_break) { 8226 env->loop_break += sched_nr_migrate_break; 8227 env->flags |= LBF_NEED_BREAK; 8228 break; 8229 } 8230 8231 if (!can_migrate_task(p, env)) 8232 goto next; 8233 8234 switch (env->migration_type) { 8235 case migrate_load: 8236 /* 8237 * Depending of the number of CPUs and tasks and the 8238 * cgroup hierarchy, task_h_load() can return a null 8239 * value. 
Make sure that env->imbalance decreases 8240 * otherwise detach_tasks() will stop only after 8241 * detaching up to loop_max tasks. 8242 */ 8243 load = max_t(unsigned long, task_h_load(p), 1); 8244 8245 if (sched_feat(LB_MIN) && 8246 load < 16 && !env->sd->nr_balance_failed) 8247 goto next; 8248 8249 /* 8250 * Make sure that we don't migrate too much load. 8251 * Nevertheless, let relax the constraint if 8252 * scheduler fails to find a good waiting task to 8253 * migrate. 8254 */ 8255 if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance) 8256 goto next; 8257 8258 env->imbalance -= load; 8259 break; 8260 8261 case migrate_util: 8262 util = task_util_est(p); 8263 8264 if (util > env->imbalance) 8265 goto next; 8266 8267 env->imbalance -= util; 8268 break; 8269 8270 case migrate_task: 8271 env->imbalance--; 8272 break; 8273 8274 case migrate_misfit: 8275 /* This is not a misfit task */ 8276 if (task_fits_capacity(p, capacity_of(env->src_cpu))) 8277 goto next; 8278 8279 env->imbalance = 0; 8280 break; 8281 } 8282 8283 detach_task(p, env); 8284 list_add(&p->se.group_node, &env->tasks); 8285 8286 detached++; 8287 8288 #ifdef CONFIG_PREEMPTION 8289 /* 8290 * NEWIDLE balancing is a source of latency, so preemptible 8291 * kernels will stop after the first task is detached to minimize 8292 * the critical section. 8293 */ 8294 if (env->idle == CPU_NEWLY_IDLE) 8295 break; 8296 #endif 8297 8298 /* 8299 * We only want to steal up to the prescribed amount of 8300 * load/util/tasks. 8301 */ 8302 if (env->imbalance <= 0) 8303 break; 8304 8305 continue; 8306 next: 8307 list_move(&p->se.group_node, tasks); 8308 } 8309 8310 /* 8311 * Right now, this is one of only two places we collect this stat 8312 * so we can safely collect detach_one_task() stats here rather 8313 * than inside detach_one_task(). 8314 */ 8315 schedstat_add(env->sd->lb_gained[env->idle], detached); 8316 8317 return detached; 8318 } 8319 8320 /* 8321 * attach_task() -- attach the task detached by detach_task() to its new rq. 8322 */ 8323 static void attach_task(struct rq *rq, struct task_struct *p) 8324 { 8325 lockdep_assert_rq_held(rq); 8326 8327 BUG_ON(task_rq(p) != rq); 8328 activate_task(rq, p, ENQUEUE_NOCLOCK); 8329 check_preempt_curr(rq, p, 0); 8330 } 8331 8332 /* 8333 * attach_one_task() -- attaches the task returned from detach_one_task() to 8334 * its new rq. 8335 */ 8336 static void attach_one_task(struct rq *rq, struct task_struct *p) 8337 { 8338 struct rq_flags rf; 8339 8340 rq_lock(rq, &rf); 8341 update_rq_clock(rq); 8342 attach_task(rq, p); 8343 rq_unlock(rq, &rf); 8344 } 8345 8346 /* 8347 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their 8348 * new rq. 
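* Unlike attach_one_task(), the dst_rq lock is taken once for the whole list.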
8349 */ 8350 static void attach_tasks(struct lb_env *env) 8351 { 8352 struct list_head *tasks = &env->tasks; 8353 struct task_struct *p; 8354 struct rq_flags rf; 8355 8356 rq_lock(env->dst_rq, &rf); 8357 update_rq_clock(env->dst_rq); 8358 8359 while (!list_empty(tasks)) { 8360 p = list_first_entry(tasks, struct task_struct, se.group_node); 8361 list_del_init(&p->se.group_node); 8362 8363 attach_task(env->dst_rq, p); 8364 } 8365 8366 rq_unlock(env->dst_rq, &rf); 8367 } 8368 8369 #ifdef CONFIG_NO_HZ_COMMON 8370 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) 8371 { 8372 if (cfs_rq->avg.load_avg) 8373 return true; 8374 8375 if (cfs_rq->avg.util_avg) 8376 return true; 8377 8378 return false; 8379 } 8380 8381 static inline bool others_have_blocked(struct rq *rq) 8382 { 8383 if (READ_ONCE(rq->avg_rt.util_avg)) 8384 return true; 8385 8386 if (READ_ONCE(rq->avg_dl.util_avg)) 8387 return true; 8388 8389 if (thermal_load_avg(rq)) 8390 return true; 8391 8392 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 8393 if (READ_ONCE(rq->avg_irq.util_avg)) 8394 return true; 8395 #endif 8396 8397 return false; 8398 } 8399 8400 static inline void update_blocked_load_tick(struct rq *rq) 8401 { 8402 WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies); 8403 } 8404 8405 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) 8406 { 8407 if (!has_blocked) 8408 rq->has_blocked_load = 0; 8409 } 8410 #else 8411 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } 8412 static inline bool others_have_blocked(struct rq *rq) { return false; } 8413 static inline void update_blocked_load_tick(struct rq *rq) {} 8414 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} 8415 #endif 8416 8417 static bool __update_blocked_others(struct rq *rq, bool *done) 8418 { 8419 const struct sched_class *curr_class; 8420 u64 now = rq_clock_pelt(rq); 8421 unsigned long thermal_pressure; 8422 bool decayed; 8423 8424 /* 8425 * update_load_avg() can call cpufreq_update_util(). Make sure that RT, 8426 * DL and IRQ signals have been updated before updating CFS. 8427 */ 8428 curr_class = rq->curr->sched_class; 8429 8430 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 8431 8432 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | 8433 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | 8434 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) | 8435 update_irq_load_avg(rq, 0); 8436 8437 if (others_have_blocked(rq)) 8438 *done = false; 8439 8440 return decayed; 8441 } 8442 8443 #ifdef CONFIG_FAIR_GROUP_SCHED 8444 8445 static bool __update_blocked_fair(struct rq *rq, bool *done) 8446 { 8447 struct cfs_rq *cfs_rq, *pos; 8448 bool decayed = false; 8449 int cpu = cpu_of(rq); 8450 8451 /* 8452 * Iterates the task_group tree in a bottom up fashion, see 8453 * list_add_leaf_cfs_rq() for details. 8454 */ 8455 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { 8456 struct sched_entity *se; 8457 8458 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { 8459 update_tg_load_avg(cfs_rq); 8460 8461 if (cfs_rq->nr_running == 0) 8462 update_idle_cfs_rq_clock_pelt(cfs_rq); 8463 8464 if (cfs_rq == &rq->cfs) 8465 decayed = true; 8466 } 8467 8468 /* Propagate pending load changes to the parent, if any: */ 8469 se = cfs_rq->tg->se[cpu]; 8470 if (se && !skip_blocked_update(se)) 8471 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); 8472 8473 /* 8474 * There can be a lot of idle CPU cgroups. 
Don't let fully 8475 * decayed cfs_rqs linger on the list. 8476 */ 8477 if (cfs_rq_is_decayed(cfs_rq)) 8478 list_del_leaf_cfs_rq(cfs_rq); 8479 8480 /* Don't need periodic decay once load/util_avg are null */ 8481 if (cfs_rq_has_blocked(cfs_rq)) 8482 *done = false; 8483 } 8484 8485 return decayed; 8486 } 8487 8488 /* 8489 * Compute the hierarchical load factor for cfs_rq and all its ascendants. 8490 * This needs to be done in a top-down fashion because the load of a child 8491 * group is a fraction of its parents load. 8492 */ 8493 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) 8494 { 8495 struct rq *rq = rq_of(cfs_rq); 8496 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; 8497 unsigned long now = jiffies; 8498 unsigned long load; 8499 8500 if (cfs_rq->last_h_load_update == now) 8501 return; 8502 8503 WRITE_ONCE(cfs_rq->h_load_next, NULL); 8504 for_each_sched_entity(se) { 8505 cfs_rq = cfs_rq_of(se); 8506 WRITE_ONCE(cfs_rq->h_load_next, se); 8507 if (cfs_rq->last_h_load_update == now) 8508 break; 8509 } 8510 8511 if (!se) { 8512 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); 8513 cfs_rq->last_h_load_update = now; 8514 } 8515 8516 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { 8517 load = cfs_rq->h_load; 8518 load = div64_ul(load * se->avg.load_avg, 8519 cfs_rq_load_avg(cfs_rq) + 1); 8520 cfs_rq = group_cfs_rq(se); 8521 cfs_rq->h_load = load; 8522 cfs_rq->last_h_load_update = now; 8523 } 8524 } 8525 8526 static unsigned long task_h_load(struct task_struct *p) 8527 { 8528 struct cfs_rq *cfs_rq = task_cfs_rq(p); 8529 8530 update_cfs_rq_h_load(cfs_rq); 8531 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, 8532 cfs_rq_load_avg(cfs_rq) + 1); 8533 } 8534 #else 8535 static bool __update_blocked_fair(struct rq *rq, bool *done) 8536 { 8537 struct cfs_rq *cfs_rq = &rq->cfs; 8538 bool decayed; 8539 8540 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); 8541 if (cfs_rq_has_blocked(cfs_rq)) 8542 *done = false; 8543 8544 return decayed; 8545 } 8546 8547 static unsigned long task_h_load(struct task_struct *p) 8548 { 8549 return p->se.avg.load_avg; 8550 } 8551 #endif 8552 8553 static void update_blocked_averages(int cpu) 8554 { 8555 bool decayed = false, done = true; 8556 struct rq *rq = cpu_rq(cpu); 8557 struct rq_flags rf; 8558 8559 rq_lock_irqsave(rq, &rf); 8560 update_blocked_load_tick(rq); 8561 update_rq_clock(rq); 8562 8563 decayed |= __update_blocked_others(rq, &done); 8564 decayed |= __update_blocked_fair(rq, &done); 8565 8566 update_blocked_load_status(rq, !done); 8567 if (decayed) 8568 cpufreq_update_util(rq, 0); 8569 rq_unlock_irqrestore(rq, &rf); 8570 } 8571 8572 /********** Helpers for find_busiest_group ************************/ 8573 8574 /* 8575 * sg_lb_stats - stats of a sched_group required for load_balancing 8576 */ 8577 struct sg_lb_stats { 8578 unsigned long avg_load; /*Avg load across the CPUs of the group */ 8579 unsigned long group_load; /* Total load over the CPUs of the group */ 8580 unsigned long group_capacity; 8581 unsigned long group_util; /* Total utilization over the CPUs of the group */ 8582 unsigned long group_runnable; /* Total runnable time over the CPUs of the group */ 8583 unsigned int sum_nr_running; /* Nr of tasks running in the group */ 8584 unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */ 8585 unsigned int idle_cpus; 8586 unsigned int group_weight; 8587 enum group_type group_type; 8588 unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */ 8589 unsigned long 
group_misfit_task_load; /* A CPU has a task too big for its capacity */ 8590 #ifdef CONFIG_NUMA_BALANCING 8591 unsigned int nr_numa_running; 8592 unsigned int nr_preferred_running; 8593 #endif 8594 }; 8595 8596 /* 8597 * sd_lb_stats - Structure to store the statistics of a sched_domain 8598 * during load balancing. 8599 */ 8600 struct sd_lb_stats { 8601 struct sched_group *busiest; /* Busiest group in this sd */ 8602 struct sched_group *local; /* Local group in this sd */ 8603 unsigned long total_load; /* Total load of all groups in sd */ 8604 unsigned long total_capacity; /* Total capacity of all groups in sd */ 8605 unsigned long avg_load; /* Average load across all groups in sd */ 8606 unsigned int prefer_sibling; /* tasks should go to sibling first */ 8607 8608 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ 8609 struct sg_lb_stats local_stat; /* Statistics of the local group */ 8610 }; 8611 8612 static inline void init_sd_lb_stats(struct sd_lb_stats *sds) 8613 { 8614 /* 8615 * Skimp on the clearing to avoid duplicate work. We can avoid clearing 8616 * local_stat because update_sg_lb_stats() does a full clear/assignment. 8617 * We must however set busiest_stat::group_type and 8618 * busiest_stat::idle_cpus to the worst busiest group because 8619 * update_sd_pick_busiest() reads these before assignment. 8620 */ 8621 *sds = (struct sd_lb_stats){ 8622 .busiest = NULL, 8623 .local = NULL, 8624 .total_load = 0UL, 8625 .total_capacity = 0UL, 8626 .busiest_stat = { 8627 .idle_cpus = UINT_MAX, 8628 .group_type = group_has_spare, 8629 }, 8630 }; 8631 } 8632 8633 static unsigned long scale_rt_capacity(int cpu) 8634 { 8635 struct rq *rq = cpu_rq(cpu); 8636 unsigned long max = arch_scale_cpu_capacity(cpu); 8637 unsigned long used, free; 8638 unsigned long irq; 8639 8640 irq = cpu_util_irq(rq); 8641 8642 if (unlikely(irq >= max)) 8643 return 1; 8644 8645 /* 8646 * avg_rt.util_avg and avg_dl.util_avg track binary signals 8647 * (running and not running) with weights 0 and 1024 respectively. 8648 * avg_thermal.load_avg tracks thermal pressure and the weighted 8649 * average uses the actual delta max capacity(load). 
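*
* For example (illustrative numbers): with max = 1024, irq = 0 and the rt, dl and thermal contributions summing to 256, the capacity left for CFS is 1024 - 256 = 768; a non-zero irq utilization shrinks that further, roughly by a factor of (max - irq) / max, via scale_irq_capacity().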
8650 */ 8651 used = READ_ONCE(rq->avg_rt.util_avg); 8652 used += READ_ONCE(rq->avg_dl.util_avg); 8653 used += thermal_load_avg(rq); 8654 8655 if (unlikely(used >= max)) 8656 return 1; 8657 8658 free = max - used; 8659 8660 return scale_irq_capacity(free, irq, max); 8661 } 8662 8663 static void update_cpu_capacity(struct sched_domain *sd, int cpu) 8664 { 8665 unsigned long capacity = scale_rt_capacity(cpu); 8666 struct sched_group *sdg = sd->groups; 8667 8668 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu); 8669 8670 if (!capacity) 8671 capacity = 1; 8672 8673 cpu_rq(cpu)->cpu_capacity = capacity; 8674 trace_sched_cpu_capacity_tp(cpu_rq(cpu)); 8675 8676 sdg->sgc->capacity = capacity; 8677 sdg->sgc->min_capacity = capacity; 8678 sdg->sgc->max_capacity = capacity; 8679 } 8680 8681 void update_group_capacity(struct sched_domain *sd, int cpu) 8682 { 8683 struct sched_domain *child = sd->child; 8684 struct sched_group *group, *sdg = sd->groups; 8685 unsigned long capacity, min_capacity, max_capacity; 8686 unsigned long interval; 8687 8688 interval = msecs_to_jiffies(sd->balance_interval); 8689 interval = clamp(interval, 1UL, max_load_balance_interval); 8690 sdg->sgc->next_update = jiffies + interval; 8691 8692 if (!child) { 8693 update_cpu_capacity(sd, cpu); 8694 return; 8695 } 8696 8697 capacity = 0; 8698 min_capacity = ULONG_MAX; 8699 max_capacity = 0; 8700 8701 if (child->flags & SD_OVERLAP) { 8702 /* 8703 * SD_OVERLAP domains cannot assume that child groups 8704 * span the current group. 8705 */ 8706 8707 for_each_cpu(cpu, sched_group_span(sdg)) { 8708 unsigned long cpu_cap = capacity_of(cpu); 8709 8710 capacity += cpu_cap; 8711 min_capacity = min(cpu_cap, min_capacity); 8712 max_capacity = max(cpu_cap, max_capacity); 8713 } 8714 } else { 8715 /* 8716 * !SD_OVERLAP domains can assume that child groups 8717 * span the current group. 8718 */ 8719 8720 group = child->groups; 8721 do { 8722 struct sched_group_capacity *sgc = group->sgc; 8723 8724 capacity += sgc->capacity; 8725 min_capacity = min(sgc->min_capacity, min_capacity); 8726 max_capacity = max(sgc->max_capacity, max_capacity); 8727 group = group->next; 8728 } while (group != child->groups); 8729 } 8730 8731 sdg->sgc->capacity = capacity; 8732 sdg->sgc->min_capacity = min_capacity; 8733 sdg->sgc->max_capacity = max_capacity; 8734 } 8735 8736 /* 8737 * Check whether the capacity of the rq has been noticeably reduced by side 8738 * activity. The imbalance_pct is used for the threshold. 8739 * Return true is the capacity is reduced 8740 */ 8741 static inline int 8742 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) 8743 { 8744 return ((rq->cpu_capacity * sd->imbalance_pct) < 8745 (rq->cpu_capacity_orig * 100)); 8746 } 8747 8748 /* 8749 * Check whether a rq has a misfit task and if it looks like we can actually 8750 * help that task: we can migrate the task to a CPU of higher capacity, or 8751 * the task's current CPU is heavily pressured. 8752 */ 8753 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd) 8754 { 8755 return rq->misfit_task_load && 8756 (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || 8757 check_cpu_capacity(rq, sd)); 8758 } 8759 8760 /* 8761 * Group imbalance indicates (and tries to solve) the problem where balancing 8762 * groups is inadequate due to ->cpus_ptr constraints. 8763 * 8764 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a 8765 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. 
8766 * Something like: 8767 * 8768 * { 0 1 2 3 } { 4 5 6 7 } 8769 * (affinity mask: CPU 3 plus CPUs 4-6) 8770 * 8771 * If we were to balance group-wise we'd place two tasks in the first group and 8772 * two tasks in the second group. Clearly this is undesired as it will overload 8773 * cpu 3 and leave one of the CPUs in the second group unused. 8774 * 8775 * The current solution to this issue is detecting the skew in the first group 8776 * by noticing the lower domain failed to reach balance and had difficulty 8777 * moving tasks due to affinity constraints. 8778 * 8779 * When this is detected, this group becomes a candidate for busiest; see 8780 * update_sd_pick_busiest(). And calculate_imbalance() and 8781 * find_busiest_group() avoid some of the usual balance conditions to allow it 8782 * to create an effective group imbalance. 8783 * 8784 * This is a somewhat tricky proposition since the next run might not find the 8785 * group imbalance and decide the groups need to be balanced again. A most 8786 * subtle and fragile situation. 8787 */ 8788 8789 static inline int sg_imbalanced(struct sched_group *group) 8790 { 8791 return group->sgc->imbalance; 8792 } 8793 8794 /* 8795 * group_has_capacity returns true if the group has spare capacity that could 8796 * be used by some tasks. 8797 * We consider that a group has spare capacity if the number of tasks is 8798 * smaller than the number of CPUs or if the utilization is lower than the 8799 * available capacity for CFS tasks. 8800 * For the latter, we use a threshold to stabilize the state, to take into 8801 * account the variance of the tasks' load and to return true if the available 8802 * capacity is meaningful for the load balancer. 8803 * As an example, an available capacity of 1% can appear but it doesn't provide 8804 * any benefit to the load balancer. 8805 */ 8806 static inline bool 8807 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs) 8808 { 8809 if (sgs->sum_nr_running < sgs->group_weight) 8810 return true; 8811 8812 if ((sgs->group_capacity * imbalance_pct) < 8813 (sgs->group_runnable * 100)) 8814 return false; 8815 8816 if ((sgs->group_capacity * 100) > 8817 (sgs->group_util * imbalance_pct)) 8818 return true; 8819 8820 return false; 8821 } 8822 8823 /* 8824 * group_is_overloaded returns true if the group has more tasks than it can 8825 * handle. 8826 * group_is_overloaded is not equal to !group_has_capacity because a group 8827 * with exactly the right number of tasks has no spare capacity left but is not 8828 * overloaded, so both group_has_capacity and group_is_overloaded return 8829 * false.
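*
* For example (illustrative numbers): with imbalance_pct = 117, group_capacity = 1024 and group_util = 1100 in a group running more tasks than it has CPUs, 1024 * 100 < 1100 * 117 and the group is reported as overloaded.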
8830 */ 8831 static inline bool 8832 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs) 8833 { 8834 if (sgs->sum_nr_running <= sgs->group_weight) 8835 return false; 8836 8837 if ((sgs->group_capacity * 100) < 8838 (sgs->group_util * imbalance_pct)) 8839 return true; 8840 8841 if ((sgs->group_capacity * imbalance_pct) < 8842 (sgs->group_runnable * 100)) 8843 return true; 8844 8845 return false; 8846 } 8847 8848 static inline enum 8849 group_type group_classify(unsigned int imbalance_pct, 8850 struct sched_group *group, 8851 struct sg_lb_stats *sgs) 8852 { 8853 if (group_is_overloaded(imbalance_pct, sgs)) 8854 return group_overloaded; 8855 8856 if (sg_imbalanced(group)) 8857 return group_imbalanced; 8858 8859 if (sgs->group_asym_packing) 8860 return group_asym_packing; 8861 8862 if (sgs->group_misfit_task_load) 8863 return group_misfit_task; 8864 8865 if (!group_has_capacity(imbalance_pct, sgs)) 8866 return group_fully_busy; 8867 8868 return group_has_spare; 8869 } 8870 8871 /** 8872 * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks 8873 * @dst_cpu: Destination CPU of the load balancing 8874 * @sds: Load-balancing data with statistics of the local group 8875 * @sgs: Load-balancing statistics of the candidate busiest group 8876 * @sg: The candidate busiest group 8877 * 8878 * Check the state of the SMT siblings of both @sds::local and @sg and decide 8879 * if @dst_cpu can pull tasks. 8880 * 8881 * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of 8882 * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks 8883 * only if @dst_cpu has higher priority. 8884 * 8885 * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more 8886 * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority. 8887 * Bigger imbalances in the number of busy CPUs will be dealt with in 8888 * update_sd_pick_busiest(). 8889 * 8890 * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings 8891 * of @dst_cpu are idle and @sg has lower priority. 8892 * 8893 * Return: true if @dst_cpu can pull tasks, false otherwise. 8894 */ 8895 static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds, 8896 struct sg_lb_stats *sgs, 8897 struct sched_group *sg) 8898 { 8899 #ifdef CONFIG_SCHED_SMT 8900 bool local_is_smt, sg_is_smt; 8901 int sg_busy_cpus; 8902 8903 local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY; 8904 sg_is_smt = sg->flags & SD_SHARE_CPUCAPACITY; 8905 8906 sg_busy_cpus = sgs->group_weight - sgs->idle_cpus; 8907 8908 if (!local_is_smt) { 8909 /* 8910 * If we are here, @dst_cpu is idle and does not have SMT 8911 * siblings. Pull tasks if candidate group has two or more 8912 * busy CPUs. 8913 */ 8914 if (sg_busy_cpus >= 2) /* implies sg_is_smt */ 8915 return true; 8916 8917 /* 8918 * @dst_cpu does not have SMT siblings. @sg may have SMT 8919 * siblings and only one is busy. In such case, @dst_cpu 8920 * can help if it has higher priority and is idle (i.e., 8921 * it has no running tasks). 8922 */ 8923 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu); 8924 } 8925 8926 /* @dst_cpu has SMT siblings. */ 8927 8928 if (sg_is_smt) { 8929 int local_busy_cpus = sds->local->group_weight - 8930 sds->local_stat.idle_cpus; 8931 int busy_cpus_delta = sg_busy_cpus - local_busy_cpus; 8932 8933 if (busy_cpus_delta == 1) 8934 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu); 8935 8936 return false; 8937 } 8938 8939 /* 8940 * @sg does not have SMT siblings. 
Ensure that @sds::local does not end 8941 * up with more than one busy SMT sibling and only pull tasks if there 8942 * are not busy CPUs (i.e., no CPU has running tasks). 8943 */ 8944 if (!sds->local_stat.sum_nr_running) 8945 return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu); 8946 8947 return false; 8948 #else 8949 /* Always return false so that callers deal with non-SMT cases. */ 8950 return false; 8951 #endif 8952 } 8953 8954 static inline bool 8955 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, 8956 struct sched_group *group) 8957 { 8958 /* Only do SMT checks if either local or candidate have SMT siblings */ 8959 if ((sds->local->flags & SD_SHARE_CPUCAPACITY) || 8960 (group->flags & SD_SHARE_CPUCAPACITY)) 8961 return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group); 8962 8963 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu); 8964 } 8965 8966 static inline bool 8967 sched_reduced_capacity(struct rq *rq, struct sched_domain *sd) 8968 { 8969 /* 8970 * When there is more than 1 task, the group_overloaded case already 8971 * takes care of cpu with reduced capacity 8972 */ 8973 if (rq->cfs.h_nr_running != 1) 8974 return false; 8975 8976 return check_cpu_capacity(rq, sd); 8977 } 8978 8979 /** 8980 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 8981 * @env: The load balancing environment. 8982 * @sds: Load-balancing data with statistics of the local group. 8983 * @group: sched_group whose statistics are to be updated. 8984 * @sgs: variable to hold the statistics for this group. 8985 * @sg_status: Holds flag indicating the status of the sched_group 8986 */ 8987 static inline void update_sg_lb_stats(struct lb_env *env, 8988 struct sd_lb_stats *sds, 8989 struct sched_group *group, 8990 struct sg_lb_stats *sgs, 8991 int *sg_status) 8992 { 8993 int i, nr_running, local_group; 8994 8995 memset(sgs, 0, sizeof(*sgs)); 8996 8997 local_group = group == sds->local; 8998 8999 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 9000 struct rq *rq = cpu_rq(i); 9001 unsigned long load = cpu_load(rq); 9002 9003 sgs->group_load += load; 9004 sgs->group_util += cpu_util_cfs(i); 9005 sgs->group_runnable += cpu_runnable(rq); 9006 sgs->sum_h_nr_running += rq->cfs.h_nr_running; 9007 9008 nr_running = rq->nr_running; 9009 sgs->sum_nr_running += nr_running; 9010 9011 if (nr_running > 1) 9012 *sg_status |= SG_OVERLOAD; 9013 9014 if (cpu_overutilized(i)) 9015 *sg_status |= SG_OVERUTILIZED; 9016 9017 #ifdef CONFIG_NUMA_BALANCING 9018 sgs->nr_numa_running += rq->nr_numa_running; 9019 sgs->nr_preferred_running += rq->nr_preferred_running; 9020 #endif 9021 /* 9022 * No need to call idle_cpu() if nr_running is not 0 9023 */ 9024 if (!nr_running && idle_cpu(i)) { 9025 sgs->idle_cpus++; 9026 /* Idle cpu can't have misfit task */ 9027 continue; 9028 } 9029 9030 if (local_group) 9031 continue; 9032 9033 if (env->sd->flags & SD_ASYM_CPUCAPACITY) { 9034 /* Check for a misfit task on the cpu */ 9035 if (sgs->group_misfit_task_load < rq->misfit_task_load) { 9036 sgs->group_misfit_task_load = rq->misfit_task_load; 9037 *sg_status |= SG_OVERLOAD; 9038 } 9039 } else if ((env->idle != CPU_NOT_IDLE) && 9040 sched_reduced_capacity(rq, env->sd)) { 9041 /* Check for a task running on a CPU with reduced capacity */ 9042 if (sgs->group_misfit_task_load < load) 9043 sgs->group_misfit_task_load = load; 9044 } 9045 } 9046 9047 sgs->group_capacity = group->sgc->capacity; 9048 9049 sgs->group_weight = group->group_weight; 9050 9051 /* Check if dst CPU is 
idle and preferred to this group */ 9052 if (!local_group && env->sd->flags & SD_ASYM_PACKING && 9053 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && 9054 sched_asym(env, sds, sgs, group)) { 9055 sgs->group_asym_packing = 1; 9056 } 9057 9058 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); 9059 9060 /* Computing avg_load makes sense only when group is overloaded */ 9061 if (sgs->group_type == group_overloaded) 9062 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / 9063 sgs->group_capacity; 9064 } 9065 9066 /** 9067 * update_sd_pick_busiest - return 1 on busiest group 9068 * @env: The load balancing environment. 9069 * @sds: sched_domain statistics 9070 * @sg: sched_group candidate to be checked for being the busiest 9071 * @sgs: sched_group statistics 9072 * 9073 * Determine if @sg is a busier group than the previously selected 9074 * busiest group. 9075 * 9076 * Return: %true if @sg is a busier group than the previously selected 9077 * busiest group. %false otherwise. 9078 */ 9079 static bool update_sd_pick_busiest(struct lb_env *env, 9080 struct sd_lb_stats *sds, 9081 struct sched_group *sg, 9082 struct sg_lb_stats *sgs) 9083 { 9084 struct sg_lb_stats *busiest = &sds->busiest_stat; 9085 9086 /* Make sure that there is at least one task to pull */ 9087 if (!sgs->sum_h_nr_running) 9088 return false; 9089 9090 /* 9091 * Don't try to pull misfit tasks we can't help. 9092 * We can use max_capacity here as reduction in capacity on some 9093 * CPUs in the group should either be possible to resolve 9094 * internally or be covered by avg_load imbalance (eventually). 9095 */ 9096 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && 9097 (sgs->group_type == group_misfit_task) && 9098 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) || 9099 sds->local_stat.group_type != group_has_spare)) 9100 return false; 9101 9102 if (sgs->group_type > busiest->group_type) 9103 return true; 9104 9105 if (sgs->group_type < busiest->group_type) 9106 return false; 9107 9108 /* 9109 * The candidate and the current busiest group are the same type of 9110 * group. Let check which one is the busiest according to the type. 9111 */ 9112 9113 switch (sgs->group_type) { 9114 case group_overloaded: 9115 /* Select the overloaded group with highest avg_load. */ 9116 if (sgs->avg_load <= busiest->avg_load) 9117 return false; 9118 break; 9119 9120 case group_imbalanced: 9121 /* 9122 * Select the 1st imbalanced group as we don't have any way to 9123 * choose one more than another. 9124 */ 9125 return false; 9126 9127 case group_asym_packing: 9128 /* Prefer to move from lowest priority CPU's work */ 9129 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) 9130 return false; 9131 break; 9132 9133 case group_misfit_task: 9134 /* 9135 * If we have more than one misfit sg go with the biggest 9136 * misfit. 9137 */ 9138 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) 9139 return false; 9140 break; 9141 9142 case group_fully_busy: 9143 /* 9144 * Select the fully busy group with highest avg_load. In 9145 * theory, there is no need to pull task from such kind of 9146 * group because tasks have all compute capacity that they need 9147 * but we can still improve the overall throughput by reducing 9148 * contention when accessing shared HW resources. 9149 * 9150 * XXX for now avg_load is not computed and always 0 so we 9151 * select the 1st one. 
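* (With both avg_load values still 0, the check below makes update_sd_pick_busiest() return false, so the first fully busy group found is kept as busiest.)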
9152 */ 9153 if (sgs->avg_load <= busiest->avg_load) 9154 return false; 9155 break; 9156 9157 case group_has_spare: 9158 /* 9159 * Select not overloaded group with lowest number of idle cpus 9160 * and highest number of running tasks. We could also compare 9161 * the spare capacity which is more stable but it can end up 9162 * that the group has less spare capacity but finally more idle 9163 * CPUs which means less opportunity to pull tasks. 9164 */ 9165 if (sgs->idle_cpus > busiest->idle_cpus) 9166 return false; 9167 else if ((sgs->idle_cpus == busiest->idle_cpus) && 9168 (sgs->sum_nr_running <= busiest->sum_nr_running)) 9169 return false; 9170 9171 break; 9172 } 9173 9174 /* 9175 * Candidate sg has no more than one task per CPU and has higher 9176 * per-CPU capacity. Migrating tasks to less capable CPUs may harm 9177 * throughput. Maximize throughput, power/energy consequences are not 9178 * considered. 9179 */ 9180 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && 9181 (sgs->group_type <= group_fully_busy) && 9182 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu)))) 9183 return false; 9184 9185 return true; 9186 } 9187 9188 #ifdef CONFIG_NUMA_BALANCING 9189 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 9190 { 9191 if (sgs->sum_h_nr_running > sgs->nr_numa_running) 9192 return regular; 9193 if (sgs->sum_h_nr_running > sgs->nr_preferred_running) 9194 return remote; 9195 return all; 9196 } 9197 9198 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 9199 { 9200 if (rq->nr_running > rq->nr_numa_running) 9201 return regular; 9202 if (rq->nr_running > rq->nr_preferred_running) 9203 return remote; 9204 return all; 9205 } 9206 #else 9207 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 9208 { 9209 return all; 9210 } 9211 9212 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 9213 { 9214 return regular; 9215 } 9216 #endif /* CONFIG_NUMA_BALANCING */ 9217 9218 9219 struct sg_lb_stats; 9220 9221 /* 9222 * task_running_on_cpu - return 1 if @p is running on @cpu. 9223 */ 9224 9225 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p) 9226 { 9227 /* Task has no contribution or is new */ 9228 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 9229 return 0; 9230 9231 if (task_on_rq_queued(p)) 9232 return 1; 9233 9234 return 0; 9235 } 9236 9237 /** 9238 * idle_cpu_without - would a given CPU be idle without p ? 9239 * @cpu: the processor on which idleness is tested. 9240 * @p: task which should be ignored. 9241 * 9242 * Return: 1 if the CPU would be idle. 0 otherwise. 9243 */ 9244 static int idle_cpu_without(int cpu, struct task_struct *p) 9245 { 9246 struct rq *rq = cpu_rq(cpu); 9247 9248 if (rq->curr != rq->idle && rq->curr != p) 9249 return 0; 9250 9251 /* 9252 * rq->nr_running can't be used but an updated version without the 9253 * impact of p on cpu must be used instead. The updated nr_running 9254 * be computed and tested before calling idle_cpu_without(). 9255 */ 9256 9257 #ifdef CONFIG_SMP 9258 if (rq->ttwu_pending) 9259 return 0; 9260 #endif 9261 9262 return 1; 9263 } 9264 9265 /* 9266 * update_sg_wakeup_stats - Update sched_group's statistics for wakeup. 9267 * @sd: The sched_domain level to look for idlest group. 9268 * @group: sched_group whose statistics are to be updated. 9269 * @sgs: variable to hold the statistics for this group. 9270 * @p: The task for which we look for the idlest group/CPU. 
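*
* The statistics are computed as if @p had already been removed from its current CPU, using the cpu_load_without()/cpu_util_without()/cpu_runnable_without() and idle_cpu_without() helpers.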
9271 */ 9272 static inline void update_sg_wakeup_stats(struct sched_domain *sd, 9273 struct sched_group *group, 9274 struct sg_lb_stats *sgs, 9275 struct task_struct *p) 9276 { 9277 int i, nr_running; 9278 9279 memset(sgs, 0, sizeof(*sgs)); 9280 9281 for_each_cpu(i, sched_group_span(group)) { 9282 struct rq *rq = cpu_rq(i); 9283 unsigned int local; 9284 9285 sgs->group_load += cpu_load_without(rq, p); 9286 sgs->group_util += cpu_util_without(i, p); 9287 sgs->group_runnable += cpu_runnable_without(rq, p); 9288 local = task_running_on_cpu(i, p); 9289 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; 9290 9291 nr_running = rq->nr_running - local; 9292 sgs->sum_nr_running += nr_running; 9293 9294 /* 9295 * No need to call idle_cpu_without() if nr_running is not 0 9296 */ 9297 if (!nr_running && idle_cpu_without(i, p)) 9298 sgs->idle_cpus++; 9299 9300 } 9301 9302 /* Check if task fits in the group */ 9303 if (sd->flags & SD_ASYM_CPUCAPACITY && 9304 !task_fits_capacity(p, group->sgc->max_capacity)) { 9305 sgs->group_misfit_task_load = 1; 9306 } 9307 9308 sgs->group_capacity = group->sgc->capacity; 9309 9310 sgs->group_weight = group->group_weight; 9311 9312 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); 9313 9314 /* 9315 * Computing avg_load makes sense only when group is fully busy or 9316 * overloaded 9317 */ 9318 if (sgs->group_type == group_fully_busy || 9319 sgs->group_type == group_overloaded) 9320 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / 9321 sgs->group_capacity; 9322 } 9323 9324 static bool update_pick_idlest(struct sched_group *idlest, 9325 struct sg_lb_stats *idlest_sgs, 9326 struct sched_group *group, 9327 struct sg_lb_stats *sgs) 9328 { 9329 if (sgs->group_type < idlest_sgs->group_type) 9330 return true; 9331 9332 if (sgs->group_type > idlest_sgs->group_type) 9333 return false; 9334 9335 /* 9336 * The candidate and the current idlest group are the same type of 9337 * group. Let check which one is the idlest according to the type. 9338 */ 9339 9340 switch (sgs->group_type) { 9341 case group_overloaded: 9342 case group_fully_busy: 9343 /* Select the group with lowest avg_load. */ 9344 if (idlest_sgs->avg_load <= sgs->avg_load) 9345 return false; 9346 break; 9347 9348 case group_imbalanced: 9349 case group_asym_packing: 9350 /* Those types are not used in the slow wakeup path */ 9351 return false; 9352 9353 case group_misfit_task: 9354 /* Select group with the highest max capacity */ 9355 if (idlest->sgc->max_capacity >= group->sgc->max_capacity) 9356 return false; 9357 break; 9358 9359 case group_has_spare: 9360 /* Select group with most idle CPUs */ 9361 if (idlest_sgs->idle_cpus > sgs->idle_cpus) 9362 return false; 9363 9364 /* Select group with lowest group_util */ 9365 if (idlest_sgs->idle_cpus == sgs->idle_cpus && 9366 idlest_sgs->group_util <= sgs->group_util) 9367 return false; 9368 9369 break; 9370 } 9371 9372 return true; 9373 } 9374 9375 /* 9376 * find_idlest_group() finds and returns the least busy CPU group within the 9377 * domain. 9378 * 9379 * Assumes p is allowed on at least one CPU in sd. 
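*
* Returns NULL when the task should not be pushed away, i.e. no group is clearly a better choice than the local one.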
9380 */ 9381 static struct sched_group * 9382 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) 9383 { 9384 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups; 9385 struct sg_lb_stats local_sgs, tmp_sgs; 9386 struct sg_lb_stats *sgs; 9387 unsigned long imbalance; 9388 struct sg_lb_stats idlest_sgs = { 9389 .avg_load = UINT_MAX, 9390 .group_type = group_overloaded, 9391 }; 9392 9393 do { 9394 int local_group; 9395 9396 /* Skip over this group if it has no CPUs allowed */ 9397 if (!cpumask_intersects(sched_group_span(group), 9398 p->cpus_ptr)) 9399 continue; 9400 9401 /* Skip over this group if no cookie matched */ 9402 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) 9403 continue; 9404 9405 local_group = cpumask_test_cpu(this_cpu, 9406 sched_group_span(group)); 9407 9408 if (local_group) { 9409 sgs = &local_sgs; 9410 local = group; 9411 } else { 9412 sgs = &tmp_sgs; 9413 } 9414 9415 update_sg_wakeup_stats(sd, group, sgs, p); 9416 9417 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { 9418 idlest = group; 9419 idlest_sgs = *sgs; 9420 } 9421 9422 } while (group = group->next, group != sd->groups); 9423 9424 9425 /* There is no idlest group to push tasks to */ 9426 if (!idlest) 9427 return NULL; 9428 9429 /* The local group has been skipped because of CPU affinity */ 9430 if (!local) 9431 return idlest; 9432 9433 /* 9434 * If the local group is idler than the selected idlest group 9435 * don't try and push the task. 9436 */ 9437 if (local_sgs.group_type < idlest_sgs.group_type) 9438 return NULL; 9439 9440 /* 9441 * If the local group is busier than the selected idlest group 9442 * try and push the task. 9443 */ 9444 if (local_sgs.group_type > idlest_sgs.group_type) 9445 return idlest; 9446 9447 switch (local_sgs.group_type) { 9448 case group_overloaded: 9449 case group_fully_busy: 9450 9451 /* Calculate allowed imbalance based on load */ 9452 imbalance = scale_load_down(NICE_0_LOAD) * 9453 (sd->imbalance_pct-100) / 100; 9454 9455 /* 9456 * When comparing groups across NUMA domains, it's possible for 9457 * the local domain to be very lightly loaded relative to the 9458 * remote domains but "imbalance" skews the comparison making 9459 * remote CPUs look much more favourable. When considering 9460 * cross-domain, add imbalance to the load on the remote node 9461 * and consider staying local. 9462 */ 9463 9464 if ((sd->flags & SD_NUMA) && 9465 ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load)) 9466 return NULL; 9467 9468 /* 9469 * If the local group is less loaded than the selected 9470 * idlest group don't try and push any tasks. 
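* (For instance, with imbalance_pct = 117 the 'imbalance' slack computed above is 1024 * 17 / 100 ~= 174 load units.)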
9471 */ 9472 if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance)) 9473 return NULL; 9474 9475 if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load) 9476 return NULL; 9477 break; 9478 9479 case group_imbalanced: 9480 case group_asym_packing: 9481 /* Those type are not used in the slow wakeup path */ 9482 return NULL; 9483 9484 case group_misfit_task: 9485 /* Select group with the highest max capacity */ 9486 if (local->sgc->max_capacity >= idlest->sgc->max_capacity) 9487 return NULL; 9488 break; 9489 9490 case group_has_spare: 9491 #ifdef CONFIG_NUMA 9492 if (sd->flags & SD_NUMA) { 9493 int imb_numa_nr = sd->imb_numa_nr; 9494 #ifdef CONFIG_NUMA_BALANCING 9495 int idlest_cpu; 9496 /* 9497 * If there is spare capacity at NUMA, try to select 9498 * the preferred node 9499 */ 9500 if (cpu_to_node(this_cpu) == p->numa_preferred_nid) 9501 return NULL; 9502 9503 idlest_cpu = cpumask_first(sched_group_span(idlest)); 9504 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid) 9505 return idlest; 9506 #endif /* CONFIG_NUMA_BALANCING */ 9507 /* 9508 * Otherwise, keep the task close to the wakeup source 9509 * and improve locality if the number of running tasks 9510 * would remain below threshold where an imbalance is 9511 * allowed while accounting for the possibility the 9512 * task is pinned to a subset of CPUs. If there is a 9513 * real need of migration, periodic load balance will 9514 * take care of it. 9515 */ 9516 if (p->nr_cpus_allowed != NR_CPUS) { 9517 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask); 9518 9519 cpumask_and(cpus, sched_group_span(local), p->cpus_ptr); 9520 imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr); 9521 } 9522 9523 imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus); 9524 if (!adjust_numa_imbalance(imbalance, 9525 local_sgs.sum_nr_running + 1, 9526 imb_numa_nr)) { 9527 return NULL; 9528 } 9529 } 9530 #endif /* CONFIG_NUMA */ 9531 9532 /* 9533 * Select group with highest number of idle CPUs. We could also 9534 * compare the utilization which is more stable but it can end 9535 * up that the group has less spare capacity but finally more 9536 * idle CPUs which means more opportunity to run task. 9537 */ 9538 if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus) 9539 return NULL; 9540 break; 9541 } 9542 9543 return idlest; 9544 } 9545 9546 static void update_idle_cpu_scan(struct lb_env *env, 9547 unsigned long sum_util) 9548 { 9549 struct sched_domain_shared *sd_share; 9550 int llc_weight, pct; 9551 u64 x, y, tmp; 9552 /* 9553 * Update the number of CPUs to scan in LLC domain, which could 9554 * be used as a hint in select_idle_cpu(). The update of sd_share 9555 * could be expensive because it is within a shared cache line. 9556 * So the write of this hint only occurs during periodic load 9557 * balancing, rather than CPU_NEWLY_IDLE, because the latter 9558 * can fire way more frequently than the former. 9559 */ 9560 if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE) 9561 return; 9562 9563 llc_weight = per_cpu(sd_llc_size, env->dst_cpu); 9564 if (env->sd->span_weight != llc_weight) 9565 return; 9566 9567 sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu)); 9568 if (!sd_share) 9569 return; 9570 9571 /* 9572 * The number of CPUs to search drops as sum_util increases, when 9573 * sum_util hits 85% or above, the scan stops. 9574 * The reason to choose 85% as the threshold is because this is the 9575 * imbalance_pct(117) when a LLC sched group is overloaded. 
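* (100 / 117 is roughly 85%: a group counts as overloaded once its utilization exceeds 100/imbalance_pct of its capacity; see group_is_overloaded().)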
9576 * 9577 * let y = SCHED_CAPACITY_SCALE - p * x^2 [1] 9578 * and y'= y / SCHED_CAPACITY_SCALE 9579 * 9580 * x is the ratio of sum_util compared to the CPU capacity: 9581 * x = sum_util / (llc_weight * SCHED_CAPACITY_SCALE) 9582 * y' is the ratio of CPUs to be scanned in the LLC domain, 9583 * and the number of CPUs to scan is calculated by: 9584 * 9585 * nr_scan = llc_weight * y' [2] 9586 * 9587 * When x hits the threshold of overloaded, AKA, when 9588 * x = 100 / pct, y drops to 0. According to [1], 9589 * p should be SCHED_CAPACITY_SCALE * pct^2 / 10000 9590 * 9591 * Scale x by SCHED_CAPACITY_SCALE: 9592 * x' = sum_util / llc_weight; [3] 9593 * 9594 * and finally [1] becomes: 9595 * y = SCHED_CAPACITY_SCALE - 9596 * x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE) [4] 9597 * 9598 */ 9599 /* equation [3] */ 9600 x = sum_util; 9601 do_div(x, llc_weight); 9602 9603 /* equation [4] */ 9604 pct = env->sd->imbalance_pct; 9605 tmp = x * x * pct * pct; 9606 do_div(tmp, 10000 * SCHED_CAPACITY_SCALE); 9607 tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE); 9608 y = SCHED_CAPACITY_SCALE - tmp; 9609 9610 /* equation [2] */ 9611 y *= llc_weight; 9612 do_div(y, SCHED_CAPACITY_SCALE); 9613 if ((int)y != sd_share->nr_idle_scan) 9614 WRITE_ONCE(sd_share->nr_idle_scan, (int)y); 9615 } 9616 9617 /** 9618 * update_sd_lb_stats - Update sched_domain's statistics for load balancing. 9619 * @env: The load balancing environment. 9620 * @sds: variable to hold the statistics for this sched_domain. 9621 */ 9622 9623 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) 9624 { 9625 struct sched_domain *child = env->sd->child; 9626 struct sched_group *sg = env->sd->groups; 9627 struct sg_lb_stats *local = &sds->local_stat; 9628 struct sg_lb_stats tmp_sgs; 9629 unsigned long sum_util = 0; 9630 int sg_status = 0; 9631 9632 do { 9633 struct sg_lb_stats *sgs = &tmp_sgs; 9634 int local_group; 9635 9636 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); 9637 if (local_group) { 9638 sds->local = sg; 9639 sgs = local; 9640 9641 if (env->idle != CPU_NEWLY_IDLE || 9642 time_after_eq(jiffies, sg->sgc->next_update)) 9643 update_group_capacity(env->sd, env->dst_cpu); 9644 } 9645 9646 update_sg_lb_stats(env, sds, sg, sgs, &sg_status); 9647 9648 if (local_group) 9649 goto next_group; 9650 9651 9652 if (update_sd_pick_busiest(env, sds, sg, sgs)) { 9653 sds->busiest = sg; 9654 sds->busiest_stat = *sgs; 9655 } 9656 9657 next_group: 9658 /* Now, start updating sd_lb_stats */ 9659 sds->total_load += sgs->group_load; 9660 sds->total_capacity += sgs->group_capacity; 9661 9662 sum_util += sgs->group_util; 9663 sg = sg->next; 9664 } while (sg != env->sd->groups); 9665 9666 /* Tag domain that child domain prefers tasks go to siblings first */ 9667 sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING; 9668 9669 9670 if (env->sd->flags & SD_NUMA) 9671 env->fbq_type = fbq_classify_group(&sds->busiest_stat); 9672 9673 if (!env->sd->parent) { 9674 struct root_domain *rd = env->dst_rq->rd; 9675 9676 /* update overload indicator if we are at root domain */ 9677 WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD); 9678 9679 /* Update over-utilization (tipping point, U >= 0) indicator */ 9680 WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED); 9681 trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED); 9682 } else if (sg_status & SG_OVERUTILIZED) { 9683 struct root_domain *rd = env->dst_rq->rd; 9684 9685 WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED); 9686 trace_sched_overutilized_tp(rd, 
SG_OVERUTILIZED); 9687 } 9688 9689 update_idle_cpu_scan(env, sum_util); 9690 } 9691 9692 /** 9693 * calculate_imbalance - Calculate the amount of imbalance present within the 9694 * groups of a given sched_domain during load balance. 9695 * @env: load balance environment 9696 * @sds: statistics of the sched_domain whose imbalance is to be calculated. 9697 */ 9698 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 9699 { 9700 struct sg_lb_stats *local, *busiest; 9701 9702 local = &sds->local_stat; 9703 busiest = &sds->busiest_stat; 9704 9705 if (busiest->group_type == group_misfit_task) { 9706 if (env->sd->flags & SD_ASYM_CPUCAPACITY) { 9707 /* Set imbalance to allow misfit tasks to be balanced. */ 9708 env->migration_type = migrate_misfit; 9709 env->imbalance = 1; 9710 } else { 9711 /* 9712 * Set load imbalance to allow moving task from cpu 9713 * with reduced capacity. 9714 */ 9715 env->migration_type = migrate_load; 9716 env->imbalance = busiest->group_misfit_task_load; 9717 } 9718 return; 9719 } 9720 9721 if (busiest->group_type == group_asym_packing) { 9722 /* 9723 * In case of asym capacity, we will try to migrate all load to 9724 * the preferred CPU. 9725 */ 9726 env->migration_type = migrate_task; 9727 env->imbalance = busiest->sum_h_nr_running; 9728 return; 9729 } 9730 9731 if (busiest->group_type == group_imbalanced) { 9732 /* 9733 * In the group_imb case we cannot rely on group-wide averages 9734 * to ensure CPU-load equilibrium, try to move any task to fix 9735 * the imbalance. The next load balance will take care of 9736 * balancing back the system. 9737 */ 9738 env->migration_type = migrate_task; 9739 env->imbalance = 1; 9740 return; 9741 } 9742 9743 /* 9744 * Try to use spare capacity of local group without overloading it or 9745 * emptying busiest. 9746 */ 9747 if (local->group_type == group_has_spare) { 9748 if ((busiest->group_type > group_fully_busy) && 9749 !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) { 9750 /* 9751 * If busiest is overloaded, try to fill spare 9752 * capacity. This might end up creating spare capacity 9753 * in busiest or busiest still being overloaded but 9754 * there is no simple way to directly compute the 9755 * amount of load to migrate in order to balance the 9756 * system. 9757 */ 9758 env->migration_type = migrate_util; 9759 env->imbalance = max(local->group_capacity, local->group_util) - 9760 local->group_util; 9761 9762 /* 9763 * In some cases, the group's utilization is max or even 9764 * higher than capacity because of migrations but the 9765 * local CPU is (newly) idle. There is at least one 9766 * waiting task in this overloaded busiest group. Let's 9767 * try to pull it. 9768 */ 9769 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) { 9770 env->migration_type = migrate_task; 9771 env->imbalance = 1; 9772 } 9773 9774 return; 9775 } 9776 9777 if (busiest->group_weight == 1 || sds->prefer_sibling) { 9778 unsigned int nr_diff = busiest->sum_nr_running; 9779 /* 9780 * When prefer sibling, evenly spread running tasks on 9781 * groups. 9782 */ 9783 env->migration_type = migrate_task; 9784 lsub_positive(&nr_diff, local->sum_nr_running); 9785 env->imbalance = nr_diff; 9786 } else { 9787 9788 /* 9789 * If there is no overload, we just want to even the number of 9790 * idle cpus. 
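 *
 * An illustrative example with assumed numbers (not taken from this
 * file): if the local group has 6 idle CPUs and the busiest group has
 * 2, the imbalance below is max(0, 6 - 2) = 4; the later
 * "env->imbalance >>= 1" halves that to 2, so pulling 2 tasks leaves
 * both groups with roughly the same number of idle CPUs.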
9791 */ 9792 env->migration_type = migrate_task; 9793 env->imbalance = max_t(long, 0, 9794 (local->idle_cpus - busiest->idle_cpus)); 9795 } 9796 9797 #ifdef CONFIG_NUMA 9798 /* Consider allowing a small imbalance between NUMA groups */ 9799 if (env->sd->flags & SD_NUMA) { 9800 env->imbalance = adjust_numa_imbalance(env->imbalance, 9801 local->sum_nr_running + 1, 9802 env->sd->imb_numa_nr); 9803 } 9804 #endif 9805 9806 /* Number of tasks to move to restore balance */ 9807 env->imbalance >>= 1; 9808 9809 return; 9810 } 9811 9812 /* 9813 * Local is fully busy but has to take more load to relieve the 9814 * busiest group 9815 */ 9816 if (local->group_type < group_overloaded) { 9817 /* 9818 * Local will become overloaded so the avg_load metrics are 9819 * finally needed. 9820 */ 9821 9822 local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) / 9823 local->group_capacity; 9824 9825 /* 9826 * If the local group is more loaded than the selected 9827 * busiest group don't try to pull any tasks. 9828 */ 9829 if (local->avg_load >= busiest->avg_load) { 9830 env->imbalance = 0; 9831 return; 9832 } 9833 9834 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) / 9835 sds->total_capacity; 9836 } 9837 9838 /* 9839 * Both group are or will become overloaded and we're trying to get all 9840 * the CPUs to the average_load, so we don't want to push ourselves 9841 * above the average load, nor do we wish to reduce the max loaded CPU 9842 * below the average load. At the same time, we also don't want to 9843 * reduce the group load below the group capacity. Thus we look for 9844 * the minimum possible imbalance. 9845 */ 9846 env->migration_type = migrate_load; 9847 env->imbalance = min( 9848 (busiest->avg_load - sds->avg_load) * busiest->group_capacity, 9849 (sds->avg_load - local->avg_load) * local->group_capacity 9850 ) / SCHED_CAPACITY_SCALE; 9851 } 9852 9853 /******* find_busiest_group() helpers end here *********************/ 9854 9855 /* 9856 * Decision matrix according to the local and busiest group type: 9857 * 9858 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded 9859 * has_spare nr_idle balanced N/A N/A balanced balanced 9860 * fully_busy nr_idle nr_idle N/A N/A balanced balanced 9861 * misfit_task force N/A N/A N/A N/A N/A 9862 * asym_packing force force N/A N/A force force 9863 * imbalanced force force N/A N/A force force 9864 * overloaded force force N/A N/A force avg_load 9865 * 9866 * N/A : Not Applicable because already filtered while updating 9867 * statistics. 9868 * balanced : The system is balanced for these 2 groups. 9869 * force : Calculate the imbalance as load migration is probably needed. 9870 * avg_load : Only if imbalance is significant enough. 9871 * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite 9872 * different in groups. 9873 */ 9874 9875 /** 9876 * find_busiest_group - Returns the busiest group within the sched_domain 9877 * if there is an imbalance. 9878 * @env: The load balancing environment. 9879 * 9880 * Also calculates the amount of runnable load which should be moved 9881 * to restore balance. 9882 * 9883 * Return: - The busiest group if imbalance exists. 9884 */ 9885 static struct sched_group *find_busiest_group(struct lb_env *env) 9886 { 9887 struct sg_lb_stats *local, *busiest; 9888 struct sd_lb_stats sds; 9889 9890 init_sd_lb_stats(&sds); 9891 9892 /* 9893 * Compute the various statistics relevant for load balancing at 9894 * this level. 
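 *
 * (Aside on the migrate_load imbalance computed at the end of
 * calculate_imbalance() above, worked through with assumed numbers:
 * busiest avg_load = 1200, local avg_load = 800, domain avg_load =
 * 1000, both group capacities = 1024.  Then
 *   min((1200 - 1000) * 1024, (1000 - 800) * 1024) / 1024 = 200,
 * i.e. we move only enough load to bring busiest down to, and local
 * up to, the domain average without overshooting either side.)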
9895 */ 9896 update_sd_lb_stats(env, &sds); 9897 9898 if (sched_energy_enabled()) { 9899 struct root_domain *rd = env->dst_rq->rd; 9900 9901 if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) 9902 goto out_balanced; 9903 } 9904 9905 local = &sds.local_stat; 9906 busiest = &sds.busiest_stat; 9907 9908 /* There is no busy sibling group to pull tasks from */ 9909 if (!sds.busiest) 9910 goto out_balanced; 9911 9912 /* Misfit tasks should be dealt with regardless of the avg load */ 9913 if (busiest->group_type == group_misfit_task) 9914 goto force_balance; 9915 9916 /* ASYM feature bypasses nice load balance check */ 9917 if (busiest->group_type == group_asym_packing) 9918 goto force_balance; 9919 9920 /* 9921 * If the busiest group is imbalanced the below checks don't 9922 * work because they assume all things are equal, which typically 9923 * isn't true due to cpus_ptr constraints and the like. 9924 */ 9925 if (busiest->group_type == group_imbalanced) 9926 goto force_balance; 9927 9928 /* 9929 * If the local group is busier than the selected busiest group 9930 * don't try and pull any tasks. 9931 */ 9932 if (local->group_type > busiest->group_type) 9933 goto out_balanced; 9934 9935 /* 9936 * When groups are overloaded, use the avg_load to ensure fairness 9937 * between tasks. 9938 */ 9939 if (local->group_type == group_overloaded) { 9940 /* 9941 * If the local group is more loaded than the selected 9942 * busiest group don't try to pull any tasks. 9943 */ 9944 if (local->avg_load >= busiest->avg_load) 9945 goto out_balanced; 9946 9947 /* XXX broken for overlapping NUMA groups */ 9948 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / 9949 sds.total_capacity; 9950 9951 /* 9952 * Don't pull any tasks if this group is already above the 9953 * domain average load. 9954 */ 9955 if (local->avg_load >= sds.avg_load) 9956 goto out_balanced; 9957 9958 /* 9959 * If the busiest group is more loaded, use imbalance_pct to be 9960 * conservative. 9961 */ 9962 if (100 * busiest->avg_load <= 9963 env->sd->imbalance_pct * local->avg_load) 9964 goto out_balanced; 9965 } 9966 9967 /* Try to move all excess tasks to child's sibling domain */ 9968 if (sds.prefer_sibling && local->group_type == group_has_spare && 9969 busiest->sum_nr_running > local->sum_nr_running + 1) 9970 goto force_balance; 9971 9972 if (busiest->group_type != group_overloaded) { 9973 if (env->idle == CPU_NOT_IDLE) 9974 /* 9975 * If the busiest group is not overloaded (and as a 9976 * result the local one too) but this CPU is already 9977 * busy, let another idle CPU try to pull task. 9978 */ 9979 goto out_balanced; 9980 9981 if (busiest->group_weight > 1 && 9982 local->idle_cpus <= (busiest->idle_cpus + 1)) 9983 /* 9984 * If the busiest group is not overloaded 9985 * and there is no imbalance between this and busiest 9986 * group wrt idle CPUs, it is balanced. The imbalance 9987 * becomes significant if the diff is greater than 1 9988 * otherwise we might end up to just move the imbalance 9989 * on another group. Of course this applies only if 9990 * there is more than 1 CPU per group. 9991 */ 9992 goto out_balanced; 9993 9994 if (busiest->sum_h_nr_running == 1) 9995 /* 9996 * busiest doesn't have any tasks waiting to run 9997 */ 9998 goto out_balanced; 9999 } 10000 10001 force_balance: 10002 /* Looks like there is an imbalance. Compute it */ 10003 calculate_imbalance(env, &sds); 10004 return env->imbalance ? 
sds.busiest : NULL; 10005 10006 out_balanced: 10007 env->imbalance = 0; 10008 return NULL; 10009 } 10010 10011 /* 10012 * find_busiest_queue - find the busiest runqueue among the CPUs in the group. 10013 */ 10014 static struct rq *find_busiest_queue(struct lb_env *env, 10015 struct sched_group *group) 10016 { 10017 struct rq *busiest = NULL, *rq; 10018 unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1; 10019 unsigned int busiest_nr = 0; 10020 int i; 10021 10022 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 10023 unsigned long capacity, load, util; 10024 unsigned int nr_running; 10025 enum fbq_type rt; 10026 10027 rq = cpu_rq(i); 10028 rt = fbq_classify_rq(rq); 10029 10030 /* 10031 * We classify groups/runqueues into three groups: 10032 * - regular: there are !numa tasks 10033 * - remote: there are numa tasks that run on the 'wrong' node 10034 * - all: there is no distinction 10035 * 10036 * In order to avoid migrating ideally placed numa tasks, 10037 * ignore those when there's better options. 10038 * 10039 * If we ignore the actual busiest queue to migrate another 10040 * task, the next balance pass can still reduce the busiest 10041 * queue by moving tasks around inside the node. 10042 * 10043 * If we cannot move enough load due to this classification 10044 * the next pass will adjust the group classification and 10045 * allow migration of more tasks. 10046 * 10047 * Both cases only affect the total convergence complexity. 10048 */ 10049 if (rt > env->fbq_type) 10050 continue; 10051 10052 nr_running = rq->cfs.h_nr_running; 10053 if (!nr_running) 10054 continue; 10055 10056 capacity = capacity_of(i); 10057 10058 /* 10059 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could 10060 * eventually lead to active_balancing high->low capacity. 10061 * Higher per-CPU capacity is considered better than balancing 10062 * average load. 10063 */ 10064 if (env->sd->flags & SD_ASYM_CPUCAPACITY && 10065 !capacity_greater(capacity_of(env->dst_cpu), capacity) && 10066 nr_running == 1) 10067 continue; 10068 10069 /* Make sure we only pull tasks from a CPU of lower priority */ 10070 if ((env->sd->flags & SD_ASYM_PACKING) && 10071 sched_asym_prefer(i, env->dst_cpu) && 10072 nr_running == 1) 10073 continue; 10074 10075 switch (env->migration_type) { 10076 case migrate_load: 10077 /* 10078 * When comparing with load imbalance, use cpu_load() 10079 * which is not scaled with the CPU capacity. 10080 */ 10081 load = cpu_load(rq); 10082 10083 if (nr_running == 1 && load > env->imbalance && 10084 !check_cpu_capacity(rq, env->sd)) 10085 break; 10086 10087 /* 10088 * For the load comparisons with the other CPUs, 10089 * consider the cpu_load() scaled with the CPU 10090 * capacity, so that the load can be moved away 10091 * from the CPU that is potentially running at a 10092 * lower capacity. 10093 * 10094 * Thus we're looking for max(load_i / capacity_i), 10095 * crosswise multiplication to rid ourselves of the 10096 * division works out to: 10097 * load_i * capacity_j > load_j * capacity_i; 10098 * where j is our previous maximum. 10099 */ 10100 if (load * busiest_capacity > busiest_load * capacity) { 10101 busiest_load = load; 10102 busiest_capacity = capacity; 10103 busiest = rq; 10104 } 10105 break; 10106 10107 case migrate_util: 10108 util = cpu_util_cfs(i); 10109 10110 /* 10111 * Don't try to pull utilization from a CPU with one 10112 * running task. Whatever its utilization, we will fail 10113 * detach the task. 
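 *
 * (Aside on the load/capacity cross-multiplication in the
 * migrate_load case above, with assumed numbers: the candidate CPU
 * has load_i = 600 at capacity_i = 512, the current maximum has
 * load_j = 800 at capacity_j = 1024.  600 * 1024 = 614400 is greater
 * than 800 * 512 = 409600, i.e. 600/512 > 800/1024, so the
 * lower-capacity CPU is relatively busier and becomes the new
 * busiest.)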
10114 */ 10115 if (nr_running <= 1) 10116 continue; 10117 10118 if (busiest_util < util) { 10119 busiest_util = util; 10120 busiest = rq; 10121 } 10122 break; 10123 10124 case migrate_task: 10125 if (busiest_nr < nr_running) { 10126 busiest_nr = nr_running; 10127 busiest = rq; 10128 } 10129 break; 10130 10131 case migrate_misfit: 10132 /* 10133 * For ASYM_CPUCAPACITY domains with misfit tasks we 10134 * simply seek the "biggest" misfit task. 10135 */ 10136 if (rq->misfit_task_load > busiest_load) { 10137 busiest_load = rq->misfit_task_load; 10138 busiest = rq; 10139 } 10140 10141 break; 10142 10143 } 10144 } 10145 10146 return busiest; 10147 } 10148 10149 /* 10150 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but 10151 * so long as it is large enough. 10152 */ 10153 #define MAX_PINNED_INTERVAL 512 10154 10155 static inline bool 10156 asym_active_balance(struct lb_env *env) 10157 { 10158 /* 10159 * ASYM_PACKING needs to force migrate tasks from busy but 10160 * lower priority CPUs in order to pack all tasks in the 10161 * highest priority CPUs. 10162 */ 10163 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && 10164 sched_asym_prefer(env->dst_cpu, env->src_cpu); 10165 } 10166 10167 static inline bool 10168 imbalanced_active_balance(struct lb_env *env) 10169 { 10170 struct sched_domain *sd = env->sd; 10171 10172 /* 10173 * The imbalanced case includes the case of pinned tasks preventing a fair 10174 * distribution of the load on the system but also the even distribution of the 10175 * threads on a system with spare capacity 10176 */ 10177 if ((env->migration_type == migrate_task) && 10178 (sd->nr_balance_failed > sd->cache_nice_tries+2)) 10179 return 1; 10180 10181 return 0; 10182 } 10183 10184 static int need_active_balance(struct lb_env *env) 10185 { 10186 struct sched_domain *sd = env->sd; 10187 10188 if (asym_active_balance(env)) 10189 return 1; 10190 10191 if (imbalanced_active_balance(env)) 10192 return 1; 10193 10194 /* 10195 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. 10196 * It's worth migrating the task if the src_cpu's capacity is reduced 10197 * because of other sched_class or IRQs if more capacity stays 10198 * available on dst_cpu. 10199 */ 10200 if ((env->idle != CPU_NOT_IDLE) && 10201 (env->src_rq->cfs.h_nr_running == 1)) { 10202 if ((check_cpu_capacity(env->src_rq, sd)) && 10203 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) 10204 return 1; 10205 } 10206 10207 if (env->migration_type == migrate_misfit) 10208 return 1; 10209 10210 return 0; 10211 } 10212 10213 static int active_load_balance_cpu_stop(void *data); 10214 10215 static int should_we_balance(struct lb_env *env) 10216 { 10217 struct sched_group *sg = env->sd->groups; 10218 int cpu; 10219 10220 /* 10221 * Ensure the balancing environment is consistent; can happen 10222 * when the softirq triggers 'during' hotplug. 10223 */ 10224 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) 10225 return 0; 10226 10227 /* 10228 * In the newly idle case, we will allow all the CPUs 10229 * to do the newly idle load balance. 10230 * 10231 * However, we bail out if we already have tasks or a wakeup pending, 10232 * to optimize wakeup latency. 
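 *
 * (Aside on the capacity check in need_active_balance() above, with
 * assumed numbers: capacity_of(src_cpu) = 800, capacity_of(dst_cpu) =
 * 1024 and sd->imbalance_pct = 117 give 800 * 117 = 93600, which is
 * below 1024 * 100 = 102400, so the lone CFS task on the
 * reduced-capacity source CPU is worth migrating to the idle
 * destination.)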
10233 */ 10234 if (env->idle == CPU_NEWLY_IDLE) { 10235 if (env->dst_rq->nr_running > 0 || env->dst_rq->ttwu_pending) 10236 return 0; 10237 return 1; 10238 } 10239 10240 /* Try to find first idle CPU */ 10241 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { 10242 if (!idle_cpu(cpu)) 10243 continue; 10244 10245 /* Are we the first idle CPU? */ 10246 return cpu == env->dst_cpu; 10247 } 10248 10249 /* Are we the first CPU of this group ? */ 10250 return group_balance_cpu(sg) == env->dst_cpu; 10251 } 10252 10253 /* 10254 * Check this_cpu to ensure it is balanced within domain. Attempt to move 10255 * tasks if there is an imbalance. 10256 */ 10257 static int load_balance(int this_cpu, struct rq *this_rq, 10258 struct sched_domain *sd, enum cpu_idle_type idle, 10259 int *continue_balancing) 10260 { 10261 int ld_moved, cur_ld_moved, active_balance = 0; 10262 struct sched_domain *sd_parent = sd->parent; 10263 struct sched_group *group; 10264 struct rq *busiest; 10265 struct rq_flags rf; 10266 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); 10267 10268 struct lb_env env = { 10269 .sd = sd, 10270 .dst_cpu = this_cpu, 10271 .dst_rq = this_rq, 10272 .dst_grpmask = sched_group_span(sd->groups), 10273 .idle = idle, 10274 .loop_break = sched_nr_migrate_break, 10275 .cpus = cpus, 10276 .fbq_type = all, 10277 .tasks = LIST_HEAD_INIT(env.tasks), 10278 }; 10279 10280 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); 10281 10282 schedstat_inc(sd->lb_count[idle]); 10283 10284 redo: 10285 if (!should_we_balance(&env)) { 10286 *continue_balancing = 0; 10287 goto out_balanced; 10288 } 10289 10290 group = find_busiest_group(&env); 10291 if (!group) { 10292 schedstat_inc(sd->lb_nobusyg[idle]); 10293 goto out_balanced; 10294 } 10295 10296 busiest = find_busiest_queue(&env, group); 10297 if (!busiest) { 10298 schedstat_inc(sd->lb_nobusyq[idle]); 10299 goto out_balanced; 10300 } 10301 10302 BUG_ON(busiest == env.dst_rq); 10303 10304 schedstat_add(sd->lb_imbalance[idle], env.imbalance); 10305 10306 env.src_cpu = busiest->cpu; 10307 env.src_rq = busiest; 10308 10309 ld_moved = 0; 10310 /* Clear this flag as soon as we find a pullable task */ 10311 env.flags |= LBF_ALL_PINNED; 10312 if (busiest->nr_running > 1) { 10313 /* 10314 * Attempt to move tasks. If find_busiest_group has found 10315 * an imbalance but busiest->nr_running <= 1, the group is 10316 * still unbalanced. ld_moved simply stays zero, so it is 10317 * correctly treated as an imbalance. 10318 */ 10319 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); 10320 10321 more_balance: 10322 rq_lock_irqsave(busiest, &rf); 10323 update_rq_clock(busiest); 10324 10325 /* 10326 * cur_ld_moved - load moved in current iteration 10327 * ld_moved - cumulative load moved across iterations 10328 */ 10329 cur_ld_moved = detach_tasks(&env); 10330 10331 /* 10332 * We've detached some tasks from busiest_rq. Every 10333 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely 10334 * unlock busiest->lock, and we are able to be sure 10335 * that nobody can manipulate the tasks in parallel. 10336 * See task_rq_lock() family for the details. 
10337 */ 10338 10339 rq_unlock(busiest, &rf); 10340 10341 if (cur_ld_moved) { 10342 attach_tasks(&env); 10343 ld_moved += cur_ld_moved; 10344 } 10345 10346 local_irq_restore(rf.flags); 10347 10348 if (env.flags & LBF_NEED_BREAK) { 10349 env.flags &= ~LBF_NEED_BREAK; 10350 goto more_balance; 10351 } 10352 10353 /* 10354 * Revisit (affine) tasks on src_cpu that couldn't be moved to 10355 * us and move them to an alternate dst_cpu in our sched_group 10356 * where they can run. The upper limit on how many times we 10357 * iterate on same src_cpu is dependent on number of CPUs in our 10358 * sched_group. 10359 * 10360 * This changes load balance semantics a bit on who can move 10361 * load to a given_cpu. In addition to the given_cpu itself 10362 * (or a ilb_cpu acting on its behalf where given_cpu is 10363 * nohz-idle), we now have balance_cpu in a position to move 10364 * load to given_cpu. In rare situations, this may cause 10365 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding 10366 * _independently_ and at _same_ time to move some load to 10367 * given_cpu) causing excess load to be moved to given_cpu. 10368 * This however should not happen so much in practice and 10369 * moreover subsequent load balance cycles should correct the 10370 * excess load moved. 10371 */ 10372 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { 10373 10374 /* Prevent to re-select dst_cpu via env's CPUs */ 10375 __cpumask_clear_cpu(env.dst_cpu, env.cpus); 10376 10377 env.dst_rq = cpu_rq(env.new_dst_cpu); 10378 env.dst_cpu = env.new_dst_cpu; 10379 env.flags &= ~LBF_DST_PINNED; 10380 env.loop = 0; 10381 env.loop_break = sched_nr_migrate_break; 10382 10383 /* 10384 * Go back to "more_balance" rather than "redo" since we 10385 * need to continue with same src_cpu. 10386 */ 10387 goto more_balance; 10388 } 10389 10390 /* 10391 * We failed to reach balance because of affinity. 10392 */ 10393 if (sd_parent) { 10394 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 10395 10396 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) 10397 *group_imbalance = 1; 10398 } 10399 10400 /* All tasks on this runqueue were pinned by CPU affinity */ 10401 if (unlikely(env.flags & LBF_ALL_PINNED)) { 10402 __cpumask_clear_cpu(cpu_of(busiest), cpus); 10403 /* 10404 * Attempting to continue load balancing at the current 10405 * sched_domain level only makes sense if there are 10406 * active CPUs remaining as possible busiest CPUs to 10407 * pull load from which are not contained within the 10408 * destination group that is receiving any migrated 10409 * load. 10410 */ 10411 if (!cpumask_subset(cpus, env.dst_grpmask)) { 10412 env.loop = 0; 10413 env.loop_break = sched_nr_migrate_break; 10414 goto redo; 10415 } 10416 goto out_all_pinned; 10417 } 10418 } 10419 10420 if (!ld_moved) { 10421 schedstat_inc(sd->lb_failed[idle]); 10422 /* 10423 * Increment the failure counter only on periodic balance. 10424 * We do not want newidle balance, which can be very 10425 * frequent, pollute the failure counter causing 10426 * excessive cache_hot migrations and active balances. 
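 *
 * (For scale: imbalanced_active_balance() above only allows an active
 * balance once nr_balance_failed exceeds cache_nice_tries + 2, so
 * with an assumed cache_nice_tries of 2 it takes more than four
 * failed periodic attempts before we resort to pushing a running
 * task off the busiest CPU.)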
10427 */ 10428 if (idle != CPU_NEWLY_IDLE) 10429 sd->nr_balance_failed++; 10430 10431 if (need_active_balance(&env)) { 10432 unsigned long flags; 10433 10434 raw_spin_rq_lock_irqsave(busiest, flags); 10435 10436 /* 10437 * Don't kick the active_load_balance_cpu_stop, 10438 * if the curr task on busiest CPU can't be 10439 * moved to this_cpu: 10440 */ 10441 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { 10442 raw_spin_rq_unlock_irqrestore(busiest, flags); 10443 goto out_one_pinned; 10444 } 10445 10446 /* Record that we found at least one task that could run on this_cpu */ 10447 env.flags &= ~LBF_ALL_PINNED; 10448 10449 /* 10450 * ->active_balance synchronizes accesses to 10451 * ->active_balance_work. Once set, it's cleared 10452 * only after active load balance is finished. 10453 */ 10454 if (!busiest->active_balance) { 10455 busiest->active_balance = 1; 10456 busiest->push_cpu = this_cpu; 10457 active_balance = 1; 10458 } 10459 raw_spin_rq_unlock_irqrestore(busiest, flags); 10460 10461 if (active_balance) { 10462 stop_one_cpu_nowait(cpu_of(busiest), 10463 active_load_balance_cpu_stop, busiest, 10464 &busiest->active_balance_work); 10465 } 10466 } 10467 } else { 10468 sd->nr_balance_failed = 0; 10469 } 10470 10471 if (likely(!active_balance) || need_active_balance(&env)) { 10472 /* We were unbalanced, so reset the balancing interval */ 10473 sd->balance_interval = sd->min_interval; 10474 } 10475 10476 goto out; 10477 10478 out_balanced: 10479 /* 10480 * We reach balance although we may have faced some affinity 10481 * constraints. Clear the imbalance flag only if other tasks got 10482 * a chance to move and fix the imbalance. 10483 */ 10484 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { 10485 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 10486 10487 if (*group_imbalance) 10488 *group_imbalance = 0; 10489 } 10490 10491 out_all_pinned: 10492 /* 10493 * We reach balance because all tasks are pinned at this level so 10494 * we can't migrate them. Let the imbalance flag set so parent level 10495 * can try to migrate them. 10496 */ 10497 schedstat_inc(sd->lb_balanced[idle]); 10498 10499 sd->nr_balance_failed = 0; 10500 10501 out_one_pinned: 10502 ld_moved = 0; 10503 10504 /* 10505 * newidle_balance() disregards balance intervals, so we could 10506 * repeatedly reach this code, which would lead to balance_interval 10507 * skyrocketing in a short amount of time. Skip the balance_interval 10508 * increase logic to avoid that. 10509 */ 10510 if (env.idle == CPU_NEWLY_IDLE) 10511 goto out; 10512 10513 /* tune up the balancing interval */ 10514 if ((env.flags & LBF_ALL_PINNED && 10515 sd->balance_interval < MAX_PINNED_INTERVAL) || 10516 sd->balance_interval < sd->max_interval) 10517 sd->balance_interval *= 2; 10518 out: 10519 return ld_moved; 10520 } 10521 10522 static inline unsigned long 10523 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) 10524 { 10525 unsigned long interval = sd->balance_interval; 10526 10527 if (cpu_busy) 10528 interval *= sd->busy_factor; 10529 10530 /* scale ms to jiffies */ 10531 interval = msecs_to_jiffies(interval); 10532 10533 /* 10534 * Reduce likelihood of busy balancing at higher domains racing with 10535 * balancing at lower domains by preventing their balancing periods 10536 * from being multiples of each other. 
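 *
 * Worked example with assumed defaults (balance_interval = 8 ms,
 * busy_factor = 16, HZ = 250): a busy CPU gets 8 * 16 = 128 ms, i.e.
 * msecs_to_jiffies(128) = 32 jiffies, minus 1 -> 31 jiffies, then
 * clamped to [1, max_load_balance_interval].  The "- 1" keeps the
 * busy period from being an exact multiple of the idle one.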
10537 */ 10538 if (cpu_busy) 10539 interval -= 1; 10540 10541 interval = clamp(interval, 1UL, max_load_balance_interval); 10542 10543 return interval; 10544 } 10545 10546 static inline void 10547 update_next_balance(struct sched_domain *sd, unsigned long *next_balance) 10548 { 10549 unsigned long interval, next; 10550 10551 /* used by idle balance, so cpu_busy = 0 */ 10552 interval = get_sd_balance_interval(sd, 0); 10553 next = sd->last_balance + interval; 10554 10555 if (time_after(*next_balance, next)) 10556 *next_balance = next; 10557 } 10558 10559 /* 10560 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes 10561 * running tasks off the busiest CPU onto idle CPUs. It requires at 10562 * least 1 task to be running on each physical CPU where possible, and 10563 * avoids physical / logical imbalances. 10564 */ 10565 static int active_load_balance_cpu_stop(void *data) 10566 { 10567 struct rq *busiest_rq = data; 10568 int busiest_cpu = cpu_of(busiest_rq); 10569 int target_cpu = busiest_rq->push_cpu; 10570 struct rq *target_rq = cpu_rq(target_cpu); 10571 struct sched_domain *sd; 10572 struct task_struct *p = NULL; 10573 struct rq_flags rf; 10574 10575 rq_lock_irq(busiest_rq, &rf); 10576 /* 10577 * Between queueing the stop-work and running it is a hole in which 10578 * CPUs can become inactive. We should not move tasks from or to 10579 * inactive CPUs. 10580 */ 10581 if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) 10582 goto out_unlock; 10583 10584 /* Make sure the requested CPU hasn't gone down in the meantime: */ 10585 if (unlikely(busiest_cpu != smp_processor_id() || 10586 !busiest_rq->active_balance)) 10587 goto out_unlock; 10588 10589 /* Is there any task to move? */ 10590 if (busiest_rq->nr_running <= 1) 10591 goto out_unlock; 10592 10593 /* 10594 * This condition is "impossible", if it occurs 10595 * we need to fix it. Originally reported by 10596 * Bjorn Helgaas on a 128-CPU setup. 10597 */ 10598 BUG_ON(busiest_rq == target_rq); 10599 10600 /* Search for an sd spanning us and the target CPU. */ 10601 rcu_read_lock(); 10602 for_each_domain(target_cpu, sd) { 10603 if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) 10604 break; 10605 } 10606 10607 if (likely(sd)) { 10608 struct lb_env env = { 10609 .sd = sd, 10610 .dst_cpu = target_cpu, 10611 .dst_rq = target_rq, 10612 .src_cpu = busiest_rq->cpu, 10613 .src_rq = busiest_rq, 10614 .idle = CPU_IDLE, 10615 .flags = LBF_ACTIVE_LB, 10616 }; 10617 10618 schedstat_inc(sd->alb_count); 10619 update_rq_clock(busiest_rq); 10620 10621 p = detach_one_task(&env); 10622 if (p) { 10623 schedstat_inc(sd->alb_pushed); 10624 /* Active balancing done, reset the failure counter. */ 10625 sd->nr_balance_failed = 0; 10626 } else { 10627 schedstat_inc(sd->alb_failed); 10628 } 10629 } 10630 rcu_read_unlock(); 10631 out_unlock: 10632 busiest_rq->active_balance = 0; 10633 rq_unlock(busiest_rq, &rf); 10634 10635 if (p) 10636 attach_one_task(target_rq, p); 10637 10638 local_irq_enable(); 10639 10640 return 0; 10641 } 10642 10643 static DEFINE_SPINLOCK(balancing); 10644 10645 /* 10646 * Scale the max load_balance interval with the number of CPUs in the system. 10647 * This trades load-balance latency on larger machines for less cross talk. 
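 *
 * Example with assumed values: HZ = 250 and 16 online CPUs give
 * max_load_balance_interval = 250 * 16 / 10 = 400 jiffies, roughly
 * 1.6 seconds at the top of the busy scaling range.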
10648 */ 10649 void update_max_interval(void) 10650 { 10651 max_load_balance_interval = HZ*num_online_cpus()/10; 10652 } 10653 10654 static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost) 10655 { 10656 if (cost > sd->max_newidle_lb_cost) { 10657 /* 10658 * Track max cost of a domain to make sure to not delay the 10659 * next wakeup on the CPU. 10660 */ 10661 sd->max_newidle_lb_cost = cost; 10662 sd->last_decay_max_lb_cost = jiffies; 10663 } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) { 10664 /* 10665 * Decay the newidle max times by ~1% per second to ensure that 10666 * it is not outdated and the current max cost is actually 10667 * shorter. 10668 */ 10669 sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256; 10670 sd->last_decay_max_lb_cost = jiffies; 10671 10672 return true; 10673 } 10674 10675 return false; 10676 } 10677 10678 /* 10679 * It checks each scheduling domain to see if it is due to be balanced, 10680 * and initiates a balancing operation if so. 10681 * 10682 * Balancing parameters are set up in init_sched_domains. 10683 */ 10684 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) 10685 { 10686 int continue_balancing = 1; 10687 int cpu = rq->cpu; 10688 int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu); 10689 unsigned long interval; 10690 struct sched_domain *sd; 10691 /* Earliest time when we have to do rebalance again */ 10692 unsigned long next_balance = jiffies + 60*HZ; 10693 int update_next_balance = 0; 10694 int need_serialize, need_decay = 0; 10695 u64 max_cost = 0; 10696 10697 rcu_read_lock(); 10698 for_each_domain(cpu, sd) { 10699 /* 10700 * Decay the newidle max times here because this is a regular 10701 * visit to all the domains. 10702 */ 10703 need_decay = update_newidle_cost(sd, 0); 10704 max_cost += sd->max_newidle_lb_cost; 10705 10706 /* 10707 * Stop the load balance at this level. There is another 10708 * CPU in our sched group which is doing load balancing more 10709 * actively. 10710 */ 10711 if (!continue_balancing) { 10712 if (need_decay) 10713 continue; 10714 break; 10715 } 10716 10717 interval = get_sd_balance_interval(sd, busy); 10718 10719 need_serialize = sd->flags & SD_SERIALIZE; 10720 if (need_serialize) { 10721 if (!spin_trylock(&balancing)) 10722 goto out; 10723 } 10724 10725 if (time_after_eq(jiffies, sd->last_balance + interval)) { 10726 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { 10727 /* 10728 * The LBF_DST_PINNED logic could have changed 10729 * env->dst_cpu, so we can't know our idle 10730 * state even if we migrated tasks. Update it. 10731 */ 10732 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; 10733 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu); 10734 } 10735 sd->last_balance = jiffies; 10736 interval = get_sd_balance_interval(sd, busy); 10737 } 10738 if (need_serialize) 10739 spin_unlock(&balancing); 10740 out: 10741 if (time_after(next_balance, sd->last_balance + interval)) { 10742 next_balance = sd->last_balance + interval; 10743 update_next_balance = 1; 10744 } 10745 } 10746 if (need_decay) { 10747 /* 10748 * Ensure the rq-wide value also decays but keep it at a 10749 * reasonable floor to avoid funnies with rq->avg_idle. 10750 */ 10751 rq->max_idle_balance_cost = 10752 max((u64)sysctl_sched_migration_cost, max_cost); 10753 } 10754 rcu_read_unlock(); 10755 10756 /* 10757 * next_balance will be updated only when there is a need. 10758 * When the cpu is attached to null domain for ex, it will not be 10759 * updated. 
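 *
 * (Aside on the ~1%/sec decay in update_newidle_cost() above, with an
 * assumed cost: a max_newidle_lb_cost of 100000 ns decays to
 * 100000 * 253 / 256 = 98828 ns after a second without a larger
 * observation, so a stale worst case fades out over a few minutes
 * instead of suppressing newidle balancing forever.)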
10760 */ 10761 if (likely(update_next_balance)) 10762 rq->next_balance = next_balance; 10763 10764 } 10765 10766 static inline int on_null_domain(struct rq *rq) 10767 { 10768 return unlikely(!rcu_dereference_sched(rq->sd)); 10769 } 10770 10771 #ifdef CONFIG_NO_HZ_COMMON 10772 /* 10773 * idle load balancing details 10774 * - When one of the busy CPUs notice that there may be an idle rebalancing 10775 * needed, they will kick the idle load balancer, which then does idle 10776 * load balancing for all the idle CPUs. 10777 * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED not set 10778 * anywhere yet. 10779 */ 10780 10781 static inline int find_new_ilb(void) 10782 { 10783 int ilb; 10784 const struct cpumask *hk_mask; 10785 10786 hk_mask = housekeeping_cpumask(HK_TYPE_MISC); 10787 10788 for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) { 10789 10790 if (ilb == smp_processor_id()) 10791 continue; 10792 10793 if (idle_cpu(ilb)) 10794 return ilb; 10795 } 10796 10797 return nr_cpu_ids; 10798 } 10799 10800 /* 10801 * Kick a CPU to do the nohz balancing, if it is time for it. We pick any 10802 * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one). 10803 */ 10804 static void kick_ilb(unsigned int flags) 10805 { 10806 int ilb_cpu; 10807 10808 /* 10809 * Increase nohz.next_balance only when if full ilb is triggered but 10810 * not if we only update stats. 10811 */ 10812 if (flags & NOHZ_BALANCE_KICK) 10813 nohz.next_balance = jiffies+1; 10814 10815 ilb_cpu = find_new_ilb(); 10816 10817 if (ilb_cpu >= nr_cpu_ids) 10818 return; 10819 10820 /* 10821 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets 10822 * the first flag owns it; cleared by nohz_csd_func(). 10823 */ 10824 flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu)); 10825 if (flags & NOHZ_KICK_MASK) 10826 return; 10827 10828 /* 10829 * This way we generate an IPI on the target CPU which 10830 * is idle. And the softirq performing nohz idle load balance 10831 * will be run before returning from the IPI. 10832 */ 10833 smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd); 10834 } 10835 10836 /* 10837 * Current decision point for kicking the idle load balancer in the presence 10838 * of idle CPUs in the system. 10839 */ 10840 static void nohz_balancer_kick(struct rq *rq) 10841 { 10842 unsigned long now = jiffies; 10843 struct sched_domain_shared *sds; 10844 struct sched_domain *sd; 10845 int nr_busy, i, cpu = rq->cpu; 10846 unsigned int flags = 0; 10847 10848 if (unlikely(rq->idle_balance)) 10849 return; 10850 10851 /* 10852 * We may be recently in ticked or tickless idle mode. At the first 10853 * busy tick after returning from idle, we will update the busy stats. 10854 */ 10855 nohz_balance_exit_idle(rq); 10856 10857 /* 10858 * None are in tickless mode and hence no need for NOHZ idle load 10859 * balancing. 10860 */ 10861 if (likely(!atomic_read(&nohz.nr_cpus))) 10862 return; 10863 10864 if (READ_ONCE(nohz.has_blocked) && 10865 time_after(now, READ_ONCE(nohz.next_blocked))) 10866 flags = NOHZ_STATS_KICK; 10867 10868 if (time_before(now, nohz.next_balance)) 10869 goto out; 10870 10871 if (rq->nr_running >= 2) { 10872 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10873 goto out; 10874 } 10875 10876 rcu_read_lock(); 10877 10878 sd = rcu_dereference(rq->sd); 10879 if (sd) { 10880 /* 10881 * If there's a CFS task and the current CPU has reduced 10882 * capacity; kick the ILB to see if there's a better CPU to run 10883 * on. 
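 *
 * (Aside on the reduced-capacity kick below: check_cpu_capacity(),
 * defined earlier in this file, treats capacity as reduced roughly
 * when cpu_capacity * imbalance_pct < cpu_capacity_orig * 100.  With
 * assumed numbers, capacity_orig = 1024 squeezed down to 800 by
 * RT/IRQ pressure and imbalance_pct = 117: 800 * 117 = 93600 <
 * 1024 * 100 = 102400, so a single CFS task here justifies kicking
 * the ILB.)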
10884 */ 10885 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { 10886 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10887 goto unlock; 10888 } 10889 } 10890 10891 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); 10892 if (sd) { 10893 /* 10894 * When ASYM_PACKING; see if there's a more preferred CPU 10895 * currently idle; in which case, kick the ILB to move tasks 10896 * around. 10897 */ 10898 for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { 10899 if (sched_asym_prefer(i, cpu)) { 10900 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10901 goto unlock; 10902 } 10903 } 10904 } 10905 10906 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu)); 10907 if (sd) { 10908 /* 10909 * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU 10910 * to run the misfit task on. 10911 */ 10912 if (check_misfit_status(rq, sd)) { 10913 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10914 goto unlock; 10915 } 10916 10917 /* 10918 * For asymmetric systems, we do not want to nicely balance 10919 * cache use, instead we want to embrace asymmetry and only 10920 * ensure tasks have enough CPU capacity. 10921 * 10922 * Skip the LLC logic because it's not relevant in that case. 10923 */ 10924 goto unlock; 10925 } 10926 10927 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 10928 if (sds) { 10929 /* 10930 * If there is an imbalance between LLC domains (IOW we could 10931 * increase the overall cache use), we need some less-loaded LLC 10932 * domain to pull some load. Likewise, we may need to spread 10933 * load within the current LLC domain (e.g. packed SMT cores but 10934 * other CPUs are idle). We can't really know from here how busy 10935 * the others are - so just get a nohz balance going if it looks 10936 * like this LLC domain has tasks we could move. 10937 */ 10938 nr_busy = atomic_read(&sds->nr_busy_cpus); 10939 if (nr_busy > 1) { 10940 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; 10941 goto unlock; 10942 } 10943 } 10944 unlock: 10945 rcu_read_unlock(); 10946 out: 10947 if (READ_ONCE(nohz.needs_update)) 10948 flags |= NOHZ_NEXT_KICK; 10949 10950 if (flags) 10951 kick_ilb(flags); 10952 } 10953 10954 static void set_cpu_sd_state_busy(int cpu) 10955 { 10956 struct sched_domain *sd; 10957 10958 rcu_read_lock(); 10959 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 10960 10961 if (!sd || !sd->nohz_idle) 10962 goto unlock; 10963 sd->nohz_idle = 0; 10964 10965 atomic_inc(&sd->shared->nr_busy_cpus); 10966 unlock: 10967 rcu_read_unlock(); 10968 } 10969 10970 void nohz_balance_exit_idle(struct rq *rq) 10971 { 10972 SCHED_WARN_ON(rq != this_rq()); 10973 10974 if (likely(!rq->nohz_tick_stopped)) 10975 return; 10976 10977 rq->nohz_tick_stopped = 0; 10978 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); 10979 atomic_dec(&nohz.nr_cpus); 10980 10981 set_cpu_sd_state_busy(rq->cpu); 10982 } 10983 10984 static void set_cpu_sd_state_idle(int cpu) 10985 { 10986 struct sched_domain *sd; 10987 10988 rcu_read_lock(); 10989 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 10990 10991 if (!sd || sd->nohz_idle) 10992 goto unlock; 10993 sd->nohz_idle = 1; 10994 10995 atomic_dec(&sd->shared->nr_busy_cpus); 10996 unlock: 10997 rcu_read_unlock(); 10998 } 10999 11000 /* 11001 * This routine will record that the CPU is going idle with tick stopped. 11002 * This info will be used in performing idle load balancing in the future. 
11003 */ 11004 void nohz_balance_enter_idle(int cpu) 11005 { 11006 struct rq *rq = cpu_rq(cpu); 11007 11008 SCHED_WARN_ON(cpu != smp_processor_id()); 11009 11010 /* If this CPU is going down, then nothing needs to be done: */ 11011 if (!cpu_active(cpu)) 11012 return; 11013 11014 /* Spare idle load balancing on CPUs that don't want to be disturbed: */ 11015 if (!housekeeping_cpu(cpu, HK_TYPE_SCHED)) 11016 return; 11017 11018 /* 11019 * Can be set safely without rq->lock held 11020 * If a clear happens, it will have evaluated last additions because 11021 * rq->lock is held during the check and the clear 11022 */ 11023 rq->has_blocked_load = 1; 11024 11025 /* 11026 * The tick is still stopped but load could have been added in the 11027 * meantime. We set the nohz.has_blocked flag to trig a check of the 11028 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear 11029 * of nohz.has_blocked can only happen after checking the new load 11030 */ 11031 if (rq->nohz_tick_stopped) 11032 goto out; 11033 11034 /* If we're a completely isolated CPU, we don't play: */ 11035 if (on_null_domain(rq)) 11036 return; 11037 11038 rq->nohz_tick_stopped = 1; 11039 11040 cpumask_set_cpu(cpu, nohz.idle_cpus_mask); 11041 atomic_inc(&nohz.nr_cpus); 11042 11043 /* 11044 * Ensures that if nohz_idle_balance() fails to observe our 11045 * @idle_cpus_mask store, it must observe the @has_blocked 11046 * and @needs_update stores. 11047 */ 11048 smp_mb__after_atomic(); 11049 11050 set_cpu_sd_state_idle(cpu); 11051 11052 WRITE_ONCE(nohz.needs_update, 1); 11053 out: 11054 /* 11055 * Each time a cpu enter idle, we assume that it has blocked load and 11056 * enable the periodic update of the load of idle cpus 11057 */ 11058 WRITE_ONCE(nohz.has_blocked, 1); 11059 } 11060 11061 static bool update_nohz_stats(struct rq *rq) 11062 { 11063 unsigned int cpu = rq->cpu; 11064 11065 if (!rq->has_blocked_load) 11066 return false; 11067 11068 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) 11069 return false; 11070 11071 if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick))) 11072 return true; 11073 11074 update_blocked_averages(cpu); 11075 11076 return rq->has_blocked_load; 11077 } 11078 11079 /* 11080 * Internal function that runs load balance for all idle cpus. The load balance 11081 * can be a simple update of blocked load or a complete load balance with 11082 * tasks movement depending of flags. 11083 */ 11084 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags, 11085 enum cpu_idle_type idle) 11086 { 11087 /* Earliest time when we have to do rebalance again */ 11088 unsigned long now = jiffies; 11089 unsigned long next_balance = now + 60*HZ; 11090 bool has_blocked_load = false; 11091 int update_next_balance = 0; 11092 int this_cpu = this_rq->cpu; 11093 int balance_cpu; 11094 struct rq *rq; 11095 11096 SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); 11097 11098 /* 11099 * We assume there will be no idle load after this update and clear 11100 * the has_blocked flag. If a cpu enters idle in the mean time, it will 11101 * set the has_blocked flag and trigger another update of idle load. 11102 * Because a cpu that becomes idle, is added to idle_cpus_mask before 11103 * setting the flag, we are sure to not clear the state and not 11104 * check the load of an idle cpu. 11105 * 11106 * Same applies to idle_cpus_mask vs needs_update. 
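 *
 * Rough ordering sketch of the two sides (illustrative only):
 *
 *   idle CPU (nohz_balance_enter_idle)   balancer (_nohz_idle_balance)
 *   set cpu in nohz.idle_cpus_mask       WRITE_ONCE(nohz.has_blocked, 0)
 *   smp_mb__after_atomic()               smp_mb()
 *   WRITE_ONCE(nohz.has_blocked, 1)      scan nohz.idle_cpus_mask
 *
 * Either the balancer observes the newly idle CPU in the mask, or it
 * is guaranteed to observe the has_blocked/needs_update stores, so a
 * later pass is re-armed and blocked load is never silently dropped.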
11107 */ 11108 if (flags & NOHZ_STATS_KICK) 11109 WRITE_ONCE(nohz.has_blocked, 0); 11110 if (flags & NOHZ_NEXT_KICK) 11111 WRITE_ONCE(nohz.needs_update, 0); 11112 11113 /* 11114 * Ensures that if we miss the CPU, we must see the has_blocked 11115 * store from nohz_balance_enter_idle(). 11116 */ 11117 smp_mb(); 11118 11119 /* 11120 * Start with the next CPU after this_cpu so we will end with this_cpu and let a 11121 * chance for other idle cpu to pull load. 11122 */ 11123 for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) { 11124 if (!idle_cpu(balance_cpu)) 11125 continue; 11126 11127 /* 11128 * If this CPU gets work to do, stop the load balancing 11129 * work being done for other CPUs. Next load 11130 * balancing owner will pick it up. 11131 */ 11132 if (need_resched()) { 11133 if (flags & NOHZ_STATS_KICK) 11134 has_blocked_load = true; 11135 if (flags & NOHZ_NEXT_KICK) 11136 WRITE_ONCE(nohz.needs_update, 1); 11137 goto abort; 11138 } 11139 11140 rq = cpu_rq(balance_cpu); 11141 11142 if (flags & NOHZ_STATS_KICK) 11143 has_blocked_load |= update_nohz_stats(rq); 11144 11145 /* 11146 * If time for next balance is due, 11147 * do the balance. 11148 */ 11149 if (time_after_eq(jiffies, rq->next_balance)) { 11150 struct rq_flags rf; 11151 11152 rq_lock_irqsave(rq, &rf); 11153 update_rq_clock(rq); 11154 rq_unlock_irqrestore(rq, &rf); 11155 11156 if (flags & NOHZ_BALANCE_KICK) 11157 rebalance_domains(rq, CPU_IDLE); 11158 } 11159 11160 if (time_after(next_balance, rq->next_balance)) { 11161 next_balance = rq->next_balance; 11162 update_next_balance = 1; 11163 } 11164 } 11165 11166 /* 11167 * next_balance will be updated only when there is a need. 11168 * When the CPU is attached to null domain for ex, it will not be 11169 * updated. 11170 */ 11171 if (likely(update_next_balance)) 11172 nohz.next_balance = next_balance; 11173 11174 if (flags & NOHZ_STATS_KICK) 11175 WRITE_ONCE(nohz.next_blocked, 11176 now + msecs_to_jiffies(LOAD_AVG_PERIOD)); 11177 11178 abort: 11179 /* There is still blocked load, enable periodic update */ 11180 if (has_blocked_load) 11181 WRITE_ONCE(nohz.has_blocked, 1); 11182 } 11183 11184 /* 11185 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the 11186 * rebalancing for all the cpus for whom scheduler ticks are stopped. 11187 */ 11188 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) 11189 { 11190 unsigned int flags = this_rq->nohz_idle_balance; 11191 11192 if (!flags) 11193 return false; 11194 11195 this_rq->nohz_idle_balance = 0; 11196 11197 if (idle != CPU_IDLE) 11198 return false; 11199 11200 _nohz_idle_balance(this_rq, flags, idle); 11201 11202 return true; 11203 } 11204 11205 /* 11206 * Check if we need to run the ILB for updating blocked load before entering 11207 * idle state. 11208 */ 11209 void nohz_run_idle_balance(int cpu) 11210 { 11211 unsigned int flags; 11212 11213 flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu)); 11214 11215 /* 11216 * Update the blocked load only if no SCHED_SOFTIRQ is about to happen 11217 * (ie NOHZ_STATS_KICK set) and will do the same. 
11218  */
11219 	if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
11220 		_nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK, CPU_IDLE);
11221 }
11222 
11223 static void nohz_newidle_balance(struct rq *this_rq)
11224 {
11225 	int this_cpu = this_rq->cpu;
11226 
11227 	/*
11228 	 * This CPU doesn't want to be disturbed by scheduler
11229 	 * housekeeping
11230 	 */
11231 	if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED))
11232 		return;
11233 
11234 	/* Will wake up very soon. No time for doing anything else */
11235 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
11236 		return;
11237 
11238 	/* Don't need to update blocked load of idle CPUs */
11239 	if (!READ_ONCE(nohz.has_blocked) ||
11240 	    time_before(jiffies, READ_ONCE(nohz.next_blocked)))
11241 		return;
11242 
11243 	/*
11244 	 * Set the need to trigger ILB in order to update blocked load
11245 	 * before entering idle state.
11246 	 */
11247 	atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
11248 }
11249 
11250 #else /* !CONFIG_NO_HZ_COMMON */
11251 static inline void nohz_balancer_kick(struct rq *rq) { }
11252 
11253 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
11254 {
11255 	return false;
11256 }
11257 
11258 static inline void nohz_newidle_balance(struct rq *this_rq) { }
11259 #endif /* CONFIG_NO_HZ_COMMON */
11260 
11261 /*
11262  * newidle_balance is called by schedule() if this_cpu is about to become
11263  * idle. Attempts to pull tasks from other CPUs.
11264  *
11265  * Returns:
11266  *   < 0 - we released the lock and there are !fair tasks present
11267  *     0 - failed, no new tasks
11268  *   > 0 - success, new (fair) tasks present
11269  */
11270 static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
11271 {
11272 	unsigned long next_balance = jiffies + HZ;
11273 	int this_cpu = this_rq->cpu;
11274 	u64 t0, t1, curr_cost = 0;
11275 	struct sched_domain *sd;
11276 	int pulled_task = 0;
11277 
11278 	update_misfit_status(NULL, this_rq);
11279 
11280 	/*
11281 	 * There is a task waiting to run. No need to search for one.
11282 	 * Return 0; the task will be enqueued when switching to idle.
11283 	 */
11284 	if (this_rq->ttwu_pending)
11285 		return 0;
11286 
11287 	/*
11288 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
11289 	 * measure the duration of idle_balance() as idle time.
11290 	 */
11291 	this_rq->idle_stamp = rq_clock(this_rq);
11292 
11293 	/*
11294 	 * Do not pull tasks towards !active CPUs...
11295 	 */
11296 	if (!cpu_active(this_cpu))
11297 		return 0;
11298 
11299 	/*
11300 	 * This is OK, because current is on_cpu, which avoids it being picked
11301 	 * for load-balance and preemption/IRQs are still disabled avoiding
11302 	 * further scheduler activity on it and we're being very careful to
11303 	 * re-start the picking loop.
11304 */ 11305 rq_unpin_lock(this_rq, rf); 11306 11307 rcu_read_lock(); 11308 sd = rcu_dereference_check_sched_domain(this_rq->sd); 11309 11310 if (!READ_ONCE(this_rq->rd->overload) || 11311 (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) { 11312 11313 if (sd) 11314 update_next_balance(sd, &next_balance); 11315 rcu_read_unlock(); 11316 11317 goto out; 11318 } 11319 rcu_read_unlock(); 11320 11321 raw_spin_rq_unlock(this_rq); 11322 11323 t0 = sched_clock_cpu(this_cpu); 11324 update_blocked_averages(this_cpu); 11325 11326 rcu_read_lock(); 11327 for_each_domain(this_cpu, sd) { 11328 int continue_balancing = 1; 11329 u64 domain_cost; 11330 11331 update_next_balance(sd, &next_balance); 11332 11333 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) 11334 break; 11335 11336 if (sd->flags & SD_BALANCE_NEWIDLE) { 11337 11338 pulled_task = load_balance(this_cpu, this_rq, 11339 sd, CPU_NEWLY_IDLE, 11340 &continue_balancing); 11341 11342 t1 = sched_clock_cpu(this_cpu); 11343 domain_cost = t1 - t0; 11344 update_newidle_cost(sd, domain_cost); 11345 11346 curr_cost += domain_cost; 11347 t0 = t1; 11348 } 11349 11350 /* 11351 * Stop searching for tasks to pull if there are 11352 * now runnable tasks on this rq. 11353 */ 11354 if (pulled_task || this_rq->nr_running > 0 || 11355 this_rq->ttwu_pending) 11356 break; 11357 } 11358 rcu_read_unlock(); 11359 11360 raw_spin_rq_lock(this_rq); 11361 11362 if (curr_cost > this_rq->max_idle_balance_cost) 11363 this_rq->max_idle_balance_cost = curr_cost; 11364 11365 /* 11366 * While browsing the domains, we released the rq lock, a task could 11367 * have been enqueued in the meantime. Since we're not going idle, 11368 * pretend we pulled a task. 11369 */ 11370 if (this_rq->cfs.h_nr_running && !pulled_task) 11371 pulled_task = 1; 11372 11373 /* Is there a task of a high priority class? */ 11374 if (this_rq->nr_running != this_rq->cfs.h_nr_running) 11375 pulled_task = -1; 11376 11377 out: 11378 /* Move the next balance forward */ 11379 if (time_after(this_rq->next_balance, next_balance)) 11380 this_rq->next_balance = next_balance; 11381 11382 if (pulled_task) 11383 this_rq->idle_stamp = 0; 11384 else 11385 nohz_newidle_balance(this_rq); 11386 11387 rq_repin_lock(this_rq, rf); 11388 11389 return pulled_task; 11390 } 11391 11392 /* 11393 * run_rebalance_domains is triggered when needed from the scheduler tick. 11394 * Also triggered for nohz idle balancing (with nohz_balancing_kick set). 11395 */ 11396 static __latent_entropy void run_rebalance_domains(struct softirq_action *h) 11397 { 11398 struct rq *this_rq = this_rq(); 11399 enum cpu_idle_type idle = this_rq->idle_balance ? 11400 CPU_IDLE : CPU_NOT_IDLE; 11401 11402 /* 11403 * If this CPU has a pending nohz_balance_kick, then do the 11404 * balancing on behalf of the other idle CPUs whose ticks are 11405 * stopped. Do nohz_idle_balance *before* rebalance_domains to 11406 * give the idle CPUs a chance to load balance. Else we may 11407 * load balance only within the local sched_domain hierarchy 11408 * and abort nohz_idle_balance altogether if we pull some load. 11409 */ 11410 if (nohz_idle_balance(this_rq, idle)) 11411 return; 11412 11413 /* normal load balance */ 11414 update_blocked_averages(this_rq->cpu); 11415 rebalance_domains(this_rq, idle); 11416 } 11417 11418 /* 11419 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 
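 *
 * (Aside on the cost cut-offs in newidle_balance() above, with
 * assumed numbers: if this_rq->avg_idle is 300000 ns but the first
 * domain's max_newidle_lb_cost is 500000 ns, the expected idle time
 * is shorter than a balance attempt has historically cost, so the
 * domain walk is skipped and only next_balance is refreshed.)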
11420 */ 11421 void trigger_load_balance(struct rq *rq) 11422 { 11423 /* 11424 * Don't need to rebalance while attached to NULL domain or 11425 * runqueue CPU is not active 11426 */ 11427 if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq)))) 11428 return; 11429 11430 if (time_after_eq(jiffies, rq->next_balance)) 11431 raise_softirq(SCHED_SOFTIRQ); 11432 11433 nohz_balancer_kick(rq); 11434 } 11435 11436 static void rq_online_fair(struct rq *rq) 11437 { 11438 update_sysctl(); 11439 11440 update_runtime_enabled(rq); 11441 } 11442 11443 static void rq_offline_fair(struct rq *rq) 11444 { 11445 update_sysctl(); 11446 11447 /* Ensure any throttled groups are reachable by pick_next_task */ 11448 unthrottle_offline_cfs_rqs(rq); 11449 } 11450 11451 #endif /* CONFIG_SMP */ 11452 11453 #ifdef CONFIG_SCHED_CORE 11454 static inline bool 11455 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) 11456 { 11457 u64 slice = sched_slice(cfs_rq_of(se), se); 11458 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; 11459 11460 return (rtime * min_nr_tasks > slice); 11461 } 11462 11463 #define MIN_NR_TASKS_DURING_FORCEIDLE 2 11464 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) 11465 { 11466 if (!sched_core_enabled(rq)) 11467 return; 11468 11469 /* 11470 * If runqueue has only one task which used up its slice and 11471 * if the sibling is forced idle, then trigger schedule to 11472 * give forced idle task a chance. 11473 * 11474 * sched_slice() considers only this active rq and it gets the 11475 * whole slice. But during force idle, we have siblings acting 11476 * like a single runqueue and hence we need to consider runnable 11477 * tasks on this CPU and the forced idle CPU. Ideally, we should 11478 * go through the forced idle rq, but that would be a perf hit. 11479 * We can assume that the forced idle CPU has at least 11480 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check 11481 * if we need to give up the CPU. 11482 */ 11483 if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 && 11484 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) 11485 resched_curr(rq); 11486 } 11487 11488 /* 11489 * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed. 11490 */ 11491 static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle) 11492 { 11493 for_each_sched_entity(se) { 11494 struct cfs_rq *cfs_rq = cfs_rq_of(se); 11495 11496 if (forceidle) { 11497 if (cfs_rq->forceidle_seq == fi_seq) 11498 break; 11499 cfs_rq->forceidle_seq = fi_seq; 11500 } 11501 11502 cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime; 11503 } 11504 } 11505 11506 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) 11507 { 11508 struct sched_entity *se = &p->se; 11509 11510 if (p->sched_class != &fair_sched_class) 11511 return; 11512 11513 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); 11514 } 11515 11516 bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi) 11517 { 11518 struct rq *rq = task_rq(a); 11519 struct sched_entity *sea = &a->se; 11520 struct sched_entity *seb = &b->se; 11521 struct cfs_rq *cfs_rqa; 11522 struct cfs_rq *cfs_rqb; 11523 s64 delta; 11524 11525 SCHED_WARN_ON(task_rq(b)->core != rq->core); 11526 11527 #ifdef CONFIG_FAIR_GROUP_SCHED 11528 /* 11529 * Find an se in the hierarchy for tasks a and b, such that the se's 11530 * are immediate siblings. 
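 *
 * For illustration (cgroup layout assumed): if task a runs in nested
 * group A/B (entity depth 2) and task b runs in the root group
 * (depth 0), sea is walked up twice, to B's and then A's group
 * entity; at that point both entities sit on the root cfs_rq and
 * their vruntimes can be compared directly below.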
11531 */ 11532 while (sea->cfs_rq->tg != seb->cfs_rq->tg) { 11533 int sea_depth = sea->depth; 11534 int seb_depth = seb->depth; 11535 11536 if (sea_depth >= seb_depth) 11537 sea = parent_entity(sea); 11538 if (sea_depth <= seb_depth) 11539 seb = parent_entity(seb); 11540 } 11541 11542 se_fi_update(sea, rq->core->core_forceidle_seq, in_fi); 11543 se_fi_update(seb, rq->core->core_forceidle_seq, in_fi); 11544 11545 cfs_rqa = sea->cfs_rq; 11546 cfs_rqb = seb->cfs_rq; 11547 #else 11548 cfs_rqa = &task_rq(a)->cfs; 11549 cfs_rqb = &task_rq(b)->cfs; 11550 #endif 11551 11552 /* 11553 * Find delta after normalizing se's vruntime with its cfs_rq's 11554 * min_vruntime_fi, which would have been updated in prior calls 11555 * to se_fi_update(). 11556 */ 11557 delta = (s64)(sea->vruntime - seb->vruntime) + 11558 (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi); 11559 11560 return delta > 0; 11561 } 11562 #else 11563 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {} 11564 #endif 11565 11566 /* 11567 * scheduler tick hitting a task of our scheduling class. 11568 * 11569 * NOTE: This function can be called remotely by the tick offload that 11570 * goes along full dynticks. Therefore no local assumption can be made 11571 * and everything must be accessed through the @rq and @curr passed in 11572 * parameters. 11573 */ 11574 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) 11575 { 11576 struct cfs_rq *cfs_rq; 11577 struct sched_entity *se = &curr->se; 11578 11579 for_each_sched_entity(se) { 11580 cfs_rq = cfs_rq_of(se); 11581 entity_tick(cfs_rq, se, queued); 11582 } 11583 11584 if (static_branch_unlikely(&sched_numa_balancing)) 11585 task_tick_numa(rq, curr); 11586 11587 update_misfit_status(curr, rq); 11588 update_overutilized_status(task_rq(curr)); 11589 11590 task_tick_core(rq, curr); 11591 } 11592 11593 /* 11594 * called on fork with the child task as argument from the parent's context 11595 * - child not yet on the tasklist 11596 * - preemption disabled 11597 */ 11598 static void task_fork_fair(struct task_struct *p) 11599 { 11600 struct cfs_rq *cfs_rq; 11601 struct sched_entity *se = &p->se, *curr; 11602 struct rq *rq = this_rq(); 11603 struct rq_flags rf; 11604 11605 rq_lock(rq, &rf); 11606 update_rq_clock(rq); 11607 11608 cfs_rq = task_cfs_rq(current); 11609 curr = cfs_rq->curr; 11610 if (curr) { 11611 update_curr(cfs_rq); 11612 se->vruntime = curr->vruntime; 11613 } 11614 place_entity(cfs_rq, se, 1); 11615 11616 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { 11617 /* 11618 * Upon rescheduling, sched_class::put_prev_task() will place 11619 * 'current' within the tree based on its new key value. 11620 */ 11621 swap(curr->vruntime, se->vruntime); 11622 resched_curr(rq); 11623 } 11624 11625 se->vruntime -= cfs_rq->min_vruntime; 11626 rq_unlock(rq, &rf); 11627 } 11628 11629 /* 11630 * Priority of the task has changed. Check to see if we preempt 11631 * the current task. 
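 *
 * (Worked example of the delta above, numbers assumed: sea->vruntime
 * = 1000500 on a cfs_rq with min_vruntime_fi = 1000000, and
 * seb->vruntime = 2000200 with min_vruntime_fi = 2000000.  delta =
 * (1000500 - 2000200) + (2000000 - 1000000) = 300 > 0: a is 500 ns
 * past its queue's force-idle baseline versus 200 ns for b, so a is
 * reported as the lower-priority, less deserving task.)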
/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
        if (!task_on_rq_queued(p))
                return;

        if (rq->cfs.nr_running == 1)
                return;

        /*
         * Reschedule if we are currently running on this runqueue and
         * our priority decreased, or if we are not currently running on
         * this runqueue and our priority is higher than the current's.
         */
        if (task_current(rq, p)) {
                if (p->prio > oldprio)
                        resched_curr(rq);
        } else
                check_preempt_curr(rq, p, 0);
}

static inline bool vruntime_normalized(struct task_struct *p)
{
        struct sched_entity *se = &p->se;

        /*
         * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
         * the dequeue_entity(.flags=0) will already have normalized the
         * vruntime.
         */
        if (p->on_rq)
                return true;

        /*
         * When !on_rq, vruntime of the task has usually NOT been normalized.
         * But there are some cases where it has already been normalized:
         *
         * - A forked child which is waiting to be woken up by
         *   wake_up_new_task().
         * - A task which has been woken up by try_to_wake_up() and is
         *   waiting to actually be woken up by sched_ttwu_pending().
         */
        if (!se->sum_exec_runtime ||
            (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup))
                return true;

        return false;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Propagate the changes of the sched_entity across the tg tree to make it
 * visible to the root.
 */
static void propagate_entity_cfs_rq(struct sched_entity *se)
{
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        if (cfs_rq_throttled(cfs_rq))
                return;

        if (!throttled_hierarchy(cfs_rq))
                list_add_leaf_cfs_rq(cfs_rq);

        /* Start to propagate at parent */
        se = se->parent;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);

                update_load_avg(cfs_rq, se, UPDATE_TG);

                if (cfs_rq_throttled(cfs_rq))
                        break;

                if (!throttled_hierarchy(cfs_rq))
                        list_add_leaf_cfs_rq(cfs_rq);
        }
}
#else
static void propagate_entity_cfs_rq(struct sched_entity *se) { }
#endif

static void detach_entity_cfs_rq(struct sched_entity *se)
{
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        /* Catch up with the cfs_rq and remove our load when we leave */
        update_load_avg(cfs_rq, se, 0);
        detach_entity_load_avg(cfs_rq, se);
        update_tg_load_avg(cfs_rq);
        propagate_entity_cfs_rq(se);
}
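/*
 * detach_entity_cfs_rq() above and attach_entity_cfs_rq() below are used
 * as a pair: a task's PELT contribution is folded out of the cfs_rq it is
 * leaving and later folded into the one it joins, when the task changes
 * scheduling class or task group (see switched_from_fair(),
 * switched_to_fair() and task_move_group_fair() further down).
 */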
static void attach_entity_cfs_rq(struct sched_entity *se)
{
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

#ifdef CONFIG_FAIR_GROUP_SCHED
        /*
         * Since the real depth could have been changed (only the fair
         * class maintains the depth value), reset it properly.
         */
        se->depth = se->parent ? se->parent->depth + 1 : 0;
#endif

        /* Synchronize entity with its cfs_rq */
        update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
        attach_entity_load_avg(cfs_rq, se);
        update_tg_load_avg(cfs_rq);
        propagate_entity_cfs_rq(se);
}

static void detach_task_cfs_rq(struct task_struct *p)
{
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        if (!vruntime_normalized(p)) {
                /*
                 * Fix up our vruntime so that the current sleep doesn't
                 * cause an 'unlimited' sleep bonus.
                 */
                place_entity(cfs_rq, se, 0);
                se->vruntime -= cfs_rq->min_vruntime;
        }

        detach_entity_cfs_rq(se);
}

static void attach_task_cfs_rq(struct task_struct *p)
{
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        attach_entity_cfs_rq(se);

        if (!vruntime_normalized(p))
                se->vruntime += cfs_rq->min_vruntime;
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
        detach_task_cfs_rq(p);
}

static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
        attach_task_cfs_rq(p);

        if (task_on_rq_queued(p)) {
                /*
                 * We were most likely switched from sched_rt, so
                 * kick off the schedule if running, otherwise just see
                 * if we can still preempt the current task.
                 */
                if (task_current(rq, p))
                        resched_curr(rq);
                else
                        check_preempt_curr(rq, p, 0);
        }
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
{
        struct sched_entity *se = &p->se;

#ifdef CONFIG_SMP
        if (task_on_rq_queued(p)) {
                /*
                 * Move the next running task to the front of the list, so
                 * our cfs_tasks list becomes an MRU list.
                 */
                list_move(&se->group_node, &rq->cfs_tasks);
        }
#endif

        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);

                set_next_entity(cfs_rq, se);
                /* ensure bandwidth has been allocated on our new cfs_rq */
                account_cfs_rq_runtime(cfs_rq, 0);
        }
}

void init_cfs_rq(struct cfs_rq *cfs_rq)
{
        cfs_rq->tasks_timeline = RB_ROOT_CACHED;
        u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
#ifdef CONFIG_SMP
        raw_spin_lock_init(&cfs_rq->removed.lock);
#endif
}
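/*
 * Note on the initial min_vruntime above: starting at -(1LL << 20)
 * (roughly -1 ms in nanoseconds) rather than at 0 presumably makes the
 * unsigned vruntime counters wrap around very early in a cfs_rq's
 * lifetime, so any comparison that mishandles wrap-around shows up
 * quickly instead of only after a very long uptime.
 */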
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_set_group_fair(struct task_struct *p)
{
        struct sched_entity *se = &p->se;

        set_task_rq(p, task_cpu(p));
        se->depth = se->parent ? se->parent->depth + 1 : 0;
}

static void task_move_group_fair(struct task_struct *p)
{
        detach_task_cfs_rq(p);
        set_task_rq(p, task_cpu(p));

#ifdef CONFIG_SMP
        /* Tell load tracking that the se's cfs_rq has changed -- it migrated */
        p->se.avg.last_update_time = 0;
#endif
        attach_task_cfs_rq(p);
}

static void task_change_group_fair(struct task_struct *p, int type)
{
        switch (type) {
        case TASK_SET_GROUP:
                task_set_group_fair(p);
                break;

        case TASK_MOVE_GROUP:
                task_move_group_fair(p);
                break;
        }
}

void free_fair_sched_group(struct task_group *tg)
{
        int i;

        for_each_possible_cpu(i) {
                if (tg->cfs_rq)
                        kfree(tg->cfs_rq[i]);
                if (tg->se)
                        kfree(tg->se[i]);
        }

        kfree(tg->cfs_rq);
        kfree(tg->se);
}

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct sched_entity *se;
        struct cfs_rq *cfs_rq;
        int i;

        tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
        if (!tg->cfs_rq)
                goto err;
        tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
        if (!tg->se)
                goto err;

        tg->shares = NICE_0_LOAD;

        init_cfs_bandwidth(tg_cfs_bandwidth(tg));

        for_each_possible_cpu(i) {
                cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
                                      GFP_KERNEL, cpu_to_node(i));
                if (!cfs_rq)
                        goto err;

                se = kzalloc_node(sizeof(struct sched_entity_stats),
                                  GFP_KERNEL, cpu_to_node(i));
                if (!se)
                        goto err_free_rq;

                init_cfs_rq(cfs_rq);
                init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
                init_entity_runnable_average(se);
        }

        return 1;

err_free_rq:
        kfree(cfs_rq);
err:
        return 0;
}

void online_fair_sched_group(struct task_group *tg)
{
        struct sched_entity *se;
        struct rq_flags rf;
        struct rq *rq;
        int i;

        for_each_possible_cpu(i) {
                rq = cpu_rq(i);
                se = tg->se[i];
                rq_lock_irq(rq, &rf);
                update_rq_clock(rq);
                attach_entity_cfs_rq(se);
                sync_throttle(tg, i);
                rq_unlock_irq(rq, &rf);
        }
}
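/*
 * Layout reminder for the group structures set up above: for each possible
 * CPU, tg->cfs_rq[cpu] is the group's own runqueue on that CPU and
 * tg->se[cpu] is the sched_entity representing the group on its parent's
 * cfs_rq.  The two are wired together by init_tg_cfs_entry() below, where
 * se->my_q points at the group's cfs_rq and se->cfs_rq at the parent's.
 */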
void unregister_fair_sched_group(struct task_group *tg)
{
        unsigned long flags;
        struct rq *rq;
        int cpu;

        destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

        for_each_possible_cpu(cpu) {
                if (tg->se[cpu])
                        remove_entity_load_avg(tg->se[cpu]);

                /*
                 * Only empty task groups can be destroyed, so we can
                 * speculatively check on_list without danger of it being
                 * re-added.
                 */
                if (!tg->cfs_rq[cpu]->on_list)
                        continue;

                rq = cpu_rq(cpu);

                raw_spin_rq_lock_irqsave(rq, flags);
                list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
                raw_spin_rq_unlock_irqrestore(rq, flags);
        }
}

void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                        struct sched_entity *se, int cpu,
                        struct sched_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        cfs_rq->tg = tg;
        cfs_rq->rq = rq;
        init_cfs_rq_runtime(cfs_rq);

        tg->cfs_rq[cpu] = cfs_rq;
        tg->se[cpu] = se;

        /* se could be NULL for root_task_group */
        if (!se)
                return;

        if (!parent) {
                se->cfs_rq = &rq->cfs;
                se->depth = 0;
        } else {
                se->cfs_rq = parent->my_q;
                se->depth = parent->depth + 1;
        }

        se->my_q = cfs_rq;
        /* guarantee group entities always have weight */
        update_load_set(&se->load, NICE_0_LOAD);
        se->parent = parent;
}

static DEFINE_MUTEX(shares_mutex);

static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
        int i;

        lockdep_assert_held(&shares_mutex);

        /*
         * We can't change the weight of the root cgroup.
         */
        if (!tg->se[0])
                return -EINVAL;

        shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

        if (tg->shares == shares)
                return 0;

        tg->shares = shares;
        for_each_possible_cpu(i) {
                struct rq *rq = cpu_rq(i);
                struct sched_entity *se = tg->se[i];
                struct rq_flags rf;

                /* Propagate contribution to hierarchy */
                rq_lock_irqsave(rq, &rf);
                update_rq_clock(rq);
                for_each_sched_entity(se) {
                        update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
                        update_cfs_group(se);
                }
                rq_unlock_irqrestore(rq, &rf);
        }

        return 0;
}

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
        int ret;

        mutex_lock(&shares_mutex);
        if (tg_is_idle(tg))
                ret = -EINVAL;
        else
                ret = __sched_group_set_shares(tg, shares);
        mutex_unlock(&shares_mutex);

        return ret;
}
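/*
 * Worked example for the shares interface above (illustrative numbers,
 * cgroup v1 style): with two sibling groups under the same parent, one
 * set to 2048 shares and the other left at the default 1024, e.g.
 *
 *	echo 2048 > /sys/fs/cgroup/cpu/groupA/cpu.shares
 *
 * a fully contended CPU is split roughly 2:1 (about 67% / 33%) between
 * them.  Shares are relative weights, not absolute bandwidth limits.
 */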
int sched_group_set_idle(struct task_group *tg, long idle)
{
        int i;

        if (tg == &root_task_group)
                return -EINVAL;

        if (idle < 0 || idle > 1)
                return -EINVAL;

        mutex_lock(&shares_mutex);

        if (tg->idle == idle) {
                mutex_unlock(&shares_mutex);
                return 0;
        }

        tg->idle = idle;

        for_each_possible_cpu(i) {
                struct rq *rq = cpu_rq(i);
                struct sched_entity *se = tg->se[i];
                struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
                bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
                long idle_task_delta;
                struct rq_flags rf;

                rq_lock_irqsave(rq, &rf);

                grp_cfs_rq->idle = idle;
                if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
                        goto next_cpu;

                if (se->on_rq) {
                        parent_cfs_rq = cfs_rq_of(se);
                        if (cfs_rq_is_idle(grp_cfs_rq))
                                parent_cfs_rq->idle_nr_running++;
                        else
                                parent_cfs_rq->idle_nr_running--;
                }

                idle_task_delta = grp_cfs_rq->h_nr_running -
                                  grp_cfs_rq->idle_h_nr_running;
                if (!cfs_rq_is_idle(grp_cfs_rq))
                        idle_task_delta *= -1;

                for_each_sched_entity(se) {
                        struct cfs_rq *cfs_rq = cfs_rq_of(se);

                        if (!se->on_rq)
                                break;

                        cfs_rq->idle_h_nr_running += idle_task_delta;

                        /* Already accounted at parent level and above. */
                        if (cfs_rq_is_idle(cfs_rq))
                                break;
                }

next_cpu:
                rq_unlock_irqrestore(rq, &rf);
        }

        /* Idle groups have minimum weight. */
        if (tg_is_idle(tg))
                __sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
        else
                __sched_group_set_shares(tg, NICE_0_LOAD);

        mutex_unlock(&shares_mutex);
        return 0;
}

#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}

void online_fair_sched_group(struct task_group *tg) { }

void unregister_fair_sched_group(struct task_group *tg) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */


static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
        struct sched_entity *se = &task->se;
        unsigned int rr_interval = 0;

        /*
         * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
         * idle runqueue:
         */
        if (rq->cfs.load.weight)
                rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));

        return rr_interval;
}
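/*
 * get_rr_interval_fair() above is what sched_rr_get_interval(2) reports
 * for SCHED_OTHER/SCHED_BATCH tasks.  As a rough, illustrative example:
 * with HZ=250 (4 ms per jiffy), a computed slice of 6 ms truncates to
 * 1 jiffy and is reported back as 4 ms.  The value is advisory only;
 * CFS does not run fixed timeslices.
 */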
/*
 * All the scheduling class methods:
 */
DEFINE_SCHED_CLASS(fair) = {

        .enqueue_task		= enqueue_task_fair,
        .dequeue_task		= dequeue_task_fair,
        .yield_task		= yield_task_fair,
        .yield_to_task		= yield_to_task_fair,

        .check_preempt_curr	= check_preempt_wakeup,

        .pick_next_task		= __pick_next_task_fair,
        .put_prev_task		= put_prev_task_fair,
        .set_next_task		= set_next_task_fair,

#ifdef CONFIG_SMP
        .balance		= balance_fair,
        .pick_task		= pick_task_fair,
        .select_task_rq		= select_task_rq_fair,
        .migrate_task_rq	= migrate_task_rq_fair,

        .rq_online		= rq_online_fair,
        .rq_offline		= rq_offline_fair,

        .task_dead		= task_dead_fair,
        .set_cpus_allowed	= set_cpus_allowed_common,
#endif

        .task_tick		= task_tick_fair,
        .task_fork		= task_fork_fair,

        .prio_changed		= prio_changed_fair,
        .switched_from		= switched_from_fair,
        .switched_to		= switched_to_fair,

        .get_rr_interval	= get_rr_interval_fair,

        .update_curr		= update_curr_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
        .task_change_group	= task_change_group_fair,
#endif

#ifdef CONFIG_UCLAMP_TASK
        .uclamp_enabled		= 1,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
        struct cfs_rq *cfs_rq, *pos;

        rcu_read_lock();
        for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
                print_cfs_rq(m, cpu, cfs_rq);
        rcu_read_unlock();
}

#ifdef CONFIG_NUMA_BALANCING
void show_numa_stats(struct task_struct *p, struct seq_file *m)
{
        int node;
        unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
        struct numa_group *ng;

        rcu_read_lock();
        ng = rcu_dereference(p->numa_group);
        for_each_online_node(node) {
                if (p->numa_faults) {
                        tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
                        tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
                }
                if (ng) {
                        gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
                        gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
                }
                print_numa_stats(m, node, tsf, tpf, gsf, gpf);
        }
        rcu_read_unlock();
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
        open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
        nohz.next_balance = jiffies;
        nohz.next_blocked = jiffies;
        zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
#endif
#endif /* SMP */

}
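/*
 * Note: the SCHED_SOFTIRQ opened in init_sched_fair_class() above is the
 * one raised by trigger_load_balance(); run_rebalance_domains() is its
 * handler, so periodic load balancing runs in softirq context on the CPU
 * whose rq->next_balance has expired.
 */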