1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH) 4 * 5 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 6 * 7 * Interactivity improvements by Mike Galbraith 8 * (C) 2007 Mike Galbraith <efault@gmx.de> 9 * 10 * Various enhancements by Dmitry Adamushko. 11 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com> 12 * 13 * Group scheduling enhancements by Srivatsa Vaddagiri 14 * Copyright IBM Corporation, 2007 15 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> 16 * 17 * Scaled math optimizations by Thomas Gleixner 18 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de> 19 * 20 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra 21 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra 22 */ 23 #include "sched.h" 24 25 #include <trace/events/sched.h> 26 27 /* 28 * Targeted preemption latency for CPU-bound tasks: 29 * 30 * NOTE: this latency value is not the same as the concept of 31 * 'timeslice length' - timeslices in CFS are of variable length 32 * and have no persistent notion like in traditional, time-slice 33 * based scheduling concepts. 34 * 35 * (to see the precise effective timeslice length of your workload, 36 * run vmstat and monitor the context-switches (cs) field) 37 * 38 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) 39 */ 40 unsigned int sysctl_sched_latency = 6000000ULL; 41 static unsigned int normalized_sysctl_sched_latency = 6000000ULL; 42 43 /* 44 * The initial- and re-scaling of tunables is configurable 45 * 46 * Options are: 47 * 48 * SCHED_TUNABLESCALING_NONE - unscaled, always *1 49 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) 50 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus 51 * 52 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) 53 */ 54 enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; 55 56 /* 57 * Minimal preemption granularity for CPU-bound tasks: 58 * 59 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) 60 */ 61 unsigned int sysctl_sched_min_granularity = 750000ULL; 62 static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; 63 64 /* 65 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity 66 */ 67 static unsigned int sched_nr_latency = 8; 68 69 /* 70 * After fork, child runs first. If set to 0 (default) then 71 * parent will (try to) run first. 72 */ 73 unsigned int sysctl_sched_child_runs_first __read_mostly; 74 75 /* 76 * SCHED_OTHER wake-up granularity. 77 * 78 * This option delays the preemption effects of decoupled workloads 79 * and reduces their over-scheduling. Synchronous workloads will still 80 * have immediate wakeup/sleep latencies. 81 * 82 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) 83 */ 84 unsigned int sysctl_sched_wakeup_granularity = 1000000UL; 85 static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; 86 87 const_debug unsigned int sysctl_sched_migration_cost = 500000UL; 88 89 #ifdef CONFIG_SMP 90 /* 91 * For asym packing, by default the lower numbered CPU has higher priority. 92 */ 93 int __weak arch_asym_cpu_priority(int cpu) 94 { 95 return -cpu; 96 } 97 98 /* 99 * The margin used when comparing utilization with CPU capacity. 
100 * 101 * (default: ~20%) 102 */ 103 #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024) 104 105 #endif 106 107 #ifdef CONFIG_CFS_BANDWIDTH 108 /* 109 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool 110 * each time a cfs_rq requests quota. 111 * 112 * Note: in the case that the slice exceeds the runtime remaining (either due 113 * to consumption or the quota being specified to be smaller than the slice) 114 * we will always only issue the remaining available time. 115 * 116 * (default: 5 msec, units: microseconds) 117 */ 118 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; 119 #endif 120 121 static inline void update_load_add(struct load_weight *lw, unsigned long inc) 122 { 123 lw->weight += inc; 124 lw->inv_weight = 0; 125 } 126 127 static inline void update_load_sub(struct load_weight *lw, unsigned long dec) 128 { 129 lw->weight -= dec; 130 lw->inv_weight = 0; 131 } 132 133 static inline void update_load_set(struct load_weight *lw, unsigned long w) 134 { 135 lw->weight = w; 136 lw->inv_weight = 0; 137 } 138 139 /* 140 * Increase the granularity value when there are more CPUs, 141 * because with more CPUs the 'effective latency' as visible 142 * to users decreases. But the relationship is not linear, 143 * so pick a second-best guess by going with the log2 of the 144 * number of CPUs. 145 * 146 * This idea comes from the SD scheduler of Con Kolivas: 147 */ 148 static unsigned int get_update_sysctl_factor(void) 149 { 150 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8); 151 unsigned int factor; 152 153 switch (sysctl_sched_tunable_scaling) { 154 case SCHED_TUNABLESCALING_NONE: 155 factor = 1; 156 break; 157 case SCHED_TUNABLESCALING_LINEAR: 158 factor = cpus; 159 break; 160 case SCHED_TUNABLESCALING_LOG: 161 default: 162 factor = 1 + ilog2(cpus); 163 break; 164 } 165 166 return factor; 167 } 168 169 static void update_sysctl(void) 170 { 171 unsigned int factor = get_update_sysctl_factor(); 172 173 #define SET_SYSCTL(name) \ 174 (sysctl_##name = (factor) * normalized_sysctl_##name) 175 SET_SYSCTL(sched_min_granularity); 176 SET_SYSCTL(sched_latency); 177 SET_SYSCTL(sched_wakeup_granularity); 178 #undef SET_SYSCTL 179 } 180 181 void sched_init_granularity(void) 182 { 183 update_sysctl(); 184 } 185 186 #define WMULT_CONST (~0U) 187 #define WMULT_SHIFT 32 188 189 static void __update_inv_weight(struct load_weight *lw) 190 { 191 unsigned long w; 192 193 if (likely(lw->inv_weight)) 194 return; 195 196 w = scale_load_down(lw->weight); 197 198 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) 199 lw->inv_weight = 1; 200 else if (unlikely(!w)) 201 lw->inv_weight = WMULT_CONST; 202 else 203 lw->inv_weight = WMULT_CONST / w; 204 } 205 206 /* 207 * delta_exec * weight / lw.weight 208 * OR 209 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT 210 * 211 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case 212 * we're guaranteed shift stays positive because inv_weight is guaranteed to 213 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22. 214 * 215 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus 216 * weight/lw.weight <= 1, and therefore our shift will also be positive. 
217 */ 218 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw) 219 { 220 u64 fact = scale_load_down(weight); 221 int shift = WMULT_SHIFT; 222 223 __update_inv_weight(lw); 224 225 if (unlikely(fact >> 32)) { 226 while (fact >> 32) { 227 fact >>= 1; 228 shift--; 229 } 230 } 231 232 fact = mul_u32_u32(fact, lw->inv_weight); 233 234 while (fact >> 32) { 235 fact >>= 1; 236 shift--; 237 } 238 239 return mul_u64_u32_shr(delta_exec, fact, shift); 240 } 241 242 243 const struct sched_class fair_sched_class; 244 245 /************************************************************** 246 * CFS operations on generic schedulable entities: 247 */ 248 249 #ifdef CONFIG_FAIR_GROUP_SCHED 250 static inline struct task_struct *task_of(struct sched_entity *se) 251 { 252 SCHED_WARN_ON(!entity_is_task(se)); 253 return container_of(se, struct task_struct, se); 254 } 255 256 /* Walk up scheduling entities hierarchy */ 257 #define for_each_sched_entity(se) \ 258 for (; se; se = se->parent) 259 260 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) 261 { 262 return p->se.cfs_rq; 263 } 264 265 /* runqueue on which this entity is (to be) queued */ 266 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) 267 { 268 return se->cfs_rq; 269 } 270 271 /* runqueue "owned" by this group */ 272 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) 273 { 274 return grp->my_q; 275 } 276 277 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) 278 { 279 if (!path) 280 return; 281 282 if (cfs_rq && task_group_is_autogroup(cfs_rq->tg)) 283 autogroup_path(cfs_rq->tg, path, len); 284 else if (cfs_rq && cfs_rq->tg->css.cgroup) 285 cgroup_path(cfs_rq->tg->css.cgroup, path, len); 286 else 287 strlcpy(path, "(null)", len); 288 } 289 290 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) 291 { 292 struct rq *rq = rq_of(cfs_rq); 293 int cpu = cpu_of(rq); 294 295 if (cfs_rq->on_list) 296 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list; 297 298 cfs_rq->on_list = 1; 299 300 /* 301 * Ensure we either appear before our parent (if already 302 * enqueued) or force our parent to appear after us when it is 303 * enqueued. The fact that we always enqueue bottom-up 304 * reduces this to two cases and a special case for the root 305 * cfs_rq. Furthermore, it also means that we will always reset 306 * tmp_alone_branch either when the branch is connected 307 * to a tree or when we reach the top of the tree 308 */ 309 if (cfs_rq->tg->parent && 310 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { 311 /* 312 * If parent is already on the list, we add the child 313 * just before. Thanks to circular linked property of 314 * the list, this means to put the child at the tail 315 * of the list that starts by parent. 316 */ 317 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, 318 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); 319 /* 320 * The branch is now connected to its tree so we can 321 * reset tmp_alone_branch to the beginning of the 322 * list. 323 */ 324 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 325 return true; 326 } 327 328 if (!cfs_rq->tg->parent) { 329 /* 330 * cfs rq without parent should be put 331 * at the tail of the list. 332 */ 333 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, 334 &rq->leaf_cfs_rq_list); 335 /* 336 * We have reach the top of a tree so we can reset 337 * tmp_alone_branch to the beginning of the list. 
338 */ 339 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 340 return true; 341 } 342 343 /* 344 * The parent has not already been added so we want to 345 * make sure that it will be put after us. 346 * tmp_alone_branch points to the begin of the branch 347 * where we will add parent. 348 */ 349 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); 350 /* 351 * update tmp_alone_branch to points to the new begin 352 * of the branch 353 */ 354 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; 355 return false; 356 } 357 358 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) 359 { 360 if (cfs_rq->on_list) { 361 struct rq *rq = rq_of(cfs_rq); 362 363 /* 364 * With cfs_rq being unthrottled/throttled during an enqueue, 365 * it can happen the tmp_alone_branch points the a leaf that 366 * we finally want to del. In this case, tmp_alone_branch moves 367 * to the prev element but it will point to rq->leaf_cfs_rq_list 368 * at the end of the enqueue. 369 */ 370 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) 371 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; 372 373 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); 374 cfs_rq->on_list = 0; 375 } 376 } 377 378 static inline void assert_list_leaf_cfs_rq(struct rq *rq) 379 { 380 SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list); 381 } 382 383 /* Iterate thr' all leaf cfs_rq's on a runqueue */ 384 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ 385 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \ 386 leaf_cfs_rq_list) 387 388 /* Do the two (enqueued) entities belong to the same group ? */ 389 static inline struct cfs_rq * 390 is_same_group(struct sched_entity *se, struct sched_entity *pse) 391 { 392 if (se->cfs_rq == pse->cfs_rq) 393 return se->cfs_rq; 394 395 return NULL; 396 } 397 398 static inline struct sched_entity *parent_entity(struct sched_entity *se) 399 { 400 return se->parent; 401 } 402 403 static void 404 find_matching_se(struct sched_entity **se, struct sched_entity **pse) 405 { 406 int se_depth, pse_depth; 407 408 /* 409 * preemption test can be made between sibling entities who are in the 410 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of 411 * both tasks until we find their ancestors who are siblings of common 412 * parent. 
413 */ 414 415 /* First walk up until both entities are at same depth */ 416 se_depth = (*se)->depth; 417 pse_depth = (*pse)->depth; 418 419 while (se_depth > pse_depth) { 420 se_depth--; 421 *se = parent_entity(*se); 422 } 423 424 while (pse_depth > se_depth) { 425 pse_depth--; 426 *pse = parent_entity(*pse); 427 } 428 429 while (!is_same_group(*se, *pse)) { 430 *se = parent_entity(*se); 431 *pse = parent_entity(*pse); 432 } 433 } 434 435 #else /* !CONFIG_FAIR_GROUP_SCHED */ 436 437 static inline struct task_struct *task_of(struct sched_entity *se) 438 { 439 return container_of(se, struct task_struct, se); 440 } 441 442 #define for_each_sched_entity(se) \ 443 for (; se; se = NULL) 444 445 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) 446 { 447 return &task_rq(p)->cfs; 448 } 449 450 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) 451 { 452 struct task_struct *p = task_of(se); 453 struct rq *rq = task_rq(p); 454 455 return &rq->cfs; 456 } 457 458 /* runqueue "owned" by this group */ 459 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) 460 { 461 return NULL; 462 } 463 464 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) 465 { 466 if (path) 467 strlcpy(path, "(null)", len); 468 } 469 470 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) 471 { 472 return true; 473 } 474 475 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) 476 { 477 } 478 479 static inline void assert_list_leaf_cfs_rq(struct rq *rq) 480 { 481 } 482 483 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ 484 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos) 485 486 static inline struct sched_entity *parent_entity(struct sched_entity *se) 487 { 488 return NULL; 489 } 490 491 static inline void 492 find_matching_se(struct sched_entity **se, struct sched_entity **pse) 493 { 494 } 495 496 #endif /* CONFIG_FAIR_GROUP_SCHED */ 497 498 static __always_inline 499 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); 500 501 /************************************************************** 502 * Scheduling class tree data structure manipulation methods: 503 */ 504 505 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime) 506 { 507 s64 delta = (s64)(vruntime - max_vruntime); 508 if (delta > 0) 509 max_vruntime = vruntime; 510 511 return max_vruntime; 512 } 513 514 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) 515 { 516 s64 delta = (s64)(vruntime - min_vruntime); 517 if (delta < 0) 518 min_vruntime = vruntime; 519 520 return min_vruntime; 521 } 522 523 static inline int entity_before(struct sched_entity *a, 524 struct sched_entity *b) 525 { 526 return (s64)(a->vruntime - b->vruntime) < 0; 527 } 528 529 static void update_min_vruntime(struct cfs_rq *cfs_rq) 530 { 531 struct sched_entity *curr = cfs_rq->curr; 532 struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline); 533 534 u64 vruntime = cfs_rq->min_vruntime; 535 536 if (curr) { 537 if (curr->on_rq) 538 vruntime = curr->vruntime; 539 else 540 curr = NULL; 541 } 542 543 if (leftmost) { /* non-empty tree */ 544 struct sched_entity *se; 545 se = rb_entry(leftmost, struct sched_entity, run_node); 546 547 if (!curr) 548 vruntime = se->vruntime; 549 else 550 vruntime = min_vruntime(vruntime, se->vruntime); 551 } 552 553 /* ensure we never gain time by being placed backwards. 
*/ 554 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); 555 #ifndef CONFIG_64BIT 556 smp_wmb(); 557 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; 558 #endif 559 } 560 561 /* 562 * Enqueue an entity into the rb-tree: 563 */ 564 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 565 { 566 struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node; 567 struct rb_node *parent = NULL; 568 struct sched_entity *entry; 569 bool leftmost = true; 570 571 /* 572 * Find the right place in the rbtree: 573 */ 574 while (*link) { 575 parent = *link; 576 entry = rb_entry(parent, struct sched_entity, run_node); 577 /* 578 * We dont care about collisions. Nodes with 579 * the same key stay together. 580 */ 581 if (entity_before(se, entry)) { 582 link = &parent->rb_left; 583 } else { 584 link = &parent->rb_right; 585 leftmost = false; 586 } 587 } 588 589 rb_link_node(&se->run_node, parent, link); 590 rb_insert_color_cached(&se->run_node, 591 &cfs_rq->tasks_timeline, leftmost); 592 } 593 594 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 595 { 596 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); 597 } 598 599 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) 600 { 601 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); 602 603 if (!left) 604 return NULL; 605 606 return rb_entry(left, struct sched_entity, run_node); 607 } 608 609 static struct sched_entity *__pick_next_entity(struct sched_entity *se) 610 { 611 struct rb_node *next = rb_next(&se->run_node); 612 613 if (!next) 614 return NULL; 615 616 return rb_entry(next, struct sched_entity, run_node); 617 } 618 619 #ifdef CONFIG_SCHED_DEBUG 620 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) 621 { 622 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); 623 624 if (!last) 625 return NULL; 626 627 return rb_entry(last, struct sched_entity, run_node); 628 } 629 630 /************************************************************** 631 * Scheduling class statistics methods: 632 */ 633 634 int sched_proc_update_handler(struct ctl_table *table, int write, 635 void __user *buffer, size_t *lenp, 636 loff_t *ppos) 637 { 638 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 639 unsigned int factor = get_update_sysctl_factor(); 640 641 if (ret || !write) 642 return ret; 643 644 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency, 645 sysctl_sched_min_granularity); 646 647 #define WRT_SYSCTL(name) \ 648 (normalized_sysctl_##name = sysctl_##name / (factor)) 649 WRT_SYSCTL(sched_min_granularity); 650 WRT_SYSCTL(sched_latency); 651 WRT_SYSCTL(sched_wakeup_granularity); 652 #undef WRT_SYSCTL 653 654 return 0; 655 } 656 #endif 657 658 /* 659 * delta /= w 660 */ 661 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) 662 { 663 if (unlikely(se->load.weight != NICE_0_LOAD)) 664 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); 665 666 return delta; 667 } 668 669 /* 670 * The idea is to set a period in which each task runs once. 671 * 672 * When there are too many tasks (sched_nr_latency) we have to stretch 673 * this period because otherwise the slices get too small. 674 * 675 * p = (nr <= nl) ? 
l : l*nr/nl 676 */ 677 static u64 __sched_period(unsigned long nr_running) 678 { 679 if (unlikely(nr_running > sched_nr_latency)) 680 return nr_running * sysctl_sched_min_granularity; 681 else 682 return sysctl_sched_latency; 683 } 684 685 /* 686 * We calculate the wall-time slice from the period by taking a part 687 * proportional to the weight. 688 * 689 * s = p*P[w/rw] 690 */ 691 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) 692 { 693 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); 694 695 for_each_sched_entity(se) { 696 struct load_weight *load; 697 struct load_weight lw; 698 699 cfs_rq = cfs_rq_of(se); 700 load = &cfs_rq->load; 701 702 if (unlikely(!se->on_rq)) { 703 lw = cfs_rq->load; 704 705 update_load_add(&lw, se->load.weight); 706 load = &lw; 707 } 708 slice = __calc_delta(slice, se->load.weight, load); 709 } 710 return slice; 711 } 712 713 /* 714 * We calculate the vruntime slice of a to-be-inserted task. 715 * 716 * vs = s/w 717 */ 718 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) 719 { 720 return calc_delta_fair(sched_slice(cfs_rq, se), se); 721 } 722 723 #include "pelt.h" 724 #ifdef CONFIG_SMP 725 726 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu); 727 static unsigned long task_h_load(struct task_struct *p); 728 static unsigned long capacity_of(int cpu); 729 730 /* Give new sched_entity start runnable values to heavy its load in infant time */ 731 void init_entity_runnable_average(struct sched_entity *se) 732 { 733 struct sched_avg *sa = &se->avg; 734 735 memset(sa, 0, sizeof(*sa)); 736 737 /* 738 * Tasks are initialized with full load to be seen as heavy tasks until 739 * they get a chance to stabilize to their real load level. 740 * Group entities are initialized with zero load to reflect the fact that 741 * nothing has been attached to the task group yet. 742 */ 743 if (entity_is_task(se)) 744 sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight); 745 746 se->runnable_weight = se->load.weight; 747 748 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */ 749 } 750 751 static void attach_entity_cfs_rq(struct sched_entity *se); 752 753 /* 754 * With new tasks being created, their initial util_avgs are extrapolated 755 * based on the cfs_rq's current util_avg: 756 * 757 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight 758 * 759 * However, in many cases, the above util_avg does not give a desired 760 * value. Moreover, the sum of the util_avgs may be divergent, such 761 * as when the series is a harmonic series. 762 * 763 * To solve this problem, we also cap the util_avg of successive tasks to 764 * only 1/2 of the left utilization budget: 765 * 766 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n 767 * 768 * where n denotes the nth task and cpu_scale the CPU capacity. 769 * 770 * For example, for a CPU with 1024 of capacity, a simplest series from 771 * the beginning would be like: 772 * 773 * task util_avg: 512, 256, 128, 64, 32, 16, 8, ... 774 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ... 775 * 776 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap) 777 * if util_avg > util_avg_cap. 
778 */ 779 void post_init_entity_util_avg(struct task_struct *p) 780 { 781 struct sched_entity *se = &p->se; 782 struct cfs_rq *cfs_rq = cfs_rq_of(se); 783 struct sched_avg *sa = &se->avg; 784 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); 785 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; 786 787 if (cap > 0) { 788 if (cfs_rq->avg.util_avg != 0) { 789 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; 790 sa->util_avg /= (cfs_rq->avg.load_avg + 1); 791 792 if (sa->util_avg > cap) 793 sa->util_avg = cap; 794 } else { 795 sa->util_avg = cap; 796 } 797 } 798 799 if (p->sched_class != &fair_sched_class) { 800 /* 801 * For !fair tasks do: 802 * 803 update_cfs_rq_load_avg(now, cfs_rq); 804 attach_entity_load_avg(cfs_rq, se); 805 switched_from_fair(rq, p); 806 * 807 * such that the next switched_to_fair() has the 808 * expected state. 809 */ 810 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); 811 return; 812 } 813 814 attach_entity_cfs_rq(se); 815 } 816 817 #else /* !CONFIG_SMP */ 818 void init_entity_runnable_average(struct sched_entity *se) 819 { 820 } 821 void post_init_entity_util_avg(struct task_struct *p) 822 { 823 } 824 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) 825 { 826 } 827 #endif /* CONFIG_SMP */ 828 829 /* 830 * Update the current task's runtime statistics. 831 */ 832 static void update_curr(struct cfs_rq *cfs_rq) 833 { 834 struct sched_entity *curr = cfs_rq->curr; 835 u64 now = rq_clock_task(rq_of(cfs_rq)); 836 u64 delta_exec; 837 838 if (unlikely(!curr)) 839 return; 840 841 delta_exec = now - curr->exec_start; 842 if (unlikely((s64)delta_exec <= 0)) 843 return; 844 845 curr->exec_start = now; 846 847 schedstat_set(curr->statistics.exec_max, 848 max(delta_exec, curr->statistics.exec_max)); 849 850 curr->sum_exec_runtime += delta_exec; 851 schedstat_add(cfs_rq->exec_clock, delta_exec); 852 853 curr->vruntime += calc_delta_fair(delta_exec, curr); 854 update_min_vruntime(cfs_rq); 855 856 if (entity_is_task(curr)) { 857 struct task_struct *curtask = task_of(curr); 858 859 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); 860 cgroup_account_cputime(curtask, delta_exec); 861 account_group_exec_runtime(curtask, delta_exec); 862 } 863 864 account_cfs_rq_runtime(cfs_rq, delta_exec); 865 } 866 867 static void update_curr_fair(struct rq *rq) 868 { 869 update_curr(cfs_rq_of(&rq->curr->se)); 870 } 871 872 static inline void 873 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) 874 { 875 u64 wait_start, prev_wait_start; 876 877 if (!schedstat_enabled()) 878 return; 879 880 wait_start = rq_clock(rq_of(cfs_rq)); 881 prev_wait_start = schedstat_val(se->statistics.wait_start); 882 883 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && 884 likely(wait_start > prev_wait_start)) 885 wait_start -= prev_wait_start; 886 887 __schedstat_set(se->statistics.wait_start, wait_start); 888 } 889 890 static inline void 891 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) 892 { 893 struct task_struct *p; 894 u64 delta; 895 896 if (!schedstat_enabled()) 897 return; 898 899 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); 900 901 if (entity_is_task(se)) { 902 p = task_of(se); 903 if (task_on_rq_migrating(p)) { 904 /* 905 * Preserve migrating task's wait time so wait_start 906 * time stamp can be adjusted to accumulate wait time 907 * prior to migration. 
908 */ 909 __schedstat_set(se->statistics.wait_start, delta); 910 return; 911 } 912 trace_sched_stat_wait(p, delta); 913 } 914 915 __schedstat_set(se->statistics.wait_max, 916 max(schedstat_val(se->statistics.wait_max), delta)); 917 __schedstat_inc(se->statistics.wait_count); 918 __schedstat_add(se->statistics.wait_sum, delta); 919 __schedstat_set(se->statistics.wait_start, 0); 920 } 921 922 static inline void 923 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) 924 { 925 struct task_struct *tsk = NULL; 926 u64 sleep_start, block_start; 927 928 if (!schedstat_enabled()) 929 return; 930 931 sleep_start = schedstat_val(se->statistics.sleep_start); 932 block_start = schedstat_val(se->statistics.block_start); 933 934 if (entity_is_task(se)) 935 tsk = task_of(se); 936 937 if (sleep_start) { 938 u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start; 939 940 if ((s64)delta < 0) 941 delta = 0; 942 943 if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) 944 __schedstat_set(se->statistics.sleep_max, delta); 945 946 __schedstat_set(se->statistics.sleep_start, 0); 947 __schedstat_add(se->statistics.sum_sleep_runtime, delta); 948 949 if (tsk) { 950 account_scheduler_latency(tsk, delta >> 10, 1); 951 trace_sched_stat_sleep(tsk, delta); 952 } 953 } 954 if (block_start) { 955 u64 delta = rq_clock(rq_of(cfs_rq)) - block_start; 956 957 if ((s64)delta < 0) 958 delta = 0; 959 960 if (unlikely(delta > schedstat_val(se->statistics.block_max))) 961 __schedstat_set(se->statistics.block_max, delta); 962 963 __schedstat_set(se->statistics.block_start, 0); 964 __schedstat_add(se->statistics.sum_sleep_runtime, delta); 965 966 if (tsk) { 967 if (tsk->in_iowait) { 968 __schedstat_add(se->statistics.iowait_sum, delta); 969 __schedstat_inc(se->statistics.iowait_count); 970 trace_sched_stat_iowait(tsk, delta); 971 } 972 973 trace_sched_stat_blocked(tsk, delta); 974 975 /* 976 * Blocking time is in units of nanosecs, so shift by 977 * 20 to get a milliseconds-range estimation of the 978 * amount of time that the task spent sleeping: 979 */ 980 if (unlikely(prof_on == SLEEP_PROFILING)) { 981 profile_hits(SLEEP_PROFILING, 982 (void *)get_wchan(tsk), 983 delta >> 20); 984 } 985 account_scheduler_latency(tsk, delta >> 10, 0); 986 } 987 } 988 } 989 990 /* 991 * Task is being enqueued - update stats: 992 */ 993 static inline void 994 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 995 { 996 if (!schedstat_enabled()) 997 return; 998 999 /* 1000 * Are we enqueueing a waiting task? 
(for current tasks 1001 * a dequeue/enqueue event is a NOP) 1002 */ 1003 if (se != cfs_rq->curr) 1004 update_stats_wait_start(cfs_rq, se); 1005 1006 if (flags & ENQUEUE_WAKEUP) 1007 update_stats_enqueue_sleeper(cfs_rq, se); 1008 } 1009 1010 static inline void 1011 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 1012 { 1013 1014 if (!schedstat_enabled()) 1015 return; 1016 1017 /* 1018 * Mark the end of the wait period if dequeueing a 1019 * waiting task: 1020 */ 1021 if (se != cfs_rq->curr) 1022 update_stats_wait_end(cfs_rq, se); 1023 1024 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { 1025 struct task_struct *tsk = task_of(se); 1026 1027 if (tsk->state & TASK_INTERRUPTIBLE) 1028 __schedstat_set(se->statistics.sleep_start, 1029 rq_clock(rq_of(cfs_rq))); 1030 if (tsk->state & TASK_UNINTERRUPTIBLE) 1031 __schedstat_set(se->statistics.block_start, 1032 rq_clock(rq_of(cfs_rq))); 1033 } 1034 } 1035 1036 /* 1037 * We are picking a new current task - update its stats: 1038 */ 1039 static inline void 1040 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) 1041 { 1042 /* 1043 * We are starting a new run period: 1044 */ 1045 se->exec_start = rq_clock_task(rq_of(cfs_rq)); 1046 } 1047 1048 /************************************************** 1049 * Scheduling class queueing methods: 1050 */ 1051 1052 #ifdef CONFIG_NUMA_BALANCING 1053 /* 1054 * Approximate time to scan a full NUMA task in ms. The task scan period is 1055 * calculated based on the tasks virtual memory size and 1056 * numa_balancing_scan_size. 1057 */ 1058 unsigned int sysctl_numa_balancing_scan_period_min = 1000; 1059 unsigned int sysctl_numa_balancing_scan_period_max = 60000; 1060 1061 /* Portion of address space to scan in MB */ 1062 unsigned int sysctl_numa_balancing_scan_size = 256; 1063 1064 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ 1065 unsigned int sysctl_numa_balancing_scan_delay = 1000; 1066 1067 struct numa_group { 1068 refcount_t refcount; 1069 1070 spinlock_t lock; /* nr_tasks, tasks */ 1071 int nr_tasks; 1072 pid_t gid; 1073 int active_nodes; 1074 1075 struct rcu_head rcu; 1076 unsigned long total_faults; 1077 unsigned long max_faults_cpu; 1078 /* 1079 * Faults_cpu is used to decide whether memory should move 1080 * towards the CPU. As a consequence, these stats are weighted 1081 * more by CPU use than by memory faults. 1082 */ 1083 unsigned long *faults_cpu; 1084 unsigned long faults[0]; 1085 }; 1086 1087 /* 1088 * For functions that can be called in multiple contexts that permit reading 1089 * ->numa_group (see struct task_struct for locking rules). 
1090 */ 1091 static struct numa_group *deref_task_numa_group(struct task_struct *p) 1092 { 1093 return rcu_dereference_check(p->numa_group, p == current || 1094 (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu))); 1095 } 1096 1097 static struct numa_group *deref_curr_numa_group(struct task_struct *p) 1098 { 1099 return rcu_dereference_protected(p->numa_group, p == current); 1100 } 1101 1102 static inline unsigned long group_faults_priv(struct numa_group *ng); 1103 static inline unsigned long group_faults_shared(struct numa_group *ng); 1104 1105 static unsigned int task_nr_scan_windows(struct task_struct *p) 1106 { 1107 unsigned long rss = 0; 1108 unsigned long nr_scan_pages; 1109 1110 /* 1111 * Calculations based on RSS as non-present and empty pages are skipped 1112 * by the PTE scanner and NUMA hinting faults should be trapped based 1113 * on resident pages 1114 */ 1115 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT); 1116 rss = get_mm_rss(p->mm); 1117 if (!rss) 1118 rss = nr_scan_pages; 1119 1120 rss = round_up(rss, nr_scan_pages); 1121 return rss / nr_scan_pages; 1122 } 1123 1124 /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */ 1125 #define MAX_SCAN_WINDOW 2560 1126 1127 static unsigned int task_scan_min(struct task_struct *p) 1128 { 1129 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size); 1130 unsigned int scan, floor; 1131 unsigned int windows = 1; 1132 1133 if (scan_size < MAX_SCAN_WINDOW) 1134 windows = MAX_SCAN_WINDOW / scan_size; 1135 floor = 1000 / windows; 1136 1137 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); 1138 return max_t(unsigned int, floor, scan); 1139 } 1140 1141 static unsigned int task_scan_start(struct task_struct *p) 1142 { 1143 unsigned long smin = task_scan_min(p); 1144 unsigned long period = smin; 1145 struct numa_group *ng; 1146 1147 /* Scale the maximum scan period with the amount of shared memory. */ 1148 rcu_read_lock(); 1149 ng = rcu_dereference(p->numa_group); 1150 if (ng) { 1151 unsigned long shared = group_faults_shared(ng); 1152 unsigned long private = group_faults_priv(ng); 1153 1154 period *= refcount_read(&ng->refcount); 1155 period *= shared + 1; 1156 period /= private + shared + 1; 1157 } 1158 rcu_read_unlock(); 1159 1160 return max(smin, period); 1161 } 1162 1163 static unsigned int task_scan_max(struct task_struct *p) 1164 { 1165 unsigned long smin = task_scan_min(p); 1166 unsigned long smax; 1167 struct numa_group *ng; 1168 1169 /* Watch for min being lower than max due to floor calculations */ 1170 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); 1171 1172 /* Scale the maximum scan period with the amount of shared memory. 
*/ 1173 ng = deref_curr_numa_group(p); 1174 if (ng) { 1175 unsigned long shared = group_faults_shared(ng); 1176 unsigned long private = group_faults_priv(ng); 1177 unsigned long period = smax; 1178 1179 period *= refcount_read(&ng->refcount); 1180 period *= shared + 1; 1181 period /= private + shared + 1; 1182 1183 smax = max(smax, period); 1184 } 1185 1186 return max(smin, smax); 1187 } 1188 1189 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) 1190 { 1191 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); 1192 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); 1193 } 1194 1195 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) 1196 { 1197 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); 1198 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); 1199 } 1200 1201 /* Shared or private faults. */ 1202 #define NR_NUMA_HINT_FAULT_TYPES 2 1203 1204 /* Memory and CPU locality */ 1205 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2) 1206 1207 /* Averaged statistics, and temporary buffers. */ 1208 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2) 1209 1210 pid_t task_numa_group_id(struct task_struct *p) 1211 { 1212 struct numa_group *ng; 1213 pid_t gid = 0; 1214 1215 rcu_read_lock(); 1216 ng = rcu_dereference(p->numa_group); 1217 if (ng) 1218 gid = ng->gid; 1219 rcu_read_unlock(); 1220 1221 return gid; 1222 } 1223 1224 /* 1225 * The averaged statistics, shared & private, memory & CPU, 1226 * occupy the first half of the array. The second half of the 1227 * array is for current counters, which are averaged into the 1228 * first set by task_numa_placement. 1229 */ 1230 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv) 1231 { 1232 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv; 1233 } 1234 1235 static inline unsigned long task_faults(struct task_struct *p, int nid) 1236 { 1237 if (!p->numa_faults) 1238 return 0; 1239 1240 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + 1241 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; 1242 } 1243 1244 static inline unsigned long group_faults(struct task_struct *p, int nid) 1245 { 1246 struct numa_group *ng = deref_task_numa_group(p); 1247 1248 if (!ng) 1249 return 0; 1250 1251 return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] + 1252 ng->faults[task_faults_idx(NUMA_MEM, nid, 1)]; 1253 } 1254 1255 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) 1256 { 1257 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] + 1258 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)]; 1259 } 1260 1261 static inline unsigned long group_faults_priv(struct numa_group *ng) 1262 { 1263 unsigned long faults = 0; 1264 int node; 1265 1266 for_each_online_node(node) { 1267 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; 1268 } 1269 1270 return faults; 1271 } 1272 1273 static inline unsigned long group_faults_shared(struct numa_group *ng) 1274 { 1275 unsigned long faults = 0; 1276 int node; 1277 1278 for_each_online_node(node) { 1279 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)]; 1280 } 1281 1282 return faults; 1283 } 1284 1285 /* 1286 * A node triggering more than 1/3 as many NUMA faults as the maximum is 1287 * considered part of a numa group's pseudo-interleaving set. Migrations 1288 * between these nodes are slowed down, to allow things to settle down. 
1289 */ 1290 #define ACTIVE_NODE_FRACTION 3 1291 1292 static bool numa_is_active_node(int nid, struct numa_group *ng) 1293 { 1294 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu; 1295 } 1296 1297 /* Handle placement on systems where not all nodes are directly connected. */ 1298 static unsigned long score_nearby_nodes(struct task_struct *p, int nid, 1299 int maxdist, bool task) 1300 { 1301 unsigned long score = 0; 1302 int node; 1303 1304 /* 1305 * All nodes are directly connected, and the same distance 1306 * from each other. No need for fancy placement algorithms. 1307 */ 1308 if (sched_numa_topology_type == NUMA_DIRECT) 1309 return 0; 1310 1311 /* 1312 * This code is called for each node, introducing N^2 complexity, 1313 * which should be ok given the number of nodes rarely exceeds 8. 1314 */ 1315 for_each_online_node(node) { 1316 unsigned long faults; 1317 int dist = node_distance(nid, node); 1318 1319 /* 1320 * The furthest away nodes in the system are not interesting 1321 * for placement; nid was already counted. 1322 */ 1323 if (dist == sched_max_numa_distance || node == nid) 1324 continue; 1325 1326 /* 1327 * On systems with a backplane NUMA topology, compare groups 1328 * of nodes, and move tasks towards the group with the most 1329 * memory accesses. When comparing two nodes at distance 1330 * "hoplimit", only nodes closer by than "hoplimit" are part 1331 * of each group. Skip other nodes. 1332 */ 1333 if (sched_numa_topology_type == NUMA_BACKPLANE && 1334 dist >= maxdist) 1335 continue; 1336 1337 /* Add up the faults from nearby nodes. */ 1338 if (task) 1339 faults = task_faults(p, node); 1340 else 1341 faults = group_faults(p, node); 1342 1343 /* 1344 * On systems with a glueless mesh NUMA topology, there are 1345 * no fixed "groups of nodes". Instead, nodes that are not 1346 * directly connected bounce traffic through intermediate 1347 * nodes; a numa_group can occupy any set of nodes. 1348 * The further away a node is, the less the faults count. 1349 * This seems to result in good task placement. 1350 */ 1351 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 1352 faults *= (sched_max_numa_distance - dist); 1353 faults /= (sched_max_numa_distance - LOCAL_DISTANCE); 1354 } 1355 1356 score += faults; 1357 } 1358 1359 return score; 1360 } 1361 1362 /* 1363 * These return the fraction of accesses done by a particular task, or 1364 * task group, on a particular numa node. The group weight is given a 1365 * larger multiplier, in order to group tasks together that are almost 1366 * evenly spread out between numa nodes. 
1367 */ 1368 static inline unsigned long task_weight(struct task_struct *p, int nid, 1369 int dist) 1370 { 1371 unsigned long faults, total_faults; 1372 1373 if (!p->numa_faults) 1374 return 0; 1375 1376 total_faults = p->total_numa_faults; 1377 1378 if (!total_faults) 1379 return 0; 1380 1381 faults = task_faults(p, nid); 1382 faults += score_nearby_nodes(p, nid, dist, true); 1383 1384 return 1000 * faults / total_faults; 1385 } 1386 1387 static inline unsigned long group_weight(struct task_struct *p, int nid, 1388 int dist) 1389 { 1390 struct numa_group *ng = deref_task_numa_group(p); 1391 unsigned long faults, total_faults; 1392 1393 if (!ng) 1394 return 0; 1395 1396 total_faults = ng->total_faults; 1397 1398 if (!total_faults) 1399 return 0; 1400 1401 faults = group_faults(p, nid); 1402 faults += score_nearby_nodes(p, nid, dist, false); 1403 1404 return 1000 * faults / total_faults; 1405 } 1406 1407 bool should_numa_migrate_memory(struct task_struct *p, struct page * page, 1408 int src_nid, int dst_cpu) 1409 { 1410 struct numa_group *ng = deref_curr_numa_group(p); 1411 int dst_nid = cpu_to_node(dst_cpu); 1412 int last_cpupid, this_cpupid; 1413 1414 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); 1415 last_cpupid = page_cpupid_xchg_last(page, this_cpupid); 1416 1417 /* 1418 * Allow first faults or private faults to migrate immediately early in 1419 * the lifetime of a task. The magic number 4 is based on waiting for 1420 * two full passes of the "multi-stage node selection" test that is 1421 * executed below. 1422 */ 1423 if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) && 1424 (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) 1425 return true; 1426 1427 /* 1428 * Multi-stage node selection is used in conjunction with a periodic 1429 * migration fault to build a temporal task<->page relation. By using 1430 * a two-stage filter we remove short/unlikely relations. 1431 * 1432 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate 1433 * a task's usage of a particular page (n_p) per total usage of this 1434 * page (n_t) (in a given time-span) to a probability. 1435 * 1436 * Our periodic faults will sample this probability and getting the 1437 * same result twice in a row, given these samples are fully 1438 * independent, is then given by P(n)^2, provided our sample period 1439 * is sufficiently short compared to the usage pattern. 1440 * 1441 * This quadric squishes small probabilities, making it less likely we 1442 * act on an unlikely task<->page relation. 1443 */ 1444 if (!cpupid_pid_unset(last_cpupid) && 1445 cpupid_to_nid(last_cpupid) != dst_nid) 1446 return false; 1447 1448 /* Always allow migrate on private faults */ 1449 if (cpupid_match_pid(p, last_cpupid)) 1450 return true; 1451 1452 /* A shared fault, but p->numa_group has not been set up yet. */ 1453 if (!ng) 1454 return true; 1455 1456 /* 1457 * Destination node is much more heavily used than the source 1458 * node? Allow migration. 
1459 */ 1460 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) * 1461 ACTIVE_NODE_FRACTION) 1462 return true; 1463 1464 /* 1465 * Distribute memory according to CPU & memory use on each node, 1466 * with 3/4 hysteresis to avoid unnecessary memory migrations: 1467 * 1468 * faults_cpu(dst) 3 faults_cpu(src) 1469 * --------------- * - > --------------- 1470 * faults_mem(dst) 4 faults_mem(src) 1471 */ 1472 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 > 1473 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; 1474 } 1475 1476 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq); 1477 1478 static unsigned long cpu_runnable_load(struct rq *rq) 1479 { 1480 return cfs_rq_runnable_load_avg(&rq->cfs); 1481 } 1482 1483 /* Cached statistics for all CPUs within a node */ 1484 struct numa_stats { 1485 unsigned long load; 1486 1487 /* Total compute capacity of CPUs on a node */ 1488 unsigned long compute_capacity; 1489 }; 1490 1491 /* 1492 * XXX borrowed from update_sg_lb_stats 1493 */ 1494 static void update_numa_stats(struct numa_stats *ns, int nid) 1495 { 1496 int cpu; 1497 1498 memset(ns, 0, sizeof(*ns)); 1499 for_each_cpu(cpu, cpumask_of_node(nid)) { 1500 struct rq *rq = cpu_rq(cpu); 1501 1502 ns->load += cpu_runnable_load(rq); 1503 ns->compute_capacity += capacity_of(cpu); 1504 } 1505 1506 } 1507 1508 struct task_numa_env { 1509 struct task_struct *p; 1510 1511 int src_cpu, src_nid; 1512 int dst_cpu, dst_nid; 1513 1514 struct numa_stats src_stats, dst_stats; 1515 1516 int imbalance_pct; 1517 int dist; 1518 1519 struct task_struct *best_task; 1520 long best_imp; 1521 int best_cpu; 1522 }; 1523 1524 static void task_numa_assign(struct task_numa_env *env, 1525 struct task_struct *p, long imp) 1526 { 1527 struct rq *rq = cpu_rq(env->dst_cpu); 1528 1529 /* Bail out if run-queue part of active NUMA balance. */ 1530 if (xchg(&rq->numa_migrate_on, 1)) 1531 return; 1532 1533 /* 1534 * Clear previous best_cpu/rq numa-migrate flag, since task now 1535 * found a better CPU to move/swap. 1536 */ 1537 if (env->best_cpu != -1) { 1538 rq = cpu_rq(env->best_cpu); 1539 WRITE_ONCE(rq->numa_migrate_on, 0); 1540 } 1541 1542 if (env->best_task) 1543 put_task_struct(env->best_task); 1544 if (p) 1545 get_task_struct(p); 1546 1547 env->best_task = p; 1548 env->best_imp = imp; 1549 env->best_cpu = env->dst_cpu; 1550 } 1551 1552 static bool load_too_imbalanced(long src_load, long dst_load, 1553 struct task_numa_env *env) 1554 { 1555 long imb, old_imb; 1556 long orig_src_load, orig_dst_load; 1557 long src_capacity, dst_capacity; 1558 1559 /* 1560 * The load is corrected for the CPU capacity available on each node. 1561 * 1562 * src_load dst_load 1563 * ------------ vs --------- 1564 * src_capacity dst_capacity 1565 */ 1566 src_capacity = env->src_stats.compute_capacity; 1567 dst_capacity = env->dst_stats.compute_capacity; 1568 1569 imb = abs(dst_load * src_capacity - src_load * dst_capacity); 1570 1571 orig_src_load = env->src_stats.load; 1572 orig_dst_load = env->dst_stats.load; 1573 1574 old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity); 1575 1576 /* Would this change make things worse? */ 1577 return (imb > old_imb); 1578 } 1579 1580 /* 1581 * Maximum NUMA importance can be 1998 (2*999); 1582 * SMALLIMP @ 30 would be close to 1998/64. 1583 * Used to deter task migration. 
1584 */ 1585 #define SMALLIMP 30 1586 1587 /* 1588 * This checks if the overall compute and NUMA accesses of the system would 1589 * be improved if the source tasks was migrated to the target dst_cpu taking 1590 * into account that it might be best if task running on the dst_cpu should 1591 * be exchanged with the source task 1592 */ 1593 static void task_numa_compare(struct task_numa_env *env, 1594 long taskimp, long groupimp, bool maymove) 1595 { 1596 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); 1597 struct rq *dst_rq = cpu_rq(env->dst_cpu); 1598 long imp = p_ng ? groupimp : taskimp; 1599 struct task_struct *cur; 1600 long src_load, dst_load; 1601 int dist = env->dist; 1602 long moveimp = imp; 1603 long load; 1604 1605 if (READ_ONCE(dst_rq->numa_migrate_on)) 1606 return; 1607 1608 rcu_read_lock(); 1609 cur = rcu_dereference(dst_rq->curr); 1610 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) 1611 cur = NULL; 1612 1613 /* 1614 * Because we have preemption enabled we can get migrated around and 1615 * end try selecting ourselves (current == env->p) as a swap candidate. 1616 */ 1617 if (cur == env->p) 1618 goto unlock; 1619 1620 if (!cur) { 1621 if (maymove && moveimp >= env->best_imp) 1622 goto assign; 1623 else 1624 goto unlock; 1625 } 1626 1627 /* 1628 * "imp" is the fault differential for the source task between the 1629 * source and destination node. Calculate the total differential for 1630 * the source task and potential destination task. The more negative 1631 * the value is, the more remote accesses that would be expected to 1632 * be incurred if the tasks were swapped. 1633 */ 1634 /* Skip this swap candidate if cannot move to the source cpu */ 1635 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) 1636 goto unlock; 1637 1638 /* 1639 * If dst and source tasks are in the same NUMA group, or not 1640 * in any group then look only at task weights. 1641 */ 1642 cur_ng = rcu_dereference(cur->numa_group); 1643 if (cur_ng == p_ng) { 1644 imp = taskimp + task_weight(cur, env->src_nid, dist) - 1645 task_weight(cur, env->dst_nid, dist); 1646 /* 1647 * Add some hysteresis to prevent swapping the 1648 * tasks within a group over tiny differences. 1649 */ 1650 if (cur_ng) 1651 imp -= imp / 16; 1652 } else { 1653 /* 1654 * Compare the group weights. If a task is all by itself 1655 * (not part of a group), use the task weight instead. 1656 */ 1657 if (cur_ng && p_ng) 1658 imp += group_weight(cur, env->src_nid, dist) - 1659 group_weight(cur, env->dst_nid, dist); 1660 else 1661 imp += task_weight(cur, env->src_nid, dist) - 1662 task_weight(cur, env->dst_nid, dist); 1663 } 1664 1665 if (maymove && moveimp > imp && moveimp > env->best_imp) { 1666 imp = moveimp; 1667 cur = NULL; 1668 goto assign; 1669 } 1670 1671 /* 1672 * If the NUMA importance is less than SMALLIMP, 1673 * task migration might only result in ping pong 1674 * of tasks and also hurt performance due to cache 1675 * misses. 1676 */ 1677 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2) 1678 goto unlock; 1679 1680 /* 1681 * In the overloaded case, try and keep the load balanced. 1682 */ 1683 load = task_h_load(env->p) - task_h_load(cur); 1684 if (!load) 1685 goto assign; 1686 1687 dst_load = env->dst_stats.load + load; 1688 src_load = env->src_stats.load - load; 1689 1690 if (load_too_imbalanced(src_load, dst_load, env)) 1691 goto unlock; 1692 1693 assign: 1694 /* 1695 * One idle CPU per node is evaluated for a task numa move. 1696 * Call select_idle_sibling to maybe find a better one. 
1697 */ 1698 if (!cur) { 1699 /* 1700 * select_idle_siblings() uses an per-CPU cpumask that 1701 * can be used from IRQ context. 1702 */ 1703 local_irq_disable(); 1704 env->dst_cpu = select_idle_sibling(env->p, env->src_cpu, 1705 env->dst_cpu); 1706 local_irq_enable(); 1707 } 1708 1709 task_numa_assign(env, cur, imp); 1710 unlock: 1711 rcu_read_unlock(); 1712 } 1713 1714 static void task_numa_find_cpu(struct task_numa_env *env, 1715 long taskimp, long groupimp) 1716 { 1717 long src_load, dst_load, load; 1718 bool maymove = false; 1719 int cpu; 1720 1721 load = task_h_load(env->p); 1722 dst_load = env->dst_stats.load + load; 1723 src_load = env->src_stats.load - load; 1724 1725 /* 1726 * If the improvement from just moving env->p direction is better 1727 * than swapping tasks around, check if a move is possible. 1728 */ 1729 maymove = !load_too_imbalanced(src_load, dst_load, env); 1730 1731 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { 1732 /* Skip this CPU if the source task cannot migrate */ 1733 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) 1734 continue; 1735 1736 env->dst_cpu = cpu; 1737 task_numa_compare(env, taskimp, groupimp, maymove); 1738 } 1739 } 1740 1741 static int task_numa_migrate(struct task_struct *p) 1742 { 1743 struct task_numa_env env = { 1744 .p = p, 1745 1746 .src_cpu = task_cpu(p), 1747 .src_nid = task_node(p), 1748 1749 .imbalance_pct = 112, 1750 1751 .best_task = NULL, 1752 .best_imp = 0, 1753 .best_cpu = -1, 1754 }; 1755 unsigned long taskweight, groupweight; 1756 struct sched_domain *sd; 1757 long taskimp, groupimp; 1758 struct numa_group *ng; 1759 struct rq *best_rq; 1760 int nid, ret, dist; 1761 1762 /* 1763 * Pick the lowest SD_NUMA domain, as that would have the smallest 1764 * imbalance and would be the first to start moving tasks about. 1765 * 1766 * And we want to avoid any moving of tasks about, as that would create 1767 * random movement of tasks -- counter the numa conditions we're trying 1768 * to satisfy here. 1769 */ 1770 rcu_read_lock(); 1771 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); 1772 if (sd) 1773 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; 1774 rcu_read_unlock(); 1775 1776 /* 1777 * Cpusets can break the scheduler domain tree into smaller 1778 * balance domains, some of which do not cross NUMA boundaries. 1779 * Tasks that are "trapped" in such domains cannot be migrated 1780 * elsewhere, so there is no point in (re)trying. 1781 */ 1782 if (unlikely(!sd)) { 1783 sched_setnuma(p, task_node(p)); 1784 return -EINVAL; 1785 } 1786 1787 env.dst_nid = p->numa_preferred_nid; 1788 dist = env.dist = node_distance(env.src_nid, env.dst_nid); 1789 taskweight = task_weight(p, env.src_nid, dist); 1790 groupweight = group_weight(p, env.src_nid, dist); 1791 update_numa_stats(&env.src_stats, env.src_nid); 1792 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; 1793 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; 1794 update_numa_stats(&env.dst_stats, env.dst_nid); 1795 1796 /* Try to find a spot on the preferred nid. */ 1797 task_numa_find_cpu(&env, taskimp, groupimp); 1798 1799 /* 1800 * Look at other nodes in these cases: 1801 * - there is no space available on the preferred_nid 1802 * - the task is part of a numa_group that is interleaved across 1803 * multiple NUMA nodes; in order to better consolidate the group, 1804 * we need to check other locations. 
1805 */ 1806 ng = deref_curr_numa_group(p); 1807 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { 1808 for_each_online_node(nid) { 1809 if (nid == env.src_nid || nid == p->numa_preferred_nid) 1810 continue; 1811 1812 dist = node_distance(env.src_nid, env.dst_nid); 1813 if (sched_numa_topology_type == NUMA_BACKPLANE && 1814 dist != env.dist) { 1815 taskweight = task_weight(p, env.src_nid, dist); 1816 groupweight = group_weight(p, env.src_nid, dist); 1817 } 1818 1819 /* Only consider nodes where both task and groups benefit */ 1820 taskimp = task_weight(p, nid, dist) - taskweight; 1821 groupimp = group_weight(p, nid, dist) - groupweight; 1822 if (taskimp < 0 && groupimp < 0) 1823 continue; 1824 1825 env.dist = dist; 1826 env.dst_nid = nid; 1827 update_numa_stats(&env.dst_stats, env.dst_nid); 1828 task_numa_find_cpu(&env, taskimp, groupimp); 1829 } 1830 } 1831 1832 /* 1833 * If the task is part of a workload that spans multiple NUMA nodes, 1834 * and is migrating into one of the workload's active nodes, remember 1835 * this node as the task's preferred numa node, so the workload can 1836 * settle down. 1837 * A task that migrated to a second choice node will be better off 1838 * trying for a better one later. Do not set the preferred node here. 1839 */ 1840 if (ng) { 1841 if (env.best_cpu == -1) 1842 nid = env.src_nid; 1843 else 1844 nid = cpu_to_node(env.best_cpu); 1845 1846 if (nid != p->numa_preferred_nid) 1847 sched_setnuma(p, nid); 1848 } 1849 1850 /* No better CPU than the current one was found. */ 1851 if (env.best_cpu == -1) 1852 return -EAGAIN; 1853 1854 best_rq = cpu_rq(env.best_cpu); 1855 if (env.best_task == NULL) { 1856 ret = migrate_task_to(p, env.best_cpu); 1857 WRITE_ONCE(best_rq->numa_migrate_on, 0); 1858 if (ret != 0) 1859 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu); 1860 return ret; 1861 } 1862 1863 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); 1864 WRITE_ONCE(best_rq->numa_migrate_on, 0); 1865 1866 if (ret != 0) 1867 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task)); 1868 put_task_struct(env.best_task); 1869 return ret; 1870 } 1871 1872 /* Attempt to migrate a task to a CPU on the preferred node. */ 1873 static void numa_migrate_preferred(struct task_struct *p) 1874 { 1875 unsigned long interval = HZ; 1876 1877 /* This task has no NUMA fault statistics yet */ 1878 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) 1879 return; 1880 1881 /* Periodically retry migrating the task to the preferred node */ 1882 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); 1883 p->numa_migrate_retry = jiffies + interval; 1884 1885 /* Success if task is already running on preferred CPU */ 1886 if (task_node(p) == p->numa_preferred_nid) 1887 return; 1888 1889 /* Otherwise, try migrate to a CPU on the preferred node */ 1890 task_numa_migrate(p); 1891 } 1892 1893 /* 1894 * Find out how many nodes on the workload is actively running on. Do this by 1895 * tracking the nodes from which NUMA hinting faults are triggered. This can 1896 * be different from the set of nodes where the workload's memory is currently 1897 * located. 
1898 */ 1899 static void numa_group_count_active_nodes(struct numa_group *numa_group) 1900 { 1901 unsigned long faults, max_faults = 0; 1902 int nid, active_nodes = 0; 1903 1904 for_each_online_node(nid) { 1905 faults = group_faults_cpu(numa_group, nid); 1906 if (faults > max_faults) 1907 max_faults = faults; 1908 } 1909 1910 for_each_online_node(nid) { 1911 faults = group_faults_cpu(numa_group, nid); 1912 if (faults * ACTIVE_NODE_FRACTION > max_faults) 1913 active_nodes++; 1914 } 1915 1916 numa_group->max_faults_cpu = max_faults; 1917 numa_group->active_nodes = active_nodes; 1918 } 1919 1920 /* 1921 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS 1922 * increments. The more local the fault statistics are, the higher the scan 1923 * period will be for the next scan window. If local/(local+remote) ratio is 1924 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) 1925 * the scan period will decrease. Aim for 70% local accesses. 1926 */ 1927 #define NUMA_PERIOD_SLOTS 10 1928 #define NUMA_PERIOD_THRESHOLD 7 1929 1930 /* 1931 * Increase the scan period (slow down scanning) if the majority of 1932 * our memory is already on our local node, or if the majority of 1933 * the page accesses are shared with other processes. 1934 * Otherwise, decrease the scan period. 1935 */ 1936 static void update_task_scan_period(struct task_struct *p, 1937 unsigned long shared, unsigned long private) 1938 { 1939 unsigned int period_slot; 1940 int lr_ratio, ps_ratio; 1941 int diff; 1942 1943 unsigned long remote = p->numa_faults_locality[0]; 1944 unsigned long local = p->numa_faults_locality[1]; 1945 1946 /* 1947 * If there were no record hinting faults then either the task is 1948 * completely idle or all activity is areas that are not of interest 1949 * to automatic numa balancing. Related to that, if there were failed 1950 * migration then it implies we are migrating too quickly or the local 1951 * node is overloaded. In either case, scan slower 1952 */ 1953 if (local + shared == 0 || p->numa_faults_locality[2]) { 1954 p->numa_scan_period = min(p->numa_scan_period_max, 1955 p->numa_scan_period << 1); 1956 1957 p->mm->numa_next_scan = jiffies + 1958 msecs_to_jiffies(p->numa_scan_period); 1959 1960 return; 1961 } 1962 1963 /* 1964 * Prepare to scale scan period relative to the current period. 1965 * == NUMA_PERIOD_THRESHOLD scan period stays the same 1966 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster) 1967 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower) 1968 */ 1969 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); 1970 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); 1971 ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared); 1972 1973 if (ps_ratio >= NUMA_PERIOD_THRESHOLD) { 1974 /* 1975 * Most memory accesses are local. There is no need to 1976 * do fast NUMA scanning, since memory is already local. 1977 */ 1978 int slot = ps_ratio - NUMA_PERIOD_THRESHOLD; 1979 if (!slot) 1980 slot = 1; 1981 diff = slot * period_slot; 1982 } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) { 1983 /* 1984 * Most memory accesses are shared with other tasks. 1985 * There is no point in continuing fast NUMA scanning, 1986 * since other tasks may just move the memory elsewhere. 
1987 */ 1988 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD; 1989 if (!slot) 1990 slot = 1; 1991 diff = slot * period_slot; 1992 } else { 1993 /* 1994 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS, 1995 * yet they are not on the local NUMA node. Speed up 1996 * NUMA scanning to get the memory moved over. 1997 */ 1998 int ratio = max(lr_ratio, ps_ratio); 1999 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; 2000 } 2001 2002 p->numa_scan_period = clamp(p->numa_scan_period + diff, 2003 task_scan_min(p), task_scan_max(p)); 2004 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2005 } 2006 2007 /* 2008 * Get the fraction of time the task has been running since the last 2009 * NUMA placement cycle. The scheduler keeps similar statistics, but 2010 * decays those on a 32ms period, which is orders of magnitude off 2011 * from the dozens-of-seconds NUMA balancing period. Use the scheduler 2012 * stats only if the task is so new there are no NUMA statistics yet. 2013 */ 2014 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) 2015 { 2016 u64 runtime, delta, now; 2017 /* Use the start of this time slice to avoid calculations. */ 2018 now = p->se.exec_start; 2019 runtime = p->se.sum_exec_runtime; 2020 2021 if (p->last_task_numa_placement) { 2022 delta = runtime - p->last_sum_exec_runtime; 2023 *period = now - p->last_task_numa_placement; 2024 2025 /* Avoid time going backwards, prevent potential divide error: */ 2026 if (unlikely((s64)*period < 0)) 2027 *period = 0; 2028 } else { 2029 delta = p->se.avg.load_sum; 2030 *period = LOAD_AVG_MAX; 2031 } 2032 2033 p->last_sum_exec_runtime = runtime; 2034 p->last_task_numa_placement = now; 2035 2036 return delta; 2037 } 2038 2039 /* 2040 * Determine the preferred nid for a task in a numa_group. This needs to 2041 * be done in a way that produces consistent results with group_weight, 2042 * otherwise workloads might not converge. 2043 */ 2044 static int preferred_group_nid(struct task_struct *p, int nid) 2045 { 2046 nodemask_t nodes; 2047 int dist; 2048 2049 /* Direct connections between all NUMA nodes. */ 2050 if (sched_numa_topology_type == NUMA_DIRECT) 2051 return nid; 2052 2053 /* 2054 * On a system with glueless mesh NUMA topology, group_weight 2055 * scores nodes according to the number of NUMA hinting faults on 2056 * both the node itself, and on nearby nodes. 2057 */ 2058 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 2059 unsigned long score, max_score = 0; 2060 int node, max_node = nid; 2061 2062 dist = sched_max_numa_distance; 2063 2064 for_each_online_node(node) { 2065 score = group_weight(p, node, dist); 2066 if (score > max_score) { 2067 max_score = score; 2068 max_node = node; 2069 } 2070 } 2071 return max_node; 2072 } 2073 2074 /* 2075 * Finding the preferred nid in a system with NUMA backplane 2076 * interconnect topology is more involved. The goal is to locate 2077 * tasks from numa_groups near each other in the system, and 2078 * untangle workloads from different sides of the system. This requires 2079 * searching down the hierarchy of node groups, recursively searching 2080 * inside the highest scoring group of nodes. The nodemask tricks 2081 * keep the complexity of the search down. 2082 */ 2083 nodes = node_online_map; 2084 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { 2085 unsigned long max_faults = 0; 2086 nodemask_t max_group = NODE_MASK_NONE; 2087 int a, b; 2088 2089 /* Are there nodes at this distance from each other? 
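                 * (Sketch of one pass of this loop: the nodes still under
                 * consideration are partitioned into groups of nodes lying within
                 * 'dist' of each other, the group with the most NUMA faults is
                 * kept, and the next iteration searches within that group at a
                 * smaller distance, until a single node (the preferred nid)
                 * remains.)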
*/ 2090 if (!find_numa_distance(dist)) 2091 continue; 2092 2093 for_each_node_mask(a, nodes) { 2094 unsigned long faults = 0; 2095 nodemask_t this_group; 2096 nodes_clear(this_group); 2097 2098 /* Sum group's NUMA faults; includes a==b case. */ 2099 for_each_node_mask(b, nodes) { 2100 if (node_distance(a, b) < dist) { 2101 faults += group_faults(p, b); 2102 node_set(b, this_group); 2103 node_clear(b, nodes); 2104 } 2105 } 2106 2107 /* Remember the top group. */ 2108 if (faults > max_faults) { 2109 max_faults = faults; 2110 max_group = this_group; 2111 /* 2112 * subtle: at the smallest distance there is 2113 * just one node left in each "group", the 2114 * winner is the preferred nid. 2115 */ 2116 nid = a; 2117 } 2118 } 2119 /* Next round, evaluate the nodes within max_group. */ 2120 if (!max_faults) 2121 break; 2122 nodes = max_group; 2123 } 2124 return nid; 2125 } 2126 2127 static void task_numa_placement(struct task_struct *p) 2128 { 2129 int seq, nid, max_nid = NUMA_NO_NODE; 2130 unsigned long max_faults = 0; 2131 unsigned long fault_types[2] = { 0, 0 }; 2132 unsigned long total_faults; 2133 u64 runtime, period; 2134 spinlock_t *group_lock = NULL; 2135 struct numa_group *ng; 2136 2137 /* 2138 * The p->mm->numa_scan_seq field gets updated without 2139 * exclusive access. Use READ_ONCE() here to ensure 2140 * that the field is read in a single access: 2141 */ 2142 seq = READ_ONCE(p->mm->numa_scan_seq); 2143 if (p->numa_scan_seq == seq) 2144 return; 2145 p->numa_scan_seq = seq; 2146 p->numa_scan_period_max = task_scan_max(p); 2147 2148 total_faults = p->numa_faults_locality[0] + 2149 p->numa_faults_locality[1]; 2150 runtime = numa_get_avg_runtime(p, &period); 2151 2152 /* If the task is part of a group prevent parallel updates to group stats */ 2153 ng = deref_curr_numa_group(p); 2154 if (ng) { 2155 group_lock = &ng->lock; 2156 spin_lock_irq(group_lock); 2157 } 2158 2159 /* Find the node with the highest number of faults */ 2160 for_each_online_node(nid) { 2161 /* Keep track of the offsets in numa_faults array */ 2162 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx; 2163 unsigned long faults = 0, group_faults = 0; 2164 int priv; 2165 2166 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { 2167 long diff, f_diff, f_weight; 2168 2169 mem_idx = task_faults_idx(NUMA_MEM, nid, priv); 2170 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv); 2171 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv); 2172 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv); 2173 2174 /* Decay existing window, copy faults since last scan */ 2175 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; 2176 fault_types[priv] += p->numa_faults[membuf_idx]; 2177 p->numa_faults[membuf_idx] = 0; 2178 2179 /* 2180 * Normalize the faults_from, so all tasks in a group 2181 * count according to CPU use, instead of by the raw 2182 * number of faults. Tasks with little runtime have 2183 * little over-all impact on throughput, and thus their 2184 * faults are less important. 
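                         *
                         * For example (illustrative numbers): a task that ran for
                         * 400ms out of a 1000ms placement period gets
                         * f_weight ~= 0.4 in fixed point (<< 16); its buffered
                         * faults-from counts are then scaled by that factor and by
                         * 1/(total_faults + 1) before being folded into the task
                         * and group CPU-fault statistics below.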
2185 */ 2186 f_weight = div64_u64(runtime << 16, period + 1); 2187 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / 2188 (total_faults + 1); 2189 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; 2190 p->numa_faults[cpubuf_idx] = 0; 2191 2192 p->numa_faults[mem_idx] += diff; 2193 p->numa_faults[cpu_idx] += f_diff; 2194 faults += p->numa_faults[mem_idx]; 2195 p->total_numa_faults += diff; 2196 if (ng) { 2197 /* 2198 * safe because we can only change our own group 2199 * 2200 * mem_idx represents the offset for a given 2201 * nid and priv in a specific region because it 2202 * is at the beginning of the numa_faults array. 2203 */ 2204 ng->faults[mem_idx] += diff; 2205 ng->faults_cpu[mem_idx] += f_diff; 2206 ng->total_faults += diff; 2207 group_faults += ng->faults[mem_idx]; 2208 } 2209 } 2210 2211 if (!ng) { 2212 if (faults > max_faults) { 2213 max_faults = faults; 2214 max_nid = nid; 2215 } 2216 } else if (group_faults > max_faults) { 2217 max_faults = group_faults; 2218 max_nid = nid; 2219 } 2220 } 2221 2222 if (ng) { 2223 numa_group_count_active_nodes(ng); 2224 spin_unlock_irq(group_lock); 2225 max_nid = preferred_group_nid(p, max_nid); 2226 } 2227 2228 if (max_faults) { 2229 /* Set the new preferred node */ 2230 if (max_nid != p->numa_preferred_nid) 2231 sched_setnuma(p, max_nid); 2232 } 2233 2234 update_task_scan_period(p, fault_types[0], fault_types[1]); 2235 } 2236 2237 static inline int get_numa_group(struct numa_group *grp) 2238 { 2239 return refcount_inc_not_zero(&grp->refcount); 2240 } 2241 2242 static inline void put_numa_group(struct numa_group *grp) 2243 { 2244 if (refcount_dec_and_test(&grp->refcount)) 2245 kfree_rcu(grp, rcu); 2246 } 2247 2248 static void task_numa_group(struct task_struct *p, int cpupid, int flags, 2249 int *priv) 2250 { 2251 struct numa_group *grp, *my_grp; 2252 struct task_struct *tsk; 2253 bool join = false; 2254 int cpu = cpupid_to_cpu(cpupid); 2255 int i; 2256 2257 if (unlikely(!deref_curr_numa_group(p))) { 2258 unsigned int size = sizeof(struct numa_group) + 2259 4*nr_node_ids*sizeof(unsigned long); 2260 2261 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 2262 if (!grp) 2263 return; 2264 2265 refcount_set(&grp->refcount, 1); 2266 grp->active_nodes = 1; 2267 grp->max_faults_cpu = 0; 2268 spin_lock_init(&grp->lock); 2269 grp->gid = p->pid; 2270 /* Second half of the array tracks nids where faults happen */ 2271 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES * 2272 nr_node_ids; 2273 2274 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2275 grp->faults[i] = p->numa_faults[i]; 2276 2277 grp->total_faults = p->total_numa_faults; 2278 2279 grp->nr_tasks++; 2280 rcu_assign_pointer(p->numa_group, grp); 2281 } 2282 2283 rcu_read_lock(); 2284 tsk = READ_ONCE(cpu_rq(cpu)->curr); 2285 2286 if (!cpupid_match_pid(tsk, cpupid)) 2287 goto no_join; 2288 2289 grp = rcu_dereference(tsk->numa_group); 2290 if (!grp) 2291 goto no_join; 2292 2293 my_grp = deref_curr_numa_group(p); 2294 if (grp == my_grp) 2295 goto no_join; 2296 2297 /* 2298 * Only join the other group if its bigger; if we're the bigger group, 2299 * the other task will join us. 2300 */ 2301 if (my_grp->nr_tasks > grp->nr_tasks) 2302 goto no_join; 2303 2304 /* 2305 * Tie-break on the grp address. 2306 */ 2307 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) 2308 goto no_join; 2309 2310 /* Always join threads in the same process. 
 */
        if (tsk->mm == current->mm)
                join = true;

        /* Simple filter to avoid false positives due to PID collisions */
        if (flags & TNF_SHARED)
                join = true;

        /* Update priv based on whether false sharing was detected */
        *priv = !join;

        if (join && !get_numa_group(grp))
                goto no_join;

        rcu_read_unlock();

        if (!join)
                return;

        BUG_ON(irqs_disabled());
        double_lock_irq(&my_grp->lock, &grp->lock);

        for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
                my_grp->faults[i] -= p->numa_faults[i];
                grp->faults[i] += p->numa_faults[i];
        }
        my_grp->total_faults -= p->total_numa_faults;
        grp->total_faults += p->total_numa_faults;

        my_grp->nr_tasks--;
        grp->nr_tasks++;

        spin_unlock(&my_grp->lock);
        spin_unlock_irq(&grp->lock);

        rcu_assign_pointer(p->numa_group, grp);

        put_numa_group(my_grp);
        return;

no_join:
        rcu_read_unlock();
        return;
}

/*
 * Get rid of NUMA statistics associated with a task (either current or dead).
 * If @final is set, the task is dead and has reached refcount zero, so we can
 * safely free all relevant data structures. Otherwise, there might be
 * concurrent reads from places like load balancing and procfs, and we should
 * reset the data back to default state without freeing ->numa_faults.
 */
void task_numa_free(struct task_struct *p, bool final)
{
        /* safe: p either is current or is being freed by current */
        struct numa_group *grp = rcu_dereference_raw(p->numa_group);
        unsigned long *numa_faults = p->numa_faults;
        unsigned long flags;
        int i;

        if (!numa_faults)
                return;

        if (grp) {
                spin_lock_irqsave(&grp->lock, flags);
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
                        grp->faults[i] -= p->numa_faults[i];
                grp->total_faults -= p->total_numa_faults;

                grp->nr_tasks--;
                spin_unlock_irqrestore(&grp->lock, flags);
                RCU_INIT_POINTER(p->numa_group, NULL);
                put_numa_group(grp);
        }

        if (final) {
                p->numa_faults = NULL;
                kfree(numa_faults);
        } else {
                p->total_numa_faults = 0;
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
                        numa_faults[i] = 0;
        }
}

/*
 * Got a PROT_NONE fault for a page on @node.
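 *
 * (Bookkeeping sketch, as can be seen from the code below: the fault is first
 * accumulated in the NUMA_MEMBUF/NUMA_CPUBUF buffers; task_numa_placement()
 * later decays the long-term NUMA_MEM/NUMA_CPU counters and folds these
 * buffered values in.)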
2397 */ 2398 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) 2399 { 2400 struct task_struct *p = current; 2401 bool migrated = flags & TNF_MIGRATED; 2402 int cpu_node = task_node(current); 2403 int local = !!(flags & TNF_FAULT_LOCAL); 2404 struct numa_group *ng; 2405 int priv; 2406 2407 if (!static_branch_likely(&sched_numa_balancing)) 2408 return; 2409 2410 /* for example, ksmd faulting in a user's mm */ 2411 if (!p->mm) 2412 return; 2413 2414 /* Allocate buffer to track faults on a per-node basis */ 2415 if (unlikely(!p->numa_faults)) { 2416 int size = sizeof(*p->numa_faults) * 2417 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; 2418 2419 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); 2420 if (!p->numa_faults) 2421 return; 2422 2423 p->total_numa_faults = 0; 2424 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2425 } 2426 2427 /* 2428 * First accesses are treated as private, otherwise consider accesses 2429 * to be private if the accessing pid has not changed 2430 */ 2431 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { 2432 priv = 1; 2433 } else { 2434 priv = cpupid_match_pid(p, last_cpupid); 2435 if (!priv && !(flags & TNF_NO_GROUP)) 2436 task_numa_group(p, last_cpupid, flags, &priv); 2437 } 2438 2439 /* 2440 * If a workload spans multiple NUMA nodes, a shared fault that 2441 * occurs wholly within the set of nodes that the workload is 2442 * actively using should be counted as local. This allows the 2443 * scan rate to slow down when a workload has settled down. 2444 */ 2445 ng = deref_curr_numa_group(p); 2446 if (!priv && !local && ng && ng->active_nodes > 1 && 2447 numa_is_active_node(cpu_node, ng) && 2448 numa_is_active_node(mem_node, ng)) 2449 local = 1; 2450 2451 /* 2452 * Retry to migrate task to preferred node periodically, in case it 2453 * previously failed, or the scheduler moved us. 2454 */ 2455 if (time_after(jiffies, p->numa_migrate_retry)) { 2456 task_numa_placement(p); 2457 numa_migrate_preferred(p); 2458 } 2459 2460 if (migrated) 2461 p->numa_pages_migrated += pages; 2462 if (flags & TNF_MIGRATE_FAIL) 2463 p->numa_faults_locality[2] += pages; 2464 2465 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; 2466 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; 2467 p->numa_faults_locality[local] += pages; 2468 } 2469 2470 static void reset_ptenuma_scan(struct task_struct *p) 2471 { 2472 /* 2473 * We only did a read acquisition of the mmap sem, so 2474 * p->mm->numa_scan_seq is written to without exclusive access 2475 * and the update is not guaranteed to be atomic. That's not 2476 * much of an issue though, since this is just used for 2477 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not 2478 * expensive, to avoid any form of compiler optimizations: 2479 */ 2480 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); 2481 p->mm->numa_scan_offset = 0; 2482 } 2483 2484 /* 2485 * The expensive part of numa migration is done from task_work context. 2486 * Triggered from task_tick_numa(). 
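 *
 * Each invocation walks part of the address space, marking up to
 * sysctl_numa_balancing_scan_size MB worth of resident pages prot_numa (and
 * skipping through up to 8x that much virtual space); the next invocation is
 * gated by mm->numa_next_scan and continues from mm->numa_scan_offset.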
2487 */ 2488 static void task_numa_work(struct callback_head *work) 2489 { 2490 unsigned long migrate, next_scan, now = jiffies; 2491 struct task_struct *p = current; 2492 struct mm_struct *mm = p->mm; 2493 u64 runtime = p->se.sum_exec_runtime; 2494 struct vm_area_struct *vma; 2495 unsigned long start, end; 2496 unsigned long nr_pte_updates = 0; 2497 long pages, virtpages; 2498 2499 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); 2500 2501 work->next = work; 2502 /* 2503 * Who cares about NUMA placement when they're dying. 2504 * 2505 * NOTE: make sure not to dereference p->mm before this check, 2506 * exit_task_work() happens _after_ exit_mm() so we could be called 2507 * without p->mm even though we still had it when we enqueued this 2508 * work. 2509 */ 2510 if (p->flags & PF_EXITING) 2511 return; 2512 2513 if (!mm->numa_next_scan) { 2514 mm->numa_next_scan = now + 2515 msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2516 } 2517 2518 /* 2519 * Enforce maximal scan/migration frequency.. 2520 */ 2521 migrate = mm->numa_next_scan; 2522 if (time_before(now, migrate)) 2523 return; 2524 2525 if (p->numa_scan_period == 0) { 2526 p->numa_scan_period_max = task_scan_max(p); 2527 p->numa_scan_period = task_scan_start(p); 2528 } 2529 2530 next_scan = now + msecs_to_jiffies(p->numa_scan_period); 2531 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) 2532 return; 2533 2534 /* 2535 * Delay this task enough that another task of this mm will likely win 2536 * the next time around. 2537 */ 2538 p->node_stamp += 2 * TICK_NSEC; 2539 2540 start = mm->numa_scan_offset; 2541 pages = sysctl_numa_balancing_scan_size; 2542 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ 2543 virtpages = pages * 8; /* Scan up to this much virtual space */ 2544 if (!pages) 2545 return; 2546 2547 2548 if (!down_read_trylock(&mm->mmap_sem)) 2549 return; 2550 vma = find_vma(mm, start); 2551 if (!vma) { 2552 reset_ptenuma_scan(p); 2553 start = 0; 2554 vma = mm->mmap; 2555 } 2556 for (; vma; vma = vma->vm_next) { 2557 if (!vma_migratable(vma) || !vma_policy_mof(vma) || 2558 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { 2559 continue; 2560 } 2561 2562 /* 2563 * Shared library pages mapped by multiple processes are not 2564 * migrated as it is expected they are cache replicated. Avoid 2565 * hinting faults in read-only file-backed mappings or the vdso 2566 * as migrating the pages will be of marginal benefit. 2567 */ 2568 if (!vma->vm_mm || 2569 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 2570 continue; 2571 2572 /* 2573 * Skip inaccessible VMAs to avoid any confusion between 2574 * PROT_NONE and NUMA hinting ptes 2575 */ 2576 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) 2577 continue; 2578 2579 do { 2580 start = max(start, vma->vm_start); 2581 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 2582 end = min(end, vma->vm_end); 2583 nr_pte_updates = change_prot_numa(vma, start, end); 2584 2585 /* 2586 * Try to scan sysctl_numa_balancing_size worth of 2587 * hpages that have at least one present PTE that 2588 * is not already pte-numa. If the VMA contains 2589 * areas that are unused or already full of prot_numa 2590 * PTEs, scan up to virtpages, to skip through those 2591 * areas faster. 
2592 */ 2593 if (nr_pte_updates) 2594 pages -= (end - start) >> PAGE_SHIFT; 2595 virtpages -= (end - start) >> PAGE_SHIFT; 2596 2597 start = end; 2598 if (pages <= 0 || virtpages <= 0) 2599 goto out; 2600 2601 cond_resched(); 2602 } while (end != vma->vm_end); 2603 } 2604 2605 out: 2606 /* 2607 * It is possible to reach the end of the VMA list but the last few 2608 * VMAs are not guaranteed to the vma_migratable. If they are not, we 2609 * would find the !migratable VMA on the next scan but not reset the 2610 * scanner to the start so check it now. 2611 */ 2612 if (vma) 2613 mm->numa_scan_offset = start; 2614 else 2615 reset_ptenuma_scan(p); 2616 up_read(&mm->mmap_sem); 2617 2618 /* 2619 * Make sure tasks use at least 32x as much time to run other code 2620 * than they used here, to limit NUMA PTE scanning overhead to 3% max. 2621 * Usually update_task_scan_period slows down scanning enough; on an 2622 * overloaded system we need to limit overhead on a per task basis. 2623 */ 2624 if (unlikely(p->se.sum_exec_runtime != runtime)) { 2625 u64 diff = p->se.sum_exec_runtime - runtime; 2626 p->node_stamp += 32 * diff; 2627 } 2628 } 2629 2630 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 2631 { 2632 int mm_users = 0; 2633 struct mm_struct *mm = p->mm; 2634 2635 if (mm) { 2636 mm_users = atomic_read(&mm->mm_users); 2637 if (mm_users == 1) { 2638 mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2639 mm->numa_scan_seq = 0; 2640 } 2641 } 2642 p->node_stamp = 0; 2643 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; 2644 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 2645 /* Protect against double add, see task_tick_numa and task_numa_work */ 2646 p->numa_work.next = &p->numa_work; 2647 p->numa_faults = NULL; 2648 RCU_INIT_POINTER(p->numa_group, NULL); 2649 p->last_task_numa_placement = 0; 2650 p->last_sum_exec_runtime = 0; 2651 2652 init_task_work(&p->numa_work, task_numa_work); 2653 2654 /* New address space, reset the preferred nid */ 2655 if (!(clone_flags & CLONE_VM)) { 2656 p->numa_preferred_nid = NUMA_NO_NODE; 2657 return; 2658 } 2659 2660 /* 2661 * New thread, keep existing numa_preferred_nid which should be copied 2662 * already by arch_dup_task_struct but stagger when scans start. 2663 */ 2664 if (mm) { 2665 unsigned int delay; 2666 2667 delay = min_t(unsigned int, task_scan_max(current), 2668 current->numa_scan_period * mm_users * NSEC_PER_MSEC); 2669 delay += 2 * TICK_NSEC; 2670 p->node_stamp = delay; 2671 } 2672 } 2673 2674 /* 2675 * Drive the periodic memory faults.. 2676 */ 2677 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2678 { 2679 struct callback_head *work = &curr->numa_work; 2680 u64 period, now; 2681 2682 /* 2683 * We don't care about NUMA placement if we don't have memory. 2684 */ 2685 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) 2686 return; 2687 2688 /* 2689 * Using runtime rather than walltime has the dual advantage that 2690 * we (mostly) drive the selection from busy threads and that the 2691 * task needs to have done some actual work before we bother with 2692 * NUMA placement. 
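         *
         * (E.g. with a 1000ms numa_scan_period the task must accumulate a full
         * second of CPU time, not merely exist for a second of wall time,
         * before the next scan work is queued.)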
2693 */ 2694 now = curr->se.sum_exec_runtime; 2695 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; 2696 2697 if (now > curr->node_stamp + period) { 2698 if (!curr->node_stamp) 2699 curr->numa_scan_period = task_scan_start(curr); 2700 curr->node_stamp += period; 2701 2702 if (!time_before(jiffies, curr->mm->numa_next_scan)) 2703 task_work_add(curr, work, true); 2704 } 2705 } 2706 2707 static void update_scan_period(struct task_struct *p, int new_cpu) 2708 { 2709 int src_nid = cpu_to_node(task_cpu(p)); 2710 int dst_nid = cpu_to_node(new_cpu); 2711 2712 if (!static_branch_likely(&sched_numa_balancing)) 2713 return; 2714 2715 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) 2716 return; 2717 2718 if (src_nid == dst_nid) 2719 return; 2720 2721 /* 2722 * Allow resets if faults have been trapped before one scan 2723 * has completed. This is most likely due to a new task that 2724 * is pulled cross-node due to wakeups or load balancing. 2725 */ 2726 if (p->numa_scan_seq) { 2727 /* 2728 * Avoid scan adjustments if moving to the preferred 2729 * node or if the task was not previously running on 2730 * the preferred node. 2731 */ 2732 if (dst_nid == p->numa_preferred_nid || 2733 (p->numa_preferred_nid != NUMA_NO_NODE && 2734 src_nid != p->numa_preferred_nid)) 2735 return; 2736 } 2737 2738 p->numa_scan_period = task_scan_start(p); 2739 } 2740 2741 #else 2742 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2743 { 2744 } 2745 2746 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) 2747 { 2748 } 2749 2750 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) 2751 { 2752 } 2753 2754 static inline void update_scan_period(struct task_struct *p, int new_cpu) 2755 { 2756 } 2757 2758 #endif /* CONFIG_NUMA_BALANCING */ 2759 2760 static void 2761 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2762 { 2763 update_load_add(&cfs_rq->load, se->load.weight); 2764 #ifdef CONFIG_SMP 2765 if (entity_is_task(se)) { 2766 struct rq *rq = rq_of(cfs_rq); 2767 2768 account_numa_enqueue(rq, task_of(se)); 2769 list_add(&se->group_node, &rq->cfs_tasks); 2770 } 2771 #endif 2772 cfs_rq->nr_running++; 2773 } 2774 2775 static void 2776 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2777 { 2778 update_load_sub(&cfs_rq->load, se->load.weight); 2779 #ifdef CONFIG_SMP 2780 if (entity_is_task(se)) { 2781 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); 2782 list_del_init(&se->group_node); 2783 } 2784 #endif 2785 cfs_rq->nr_running--; 2786 } 2787 2788 /* 2789 * Signed add and clamp on underflow. 2790 * 2791 * Explicitly do a load-store to ensure the intermediate value never hits 2792 * memory. This allows lockless observations without ever seeing the negative 2793 * values. 2794 */ 2795 #define add_positive(_ptr, _val) do { \ 2796 typeof(_ptr) ptr = (_ptr); \ 2797 typeof(_val) val = (_val); \ 2798 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 2799 \ 2800 res = var + val; \ 2801 \ 2802 if (val < 0 && res > var) \ 2803 res = 0; \ 2804 \ 2805 WRITE_ONCE(*ptr, res); \ 2806 } while (0) 2807 2808 /* 2809 * Unsigned subtract and clamp on underflow. 2810 * 2811 * Explicitly do a load-store to ensure the intermediate value never hits 2812 * memory. This allows lockless observations without ever seeing the negative 2813 * values. 
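 *
 * For example, sub_positive(&cfs_rq->avg.load_avg, delta) computes the
 * clamped result in a local variable and only then publishes it with
 * WRITE_ONCE(), so a lockless reader never observes the huge transient
 * value an unsigned underflow would otherwise produce.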
2814 */ 2815 #define sub_positive(_ptr, _val) do { \ 2816 typeof(_ptr) ptr = (_ptr); \ 2817 typeof(*ptr) val = (_val); \ 2818 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 2819 res = var - val; \ 2820 if (res > var) \ 2821 res = 0; \ 2822 WRITE_ONCE(*ptr, res); \ 2823 } while (0) 2824 2825 /* 2826 * Remove and clamp on negative, from a local variable. 2827 * 2828 * A variant of sub_positive(), which does not use explicit load-store 2829 * and is thus optimized for local variable updates. 2830 */ 2831 #define lsub_positive(_ptr, _val) do { \ 2832 typeof(_ptr) ptr = (_ptr); \ 2833 *ptr -= min_t(typeof(*ptr), *ptr, _val); \ 2834 } while (0) 2835 2836 #ifdef CONFIG_SMP 2837 static inline void 2838 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 2839 { 2840 cfs_rq->runnable_weight += se->runnable_weight; 2841 2842 cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg; 2843 cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum; 2844 } 2845 2846 static inline void 2847 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 2848 { 2849 cfs_rq->runnable_weight -= se->runnable_weight; 2850 2851 sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg); 2852 sub_positive(&cfs_rq->avg.runnable_load_sum, 2853 se_runnable(se) * se->avg.runnable_load_sum); 2854 } 2855 2856 static inline void 2857 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 2858 { 2859 cfs_rq->avg.load_avg += se->avg.load_avg; 2860 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; 2861 } 2862 2863 static inline void 2864 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 2865 { 2866 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); 2867 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); 2868 } 2869 #else 2870 static inline void 2871 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 2872 static inline void 2873 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 2874 static inline void 2875 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 2876 static inline void 2877 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 2878 #endif 2879 2880 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, 2881 unsigned long weight, unsigned long runnable) 2882 { 2883 if (se->on_rq) { 2884 /* commit outstanding execution time */ 2885 if (cfs_rq->curr == se) 2886 update_curr(cfs_rq); 2887 account_entity_dequeue(cfs_rq, se); 2888 dequeue_runnable_load_avg(cfs_rq, se); 2889 } 2890 dequeue_load_avg(cfs_rq, se); 2891 2892 se->runnable_weight = runnable; 2893 update_load_set(&se->load, weight); 2894 2895 #ifdef CONFIG_SMP 2896 do { 2897 u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib; 2898 2899 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); 2900 se->avg.runnable_load_avg = 2901 div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider); 2902 } while (0); 2903 #endif 2904 2905 enqueue_load_avg(cfs_rq, se); 2906 if (se->on_rq) { 2907 account_entity_enqueue(cfs_rq, se); 2908 enqueue_runnable_load_avg(cfs_rq, se); 2909 } 2910 } 2911 2912 void reweight_task(struct task_struct *p, int prio) 2913 { 2914 struct sched_entity *se = &p->se; 2915 struct cfs_rq *cfs_rq = cfs_rq_of(se); 2916 struct load_weight *load = &se->load; 2917 unsigned long weight = scale_load(sched_prio_to_weight[prio]); 2918 2919 reweight_entity(cfs_rq, se, weight, weight); 2920 load->inv_weight = 
sched_prio_to_wmult[prio]; 2921 } 2922 2923 #ifdef CONFIG_FAIR_GROUP_SCHED 2924 #ifdef CONFIG_SMP 2925 /* 2926 * All this does is approximate the hierarchical proportion which includes that 2927 * global sum we all love to hate. 2928 * 2929 * That is, the weight of a group entity, is the proportional share of the 2930 * group weight based on the group runqueue weights. That is: 2931 * 2932 * tg->weight * grq->load.weight 2933 * ge->load.weight = ----------------------------- (1) 2934 * \Sum grq->load.weight 2935 * 2936 * Now, because computing that sum is prohibitively expensive to compute (been 2937 * there, done that) we approximate it with this average stuff. The average 2938 * moves slower and therefore the approximation is cheaper and more stable. 2939 * 2940 * So instead of the above, we substitute: 2941 * 2942 * grq->load.weight -> grq->avg.load_avg (2) 2943 * 2944 * which yields the following: 2945 * 2946 * tg->weight * grq->avg.load_avg 2947 * ge->load.weight = ------------------------------ (3) 2948 * tg->load_avg 2949 * 2950 * Where: tg->load_avg ~= \Sum grq->avg.load_avg 2951 * 2952 * That is shares_avg, and it is right (given the approximation (2)). 2953 * 2954 * The problem with it is that because the average is slow -- it was designed 2955 * to be exactly that of course -- this leads to transients in boundary 2956 * conditions. In specific, the case where the group was idle and we start the 2957 * one task. It takes time for our CPU's grq->avg.load_avg to build up, 2958 * yielding bad latency etc.. 2959 * 2960 * Now, in that special case (1) reduces to: 2961 * 2962 * tg->weight * grq->load.weight 2963 * ge->load.weight = ----------------------------- = tg->weight (4) 2964 * grp->load.weight 2965 * 2966 * That is, the sum collapses because all other CPUs are idle; the UP scenario. 2967 * 2968 * So what we do is modify our approximation (3) to approach (4) in the (near) 2969 * UP case, like: 2970 * 2971 * ge->load.weight = 2972 * 2973 * tg->weight * grq->load.weight 2974 * --------------------------------------------------- (5) 2975 * tg->load_avg - grq->avg.load_avg + grq->load.weight 2976 * 2977 * But because grq->load.weight can drop to 0, resulting in a divide by zero, 2978 * we need to use grq->avg.load_avg as its lower bound, which then gives: 2979 * 2980 * 2981 * tg->weight * grq->load.weight 2982 * ge->load.weight = ----------------------------- (6) 2983 * tg_load_avg' 2984 * 2985 * Where: 2986 * 2987 * tg_load_avg' = tg->load_avg - grq->avg.load_avg + 2988 * max(grq->load.weight, grq->avg.load_avg) 2989 * 2990 * And that is shares_weight and is icky. In the (near) UP case it approaches 2991 * (4) while in the normal case it approaches (3). It consistently 2992 * overestimates the ge->load.weight and therefore: 2993 * 2994 * \Sum ge->load.weight >= tg->weight 2995 * 2996 * hence icky! 2997 */ 2998 static long calc_group_shares(struct cfs_rq *cfs_rq) 2999 { 3000 long tg_weight, tg_shares, load, shares; 3001 struct task_group *tg = cfs_rq->tg; 3002 3003 tg_shares = READ_ONCE(tg->shares); 3004 3005 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); 3006 3007 tg_weight = atomic_long_read(&tg->load_avg); 3008 3009 /* Ensure tg_weight >= load */ 3010 tg_weight -= cfs_rq->tg_load_avg_contrib; 3011 tg_weight += load; 3012 3013 shares = (tg_shares * load); 3014 if (tg_weight) 3015 shares /= tg_weight; 3016 3017 /* 3018 * MIN_SHARES has to be unscaled here to support per-CPU partitioning 3019 * of a group with small tg->shares value. 
It is a floor value which is 3020 * assigned as a minimum load.weight to the sched_entity representing 3021 * the group on a CPU. 3022 * 3023 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 3024 * on an 8-core system with 8 tasks each runnable on one CPU shares has 3025 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In 3026 * case no task is runnable on a CPU MIN_SHARES=2 should be returned 3027 * instead of 0. 3028 */ 3029 return clamp_t(long, shares, MIN_SHARES, tg_shares); 3030 } 3031 3032 /* 3033 * This calculates the effective runnable weight for a group entity based on 3034 * the group entity weight calculated above. 3035 * 3036 * Because of the above approximation (2), our group entity weight is 3037 * an load_avg based ratio (3). This means that it includes blocked load and 3038 * does not represent the runnable weight. 3039 * 3040 * Approximate the group entity's runnable weight per ratio from the group 3041 * runqueue: 3042 * 3043 * grq->avg.runnable_load_avg 3044 * ge->runnable_weight = ge->load.weight * -------------------------- (7) 3045 * grq->avg.load_avg 3046 * 3047 * However, analogous to above, since the avg numbers are slow, this leads to 3048 * transients in the from-idle case. Instead we use: 3049 * 3050 * ge->runnable_weight = ge->load.weight * 3051 * 3052 * max(grq->avg.runnable_load_avg, grq->runnable_weight) 3053 * ----------------------------------------------------- (8) 3054 * max(grq->avg.load_avg, grq->load.weight) 3055 * 3056 * Where these max() serve both to use the 'instant' values to fix the slow 3057 * from-idle and avoid the /0 on to-idle, similar to (6). 3058 */ 3059 static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares) 3060 { 3061 long runnable, load_avg; 3062 3063 load_avg = max(cfs_rq->avg.load_avg, 3064 scale_load_down(cfs_rq->load.weight)); 3065 3066 runnable = max(cfs_rq->avg.runnable_load_avg, 3067 scale_load_down(cfs_rq->runnable_weight)); 3068 3069 runnable *= shares; 3070 if (load_avg) 3071 runnable /= load_avg; 3072 3073 return clamp_t(long, runnable, MIN_SHARES, shares); 3074 } 3075 #endif /* CONFIG_SMP */ 3076 3077 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); 3078 3079 /* 3080 * Recomputes the group entity based on the current state of its group 3081 * runqueue. 3082 */ 3083 static void update_cfs_group(struct sched_entity *se) 3084 { 3085 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3086 long shares, runnable; 3087 3088 if (!gcfs_rq) 3089 return; 3090 3091 if (throttled_hierarchy(gcfs_rq)) 3092 return; 3093 3094 #ifndef CONFIG_SMP 3095 runnable = shares = READ_ONCE(gcfs_rq->tg->shares); 3096 3097 if (likely(se->load.weight == shares)) 3098 return; 3099 #else 3100 shares = calc_group_shares(gcfs_rq); 3101 runnable = calc_group_runnable(gcfs_rq, shares); 3102 #endif 3103 3104 reweight_entity(cfs_rq_of(se), se, shares, runnable); 3105 } 3106 3107 #else /* CONFIG_FAIR_GROUP_SCHED */ 3108 static inline void update_cfs_group(struct sched_entity *se) 3109 { 3110 } 3111 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3112 3113 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) 3114 { 3115 struct rq *rq = rq_of(cfs_rq); 3116 3117 if (&rq->cfs == cfs_rq) { 3118 /* 3119 * There are a few boundary cases this might miss but it should 3120 * get called often enough that that should (hopefully) not be 3121 * a real problem. 
3122 * 3123 * It will not get called when we go idle, because the idle 3124 * thread is a different class (!fair), nor will the utilization 3125 * number include things like RT tasks. 3126 * 3127 * As is, the util number is not freq-invariant (we'd have to 3128 * implement arch_scale_freq_capacity() for that). 3129 * 3130 * See cpu_util(). 3131 */ 3132 cpufreq_update_util(rq, flags); 3133 } 3134 } 3135 3136 #ifdef CONFIG_SMP 3137 #ifdef CONFIG_FAIR_GROUP_SCHED 3138 /** 3139 * update_tg_load_avg - update the tg's load avg 3140 * @cfs_rq: the cfs_rq whose avg changed 3141 * @force: update regardless of how small the difference 3142 * 3143 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. 3144 * However, because tg->load_avg is a global value there are performance 3145 * considerations. 3146 * 3147 * In order to avoid having to look at the other cfs_rq's, we use a 3148 * differential update where we store the last value we propagated. This in 3149 * turn allows skipping updates if the differential is 'small'. 3150 * 3151 * Updating tg's load_avg is necessary before update_cfs_share(). 3152 */ 3153 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) 3154 { 3155 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; 3156 3157 /* 3158 * No need to update load_avg for root_task_group as it is not used. 3159 */ 3160 if (cfs_rq->tg == &root_task_group) 3161 return; 3162 3163 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { 3164 atomic_long_add(delta, &cfs_rq->tg->load_avg); 3165 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; 3166 } 3167 } 3168 3169 /* 3170 * Called within set_task_rq() right before setting a task's CPU. The 3171 * caller only guarantees p->pi_lock is held; no other assumptions, 3172 * including the state of rq->lock, should be made. 3173 */ 3174 void set_task_rq_fair(struct sched_entity *se, 3175 struct cfs_rq *prev, struct cfs_rq *next) 3176 { 3177 u64 p_last_update_time; 3178 u64 n_last_update_time; 3179 3180 if (!sched_feat(ATTACH_AGE_LOAD)) 3181 return; 3182 3183 /* 3184 * We are supposed to update the task to "current" time, then its up to 3185 * date and ready to go to new CPU/cfs_rq. But we have difficulty in 3186 * getting what current time is, so simply throw away the out-of-date 3187 * time. This will result in the wakee task is less decayed, but giving 3188 * the wakee more load sounds not bad. 3189 */ 3190 if (!(se->avg.last_update_time && prev)) 3191 return; 3192 3193 #ifndef CONFIG_64BIT 3194 { 3195 u64 p_last_update_time_copy; 3196 u64 n_last_update_time_copy; 3197 3198 do { 3199 p_last_update_time_copy = prev->load_last_update_time_copy; 3200 n_last_update_time_copy = next->load_last_update_time_copy; 3201 3202 smp_rmb(); 3203 3204 p_last_update_time = prev->avg.last_update_time; 3205 n_last_update_time = next->avg.last_update_time; 3206 3207 } while (p_last_update_time != p_last_update_time_copy || 3208 n_last_update_time != n_last_update_time_copy); 3209 } 3210 #else 3211 p_last_update_time = prev->avg.last_update_time; 3212 n_last_update_time = next->avg.last_update_time; 3213 #endif 3214 __update_load_avg_blocked_se(p_last_update_time, se); 3215 se->avg.last_update_time = n_last_update_time; 3216 } 3217 3218 3219 /* 3220 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to 3221 * propagate its contribution. 
The key to this propagation is the invariant 3222 * that for each group: 3223 * 3224 * ge->avg == grq->avg (1) 3225 * 3226 * _IFF_ we look at the pure running and runnable sums. Because they 3227 * represent the very same entity, just at different points in the hierarchy. 3228 * 3229 * Per the above update_tg_cfs_util() is trivial and simply copies the running 3230 * sum over (but still wrong, because the group entity and group rq do not have 3231 * their PELT windows aligned). 3232 * 3233 * However, update_tg_cfs_runnable() is more complex. So we have: 3234 * 3235 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2) 3236 * 3237 * And since, like util, the runnable part should be directly transferable, 3238 * the following would _appear_ to be the straight forward approach: 3239 * 3240 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3) 3241 * 3242 * And per (1) we have: 3243 * 3244 * ge->avg.runnable_avg == grq->avg.runnable_avg 3245 * 3246 * Which gives: 3247 * 3248 * ge->load.weight * grq->avg.load_avg 3249 * ge->avg.load_avg = ----------------------------------- (4) 3250 * grq->load.weight 3251 * 3252 * Except that is wrong! 3253 * 3254 * Because while for entities historical weight is not important and we 3255 * really only care about our future and therefore can consider a pure 3256 * runnable sum, runqueues can NOT do this. 3257 * 3258 * We specifically want runqueues to have a load_avg that includes 3259 * historical weights. Those represent the blocked load, the load we expect 3260 * to (shortly) return to us. This only works by keeping the weights as 3261 * integral part of the sum. We therefore cannot decompose as per (3). 3262 * 3263 * Another reason this doesn't work is that runnable isn't a 0-sum entity. 3264 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the 3265 * rq itself is runnable anywhere between 2/3 and 1 depending on how the 3266 * runnable section of these tasks overlap (or not). If they were to perfectly 3267 * align the rq as a whole would be runnable 2/3 of the time. If however we 3268 * always have at least 1 runnable task, the rq as a whole is always runnable. 3269 * 3270 * So we'll have to approximate.. :/ 3271 * 3272 * Given the constraint: 3273 * 3274 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX 3275 * 3276 * We can construct a rule that adds runnable to a rq by assuming minimal 3277 * overlap. 3278 * 3279 * On removal, we'll assume each task is equally runnable; which yields: 3280 * 3281 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight 3282 * 3283 * XXX: only do this for the part of runnable > running ? 3284 * 3285 */ 3286 3287 static inline void 3288 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3289 { 3290 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; 3291 3292 /* Nothing to update */ 3293 if (!delta) 3294 return; 3295 3296 /* 3297 * The relation between sum and avg is: 3298 * 3299 * LOAD_AVG_MAX - 1024 + sa->period_contrib 3300 * 3301 * however, the PELT windows are not aligned between grq and gse. 
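         *
         * (Hence the code below settles for the cheaper approximation
         * util_sum = util_avg * LOAD_AVG_MAX rather than rescaling by the
         * exact divider.)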
3302 */ 3303 3304 /* Set new sched_entity's utilization */ 3305 se->avg.util_avg = gcfs_rq->avg.util_avg; 3306 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX; 3307 3308 /* Update parent cfs_rq utilization */ 3309 add_positive(&cfs_rq->avg.util_avg, delta); 3310 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX; 3311 } 3312 3313 static inline void 3314 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3315 { 3316 long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; 3317 unsigned long runnable_load_avg, load_avg; 3318 u64 runnable_load_sum, load_sum = 0; 3319 s64 delta_sum; 3320 3321 if (!runnable_sum) 3322 return; 3323 3324 gcfs_rq->prop_runnable_sum = 0; 3325 3326 if (runnable_sum >= 0) { 3327 /* 3328 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until 3329 * the CPU is saturated running == runnable. 3330 */ 3331 runnable_sum += se->avg.load_sum; 3332 runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX); 3333 } else { 3334 /* 3335 * Estimate the new unweighted runnable_sum of the gcfs_rq by 3336 * assuming all tasks are equally runnable. 3337 */ 3338 if (scale_load_down(gcfs_rq->load.weight)) { 3339 load_sum = div_s64(gcfs_rq->avg.load_sum, 3340 scale_load_down(gcfs_rq->load.weight)); 3341 } 3342 3343 /* But make sure to not inflate se's runnable */ 3344 runnable_sum = min(se->avg.load_sum, load_sum); 3345 } 3346 3347 /* 3348 * runnable_sum can't be lower than running_sum 3349 * Rescale running sum to be in the same range as runnable sum 3350 * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT] 3351 * runnable_sum is in [0 : LOAD_AVG_MAX] 3352 */ 3353 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; 3354 runnable_sum = max(runnable_sum, running_sum); 3355 3356 load_sum = (s64)se_weight(se) * runnable_sum; 3357 load_avg = div_s64(load_sum, LOAD_AVG_MAX); 3358 3359 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; 3360 delta_avg = load_avg - se->avg.load_avg; 3361 3362 se->avg.load_sum = runnable_sum; 3363 se->avg.load_avg = load_avg; 3364 add_positive(&cfs_rq->avg.load_avg, delta_avg); 3365 add_positive(&cfs_rq->avg.load_sum, delta_sum); 3366 3367 runnable_load_sum = (s64)se_runnable(se) * runnable_sum; 3368 runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX); 3369 3370 if (se->on_rq) { 3371 delta_sum = runnable_load_sum - 3372 se_weight(se) * se->avg.runnable_load_sum; 3373 delta_avg = runnable_load_avg - se->avg.runnable_load_avg; 3374 add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg); 3375 add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum); 3376 } 3377 3378 se->avg.runnable_load_sum = runnable_sum; 3379 se->avg.runnable_load_avg = runnable_load_avg; 3380 } 3381 3382 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) 3383 { 3384 cfs_rq->propagate = 1; 3385 cfs_rq->prop_runnable_sum += runnable_sum; 3386 } 3387 3388 /* Update task and its cfs_rq load average */ 3389 static inline int propagate_entity_load_avg(struct sched_entity *se) 3390 { 3391 struct cfs_rq *cfs_rq, *gcfs_rq; 3392 3393 if (entity_is_task(se)) 3394 return 0; 3395 3396 gcfs_rq = group_cfs_rq(se); 3397 if (!gcfs_rq->propagate) 3398 return 0; 3399 3400 gcfs_rq->propagate = 0; 3401 3402 cfs_rq = cfs_rq_of(se); 3403 3404 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); 3405 3406 update_tg_cfs_util(cfs_rq, se, gcfs_rq); 3407 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); 3408 3409 trace_pelt_cfs_tp(cfs_rq); 3410 trace_pelt_se_tp(se); 3411 3412 return 1; 3413 } 
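
/*
 * Worked example of the propagation above (illustrative numbers only): when
 * a task with load_sum 10000 is attached to a group runqueue grq,
 * add_tg_cfs_propagate() records prop_runnable_sum = 10000 there. The next
 * update_load_avg() on the group entity calls propagate_entity_load_avg(),
 * which copies grq's util_avg into the entity, grows the entity's load_sum
 * by up to 10000 (clipped at LOAD_AVG_MAX) and adds the resulting weighted
 * deltas to the parent cfs_rq, making the change visible one level further
 * up the hierarchy.
 */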
3414 3415 /* 3416 * Check if we need to update the load and the utilization of a blocked 3417 * group_entity: 3418 */ 3419 static inline bool skip_blocked_update(struct sched_entity *se) 3420 { 3421 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3422 3423 /* 3424 * If sched_entity still have not zero load or utilization, we have to 3425 * decay it: 3426 */ 3427 if (se->avg.load_avg || se->avg.util_avg) 3428 return false; 3429 3430 /* 3431 * If there is a pending propagation, we have to update the load and 3432 * the utilization of the sched_entity: 3433 */ 3434 if (gcfs_rq->propagate) 3435 return false; 3436 3437 /* 3438 * Otherwise, the load and the utilization of the sched_entity is 3439 * already zero and there is no pending propagation, so it will be a 3440 * waste of time to try to decay it: 3441 */ 3442 return true; 3443 } 3444 3445 #else /* CONFIG_FAIR_GROUP_SCHED */ 3446 3447 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} 3448 3449 static inline int propagate_entity_load_avg(struct sched_entity *se) 3450 { 3451 return 0; 3452 } 3453 3454 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} 3455 3456 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3457 3458 /** 3459 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages 3460 * @now: current time, as per cfs_rq_clock_pelt() 3461 * @cfs_rq: cfs_rq to update 3462 * 3463 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) 3464 * avg. The immediate corollary is that all (fair) tasks must be attached, see 3465 * post_init_entity_util_avg(). 3466 * 3467 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example. 3468 * 3469 * Returns true if the load decayed or we removed load. 3470 * 3471 * Since both these conditions indicate a changed cfs_rq->avg.load we should 3472 * call update_tg_load_avg() when this function returns true. 3473 */ 3474 static inline int 3475 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 3476 { 3477 unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0; 3478 struct sched_avg *sa = &cfs_rq->avg; 3479 int decayed = 0; 3480 3481 if (cfs_rq->removed.nr) { 3482 unsigned long r; 3483 u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib; 3484 3485 raw_spin_lock(&cfs_rq->removed.lock); 3486 swap(cfs_rq->removed.util_avg, removed_util); 3487 swap(cfs_rq->removed.load_avg, removed_load); 3488 swap(cfs_rq->removed.runnable_sum, removed_runnable_sum); 3489 cfs_rq->removed.nr = 0; 3490 raw_spin_unlock(&cfs_rq->removed.lock); 3491 3492 r = removed_load; 3493 sub_positive(&sa->load_avg, r); 3494 sub_positive(&sa->load_sum, r * divider); 3495 3496 r = removed_util; 3497 sub_positive(&sa->util_avg, r); 3498 sub_positive(&sa->util_sum, r * divider); 3499 3500 add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum); 3501 3502 decayed = 1; 3503 } 3504 3505 decayed |= __update_load_avg_cfs_rq(now, cfs_rq); 3506 3507 #ifndef CONFIG_64BIT 3508 smp_wmb(); 3509 cfs_rq->load_last_update_time_copy = sa->last_update_time; 3510 #endif 3511 3512 return decayed; 3513 } 3514 3515 /** 3516 * attach_entity_load_avg - attach this entity to its cfs_rq load avg 3517 * @cfs_rq: cfs_rq to attach to 3518 * @se: sched_entity to attach 3519 * 3520 * Must call update_cfs_rq_load_avg() before this, since we rely on 3521 * cfs_rq->avg.last_update_time being current. 
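 *
 * (Call-path sketch, per the DO_ATTACH handling further below:
 * enqueue_entity() -> update_load_avg(..., DO_ATTACH) notices a zeroed
 * last_update_time and calls this function to fold the entity's util/load
 * into the cfs_rq sums and to queue a propagation up the task_group
 * hierarchy via add_tg_cfs_propagate().)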
3522 */ 3523 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3524 { 3525 u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib; 3526 3527 /* 3528 * When we attach the @se to the @cfs_rq, we must align the decay 3529 * window because without that, really weird and wonderful things can 3530 * happen. 3531 * 3532 * XXX illustrate 3533 */ 3534 se->avg.last_update_time = cfs_rq->avg.last_update_time; 3535 se->avg.period_contrib = cfs_rq->avg.period_contrib; 3536 3537 /* 3538 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new 3539 * period_contrib. This isn't strictly correct, but since we're 3540 * entirely outside of the PELT hierarchy, nobody cares if we truncate 3541 * _sum a little. 3542 */ 3543 se->avg.util_sum = se->avg.util_avg * divider; 3544 3545 se->avg.load_sum = divider; 3546 if (se_weight(se)) { 3547 se->avg.load_sum = 3548 div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); 3549 } 3550 3551 se->avg.runnable_load_sum = se->avg.load_sum; 3552 3553 enqueue_load_avg(cfs_rq, se); 3554 cfs_rq->avg.util_avg += se->avg.util_avg; 3555 cfs_rq->avg.util_sum += se->avg.util_sum; 3556 3557 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); 3558 3559 cfs_rq_util_change(cfs_rq, 0); 3560 3561 trace_pelt_cfs_tp(cfs_rq); 3562 } 3563 3564 /** 3565 * detach_entity_load_avg - detach this entity from its cfs_rq load avg 3566 * @cfs_rq: cfs_rq to detach from 3567 * @se: sched_entity to detach 3568 * 3569 * Must call update_cfs_rq_load_avg() before this, since we rely on 3570 * cfs_rq->avg.last_update_time being current. 3571 */ 3572 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3573 { 3574 dequeue_load_avg(cfs_rq, se); 3575 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); 3576 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); 3577 3578 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); 3579 3580 cfs_rq_util_change(cfs_rq, 0); 3581 3582 trace_pelt_cfs_tp(cfs_rq); 3583 } 3584 3585 /* 3586 * Optional action to be done while updating the load average 3587 */ 3588 #define UPDATE_TG 0x1 3589 #define SKIP_AGE_LOAD 0x2 3590 #define DO_ATTACH 0x4 3591 3592 /* Update task and its cfs_rq load average */ 3593 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3594 { 3595 u64 now = cfs_rq_clock_pelt(cfs_rq); 3596 int decayed; 3597 3598 /* 3599 * Track task load average for carrying it to new CPU after migrated, and 3600 * track group sched_entity load average for task_h_load calc in migration 3601 */ 3602 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) 3603 __update_load_avg_se(now, cfs_rq, se); 3604 3605 decayed = update_cfs_rq_load_avg(now, cfs_rq); 3606 decayed |= propagate_entity_load_avg(se); 3607 3608 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { 3609 3610 /* 3611 * DO_ATTACH means we're here from enqueue_entity(). 3612 * !last_update_time means we've passed through 3613 * migrate_task_rq_fair() indicating we migrated. 3614 * 3615 * IOW we're enqueueing a task on a new CPU. 
3616 */ 3617 attach_entity_load_avg(cfs_rq, se); 3618 update_tg_load_avg(cfs_rq, 0); 3619 3620 } else if (decayed) { 3621 cfs_rq_util_change(cfs_rq, 0); 3622 3623 if (flags & UPDATE_TG) 3624 update_tg_load_avg(cfs_rq, 0); 3625 } 3626 } 3627 3628 #ifndef CONFIG_64BIT 3629 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3630 { 3631 u64 last_update_time_copy; 3632 u64 last_update_time; 3633 3634 do { 3635 last_update_time_copy = cfs_rq->load_last_update_time_copy; 3636 smp_rmb(); 3637 last_update_time = cfs_rq->avg.last_update_time; 3638 } while (last_update_time != last_update_time_copy); 3639 3640 return last_update_time; 3641 } 3642 #else 3643 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3644 { 3645 return cfs_rq->avg.last_update_time; 3646 } 3647 #endif 3648 3649 /* 3650 * Synchronize entity load avg of dequeued entity without locking 3651 * the previous rq. 3652 */ 3653 static void sync_entity_load_avg(struct sched_entity *se) 3654 { 3655 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3656 u64 last_update_time; 3657 3658 last_update_time = cfs_rq_last_update_time(cfs_rq); 3659 __update_load_avg_blocked_se(last_update_time, se); 3660 } 3661 3662 /* 3663 * Task first catches up with cfs_rq, and then subtract 3664 * itself from the cfs_rq (task must be off the queue now). 3665 */ 3666 static void remove_entity_load_avg(struct sched_entity *se) 3667 { 3668 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3669 unsigned long flags; 3670 3671 /* 3672 * tasks cannot exit without having gone through wake_up_new_task() -> 3673 * post_init_entity_util_avg() which will have added things to the 3674 * cfs_rq, so we can remove unconditionally. 3675 */ 3676 3677 sync_entity_load_avg(se); 3678 3679 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); 3680 ++cfs_rq->removed.nr; 3681 cfs_rq->removed.util_avg += se->avg.util_avg; 3682 cfs_rq->removed.load_avg += se->avg.load_avg; 3683 cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */ 3684 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); 3685 } 3686 3687 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq) 3688 { 3689 return cfs_rq->avg.runnable_load_avg; 3690 } 3691 3692 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) 3693 { 3694 return cfs_rq->avg.load_avg; 3695 } 3696 3697 static inline unsigned long task_util(struct task_struct *p) 3698 { 3699 return READ_ONCE(p->se.avg.util_avg); 3700 } 3701 3702 static inline unsigned long _task_util_est(struct task_struct *p) 3703 { 3704 struct util_est ue = READ_ONCE(p->se.avg.util_est); 3705 3706 return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED); 3707 } 3708 3709 static inline unsigned long task_util_est(struct task_struct *p) 3710 { 3711 return max(task_util(p), _task_util_est(p)); 3712 } 3713 3714 #ifdef CONFIG_UCLAMP_TASK 3715 static inline unsigned long uclamp_task_util(struct task_struct *p) 3716 { 3717 return clamp(task_util_est(p), 3718 uclamp_eff_value(p, UCLAMP_MIN), 3719 uclamp_eff_value(p, UCLAMP_MAX)); 3720 } 3721 #else 3722 static inline unsigned long uclamp_task_util(struct task_struct *p) 3723 { 3724 return task_util_est(p); 3725 } 3726 #endif 3727 3728 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, 3729 struct task_struct *p) 3730 { 3731 unsigned int enqueued; 3732 3733 if (!sched_feat(UTIL_EST)) 3734 return; 3735 3736 /* Update root cfs_rq's estimated utilization */ 3737 enqueued = cfs_rq->avg.util_est.enqueued; 3738 enqueued += _task_util_est(p); 3739 
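        /*
         * Publish the new aggregate with WRITE_ONCE(); it is read locklessly
         * by consumers of the CPU's estimated utilization.
         */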
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
}

/*
 * Check if a (signed) value is within a specified (unsigned) margin,
 * based on the observation that:
 *
 *      abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
 *
 * NOTE: this only works when value + margin < INT_MAX.
 */
static inline bool within_margin(int value, int margin)
{
        return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}

static void
util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
{
        long last_ewma_diff;
        struct util_est ue;
        int cpu;

        if (!sched_feat(UTIL_EST))
                return;

        /* Update root cfs_rq's estimated utilization */
        ue.enqueued = cfs_rq->avg.util_est.enqueued;
        ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);

        /*
         * Skip update of task's estimated utilization when the task has not
         * yet completed an activation, e.g. being migrated.
         */
        if (!task_sleep)
                return;

        /*
         * If the PELT values haven't changed since enqueue time,
         * skip the util_est update.
         */
        ue = p->se.avg.util_est;
        if (ue.enqueued & UTIL_AVG_UNCHANGED)
                return;

        /*
         * Reset EWMA on utilization increases, the moving average is used only
         * to smooth utilization decreases.
         */
        ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
        if (sched_feat(UTIL_EST_FASTUP)) {
                if (ue.ewma < ue.enqueued) {
                        ue.ewma = ue.enqueued;
                        goto done;
                }
        }

        /*
         * Skip update of task's estimated utilization when its EWMA is
         * already ~1% close to its last activation value.
         */
        last_ewma_diff = ue.enqueued - ue.ewma;
        if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
                return;

        /*
         * To avoid overestimation of actual task utilization, skip updates if
         * we cannot guarantee there is idle time on this CPU.
         */
        cpu = cpu_of(rq_of(cfs_rq));
        if (task_util(p) > capacity_orig_of(cpu))
                return;

        /*
         * Update the task's estimated utilization
         *
         * When *p completes an activation we can consolidate another sample
         * of the task size.
This is done by storing the current PELT value 3818 * as ue.enqueued and by using this value to update the Exponential 3819 * Weighted Moving Average (EWMA): 3820 * 3821 * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1) 3822 * = w * task_util(p) + ewma(t-1) - w * ewma(t-1) 3823 * = w * (task_util(p) - ewma(t-1)) + ewma(t-1) 3824 * = w * ( last_ewma_diff ) + ewma(t-1) 3825 * = w * (last_ewma_diff + ewma(t-1) / w) 3826 * 3827 * Where 'w' is the weight of new samples, which is configured to be 3828 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT) 3829 */ 3830 ue.ewma <<= UTIL_EST_WEIGHT_SHIFT; 3831 ue.ewma += last_ewma_diff; 3832 ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; 3833 done: 3834 WRITE_ONCE(p->se.avg.util_est, ue); 3835 } 3836 3837 static inline int task_fits_capacity(struct task_struct *p, long capacity) 3838 { 3839 return fits_capacity(uclamp_task_util(p), capacity); 3840 } 3841 3842 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) 3843 { 3844 if (!static_branch_unlikely(&sched_asym_cpucapacity)) 3845 return; 3846 3847 if (!p) { 3848 rq->misfit_task_load = 0; 3849 return; 3850 } 3851 3852 if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { 3853 rq->misfit_task_load = 0; 3854 return; 3855 } 3856 3857 rq->misfit_task_load = task_h_load(p); 3858 } 3859 3860 #else /* CONFIG_SMP */ 3861 3862 #define UPDATE_TG 0x0 3863 #define SKIP_AGE_LOAD 0x0 3864 #define DO_ATTACH 0x0 3865 3866 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) 3867 { 3868 cfs_rq_util_change(cfs_rq, 0); 3869 } 3870 3871 static inline void remove_entity_load_avg(struct sched_entity *se) {} 3872 3873 static inline void 3874 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3875 static inline void 3876 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3877 3878 static inline int idle_balance(struct rq *rq, struct rq_flags *rf) 3879 { 3880 return 0; 3881 } 3882 3883 static inline void 3884 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 3885 3886 static inline void 3887 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, 3888 bool task_sleep) {} 3889 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} 3890 3891 #endif /* CONFIG_SMP */ 3892 3893 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) 3894 { 3895 #ifdef CONFIG_SCHED_DEBUG 3896 s64 d = se->vruntime - cfs_rq->min_vruntime; 3897 3898 if (d < 0) 3899 d = -d; 3900 3901 if (d > 3*sysctl_sched_latency) 3902 schedstat_inc(cfs_rq->nr_spread_over); 3903 #endif 3904 } 3905 3906 static void 3907 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) 3908 { 3909 u64 vruntime = cfs_rq->min_vruntime; 3910 3911 /* 3912 * The 'current' period is already promised to the current tasks, 3913 * however the extra weight of the new task will slow them down a 3914 * little, place the new task so that it fits in the slot that 3915 * stays open at the end. 3916 */ 3917 if (initial && sched_feat(START_DEBIT)) 3918 vruntime += sched_vslice(cfs_rq, se); 3919 3920 /* sleeps up to a single latency don't count. */ 3921 if (!initial) { 3922 unsigned long thresh = sysctl_sched_latency; 3923 3924 /* 3925 * Halve their sleep time's effect, to allow 3926 * for a gentler effect of sleepers: 3927 */ 3928 if (sched_feat(GENTLE_FAIR_SLEEPERS)) 3929 thresh >>= 1; 3930 3931 vruntime -= thresh; 3932 } 3933 3934 /* ensure we never gain time by being placed backwards. 
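 * An entity that only slept briefly can still have a vruntime ahead of the
 * computed placement; pulling it back would hand it bonus runtime, hence
 * the max():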
*/ 3935 se->vruntime = max_vruntime(se->vruntime, vruntime); 3936 } 3937 3938 static void check_enqueue_throttle(struct cfs_rq *cfs_rq); 3939 3940 static inline void check_schedstat_required(void) 3941 { 3942 #ifdef CONFIG_SCHEDSTATS 3943 if (schedstat_enabled()) 3944 return; 3945 3946 /* Force schedstat enabled if a dependent tracepoint is active */ 3947 if (trace_sched_stat_wait_enabled() || 3948 trace_sched_stat_sleep_enabled() || 3949 trace_sched_stat_iowait_enabled() || 3950 trace_sched_stat_blocked_enabled() || 3951 trace_sched_stat_runtime_enabled()) { 3952 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " 3953 "stat_blocked and stat_runtime require the " 3954 "kernel parameter schedstats=enable or " 3955 "kernel.sched_schedstats=1\n"); 3956 } 3957 #endif 3958 } 3959 3960 3961 /* 3962 * MIGRATION 3963 * 3964 * dequeue 3965 * update_curr() 3966 * update_min_vruntime() 3967 * vruntime -= min_vruntime 3968 * 3969 * enqueue 3970 * update_curr() 3971 * update_min_vruntime() 3972 * vruntime += min_vruntime 3973 * 3974 * this way the vruntime transition between RQs is done when both 3975 * min_vruntime are up-to-date. 3976 * 3977 * WAKEUP (remote) 3978 * 3979 * ->migrate_task_rq_fair() (p->state == TASK_WAKING) 3980 * vruntime -= min_vruntime 3981 * 3982 * enqueue 3983 * update_curr() 3984 * update_min_vruntime() 3985 * vruntime += min_vruntime 3986 * 3987 * this way we don't have the most up-to-date min_vruntime on the originating 3988 * CPU and an up-to-date min_vruntime on the destination CPU. 3989 */ 3990 3991 static void 3992 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3993 { 3994 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); 3995 bool curr = cfs_rq->curr == se; 3996 3997 /* 3998 * If we're the current task, we must renormalise before calling 3999 * update_curr(). 4000 */ 4001 if (renorm && curr) 4002 se->vruntime += cfs_rq->min_vruntime; 4003 4004 update_curr(cfs_rq); 4005 4006 /* 4007 * Otherwise, renormalise after, such that we're placed at the current 4008 * moment in time, instead of some random moment in the past. Being 4009 * placed in the past could significantly boost this task to the 4010 * fairness detriment of existing tasks. 4011 */ 4012 if (renorm && !curr) 4013 se->vruntime += cfs_rq->min_vruntime; 4014 4015 /* 4016 * When enqueuing a sched_entity, we must: 4017 * - Update loads to have both entity and cfs_rq synced with now. 
4018 * - Add its load to cfs_rq->runnable_avg 4019 * - For group_entity, update its weight to reflect the new share of 4020 * its group cfs_rq 4021 * - Add its new weight to cfs_rq->load.weight 4022 */ 4023 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); 4024 update_cfs_group(se); 4025 enqueue_runnable_load_avg(cfs_rq, se); 4026 account_entity_enqueue(cfs_rq, se); 4027 4028 if (flags & ENQUEUE_WAKEUP) 4029 place_entity(cfs_rq, se, 0); 4030 4031 check_schedstat_required(); 4032 update_stats_enqueue(cfs_rq, se, flags); 4033 check_spread(cfs_rq, se); 4034 if (!curr) 4035 __enqueue_entity(cfs_rq, se); 4036 se->on_rq = 1; 4037 4038 if (cfs_rq->nr_running == 1) { 4039 list_add_leaf_cfs_rq(cfs_rq); 4040 check_enqueue_throttle(cfs_rq); 4041 } 4042 } 4043 4044 static void __clear_buddies_last(struct sched_entity *se) 4045 { 4046 for_each_sched_entity(se) { 4047 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4048 if (cfs_rq->last != se) 4049 break; 4050 4051 cfs_rq->last = NULL; 4052 } 4053 } 4054 4055 static void __clear_buddies_next(struct sched_entity *se) 4056 { 4057 for_each_sched_entity(se) { 4058 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4059 if (cfs_rq->next != se) 4060 break; 4061 4062 cfs_rq->next = NULL; 4063 } 4064 } 4065 4066 static void __clear_buddies_skip(struct sched_entity *se) 4067 { 4068 for_each_sched_entity(se) { 4069 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4070 if (cfs_rq->skip != se) 4071 break; 4072 4073 cfs_rq->skip = NULL; 4074 } 4075 } 4076 4077 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) 4078 { 4079 if (cfs_rq->last == se) 4080 __clear_buddies_last(se); 4081 4082 if (cfs_rq->next == se) 4083 __clear_buddies_next(se); 4084 4085 if (cfs_rq->skip == se) 4086 __clear_buddies_skip(se); 4087 } 4088 4089 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); 4090 4091 static void 4092 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 4093 { 4094 /* 4095 * Update run-time statistics of the 'current'. 4096 */ 4097 update_curr(cfs_rq); 4098 4099 /* 4100 * When dequeuing a sched_entity, we must: 4101 * - Update loads to have both entity and cfs_rq synced with now. 4102 * - Subtract its load from the cfs_rq->runnable_avg. 4103 * - Subtract its previous weight from cfs_rq->load.weight. 4104 * - For group entity, update its weight to reflect the new share 4105 * of its group cfs_rq. 4106 */ 4107 update_load_avg(cfs_rq, se, UPDATE_TG); 4108 dequeue_runnable_load_avg(cfs_rq, se); 4109 4110 update_stats_dequeue(cfs_rq, se, flags); 4111 4112 clear_buddies(cfs_rq, se); 4113 4114 if (se != cfs_rq->curr) 4115 __dequeue_entity(cfs_rq, se); 4116 se->on_rq = 0; 4117 account_entity_dequeue(cfs_rq, se); 4118 4119 /* 4120 * Normalize after update_curr(); which will also have moved 4121 * min_vruntime if @se is the one holding it back. But before doing 4122 * update_min_vruntime() again, which will discount @se's position and 4123 * can move min_vruntime forward still more. 4124 */ 4125 if (!(flags & DEQUEUE_SLEEP)) 4126 se->vruntime -= cfs_rq->min_vruntime; 4127 4128 /* return excess runtime on last dequeue */ 4129 return_cfs_rq_runtime(cfs_rq); 4130 4131 update_cfs_group(se); 4132 4133 /* 4134 * Now advance min_vruntime if @se was the entity holding it back, 4135 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be 4136 * put back on, and if we advance min_vruntime, we'll be placed back 4137 * further than we started -- ie. we'll be penalized. 
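 * (DEQUEUE_SAVE without DEQUEUE_MOVE is the transient dequeue/enqueue
 * pattern used when a task's attributes are changed in place and it goes
 * straight back onto the same runqueue.)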
4138 */ 4139 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) 4140 update_min_vruntime(cfs_rq); 4141 } 4142 4143 /* 4144 * Preempt the current task with a newly woken task if needed: 4145 */ 4146 static void 4147 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) 4148 { 4149 unsigned long ideal_runtime, delta_exec; 4150 struct sched_entity *se; 4151 s64 delta; 4152 4153 ideal_runtime = sched_slice(cfs_rq, curr); 4154 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; 4155 if (delta_exec > ideal_runtime) { 4156 resched_curr(rq_of(cfs_rq)); 4157 /* 4158 * The current task ran long enough, ensure it doesn't get 4159 * re-elected due to buddy favours. 4160 */ 4161 clear_buddies(cfs_rq, curr); 4162 return; 4163 } 4164 4165 /* 4166 * Ensure that a task that missed wakeup preemption by a 4167 * narrow margin doesn't have to wait for a full slice. 4168 * This also mitigates buddy induced latencies under load. 4169 */ 4170 if (delta_exec < sysctl_sched_min_granularity) 4171 return; 4172 4173 se = __pick_first_entity(cfs_rq); 4174 delta = curr->vruntime - se->vruntime; 4175 4176 if (delta < 0) 4177 return; 4178 4179 if (delta > ideal_runtime) 4180 resched_curr(rq_of(cfs_rq)); 4181 } 4182 4183 static void 4184 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 4185 { 4186 /* 'current' is not kept within the tree. */ 4187 if (se->on_rq) { 4188 /* 4189 * Any task has to be enqueued before it get to execute on 4190 * a CPU. So account for the time it spent waiting on the 4191 * runqueue. 4192 */ 4193 update_stats_wait_end(cfs_rq, se); 4194 __dequeue_entity(cfs_rq, se); 4195 update_load_avg(cfs_rq, se, UPDATE_TG); 4196 } 4197 4198 update_stats_curr_start(cfs_rq, se); 4199 cfs_rq->curr = se; 4200 4201 /* 4202 * Track our maximum slice length, if the CPU's load is at 4203 * least twice that of our own weight (i.e. dont track it 4204 * when there are only lesser-weight tasks around): 4205 */ 4206 if (schedstat_enabled() && 4207 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { 4208 schedstat_set(se->statistics.slice_max, 4209 max((u64)schedstat_val(se->statistics.slice_max), 4210 se->sum_exec_runtime - se->prev_sum_exec_runtime)); 4211 } 4212 4213 se->prev_sum_exec_runtime = se->sum_exec_runtime; 4214 } 4215 4216 static int 4217 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); 4218 4219 /* 4220 * Pick the next process, keeping these things in mind, in this order: 4221 * 1) keep things fair between processes/task groups 4222 * 2) pick the "next" process, since someone really wants that to run 4223 * 3) pick the "last" process, for cache locality 4224 * 4) do not run the "skip" process, if something else is available 4225 */ 4226 static struct sched_entity * 4227 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) 4228 { 4229 struct sched_entity *left = __pick_first_entity(cfs_rq); 4230 struct sched_entity *se; 4231 4232 /* 4233 * If curr is set we have to see if its left of the leftmost entity 4234 * still in the tree, provided there was anything in the tree at all. 4235 */ 4236 if (!left || (curr && entity_before(curr, left))) 4237 left = curr; 4238 4239 se = left; /* ideally we run the leftmost entity */ 4240 4241 /* 4242 * Avoid running the skip buddy, if running something else can 4243 * be done without getting too unfair. 
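 * ('Too unfair' is judged by wakeup_preempt_entity() below: the skip buddy
 * is only bypassed while the replacement's vruntime does not trail the
 * leftmost entity's by more than the scaled wakeup granularity, i.e. the
 * call returns less than 1.)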
4244 */ 4245 if (cfs_rq->skip == se) { 4246 struct sched_entity *second; 4247 4248 if (se == curr) { 4249 second = __pick_first_entity(cfs_rq); 4250 } else { 4251 second = __pick_next_entity(se); 4252 if (!second || (curr && entity_before(curr, second))) 4253 second = curr; 4254 } 4255 4256 if (second && wakeup_preempt_entity(second, left) < 1) 4257 se = second; 4258 } 4259 4260 /* 4261 * Prefer last buddy, try to return the CPU to a preempted task. 4262 */ 4263 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) 4264 se = cfs_rq->last; 4265 4266 /* 4267 * Someone really wants this to run. If it's not unfair, run it. 4268 */ 4269 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) 4270 se = cfs_rq->next; 4271 4272 clear_buddies(cfs_rq, se); 4273 4274 return se; 4275 } 4276 4277 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); 4278 4279 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) 4280 { 4281 /* 4282 * If still on the runqueue then deactivate_task() 4283 * was not called and update_curr() has to be done: 4284 */ 4285 if (prev->on_rq) 4286 update_curr(cfs_rq); 4287 4288 /* throttle cfs_rqs exceeding runtime */ 4289 check_cfs_rq_runtime(cfs_rq); 4290 4291 check_spread(cfs_rq, prev); 4292 4293 if (prev->on_rq) { 4294 update_stats_wait_start(cfs_rq, prev); 4295 /* Put 'current' back into the tree. */ 4296 __enqueue_entity(cfs_rq, prev); 4297 /* in !on_rq case, update occurred at dequeue */ 4298 update_load_avg(cfs_rq, prev, 0); 4299 } 4300 cfs_rq->curr = NULL; 4301 } 4302 4303 static void 4304 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) 4305 { 4306 /* 4307 * Update run-time statistics of the 'current'. 4308 */ 4309 update_curr(cfs_rq); 4310 4311 /* 4312 * Ensure that runnable average is periodically updated. 4313 */ 4314 update_load_avg(cfs_rq, curr, UPDATE_TG); 4315 update_cfs_group(curr); 4316 4317 #ifdef CONFIG_SCHED_HRTICK 4318 /* 4319 * queued ticks are scheduled to match the slice, so don't bother 4320 * validating it and just reschedule. 4321 */ 4322 if (queued) { 4323 resched_curr(rq_of(cfs_rq)); 4324 return; 4325 } 4326 /* 4327 * don't let the period tick interfere with the hrtick preemption 4328 */ 4329 if (!sched_feat(DOUBLE_TICK) && 4330 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) 4331 return; 4332 #endif 4333 4334 if (cfs_rq->nr_running > 1) 4335 check_preempt_tick(cfs_rq, curr); 4336 } 4337 4338 4339 /************************************************** 4340 * CFS bandwidth control machinery 4341 */ 4342 4343 #ifdef CONFIG_CFS_BANDWIDTH 4344 4345 #ifdef CONFIG_JUMP_LABEL 4346 static struct static_key __cfs_bandwidth_used; 4347 4348 static inline bool cfs_bandwidth_used(void) 4349 { 4350 return static_key_false(&__cfs_bandwidth_used); 4351 } 4352 4353 void cfs_bandwidth_usage_inc(void) 4354 { 4355 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used); 4356 } 4357 4358 void cfs_bandwidth_usage_dec(void) 4359 { 4360 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); 4361 } 4362 #else /* CONFIG_JUMP_LABEL */ 4363 static bool cfs_bandwidth_used(void) 4364 { 4365 return true; 4366 } 4367 4368 void cfs_bandwidth_usage_inc(void) {} 4369 void cfs_bandwidth_usage_dec(void) {} 4370 #endif /* CONFIG_JUMP_LABEL */ 4371 4372 /* 4373 * default period for cfs group bandwidth. 
4374 * default: 0.1s, units: nanoseconds 4375 */ 4376 static inline u64 default_cfs_period(void) 4377 { 4378 return 100000000ULL; 4379 } 4380 4381 static inline u64 sched_cfs_bandwidth_slice(void) 4382 { 4383 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; 4384 } 4385 4386 /* 4387 * Replenish runtime according to assigned quota. We use sched_clock_cpu 4388 * directly instead of rq->clock to avoid adding additional synchronization 4389 * around rq->lock. 4390 * 4391 * requires cfs_b->lock 4392 */ 4393 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) 4394 { 4395 if (cfs_b->quota != RUNTIME_INF) 4396 cfs_b->runtime = cfs_b->quota; 4397 } 4398 4399 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 4400 { 4401 return &tg->cfs_bandwidth; 4402 } 4403 4404 /* returns 0 on failure to allocate runtime */ 4405 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4406 { 4407 struct task_group *tg = cfs_rq->tg; 4408 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); 4409 u64 amount = 0, min_amount; 4410 4411 /* note: this is a positive sum as runtime_remaining <= 0 */ 4412 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; 4413 4414 raw_spin_lock(&cfs_b->lock); 4415 if (cfs_b->quota == RUNTIME_INF) 4416 amount = min_amount; 4417 else { 4418 start_cfs_bandwidth(cfs_b); 4419 4420 if (cfs_b->runtime > 0) { 4421 amount = min(cfs_b->runtime, min_amount); 4422 cfs_b->runtime -= amount; 4423 cfs_b->idle = 0; 4424 } 4425 } 4426 raw_spin_unlock(&cfs_b->lock); 4427 4428 cfs_rq->runtime_remaining += amount; 4429 4430 return cfs_rq->runtime_remaining > 0; 4431 } 4432 4433 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4434 { 4435 /* dock delta_exec before expiring quota (as it could span periods) */ 4436 cfs_rq->runtime_remaining -= delta_exec; 4437 4438 if (likely(cfs_rq->runtime_remaining > 0)) 4439 return; 4440 4441 if (cfs_rq->throttled) 4442 return; 4443 /* 4444 * if we're unable to extend our runtime we resched so that the active 4445 * hierarchy can be throttled 4446 */ 4447 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) 4448 resched_curr(rq_of(cfs_rq)); 4449 } 4450 4451 static __always_inline 4452 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4453 { 4454 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 4455 return; 4456 4457 __account_cfs_rq_runtime(cfs_rq, delta_exec); 4458 } 4459 4460 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 4461 { 4462 return cfs_bandwidth_used() && cfs_rq->throttled; 4463 } 4464 4465 /* check whether cfs_rq, or any parent, is throttled */ 4466 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 4467 { 4468 return cfs_bandwidth_used() && cfs_rq->throttle_count; 4469 } 4470 4471 /* 4472 * Ensure that neither of the group entities corresponding to src_cpu or 4473 * dest_cpu are members of a throttled hierarchy when performing group 4474 * load-balance operations. 
*/ 4476 static inline int throttled_lb_pair(struct task_group *tg, 4477 int src_cpu, int dest_cpu) 4478 { 4479 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; 4480 4481 src_cfs_rq = tg->cfs_rq[src_cpu]; 4482 dest_cfs_rq = tg->cfs_rq[dest_cpu]; 4483 4484 return throttled_hierarchy(src_cfs_rq) || 4485 throttled_hierarchy(dest_cfs_rq); 4486 } 4487 4488 static int tg_unthrottle_up(struct task_group *tg, void *data) 4489 { 4490 struct rq *rq = data; 4491 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4492 4493 cfs_rq->throttle_count--; 4494 if (!cfs_rq->throttle_count) { 4495 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - 4496 cfs_rq->throttled_clock_task; 4497 4498 /* Add cfs_rq with already running entity in the list */ 4499 if (cfs_rq->nr_running >= 1) 4500 list_add_leaf_cfs_rq(cfs_rq); 4501 } 4502 4503 return 0; 4504 } 4505 4506 static int tg_throttle_down(struct task_group *tg, void *data) 4507 { 4508 struct rq *rq = data; 4509 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4510 4511 /* group is entering throttled state, stop time */ 4512 if (!cfs_rq->throttle_count) { 4513 cfs_rq->throttled_clock_task = rq_clock_task(rq); 4514 list_del_leaf_cfs_rq(cfs_rq); 4515 } 4516 cfs_rq->throttle_count++; 4517 4518 return 0; 4519 } 4520 4521 static void throttle_cfs_rq(struct cfs_rq *cfs_rq) 4522 { 4523 struct rq *rq = rq_of(cfs_rq); 4524 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4525 struct sched_entity *se; 4526 long task_delta, idle_task_delta, dequeue = 1; 4527 bool empty; 4528 4529 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; 4530 4531 /* freeze hierarchy runnable averages while throttled */ 4532 rcu_read_lock(); 4533 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); 4534 rcu_read_unlock(); 4535 4536 task_delta = cfs_rq->h_nr_running; 4537 idle_task_delta = cfs_rq->idle_h_nr_running; 4538 for_each_sched_entity(se) { 4539 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4540 /* throttled entity or throttle-on-deactivate */ 4541 if (!se->on_rq) 4542 break; 4543 4544 if (dequeue) 4545 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); 4546 qcfs_rq->h_nr_running -= task_delta; 4547 qcfs_rq->idle_h_nr_running -= idle_task_delta; 4548 4549 if (qcfs_rq->load.weight) 4550 dequeue = 0; 4551 } 4552 4553 if (!se) 4554 sub_nr_running(rq, task_delta); 4555 4556 cfs_rq->throttled = 1; 4557 cfs_rq->throttled_clock = rq_clock(rq); 4558 raw_spin_lock(&cfs_b->lock); 4559 empty = list_empty(&cfs_b->throttled_cfs_rq); 4560 4561 /* 4562 * Add to the _head_ of the list, so that an already-started 4563 * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is 4564 * not running, add to the tail so that later runqueues don't get starved. 4565 */ 4566 if (cfs_b->distribute_running) 4567 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); 4568 else 4569 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); 4570 4571 /* 4572 * If we're the first throttled task, make sure the bandwidth 4573 * timer is running.
4574 */ 4575 if (empty) 4576 start_cfs_bandwidth(cfs_b); 4577 4578 raw_spin_unlock(&cfs_b->lock); 4579 } 4580 4581 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) 4582 { 4583 struct rq *rq = rq_of(cfs_rq); 4584 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4585 struct sched_entity *se; 4586 int enqueue = 1; 4587 long task_delta, idle_task_delta; 4588 4589 se = cfs_rq->tg->se[cpu_of(rq)]; 4590 4591 cfs_rq->throttled = 0; 4592 4593 update_rq_clock(rq); 4594 4595 raw_spin_lock(&cfs_b->lock); 4596 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; 4597 list_del_rcu(&cfs_rq->throttled_list); 4598 raw_spin_unlock(&cfs_b->lock); 4599 4600 /* update hierarchical throttle state */ 4601 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); 4602 4603 if (!cfs_rq->load.weight) 4604 return; 4605 4606 task_delta = cfs_rq->h_nr_running; 4607 idle_task_delta = cfs_rq->idle_h_nr_running; 4608 for_each_sched_entity(se) { 4609 if (se->on_rq) 4610 enqueue = 0; 4611 4612 cfs_rq = cfs_rq_of(se); 4613 if (enqueue) 4614 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); 4615 cfs_rq->h_nr_running += task_delta; 4616 cfs_rq->idle_h_nr_running += idle_task_delta; 4617 4618 if (cfs_rq_throttled(cfs_rq)) 4619 break; 4620 } 4621 4622 assert_list_leaf_cfs_rq(rq); 4623 4624 if (!se) 4625 add_nr_running(rq, task_delta); 4626 4627 /* Determine whether we need to wake up potentially idle CPU: */ 4628 if (rq->curr == rq->idle && rq->cfs.nr_running) 4629 resched_curr(rq); 4630 } 4631 4632 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) 4633 { 4634 struct cfs_rq *cfs_rq; 4635 u64 runtime; 4636 u64 starting_runtime = remaining; 4637 4638 rcu_read_lock(); 4639 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, 4640 throttled_list) { 4641 struct rq *rq = rq_of(cfs_rq); 4642 struct rq_flags rf; 4643 4644 rq_lock_irqsave(rq, &rf); 4645 if (!cfs_rq_throttled(cfs_rq)) 4646 goto next; 4647 4648 /* By the above check, this should never be true */ 4649 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); 4650 4651 runtime = -cfs_rq->runtime_remaining + 1; 4652 if (runtime > remaining) 4653 runtime = remaining; 4654 remaining -= runtime; 4655 4656 cfs_rq->runtime_remaining += runtime; 4657 4658 /* we check whether we're throttled above */ 4659 if (cfs_rq->runtime_remaining > 0) 4660 unthrottle_cfs_rq(cfs_rq); 4661 4662 next: 4663 rq_unlock_irqrestore(rq, &rf); 4664 4665 if (!remaining) 4666 break; 4667 } 4668 rcu_read_unlock(); 4669 4670 return starting_runtime - remaining; 4671 } 4672 4673 /* 4674 * Responsible for refilling a task_group's bandwidth and unthrottling its 4675 * cfs_rqs as appropriate. If there has been no activity within the last 4676 * period the timer is deactivated until scheduling resumes; cfs_b->idle is 4677 * used to track this state. 
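 *
 * For example, with illustrative cgroup settings of
 *
 *   cpu.cfs_period_us = 100000
 *   cpu.cfs_quota_us  =  25000
 *
 * every period-timer expiry refills cfs_b->runtime to 25ms; throttled
 * cfs_rqs are then topped up out of that pool by distribute_cfs_runtime()
 * and unthrottled as soon as their runtime_remaining becomes positive.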
4678 */ 4679 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) 4680 { 4681 u64 runtime; 4682 int throttled; 4683 4684 /* no need to continue the timer with no bandwidth constraint */ 4685 if (cfs_b->quota == RUNTIME_INF) 4686 goto out_deactivate; 4687 4688 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 4689 cfs_b->nr_periods += overrun; 4690 4691 /* 4692 * idle depends on !throttled (for the case of a large deficit), and if 4693 * we're going inactive then everything else can be deferred 4694 */ 4695 if (cfs_b->idle && !throttled) 4696 goto out_deactivate; 4697 4698 __refill_cfs_bandwidth_runtime(cfs_b); 4699 4700 if (!throttled) { 4701 /* mark as potentially idle for the upcoming period */ 4702 cfs_b->idle = 1; 4703 return 0; 4704 } 4705 4706 /* account preceding periods in which throttling occurred */ 4707 cfs_b->nr_throttled += overrun; 4708 4709 /* 4710 * This check is repeated as we are holding onto the new bandwidth while 4711 * we unthrottle. This can potentially race with an unthrottled group 4712 * trying to acquire new bandwidth from the global pool. This can result 4713 * in us over-using our runtime if it is all used during this loop, but 4714 * only by limited amounts in that extreme case. 4715 */ 4716 while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { 4717 runtime = cfs_b->runtime; 4718 cfs_b->distribute_running = 1; 4719 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 4720 /* we can't nest cfs_b->lock while distributing bandwidth */ 4721 runtime = distribute_cfs_runtime(cfs_b, runtime); 4722 raw_spin_lock_irqsave(&cfs_b->lock, flags); 4723 4724 cfs_b->distribute_running = 0; 4725 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 4726 4727 lsub_positive(&cfs_b->runtime, runtime); 4728 } 4729 4730 /* 4731 * While we are ensured activity in the period following an 4732 * unthrottle, this also covers the case in which the new bandwidth is 4733 * insufficient to cover the existing bandwidth deficit. (Forcing the 4734 * timer to remain active while there are any throttled entities.) 4735 */ 4736 cfs_b->idle = 0; 4737 4738 return 0; 4739 4740 out_deactivate: 4741 return 1; 4742 } 4743 4744 /* a cfs_rq won't donate quota below this amount */ 4745 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; 4746 /* minimum remaining period time to redistribute slack quota */ 4747 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; 4748 /* how long we wait to gather additional slack before distributing */ 4749 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; 4750 4751 /* 4752 * Are we near the end of the current quota period? 4753 * 4754 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the 4755 * hrtimer base being cleared by hrtimer_start. In the case of 4756 * migrate_hrtimers, base is never cleared, so we are fine. 4757 */ 4758 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) 4759 { 4760 struct hrtimer *refresh_timer = &cfs_b->period_timer; 4761 u64 remaining; 4762 4763 /* if the call-back is running a quota refresh is already occurring */ 4764 if (hrtimer_callback_running(refresh_timer)) 4765 return 1; 4766 4767 /* is a quota refresh about to occur? 
*/ 4768 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); 4769 if (remaining < min_expire) 4770 return 1; 4771 4772 return 0; 4773 } 4774 4775 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) 4776 { 4777 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; 4778 4779 /* if there's a quota refresh soon don't bother with slack */ 4780 if (runtime_refresh_within(cfs_b, min_left)) 4781 return; 4782 4783 /* don't push forwards an existing deferred unthrottle */ 4784 if (cfs_b->slack_started) 4785 return; 4786 cfs_b->slack_started = true; 4787 4788 hrtimer_start(&cfs_b->slack_timer, 4789 ns_to_ktime(cfs_bandwidth_slack_period), 4790 HRTIMER_MODE_REL); 4791 } 4792 4793 /* we know any runtime found here is valid as update_curr() precedes return */ 4794 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4795 { 4796 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4797 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; 4798 4799 if (slack_runtime <= 0) 4800 return; 4801 4802 raw_spin_lock(&cfs_b->lock); 4803 if (cfs_b->quota != RUNTIME_INF) { 4804 cfs_b->runtime += slack_runtime; 4805 4806 /* we are under rq->lock, defer unthrottling using a timer */ 4807 if (cfs_b->runtime > sched_cfs_bandwidth_slice() && 4808 !list_empty(&cfs_b->throttled_cfs_rq)) 4809 start_cfs_slack_bandwidth(cfs_b); 4810 } 4811 raw_spin_unlock(&cfs_b->lock); 4812 4813 /* even if it's not valid for return we don't want to try again */ 4814 cfs_rq->runtime_remaining -= slack_runtime; 4815 } 4816 4817 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4818 { 4819 if (!cfs_bandwidth_used()) 4820 return; 4821 4822 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) 4823 return; 4824 4825 __return_cfs_rq_runtime(cfs_rq); 4826 } 4827 4828 /* 4829 * This is done with a timer (instead of inline with bandwidth return) since 4830 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. 4831 */ 4832 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) 4833 { 4834 u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); 4835 unsigned long flags; 4836 4837 /* confirm we're still not at a refresh boundary */ 4838 raw_spin_lock_irqsave(&cfs_b->lock, flags); 4839 cfs_b->slack_started = false; 4840 if (cfs_b->distribute_running) { 4841 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 4842 return; 4843 } 4844 4845 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { 4846 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 4847 return; 4848 } 4849 4850 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) 4851 runtime = cfs_b->runtime; 4852 4853 if (runtime) 4854 cfs_b->distribute_running = 1; 4855 4856 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 4857 4858 if (!runtime) 4859 return; 4860 4861 runtime = distribute_cfs_runtime(cfs_b, runtime); 4862 4863 raw_spin_lock_irqsave(&cfs_b->lock, flags); 4864 lsub_positive(&cfs_b->runtime, runtime); 4865 cfs_b->distribute_running = 0; 4866 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 4867 } 4868 4869 /* 4870 * When a group wakes up we want to make sure that its quota is not already 4871 * expired/exceeded, otherwise it may be allowed to steal additional ticks of 4872 * runtime as update_curr() throttling cannot trigger until it's on-rq.
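 * For example: a group exhausts its quota and its last task then goes to
 * sleep before the period timer refills the pool; when a task wakes up again
 * within the same period, the check below throttles the cfs_rq right at
 * enqueue instead of letting it run until the next update_curr().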
4873 */ 4874 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) 4875 { 4876 if (!cfs_bandwidth_used()) 4877 return; 4878 4879 /* an active group must be handled by the update_curr()->put() path */ 4880 if (!cfs_rq->runtime_enabled || cfs_rq->curr) 4881 return; 4882 4883 /* ensure the group is not already throttled */ 4884 if (cfs_rq_throttled(cfs_rq)) 4885 return; 4886 4887 /* update runtime allocation */ 4888 account_cfs_rq_runtime(cfs_rq, 0); 4889 if (cfs_rq->runtime_remaining <= 0) 4890 throttle_cfs_rq(cfs_rq); 4891 } 4892 4893 static void sync_throttle(struct task_group *tg, int cpu) 4894 { 4895 struct cfs_rq *pcfs_rq, *cfs_rq; 4896 4897 if (!cfs_bandwidth_used()) 4898 return; 4899 4900 if (!tg->parent) 4901 return; 4902 4903 cfs_rq = tg->cfs_rq[cpu]; 4904 pcfs_rq = tg->parent->cfs_rq[cpu]; 4905 4906 cfs_rq->throttle_count = pcfs_rq->throttle_count; 4907 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); 4908 } 4909 4910 /* conditionally throttle active cfs_rq's from put_prev_entity() */ 4911 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4912 { 4913 if (!cfs_bandwidth_used()) 4914 return false; 4915 4916 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) 4917 return false; 4918 4919 /* 4920 * it's possible for a throttled entity to be forced into a running 4921 * state (e.g. set_curr_task), in this case we're finished. 4922 */ 4923 if (cfs_rq_throttled(cfs_rq)) 4924 return true; 4925 4926 throttle_cfs_rq(cfs_rq); 4927 return true; 4928 } 4929 4930 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) 4931 { 4932 struct cfs_bandwidth *cfs_b = 4933 container_of(timer, struct cfs_bandwidth, slack_timer); 4934 4935 do_sched_cfs_slack_timer(cfs_b); 4936 4937 return HRTIMER_NORESTART; 4938 } 4939 4940 extern const u64 max_cfs_quota_period; 4941 4942 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) 4943 { 4944 struct cfs_bandwidth *cfs_b = 4945 container_of(timer, struct cfs_bandwidth, period_timer); 4946 unsigned long flags; 4947 int overrun; 4948 int idle = 0; 4949 int count = 0; 4950 4951 raw_spin_lock_irqsave(&cfs_b->lock, flags); 4952 for (;;) { 4953 overrun = hrtimer_forward_now(timer, cfs_b->period); 4954 if (!overrun) 4955 break; 4956 4957 if (++count > 3) { 4958 u64 new, old = ktime_to_ns(cfs_b->period); 4959 4960 /* 4961 * Grow period by a factor of 2 to avoid losing precision. 4962 * Precision loss in the quota/period ratio can cause __cfs_schedulable 4963 * to fail. 4964 */ 4965 new = old * 2; 4966 if (new < max_cfs_quota_period) { 4967 cfs_b->period = ns_to_ktime(new); 4968 cfs_b->quota *= 2; 4969 4970 pr_warn_ratelimited( 4971 "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", 4972 smp_processor_id(), 4973 div_u64(new, NSEC_PER_USEC), 4974 div_u64(cfs_b->quota, NSEC_PER_USEC)); 4975 } else { 4976 pr_warn_ratelimited( 4977 "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n", 4978 smp_processor_id(), 4979 div_u64(old, NSEC_PER_USEC), 4980 div_u64(cfs_b->quota, NSEC_PER_USEC)); 4981 } 4982 4983 /* reset count so we don't come right back in here */ 4984 count = 0; 4985 } 4986 4987 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); 4988 } 4989 if (idle) 4990 cfs_b->period_active = 0; 4991 raw_spin_unlock_irqrestore(&cfs_b->lock, flags); 4992 4993 return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; 4994 } 4995 4996 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4997 { 4998 raw_spin_lock_init(&cfs_b->lock); 4999 cfs_b->runtime = 0; 5000 cfs_b->quota = RUNTIME_INF; 5001 cfs_b->period = ns_to_ktime(default_cfs_period()); 5002 5003 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); 5004 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 5005 cfs_b->period_timer.function = sched_cfs_period_timer; 5006 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 5007 cfs_b->slack_timer.function = sched_cfs_slack_timer; 5008 cfs_b->distribute_running = 0; 5009 cfs_b->slack_started = false; 5010 } 5011 5012 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) 5013 { 5014 cfs_rq->runtime_enabled = 0; 5015 INIT_LIST_HEAD(&cfs_rq->throttled_list); 5016 } 5017 5018 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5019 { 5020 lockdep_assert_held(&cfs_b->lock); 5021 5022 if (cfs_b->period_active) 5023 return; 5024 5025 cfs_b->period_active = 1; 5026 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); 5027 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); 5028 } 5029 5030 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 5031 { 5032 /* init_cfs_bandwidth() was not called */ 5033 if (!cfs_b->throttled_cfs_rq.next) 5034 return; 5035 5036 hrtimer_cancel(&cfs_b->period_timer); 5037 hrtimer_cancel(&cfs_b->slack_timer); 5038 } 5039 5040 /* 5041 * Both these CPU hotplug callbacks race against unregister_fair_sched_group() 5042 * 5043 * The race is harmless, since modifying bandwidth settings of unhooked group 5044 * bits doesn't do much. 5045 */ 5046 5047 /* cpu online callback */ 5048 static void __maybe_unused update_runtime_enabled(struct rq *rq) 5049 { 5050 struct task_group *tg; 5051 5052 lockdep_assert_held(&rq->lock); 5053 5054 rcu_read_lock(); 5055 list_for_each_entry_rcu(tg, &task_groups, list) { 5056 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 5057 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5058 5059 raw_spin_lock(&cfs_b->lock); 5060 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; 5061 raw_spin_unlock(&cfs_b->lock); 5062 } 5063 rcu_read_unlock(); 5064 } 5065 5066 /* cpu offline callback */ 5067 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) 5068 { 5069 struct task_group *tg; 5070 5071 lockdep_assert_held(&rq->lock); 5072 5073 rcu_read_lock(); 5074 list_for_each_entry_rcu(tg, &task_groups, list) { 5075 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 5076 5077 if (!cfs_rq->runtime_enabled) 5078 continue; 5079 5080 /* 5081 * clock_task is not advancing so we just need to make sure 5082 * there's some valid quota amount 5083 */ 5084 cfs_rq->runtime_remaining = 1; 5085 /* 5086 * Offline rq is schedulable till CPU is completely disabled 5087 * in take_cpu_down(), so we prevent new cfs throttling here.
5088 */ 5089 cfs_rq->runtime_enabled = 0; 5090 5091 if (cfs_rq_throttled(cfs_rq)) 5092 unthrottle_cfs_rq(cfs_rq); 5093 } 5094 rcu_read_unlock(); 5095 } 5096 5097 #else /* CONFIG_CFS_BANDWIDTH */ 5098 5099 static inline bool cfs_bandwidth_used(void) 5100 { 5101 return false; 5102 } 5103 5104 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} 5105 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } 5106 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 5107 static inline void sync_throttle(struct task_group *tg, int cpu) {} 5108 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 5109 5110 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 5111 { 5112 return 0; 5113 } 5114 5115 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 5116 { 5117 return 0; 5118 } 5119 5120 static inline int throttled_lb_pair(struct task_group *tg, 5121 int src_cpu, int dest_cpu) 5122 { 5123 return 0; 5124 } 5125 5126 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5127 5128 #ifdef CONFIG_FAIR_GROUP_SCHED 5129 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 5130 #endif 5131 5132 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 5133 { 5134 return NULL; 5135 } 5136 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5137 static inline void update_runtime_enabled(struct rq *rq) {} 5138 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} 5139 5140 #endif /* CONFIG_CFS_BANDWIDTH */ 5141 5142 /************************************************** 5143 * CFS operations on tasks: 5144 */ 5145 5146 #ifdef CONFIG_SCHED_HRTICK 5147 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) 5148 { 5149 struct sched_entity *se = &p->se; 5150 struct cfs_rq *cfs_rq = cfs_rq_of(se); 5151 5152 SCHED_WARN_ON(task_rq(p) != rq); 5153 5154 if (rq->cfs.h_nr_running > 1) { 5155 u64 slice = sched_slice(cfs_rq, se); 5156 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; 5157 s64 delta = slice - ran; 5158 5159 if (delta < 0) { 5160 if (rq->curr == p) 5161 resched_curr(rq); 5162 return; 5163 } 5164 hrtick_start(rq, delta); 5165 } 5166 } 5167 5168 /* 5169 * called from enqueue/dequeue and updates the hrtick when the 5170 * current task is from our class and nr_running is low enough 5171 * to matter. 
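 * ('Low enough' means fewer than sched_nr_latency tasks, 8 by default, on
 * the current cfs_rq; with more tasks the slices are too short for
 * hrtick-precise preemption to be worth reprogramming the timer.)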
5172 */ 5173 static void hrtick_update(struct rq *rq) 5174 { 5175 struct task_struct *curr = rq->curr; 5176 5177 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) 5178 return; 5179 5180 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) 5181 hrtick_start_fair(rq, curr); 5182 } 5183 #else /* !CONFIG_SCHED_HRTICK */ 5184 static inline void 5185 hrtick_start_fair(struct rq *rq, struct task_struct *p) 5186 { 5187 } 5188 5189 static inline void hrtick_update(struct rq *rq) 5190 { 5191 } 5192 #endif 5193 5194 #ifdef CONFIG_SMP 5195 static inline unsigned long cpu_util(int cpu); 5196 5197 static inline bool cpu_overutilized(int cpu) 5198 { 5199 return !fits_capacity(cpu_util(cpu), capacity_of(cpu)); 5200 } 5201 5202 static inline void update_overutilized_status(struct rq *rq) 5203 { 5204 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) { 5205 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); 5206 trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED); 5207 } 5208 } 5209 #else 5210 static inline void update_overutilized_status(struct rq *rq) { } 5211 #endif 5212 5213 /* Runqueue only has SCHED_IDLE tasks enqueued */ 5214 static int sched_idle_rq(struct rq *rq) 5215 { 5216 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && 5217 rq->nr_running); 5218 } 5219 5220 #ifdef CONFIG_SMP 5221 static int sched_idle_cpu(int cpu) 5222 { 5223 return sched_idle_rq(cpu_rq(cpu)); 5224 } 5225 #endif 5226 5227 /* 5228 * The enqueue_task method is called before nr_running is 5229 * increased. Here we update the fair scheduling stats and 5230 * then put the task into the rbtree: 5231 */ 5232 static void 5233 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) 5234 { 5235 struct cfs_rq *cfs_rq; 5236 struct sched_entity *se = &p->se; 5237 int idle_h_nr_running = task_has_idle_policy(p); 5238 5239 /* 5240 * The code below (indirectly) updates schedutil which looks at 5241 * the cfs_rq utilization to select a frequency. 5242 * Let's add the task's estimated utilization to the cfs_rq's 5243 * estimated utilization, before we update schedutil. 5244 */ 5245 util_est_enqueue(&rq->cfs, p); 5246 5247 /* 5248 * If in_iowait is set, the code below may not trigger any cpufreq 5249 * utilization updates, so do it here explicitly with the IOWAIT flag 5250 * passed. 5251 */ 5252 if (p->in_iowait) 5253 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); 5254 5255 for_each_sched_entity(se) { 5256 if (se->on_rq) 5257 break; 5258 cfs_rq = cfs_rq_of(se); 5259 enqueue_entity(cfs_rq, se, flags); 5260 5261 /* 5262 * end evaluation on encountering a throttled cfs_rq 5263 * 5264 * note: in the case of encountering a throttled cfs_rq we will 5265 * post the final h_nr_running increment below. 5266 */ 5267 if (cfs_rq_throttled(cfs_rq)) 5268 break; 5269 cfs_rq->h_nr_running++; 5270 cfs_rq->idle_h_nr_running += idle_h_nr_running; 5271 5272 flags = ENQUEUE_WAKEUP; 5273 } 5274 5275 for_each_sched_entity(se) { 5276 cfs_rq = cfs_rq_of(se); 5277 cfs_rq->h_nr_running++; 5278 cfs_rq->idle_h_nr_running += idle_h_nr_running; 5279 5280 if (cfs_rq_throttled(cfs_rq)) 5281 break; 5282 5283 update_load_avg(cfs_rq, se, UPDATE_TG); 5284 update_cfs_group(se); 5285 } 5286 5287 if (!se) { 5288 add_nr_running(rq, 1); 5289 /* 5290 * Since new tasks are assigned an initial util_avg equal to 5291 * half of the spare capacity of their CPU, tiny tasks have the 5292 * ability to cross the overutilized threshold, which will 5293 * result in the load balancer ruining all the task placement 5294 * done by EAS. 
As a way to mitigate that effect, do not account 5295 * for the first enqueue operation of new tasks during the 5296 * overutilized flag detection. 5297 * 5298 * A better way of solving this problem would be to wait for 5299 * the PELT signals of tasks to converge before taking them 5300 * into account, but that is not straightforward to implement, 5301 * and the following generally works well enough in practice. 5302 */ 5303 if (flags & ENQUEUE_WAKEUP) 5304 update_overutilized_status(rq); 5305 5306 } 5307 5308 if (cfs_bandwidth_used()) { 5309 /* 5310 * When bandwidth control is enabled; the cfs_rq_throttled() 5311 * breaks in the above iteration can result in incomplete 5312 * leaf list maintenance, resulting in triggering the assertion 5313 * below. 5314 */ 5315 for_each_sched_entity(se) { 5316 cfs_rq = cfs_rq_of(se); 5317 5318 if (list_add_leaf_cfs_rq(cfs_rq)) 5319 break; 5320 } 5321 } 5322 5323 assert_list_leaf_cfs_rq(rq); 5324 5325 hrtick_update(rq); 5326 } 5327 5328 static void set_next_buddy(struct sched_entity *se); 5329 5330 /* 5331 * The dequeue_task method is called before nr_running is 5332 * decreased. We remove the task from the rbtree and 5333 * update the fair scheduling stats: 5334 */ 5335 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) 5336 { 5337 struct cfs_rq *cfs_rq; 5338 struct sched_entity *se = &p->se; 5339 int task_sleep = flags & DEQUEUE_SLEEP; 5340 int idle_h_nr_running = task_has_idle_policy(p); 5341 bool was_sched_idle = sched_idle_rq(rq); 5342 5343 for_each_sched_entity(se) { 5344 cfs_rq = cfs_rq_of(se); 5345 dequeue_entity(cfs_rq, se, flags); 5346 5347 /* 5348 * end evaluation on encountering a throttled cfs_rq 5349 * 5350 * note: in the case of encountering a throttled cfs_rq we will 5351 * post the final h_nr_running decrement below. 5352 */ 5353 if (cfs_rq_throttled(cfs_rq)) 5354 break; 5355 cfs_rq->h_nr_running--; 5356 cfs_rq->idle_h_nr_running -= idle_h_nr_running; 5357 5358 /* Don't dequeue parent if it has other entities besides us */ 5359 if (cfs_rq->load.weight) { 5360 /* Avoid re-evaluating load for this entity: */ 5361 se = parent_entity(se); 5362 /* 5363 * Bias pick_next to pick a task from this cfs_rq, as 5364 * p is sleeping when it is within its sched_slice. 5365 */ 5366 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) 5367 set_next_buddy(se); 5368 break; 5369 } 5370 flags |= DEQUEUE_SLEEP; 5371 } 5372 5373 for_each_sched_entity(se) { 5374 cfs_rq = cfs_rq_of(se); 5375 cfs_rq->h_nr_running--; 5376 cfs_rq->idle_h_nr_running -= idle_h_nr_running; 5377 5378 if (cfs_rq_throttled(cfs_rq)) 5379 break; 5380 5381 update_load_avg(cfs_rq, se, UPDATE_TG); 5382 update_cfs_group(se); 5383 } 5384 5385 if (!se) 5386 sub_nr_running(rq, 1); 5387 5388 /* balance early to pull high priority tasks */ 5389 if (unlikely(!was_sched_idle && sched_idle_rq(rq))) 5390 rq->next_balance = jiffies; 5391 5392 util_est_dequeue(&rq->cfs, p, task_sleep); 5393 hrtick_update(rq); 5394 } 5395 5396 #ifdef CONFIG_SMP 5397 5398 /* Working cpumask for: load_balance, load_balance_newidle. 
*/ 5399 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); 5400 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask); 5401 5402 #ifdef CONFIG_NO_HZ_COMMON 5403 5404 static struct { 5405 cpumask_var_t idle_cpus_mask; 5406 atomic_t nr_cpus; 5407 int has_blocked; /* Idle CPUs have blocked load */ 5408 unsigned long next_balance; /* in jiffy units */ 5409 unsigned long next_blocked; /* Next update of blocked load in jiffies */ 5410 } nohz ____cacheline_aligned; 5411 5412 #endif /* CONFIG_NO_HZ_COMMON */ 5413 5414 static unsigned long cpu_load(struct rq *rq) 5415 { 5416 return cfs_rq_load_avg(&rq->cfs); 5417 } 5418 5419 /* 5420 * cpu_load_without - compute CPU load without any contributions from *p 5421 * @rq: the runqueue whose CPU load is requested 5422 * @p: the task whose load should be discounted 5423 * 5424 * The load of a CPU is defined by the load of tasks currently enqueued on that 5425 * CPU as well as tasks which are currently sleeping after an execution on that 5426 * CPU. 5427 * 5428 * This method returns the load of the specified CPU by discounting the load of 5429 * the specified task, whenever the task is currently contributing to the CPU 5430 * load. 5431 */ 5432 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) 5433 { 5434 struct cfs_rq *cfs_rq; 5435 unsigned int load; 5436 5437 /* Task has no contribution or is new */ 5438 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 5439 return cpu_load(rq); 5440 5441 cfs_rq = &rq->cfs; 5442 load = READ_ONCE(cfs_rq->avg.load_avg); 5443 5444 /* Discount task's load from CPU's load */ 5445 lsub_positive(&load, task_h_load(p)); 5446 5447 return load; 5448 } 5449 5450 static unsigned long capacity_of(int cpu) 5451 { 5452 return cpu_rq(cpu)->cpu_capacity; 5453 } 5454 5455 static void record_wakee(struct task_struct *p) 5456 { 5457 /* 5458 * Only decay a single time; tasks that have less than 1 wakeup per 5459 * jiffy will not have built up many flips. 5460 */ 5461 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { 5462 current->wakee_flips >>= 1; 5463 current->wakee_flip_decay_ts = jiffies; 5464 } 5465 5466 if (current->last_wakee != p) { 5467 current->last_wakee = p; 5468 current->wakee_flips++; 5469 } 5470 } 5471 5472 /* 5473 * Detect M:N waker/wakee relationships via a switching-frequency heuristic. 5474 * 5475 * A waker of many should wake a different task than the one last awakened 5476 * at a frequency roughly N times higher than one of its wakees. 5477 * 5478 * In order to determine whether we should let the load spread vs consolidating 5479 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one 5480 * partner, and a factor of llc_size higher frequency in the other. 5481 * 5482 * With both conditions met, we can be relatively sure that the relationship is 5483 * non-monogamous, with partner count exceeding socket size. 5484 * 5485 * Waker/wakee being client/server, worker/dispatcher, interrupt source or 5486 * whatever is irrelevant; the spread criterion is that the apparent partner 5487 * count exceeds the socket size. 5488 */ 5489 static int wake_wide(struct task_struct *p) 5490 { 5491 unsigned int master = current->wakee_flips; 5492 unsigned int slave = p->wakee_flips; 5493 int factor = this_cpu_read(sd_llc_size); 5494 5495 if (master < slave) 5496 swap(master, slave); 5497 if (slave < factor || master < slave * factor) 5498 return 0; 5499 return 1; 5500 } 5501 5502 /* 5503 * The purpose of wake_affine() is to quickly determine on which CPU we can run 5504 * soonest.
For the purpose of speed we only consider the waking and previous 5505 * CPU. 5506 * 5507 * wake_affine_idle() - only considers 'now', it check if the waking CPU is 5508 * cache-affine and is (or will be) idle. 5509 * 5510 * wake_affine_weight() - considers the weight to reflect the average 5511 * scheduling latency of the CPUs. This seems to work 5512 * for the overloaded case. 5513 */ 5514 static int 5515 wake_affine_idle(int this_cpu, int prev_cpu, int sync) 5516 { 5517 /* 5518 * If this_cpu is idle, it implies the wakeup is from interrupt 5519 * context. Only allow the move if cache is shared. Otherwise an 5520 * interrupt intensive workload could force all tasks onto one 5521 * node depending on the IO topology or IRQ affinity settings. 5522 * 5523 * If the prev_cpu is idle and cache affine then avoid a migration. 5524 * There is no guarantee that the cache hot data from an interrupt 5525 * is more important than cache hot data on the prev_cpu and from 5526 * a cpufreq perspective, it's better to have higher utilisation 5527 * on one CPU. 5528 */ 5529 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) 5530 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; 5531 5532 if (sync && cpu_rq(this_cpu)->nr_running == 1) 5533 return this_cpu; 5534 5535 return nr_cpumask_bits; 5536 } 5537 5538 static int 5539 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, 5540 int this_cpu, int prev_cpu, int sync) 5541 { 5542 s64 this_eff_load, prev_eff_load; 5543 unsigned long task_load; 5544 5545 this_eff_load = cpu_load(cpu_rq(this_cpu)); 5546 5547 if (sync) { 5548 unsigned long current_load = task_h_load(current); 5549 5550 if (current_load > this_eff_load) 5551 return this_cpu; 5552 5553 this_eff_load -= current_load; 5554 } 5555 5556 task_load = task_h_load(p); 5557 5558 this_eff_load += task_load; 5559 if (sched_feat(WA_BIAS)) 5560 this_eff_load *= 100; 5561 this_eff_load *= capacity_of(prev_cpu); 5562 5563 prev_eff_load = cpu_load(cpu_rq(prev_cpu)); 5564 prev_eff_load -= task_load; 5565 if (sched_feat(WA_BIAS)) 5566 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; 5567 prev_eff_load *= capacity_of(this_cpu); 5568 5569 /* 5570 * If sync, adjust the weight of prev_eff_load such that if 5571 * prev_eff == this_eff that select_idle_sibling() will consider 5572 * stacking the wakee on top of the waker if no other CPU is 5573 * idle. 5574 */ 5575 if (sync) 5576 prev_eff_load += 1; 5577 5578 return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; 5579 } 5580 5581 static int wake_affine(struct sched_domain *sd, struct task_struct *p, 5582 int this_cpu, int prev_cpu, int sync) 5583 { 5584 int target = nr_cpumask_bits; 5585 5586 if (sched_feat(WA_IDLE)) 5587 target = wake_affine_idle(this_cpu, prev_cpu, sync); 5588 5589 if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits) 5590 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); 5591 5592 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); 5593 if (target == nr_cpumask_bits) 5594 return prev_cpu; 5595 5596 schedstat_inc(sd->ttwu_move_affine); 5597 schedstat_inc(p->se.statistics.nr_wakeups_affine); 5598 return target; 5599 } 5600 5601 static struct sched_group * 5602 find_idlest_group(struct sched_domain *sd, struct task_struct *p, 5603 int this_cpu, int sd_flag); 5604 5605 /* 5606 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group. 
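 * Preference order: an allowed SCHED_IDLE-only CPU is returned immediately;
 * otherwise the idle CPU whose idle state has the smallest exit latency wins
 * (the most recently idled one on ties), and only if no CPU is idle is the
 * least loaded CPU picked.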
5607 */ 5608 static int 5609 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) 5610 { 5611 unsigned long load, min_load = ULONG_MAX; 5612 unsigned int min_exit_latency = UINT_MAX; 5613 u64 latest_idle_timestamp = 0; 5614 int least_loaded_cpu = this_cpu; 5615 int shallowest_idle_cpu = -1; 5616 int i; 5617 5618 /* Check if we have any choice: */ 5619 if (group->group_weight == 1) 5620 return cpumask_first(sched_group_span(group)); 5621 5622 /* Traverse only the allowed CPUs */ 5623 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { 5624 if (sched_idle_cpu(i)) 5625 return i; 5626 5627 if (available_idle_cpu(i)) { 5628 struct rq *rq = cpu_rq(i); 5629 struct cpuidle_state *idle = idle_get_state(rq); 5630 if (idle && idle->exit_latency < min_exit_latency) { 5631 /* 5632 * We give priority to a CPU whose idle state 5633 * has the smallest exit latency irrespective 5634 * of any idle timestamp. 5635 */ 5636 min_exit_latency = idle->exit_latency; 5637 latest_idle_timestamp = rq->idle_stamp; 5638 shallowest_idle_cpu = i; 5639 } else if ((!idle || idle->exit_latency == min_exit_latency) && 5640 rq->idle_stamp > latest_idle_timestamp) { 5641 /* 5642 * If equal or no active idle state, then 5643 * the most recently idled CPU might have 5644 * a warmer cache. 5645 */ 5646 latest_idle_timestamp = rq->idle_stamp; 5647 shallowest_idle_cpu = i; 5648 } 5649 } else if (shallowest_idle_cpu == -1) { 5650 load = cpu_load(cpu_rq(i)); 5651 if (load < min_load) { 5652 min_load = load; 5653 least_loaded_cpu = i; 5654 } 5655 } 5656 } 5657 5658 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; 5659 } 5660 5661 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, 5662 int cpu, int prev_cpu, int sd_flag) 5663 { 5664 int new_cpu = cpu; 5665 5666 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) 5667 return prev_cpu; 5668 5669 /* 5670 * We need task's util for cpu_util_without, sync it up to 5671 * prev_cpu's last_update_time. 
5672 */ 5673 if (!(sd_flag & SD_BALANCE_FORK)) 5674 sync_entity_load_avg(&p->se); 5675 5676 while (sd) { 5677 struct sched_group *group; 5678 struct sched_domain *tmp; 5679 int weight; 5680 5681 if (!(sd->flags & sd_flag)) { 5682 sd = sd->child; 5683 continue; 5684 } 5685 5686 group = find_idlest_group(sd, p, cpu, sd_flag); 5687 if (!group) { 5688 sd = sd->child; 5689 continue; 5690 } 5691 5692 new_cpu = find_idlest_group_cpu(group, p, cpu); 5693 if (new_cpu == cpu) { 5694 /* Now try balancing at a lower domain level of 'cpu': */ 5695 sd = sd->child; 5696 continue; 5697 } 5698 5699 /* Now try balancing at a lower domain level of 'new_cpu': */ 5700 cpu = new_cpu; 5701 weight = sd->span_weight; 5702 sd = NULL; 5703 for_each_domain(cpu, tmp) { 5704 if (weight <= tmp->span_weight) 5705 break; 5706 if (tmp->flags & sd_flag) 5707 sd = tmp; 5708 } 5709 } 5710 5711 return new_cpu; 5712 } 5713 5714 #ifdef CONFIG_SCHED_SMT 5715 DEFINE_STATIC_KEY_FALSE(sched_smt_present); 5716 EXPORT_SYMBOL_GPL(sched_smt_present); 5717 5718 static inline void set_idle_cores(int cpu, int val) 5719 { 5720 struct sched_domain_shared *sds; 5721 5722 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5723 if (sds) 5724 WRITE_ONCE(sds->has_idle_cores, val); 5725 } 5726 5727 static inline bool test_idle_cores(int cpu, bool def) 5728 { 5729 struct sched_domain_shared *sds; 5730 5731 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5732 if (sds) 5733 return READ_ONCE(sds->has_idle_cores); 5734 5735 return def; 5736 } 5737 5738 /* 5739 * Scans the local SMT mask to see if the entire core is idle, and records this 5740 * information in sd_llc_shared->has_idle_cores. 5741 * 5742 * Since SMT siblings share all cache levels, inspecting this limited remote 5743 * state should be fairly cheap. 5744 */ 5745 void __update_idle_core(struct rq *rq) 5746 { 5747 int core = cpu_of(rq); 5748 int cpu; 5749 5750 rcu_read_lock(); 5751 if (test_idle_cores(core, true)) 5752 goto unlock; 5753 5754 for_each_cpu(cpu, cpu_smt_mask(core)) { 5755 if (cpu == core) 5756 continue; 5757 5758 if (!available_idle_cpu(cpu)) 5759 goto unlock; 5760 } 5761 5762 set_idle_cores(core, 1); 5763 unlock: 5764 rcu_read_unlock(); 5765 } 5766 5767 /* 5768 * Scan the entire LLC domain for idle cores; this dynamically switches off if 5769 * there are no idle cores left in the system; tracked through 5770 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. 5771 */ 5772 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) 5773 { 5774 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); 5775 int core, cpu; 5776 5777 if (!static_branch_likely(&sched_smt_present)) 5778 return -1; 5779 5780 if (!test_idle_cores(target, false)) 5781 return -1; 5782 5783 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 5784 5785 for_each_cpu_wrap(core, cpus, target) { 5786 bool idle = true; 5787 5788 for_each_cpu(cpu, cpu_smt_mask(core)) { 5789 __cpumask_clear_cpu(cpu, cpus); 5790 if (!available_idle_cpu(cpu)) 5791 idle = false; 5792 } 5793 5794 if (idle) 5795 return core; 5796 } 5797 5798 /* 5799 * Failed to find an idle core; stop looking for one. 5800 */ 5801 set_idle_cores(target, 0); 5802 5803 return -1; 5804 } 5805 5806 /* 5807 * Scan the local SMT mask for idle CPUs. 
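 * Unlike select_idle_core() this will settle for an idle sibling of a busy
 * core, so select_idle_sibling() only falls back to it last.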
5808 */ 5809 static int select_idle_smt(struct task_struct *p, int target) 5810 { 5811 int cpu; 5812 5813 if (!static_branch_likely(&sched_smt_present)) 5814 return -1; 5815 5816 for_each_cpu(cpu, cpu_smt_mask(target)) { 5817 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 5818 continue; 5819 if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) 5820 return cpu; 5821 } 5822 5823 return -1; 5824 } 5825 5826 #else /* CONFIG_SCHED_SMT */ 5827 5828 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) 5829 { 5830 return -1; 5831 } 5832 5833 static inline int select_idle_smt(struct task_struct *p, int target) 5834 { 5835 return -1; 5836 } 5837 5838 #endif /* CONFIG_SCHED_SMT */ 5839 5840 /* 5841 * Scan the LLC domain for idle CPUs; this is dynamically regulated by 5842 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the 5843 * average idle time for this rq (as found in rq->avg_idle). 5844 */ 5845 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) 5846 { 5847 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); 5848 struct sched_domain *this_sd; 5849 u64 avg_cost, avg_idle; 5850 u64 time, cost; 5851 s64 delta; 5852 int this = smp_processor_id(); 5853 int cpu, nr = INT_MAX; 5854 5855 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); 5856 if (!this_sd) 5857 return -1; 5858 5859 /* 5860 * Due to large variance we need a large fuzz factor; hackbench in 5861 * particularly is sensitive here. 5862 */ 5863 avg_idle = this_rq()->avg_idle / 512; 5864 avg_cost = this_sd->avg_scan_cost + 1; 5865 5866 if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost) 5867 return -1; 5868 5869 if (sched_feat(SIS_PROP)) { 5870 u64 span_avg = sd->span_weight * avg_idle; 5871 if (span_avg > 4*avg_cost) 5872 nr = div_u64(span_avg, avg_cost); 5873 else 5874 nr = 4; 5875 } 5876 5877 time = cpu_clock(this); 5878 5879 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); 5880 5881 for_each_cpu_wrap(cpu, cpus, target) { 5882 if (!--nr) 5883 return -1; 5884 if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) 5885 break; 5886 } 5887 5888 time = cpu_clock(this) - time; 5889 cost = this_sd->avg_scan_cost; 5890 delta = (s64)(time - cost) / 8; 5891 this_sd->avg_scan_cost += delta; 5892 5893 return cpu; 5894 } 5895 5896 /* 5897 * Try and locate an idle core/thread in the LLC cache domain. 5898 */ 5899 static int select_idle_sibling(struct task_struct *p, int prev, int target) 5900 { 5901 struct sched_domain *sd; 5902 int i, recent_used_cpu; 5903 5904 if (available_idle_cpu(target) || sched_idle_cpu(target)) 5905 return target; 5906 5907 /* 5908 * If the previous CPU is cache affine and idle, don't be stupid: 5909 */ 5910 if (prev != target && cpus_share_cache(prev, target) && 5911 (available_idle_cpu(prev) || sched_idle_cpu(prev))) 5912 return prev; 5913 5914 /* 5915 * Allow a per-cpu kthread to stack with the wakee if the 5916 * kworker thread and the tasks previous CPUs are the same. 5917 * The assumption is that the wakee queued work for the 5918 * per-cpu kthread that is now complete and the wakeup is 5919 * essentially a sync wakeup. An obvious example of this 5920 * pattern is IO completions. 
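 *
 * (The checks below require the wakeup to be issued on the task's previous
 * CPU, with at most the waking kthread itself runnable there.)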
5921 */ 5922 if (is_per_cpu_kthread(current) && 5923 prev == smp_processor_id() && 5924 this_rq()->nr_running <= 1) { 5925 return prev; 5926 } 5927 5928 /* Check a recently used CPU as a potential idle candidate: */ 5929 recent_used_cpu = p->recent_used_cpu; 5930 if (recent_used_cpu != prev && 5931 recent_used_cpu != target && 5932 cpus_share_cache(recent_used_cpu, target) && 5933 (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && 5934 cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) { 5935 /* 5936 * Replace recent_used_cpu with prev as it is a potential 5937 * candidate for the next wake: 5938 */ 5939 p->recent_used_cpu = prev; 5940 return recent_used_cpu; 5941 } 5942 5943 sd = rcu_dereference(per_cpu(sd_llc, target)); 5944 if (!sd) 5945 return target; 5946 5947 i = select_idle_core(p, sd, target); 5948 if ((unsigned)i < nr_cpumask_bits) 5949 return i; 5950 5951 i = select_idle_cpu(p, sd, target); 5952 if ((unsigned)i < nr_cpumask_bits) 5953 return i; 5954 5955 i = select_idle_smt(p, target); 5956 if ((unsigned)i < nr_cpumask_bits) 5957 return i; 5958 5959 return target; 5960 } 5961 5962 /** 5963 * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks 5964 * @cpu: the CPU to get the utilization of 5965 * 5966 * The unit of the return value must be the one of capacity so we can compare 5967 * the utilization with the capacity of the CPU that is available for CFS task 5968 * (ie cpu_capacity). 5969 * 5970 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the 5971 * recent utilization of currently non-runnable tasks on a CPU. It represents 5972 * the amount of utilization of a CPU in the range [0..capacity_orig] where 5973 * capacity_orig is the cpu_capacity available at the highest frequency 5974 * (arch_scale_freq_capacity()). 5975 * The utilization of a CPU converges towards a sum equal to or less than the 5976 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is 5977 * the running time on this CPU scaled by capacity_curr. 5978 * 5979 * The estimated utilization of a CPU is defined to be the maximum between its 5980 * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks 5981 * currently RUNNABLE on that CPU. 5982 * This allows to properly represent the expected utilization of a CPU which 5983 * has just got a big task running since a long sleep period. At the same time 5984 * however it preserves the benefits of the "blocked utilization" in 5985 * describing the potential for other tasks waking up on the same CPU. 5986 * 5987 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even 5988 * higher than capacity_orig because of unfortunate rounding in 5989 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until 5990 * the average stabilizes with the new running time. We need to check that the 5991 * utilization stays within the range of [0..capacity_orig] and cap it if 5992 * necessary. Without utilization capping, a group could be seen as overloaded 5993 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of 5994 * available capacity. We allow utilization to overshoot capacity_curr (but not 5995 * capacity_orig) as it useful for predicting the capacity required after task 5996 * migrations (scheduler-driven DVFS). 
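 *
 * As a worked example (illustrative numbers only): with capacity_orig = 1024,
 * cfs_rq.avg.util_avg = 1100 and util_est.enqueued = 300, the function below
 * returns min(max(1100, 300), 1024) = 1024.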
5997 * 5998 * Return: the (estimated) utilization for the specified CPU 5999 */ 6000 static inline unsigned long cpu_util(int cpu) 6001 { 6002 struct cfs_rq *cfs_rq; 6003 unsigned int util; 6004 6005 cfs_rq = &cpu_rq(cpu)->cfs; 6006 util = READ_ONCE(cfs_rq->avg.util_avg); 6007 6008 if (sched_feat(UTIL_EST)) 6009 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); 6010 6011 return min_t(unsigned long, util, capacity_orig_of(cpu)); 6012 } 6013 6014 /* 6015 * cpu_util_without: compute cpu utilization without any contributions from *p 6016 * @cpu: the CPU which utilization is requested 6017 * @p: the task which utilization should be discounted 6018 * 6019 * The utilization of a CPU is defined by the utilization of tasks currently 6020 * enqueued on that CPU as well as tasks which are currently sleeping after an 6021 * execution on that CPU. 6022 * 6023 * This method returns the utilization of the specified CPU by discounting the 6024 * utilization of the specified task, whenever the task is currently 6025 * contributing to the CPU utilization. 6026 */ 6027 static unsigned long cpu_util_without(int cpu, struct task_struct *p) 6028 { 6029 struct cfs_rq *cfs_rq; 6030 unsigned int util; 6031 6032 /* Task has no contribution or is new */ 6033 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 6034 return cpu_util(cpu); 6035 6036 cfs_rq = &cpu_rq(cpu)->cfs; 6037 util = READ_ONCE(cfs_rq->avg.util_avg); 6038 6039 /* Discount task's util from CPU's util */ 6040 lsub_positive(&util, task_util(p)); 6041 6042 /* 6043 * Covered cases: 6044 * 6045 * a) if *p is the only task sleeping on this CPU, then: 6046 * cpu_util (== task_util) > util_est (== 0) 6047 * and thus we return: 6048 * cpu_util_without = (cpu_util - task_util) = 0 6049 * 6050 * b) if other tasks are SLEEPING on this CPU, which is now exiting 6051 * IDLE, then: 6052 * cpu_util >= task_util 6053 * cpu_util > util_est (== 0) 6054 * and thus we discount *p's blocked utilization to return: 6055 * cpu_util_without = (cpu_util - task_util) >= 0 6056 * 6057 * c) if other tasks are RUNNABLE on that CPU and 6058 * util_est > cpu_util 6059 * then we use util_est since it returns a more restrictive 6060 * estimation of the spare capacity on that CPU, by just 6061 * considering the expected utilization of tasks already 6062 * runnable on that CPU. 6063 * 6064 * Cases a) and b) are covered by the above code, while case c) is 6065 * covered by the following code when estimated utilization is 6066 * enabled. 6067 */ 6068 if (sched_feat(UTIL_EST)) { 6069 unsigned int estimated = 6070 READ_ONCE(cfs_rq->avg.util_est.enqueued); 6071 6072 /* 6073 * Despite the following checks we still have a small window 6074 * for a possible race, when an execl's select_task_rq_fair() 6075 * races with LB's detach_task(): 6076 * 6077 * detach_task() 6078 * p->on_rq = TASK_ON_RQ_MIGRATING; 6079 * ---------------------------------- A 6080 * deactivate_task() \ 6081 * dequeue_task() + RaceTime 6082 * util_est_dequeue() / 6083 * ---------------------------------- B 6084 * 6085 * The additional check on "current == p" it's required to 6086 * properly fix the execl regression and it helps in further 6087 * reducing the chances for the above race. 
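 *
 * (Losing that race only makes this CPU look somewhat busier than it really
 * is for a single wakeup decision; it does not affect correctness.)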
6088 */ 6089 if (unlikely(task_on_rq_queued(p) || current == p)) 6090 lsub_positive(&estimated, _task_util_est(p)); 6091 6092 util = max(util, estimated); 6093 } 6094 6095 /* 6096 * Utilization (estimated) can exceed the CPU capacity, thus let's 6097 * clamp to the maximum CPU capacity to ensure consistency with 6098 * the cpu_util call. 6099 */ 6100 return min_t(unsigned long, util, capacity_orig_of(cpu)); 6101 } 6102 6103 /* 6104 * Disable WAKE_AFFINE in the case where task @p doesn't fit in the 6105 * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu. 6106 * 6107 * In that case WAKE_AFFINE doesn't make sense and we'll let 6108 * BALANCE_WAKE sort things out. 6109 */ 6110 static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) 6111 { 6112 long min_cap, max_cap; 6113 6114 if (!static_branch_unlikely(&sched_asym_cpucapacity)) 6115 return 0; 6116 6117 min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu)); 6118 max_cap = cpu_rq(cpu)->rd->max_cpu_capacity; 6119 6120 /* Minimum capacity is close to max, no need to abort wake_affine */ 6121 if (max_cap - min_cap < max_cap >> 3) 6122 return 0; 6123 6124 /* Bring task utilization in sync with prev_cpu */ 6125 sync_entity_load_avg(&p->se); 6126 6127 return !task_fits_capacity(p, min_cap); 6128 } 6129 6130 /* 6131 * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued) 6132 * to @dst_cpu. 6133 */ 6134 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) 6135 { 6136 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; 6137 unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); 6138 6139 /* 6140 * If @p migrates from @cpu to another, remove its contribution. Or, 6141 * if @p migrates from another CPU to @cpu, add its contribution. In 6142 * the other cases, @cpu is not impacted by the migration, so the 6143 * util_avg should already be correct. 6144 */ 6145 if (task_cpu(p) == cpu && dst_cpu != cpu) 6146 sub_positive(&util, task_util(p)); 6147 else if (task_cpu(p) != cpu && dst_cpu == cpu) 6148 util += task_util(p); 6149 6150 if (sched_feat(UTIL_EST)) { 6151 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); 6152 6153 /* 6154 * During wake-up, the task isn't enqueued yet and doesn't 6155 * appear in the cfs_rq->avg.util_est.enqueued of any rq, 6156 * so just add it (if needed) to "simulate" what will be 6157 * cpu_util() after the task has been enqueued. 6158 */ 6159 if (dst_cpu == cpu) 6160 util_est += _task_util_est(p); 6161 6162 util = max(util, util_est); 6163 } 6164 6165 return min(util, capacity_orig_of(cpu)); 6166 } 6167 6168 /* 6169 * compute_energy(): Estimates the energy that @pd would consume if @p was 6170 * migrated to @dst_cpu. compute_energy() predicts what will be the utilization 6171 * landscape of @pd's CPUs after the task migration, and uses the Energy Model 6172 * to compute what would be the energy if we decided to actually migrate that 6173 * task. 6174 */ 6175 static long 6176 compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) 6177 { 6178 struct cpumask *pd_mask = perf_domain_span(pd); 6179 unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask)); 6180 unsigned long max_util = 0, sum_util = 0; 6181 int cpu; 6182 6183 /* 6184 * The capacity state of CPUs of the current rd can be driven by CPUs 6185 * of another rd if they belong to the same pd. So, account for the 6186 * utilization of these CPUs too by masking pd with cpu_online_mask 6187 * instead of the rd span. 
6188 * 6189 * If an entire pd is outside of the current rd, it will not appear in 6190 * its pd list and will not be accounted by compute_energy(). 6191 */ 6192 for_each_cpu_and(cpu, pd_mask, cpu_online_mask) { 6193 unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu); 6194 struct task_struct *tsk = cpu == dst_cpu ? p : NULL; 6195 6196 /* 6197 * Busy time computation: utilization clamping is not 6198 * required since the ratio (sum_util / cpu_capacity) 6199 * is already enough to scale the EM reported power 6200 * consumption at the (eventually clamped) cpu_capacity. 6201 */ 6202 sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap, 6203 ENERGY_UTIL, NULL); 6204 6205 /* 6206 * Performance domain frequency: utilization clamping 6207 * must be considered since it affects the selection 6208 * of the performance domain frequency. 6209 * NOTE: in case RT tasks are running, by default the 6210 * FREQUENCY_UTIL's utilization can be max OPP. 6211 */ 6212 cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap, 6213 FREQUENCY_UTIL, tsk); 6214 max_util = max(max_util, cpu_util); 6215 } 6216 6217 return em_pd_energy(pd->em_pd, max_util, sum_util); 6218 } 6219 6220 /* 6221 * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the 6222 * waking task. find_energy_efficient_cpu() looks for the CPU with maximum 6223 * spare capacity in each performance domain and uses it as a potential 6224 * candidate to execute the task. Then, it uses the Energy Model to figure 6225 * out which of the CPU candidates is the most energy-efficient. 6226 * 6227 * The rationale for this heuristic is as follows. In a performance domain, 6228 * all the most energy efficient CPU candidates (according to the Energy 6229 * Model) are those for which we'll request a low frequency. When there are 6230 * several CPUs for which the frequency request will be the same, we don't 6231 * have enough data to break the tie between them, because the Energy Model 6232 * only includes active power costs. With this model, if we assume that 6233 * frequency requests follow utilization (e.g. using schedutil), the CPU with 6234 * the maximum spare capacity in a performance domain is guaranteed to be among 6235 * the best candidates of the performance domain. 6236 * 6237 * In practice, it could be preferable from an energy standpoint to pack 6238 * small tasks on a CPU in order to let other CPUs go in deeper idle states, 6239 * but that could also hurt our chances to go cluster idle, and we have no 6240 * ways to tell with the current Energy Model if this is actually a good 6241 * idea or not. So, find_energy_efficient_cpu() basically favors 6242 * cluster-packing, and spreading inside a cluster. That should at least be 6243 * a good thing for latency, and this is consistent with the idea that most 6244 * of the energy savings of EAS come from the asymmetry of the system, and 6245 * not so much from breaking the tie between identical CPUs. That's also the 6246 * reason why EAS is enabled in the topology code only for systems where 6247 * SD_ASYM_CPUCAPACITY is set. 6248 * 6249 * NOTE: Forkees are not accepted in the energy-aware wake-up path because 6250 * they don't have any useful utilization data yet and it's not possible to 6251 * forecast their impact on energy consumption. Consequently, they will be 6252 * placed by find_idlest_cpu() on the least loaded CPU, which might turn out 6253 * to be energy-inefficient in some use-cases. 
The alternative would be to 6254 * bias new tasks towards specific types of CPUs first, or to try to infer 6255 * their util_avg from the parent task, but those heuristics could hurt 6256 * other use-cases too. So, until someone finds a better way to solve this, 6257 * let's keep things simple by re-using the existing slow path. 6258 */ 6259 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) 6260 { 6261 unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX; 6262 struct root_domain *rd = cpu_rq(smp_processor_id())->rd; 6263 unsigned long cpu_cap, util, base_energy = 0; 6264 int cpu, best_energy_cpu = prev_cpu; 6265 struct sched_domain *sd; 6266 struct perf_domain *pd; 6267 6268 rcu_read_lock(); 6269 pd = rcu_dereference(rd->pd); 6270 if (!pd || READ_ONCE(rd->overutilized)) 6271 goto fail; 6272 6273 /* 6274 * Energy-aware wake-up happens on the lowest sched_domain starting 6275 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu. 6276 */ 6277 sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity)); 6278 while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) 6279 sd = sd->parent; 6280 if (!sd) 6281 goto fail; 6282 6283 sync_entity_load_avg(&p->se); 6284 if (!task_util_est(p)) 6285 goto unlock; 6286 6287 for (; pd; pd = pd->next) { 6288 unsigned long cur_delta, spare_cap, max_spare_cap = 0; 6289 unsigned long base_energy_pd; 6290 int max_spare_cap_cpu = -1; 6291 6292 /* Compute the 'base' energy of the pd, without @p */ 6293 base_energy_pd = compute_energy(p, -1, pd); 6294 base_energy += base_energy_pd; 6295 6296 for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { 6297 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 6298 continue; 6299 6300 util = cpu_util_next(cpu, p, cpu); 6301 cpu_cap = capacity_of(cpu); 6302 spare_cap = cpu_cap - util; 6303 6304 /* 6305 * Skip CPUs that cannot satisfy the capacity request. 6306 * IOW, placing the task there would make the CPU 6307 * overutilized. Take uclamp into account to see how 6308 * much capacity we can get out of the CPU; this is 6309 * aligned with schedutil_cpu_util(). 6310 */ 6311 util = uclamp_rq_util_with(cpu_rq(cpu), util, p); 6312 if (!fits_capacity(util, cpu_cap)) 6313 continue; 6314 6315 /* Always use prev_cpu as a candidate. */ 6316 if (cpu == prev_cpu) { 6317 prev_delta = compute_energy(p, prev_cpu, pd); 6318 prev_delta -= base_energy_pd; 6319 best_delta = min(best_delta, prev_delta); 6320 } 6321 6322 /* 6323 * Find the CPU with the maximum spare capacity in 6324 * the performance domain 6325 */ 6326 if (spare_cap > max_spare_cap) { 6327 max_spare_cap = spare_cap; 6328 max_spare_cap_cpu = cpu; 6329 } 6330 } 6331 6332 /* Evaluate the energy impact of using this CPU. */ 6333 if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) { 6334 cur_delta = compute_energy(p, max_spare_cap_cpu, pd); 6335 cur_delta -= base_energy_pd; 6336 if (cur_delta < best_delta) { 6337 best_delta = cur_delta; 6338 best_energy_cpu = max_spare_cap_cpu; 6339 } 6340 } 6341 } 6342 unlock: 6343 rcu_read_unlock(); 6344 6345 /* 6346 * Pick the best CPU if prev_cpu cannot be used, or if it saves at 6347 * least 6% of the energy used by prev_cpu. 
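 *
 * As a worked example (illustrative units): with base_energy = 800,
 * prev_delta = 200 and best_delta = 120 the saving is 80, which is compared
 * against (200 + 800) >> 4 = 62, so best_energy_cpu is returned.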
6348 */ 6349 if (prev_delta == ULONG_MAX) 6350 return best_energy_cpu; 6351 6352 if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 4)) 6353 return best_energy_cpu; 6354 6355 return prev_cpu; 6356 6357 fail: 6358 rcu_read_unlock(); 6359 6360 return -1; 6361 } 6362 6363 /* 6364 * select_task_rq_fair: Select target runqueue for the waking task in domains 6365 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, 6366 * SD_BALANCE_FORK, or SD_BALANCE_EXEC. 6367 * 6368 * Balances load by selecting the idlest CPU in the idlest group, or under 6369 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. 6370 * 6371 * Returns the target CPU number. 6372 * 6373 * preempt must be disabled. 6374 */ 6375 static int 6376 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) 6377 { 6378 struct sched_domain *tmp, *sd = NULL; 6379 int cpu = smp_processor_id(); 6380 int new_cpu = prev_cpu; 6381 int want_affine = 0; 6382 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); 6383 6384 if (sd_flag & SD_BALANCE_WAKE) { 6385 record_wakee(p); 6386 6387 if (sched_energy_enabled()) { 6388 new_cpu = find_energy_efficient_cpu(p, prev_cpu); 6389 if (new_cpu >= 0) 6390 return new_cpu; 6391 new_cpu = prev_cpu; 6392 } 6393 6394 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) && 6395 cpumask_test_cpu(cpu, p->cpus_ptr); 6396 } 6397 6398 rcu_read_lock(); 6399 for_each_domain(cpu, tmp) { 6400 if (!(tmp->flags & SD_LOAD_BALANCE)) 6401 break; 6402 6403 /* 6404 * If both 'cpu' and 'prev_cpu' are part of this domain, 6405 * cpu is a valid SD_WAKE_AFFINE target. 6406 */ 6407 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && 6408 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { 6409 if (cpu != prev_cpu) 6410 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); 6411 6412 sd = NULL; /* Prefer wake_affine over balance flags */ 6413 break; 6414 } 6415 6416 if (tmp->flags & sd_flag) 6417 sd = tmp; 6418 else if (!want_affine) 6419 break; 6420 } 6421 6422 if (unlikely(sd)) { 6423 /* Slow path */ 6424 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); 6425 } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ 6426 /* Fast path */ 6427 6428 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); 6429 6430 if (want_affine) 6431 current->recent_used_cpu = cpu; 6432 } 6433 rcu_read_unlock(); 6434 6435 return new_cpu; 6436 } 6437 6438 static void detach_entity_cfs_rq(struct sched_entity *se); 6439 6440 /* 6441 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and 6442 * cfs_rq_of(p) references at time of call are still valid and identify the 6443 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. 6444 */ 6445 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) 6446 { 6447 /* 6448 * As blocked tasks retain absolute vruntime the migration needs to 6449 * deal with this by subtracting the old and adding the new 6450 * min_vruntime -- the latter is done by enqueue_entity() when placing 6451 * the task on the new runqueue. 
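 *
 * (On 32-bit, min_vruntime is read below together with min_vruntime_copy to
 * detect a torn 64-bit read; the loop retries until the two values agree.)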
6452 */ 6453 if (p->state == TASK_WAKING) { 6454 struct sched_entity *se = &p->se; 6455 struct cfs_rq *cfs_rq = cfs_rq_of(se); 6456 u64 min_vruntime; 6457 6458 #ifndef CONFIG_64BIT 6459 u64 min_vruntime_copy; 6460 6461 do { 6462 min_vruntime_copy = cfs_rq->min_vruntime_copy; 6463 smp_rmb(); 6464 min_vruntime = cfs_rq->min_vruntime; 6465 } while (min_vruntime != min_vruntime_copy); 6466 #else 6467 min_vruntime = cfs_rq->min_vruntime; 6468 #endif 6469 6470 se->vruntime -= min_vruntime; 6471 } 6472 6473 if (p->on_rq == TASK_ON_RQ_MIGRATING) { 6474 /* 6475 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' 6476 * rq->lock and can modify state directly. 6477 */ 6478 lockdep_assert_held(&task_rq(p)->lock); 6479 detach_entity_cfs_rq(&p->se); 6480 6481 } else { 6482 /* 6483 * We are supposed to update the task to "current" time, then 6484 * its up to date and ready to go to new CPU/cfs_rq. But we 6485 * have difficulty in getting what current time is, so simply 6486 * throw away the out-of-date time. This will result in the 6487 * wakee task is less decayed, but giving the wakee more load 6488 * sounds not bad. 6489 */ 6490 remove_entity_load_avg(&p->se); 6491 } 6492 6493 /* Tell new CPU we are migrated */ 6494 p->se.avg.last_update_time = 0; 6495 6496 /* We have migrated, no longer consider this task hot */ 6497 p->se.exec_start = 0; 6498 6499 update_scan_period(p, new_cpu); 6500 } 6501 6502 static void task_dead_fair(struct task_struct *p) 6503 { 6504 remove_entity_load_avg(&p->se); 6505 } 6506 6507 static int 6508 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6509 { 6510 if (rq->nr_running) 6511 return 1; 6512 6513 return newidle_balance(rq, rf) != 0; 6514 } 6515 #endif /* CONFIG_SMP */ 6516 6517 static unsigned long wakeup_gran(struct sched_entity *se) 6518 { 6519 unsigned long gran = sysctl_sched_wakeup_granularity; 6520 6521 /* 6522 * Since its curr running now, convert the gran from real-time 6523 * to virtual-time in his units. 6524 * 6525 * By using 'se' instead of 'curr' we penalize light tasks, so 6526 * they get preempted easier. That is, if 'se' < 'curr' then 6527 * the resulting gran will be larger, therefore penalizing the 6528 * lighter, if otoh 'se' > 'curr' then the resulting gran will 6529 * be smaller, again penalizing the lighter task. 6530 * 6531 * This is especially important for buddies when the leftmost 6532 * task is higher priority than the buddy. 6533 */ 6534 return calc_delta_fair(gran, se); 6535 } 6536 6537 /* 6538 * Should 'se' preempt 'curr'. 
6539 * 6540 * |s1 6541 * |s2 6542 * |s3 6543 * g 6544 * |<--->|c 6545 * 6546 * w(c, s1) = -1 6547 * w(c, s2) = 0 6548 * w(c, s3) = 1 6549 * 6550 */ 6551 static int 6552 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) 6553 { 6554 s64 gran, vdiff = curr->vruntime - se->vruntime; 6555 6556 if (vdiff <= 0) 6557 return -1; 6558 6559 gran = wakeup_gran(se); 6560 if (vdiff > gran) 6561 return 1; 6562 6563 return 0; 6564 } 6565 6566 static void set_last_buddy(struct sched_entity *se) 6567 { 6568 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) 6569 return; 6570 6571 for_each_sched_entity(se) { 6572 if (SCHED_WARN_ON(!se->on_rq)) 6573 return; 6574 cfs_rq_of(se)->last = se; 6575 } 6576 } 6577 6578 static void set_next_buddy(struct sched_entity *se) 6579 { 6580 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) 6581 return; 6582 6583 for_each_sched_entity(se) { 6584 if (SCHED_WARN_ON(!se->on_rq)) 6585 return; 6586 cfs_rq_of(se)->next = se; 6587 } 6588 } 6589 6590 static void set_skip_buddy(struct sched_entity *se) 6591 { 6592 for_each_sched_entity(se) 6593 cfs_rq_of(se)->skip = se; 6594 } 6595 6596 /* 6597 * Preempt the current task with a newly woken task if needed: 6598 */ 6599 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 6600 { 6601 struct task_struct *curr = rq->curr; 6602 struct sched_entity *se = &curr->se, *pse = &p->se; 6603 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 6604 int scale = cfs_rq->nr_running >= sched_nr_latency; 6605 int next_buddy_marked = 0; 6606 6607 if (unlikely(se == pse)) 6608 return; 6609 6610 /* 6611 * This is possible from callers such as attach_tasks(), in which we 6612 * unconditionally check_prempt_curr() after an enqueue (which may have 6613 * lead to a throttle). This both saves work and prevents false 6614 * next-buddy nomination below. 6615 */ 6616 if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) 6617 return; 6618 6619 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { 6620 set_next_buddy(pse); 6621 next_buddy_marked = 1; 6622 } 6623 6624 /* 6625 * We can come here with TIF_NEED_RESCHED already set from new task 6626 * wake up path. 6627 * 6628 * Note: this also catches the edge-case of curr being in a throttled 6629 * group (e.g. via set_curr_task), since update_curr() (in the 6630 * enqueue of curr) will have resulted in resched being set. This 6631 * prevents us from potentially nominating it as a false LAST_BUDDY 6632 * below. 6633 */ 6634 if (test_tsk_need_resched(curr)) 6635 return; 6636 6637 /* Idle tasks are by definition preempted by non-idle tasks. */ 6638 if (unlikely(task_has_idle_policy(curr)) && 6639 likely(!task_has_idle_policy(p))) 6640 goto preempt; 6641 6642 /* 6643 * Batch and idle tasks do not preempt non-idle tasks (their preemption 6644 * is driven by the tick): 6645 */ 6646 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) 6647 return; 6648 6649 find_matching_se(&se, &pse); 6650 update_curr(cfs_rq_of(se)); 6651 BUG_ON(!pse); 6652 if (wakeup_preempt_entity(se, pse) == 1) { 6653 /* 6654 * Bias pick_next to pick the sched entity that is 6655 * triggering this preemption. 6656 */ 6657 if (!next_buddy_marked) 6658 set_next_buddy(pse); 6659 goto preempt; 6660 } 6661 6662 return; 6663 6664 preempt: 6665 resched_curr(rq); 6666 /* 6667 * Only set the backward buddy when the current task is still 6668 * on the rq. 
This can happen when a wakeup gets interleaved 6669 * with schedule on the ->pre_schedule() or idle_balance() 6670 * point, either of which can * drop the rq lock. 6671 * 6672 * Also, during early boot the idle thread is in the fair class, 6673 * for obvious reasons its a bad idea to schedule back to it. 6674 */ 6675 if (unlikely(!se->on_rq || curr == rq->idle)) 6676 return; 6677 6678 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) 6679 set_last_buddy(se); 6680 } 6681 6682 struct task_struct * 6683 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6684 { 6685 struct cfs_rq *cfs_rq = &rq->cfs; 6686 struct sched_entity *se; 6687 struct task_struct *p; 6688 int new_tasks; 6689 6690 again: 6691 if (!sched_fair_runnable(rq)) 6692 goto idle; 6693 6694 #ifdef CONFIG_FAIR_GROUP_SCHED 6695 if (!prev || prev->sched_class != &fair_sched_class) 6696 goto simple; 6697 6698 /* 6699 * Because of the set_next_buddy() in dequeue_task_fair() it is rather 6700 * likely that a next task is from the same cgroup as the current. 6701 * 6702 * Therefore attempt to avoid putting and setting the entire cgroup 6703 * hierarchy, only change the part that actually changes. 6704 */ 6705 6706 do { 6707 struct sched_entity *curr = cfs_rq->curr; 6708 6709 /* 6710 * Since we got here without doing put_prev_entity() we also 6711 * have to consider cfs_rq->curr. If it is still a runnable 6712 * entity, update_curr() will update its vruntime, otherwise 6713 * forget we've ever seen it. 6714 */ 6715 if (curr) { 6716 if (curr->on_rq) 6717 update_curr(cfs_rq); 6718 else 6719 curr = NULL; 6720 6721 /* 6722 * This call to check_cfs_rq_runtime() will do the 6723 * throttle and dequeue its entity in the parent(s). 6724 * Therefore the nr_running test will indeed 6725 * be correct. 6726 */ 6727 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { 6728 cfs_rq = &rq->cfs; 6729 6730 if (!cfs_rq->nr_running) 6731 goto idle; 6732 6733 goto simple; 6734 } 6735 } 6736 6737 se = pick_next_entity(cfs_rq, curr); 6738 cfs_rq = group_cfs_rq(se); 6739 } while (cfs_rq); 6740 6741 p = task_of(se); 6742 6743 /* 6744 * Since we haven't yet done put_prev_entity and if the selected task 6745 * is a different task than we started out with, try and touch the 6746 * least amount of cfs_rqs. 6747 */ 6748 if (prev != p) { 6749 struct sched_entity *pse = &prev->se; 6750 6751 while (!(cfs_rq = is_same_group(se, pse))) { 6752 int se_depth = se->depth; 6753 int pse_depth = pse->depth; 6754 6755 if (se_depth <= pse_depth) { 6756 put_prev_entity(cfs_rq_of(pse), pse); 6757 pse = parent_entity(pse); 6758 } 6759 if (se_depth >= pse_depth) { 6760 set_next_entity(cfs_rq_of(se), se); 6761 se = parent_entity(se); 6762 } 6763 } 6764 6765 put_prev_entity(cfs_rq, pse); 6766 set_next_entity(cfs_rq, se); 6767 } 6768 6769 goto done; 6770 simple: 6771 #endif 6772 if (prev) 6773 put_prev_task(rq, prev); 6774 6775 do { 6776 se = pick_next_entity(cfs_rq, NULL); 6777 set_next_entity(cfs_rq, se); 6778 cfs_rq = group_cfs_rq(se); 6779 } while (cfs_rq); 6780 6781 p = task_of(se); 6782 6783 done: __maybe_unused; 6784 #ifdef CONFIG_SMP 6785 /* 6786 * Move the next running task to the front of 6787 * the list, so our cfs_tasks list becomes MRU 6788 * one. 
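 *
 * (detach_tasks() walks cfs_tasks from the tail, so keeping recently run,
 * likely cache-hot tasks at the head makes them the last candidates for
 * migration.)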
6789 */ 6790 list_move(&p->se.group_node, &rq->cfs_tasks); 6791 #endif 6792 6793 if (hrtick_enabled(rq)) 6794 hrtick_start_fair(rq, p); 6795 6796 update_misfit_status(p, rq); 6797 6798 return p; 6799 6800 idle: 6801 if (!rf) 6802 return NULL; 6803 6804 new_tasks = newidle_balance(rq, rf); 6805 6806 /* 6807 * Because newidle_balance() releases (and re-acquires) rq->lock, it is 6808 * possible for any higher priority task to appear. In that case we 6809 * must re-start the pick_next_entity() loop. 6810 */ 6811 if (new_tasks < 0) 6812 return RETRY_TASK; 6813 6814 if (new_tasks > 0) 6815 goto again; 6816 6817 /* 6818 * rq is about to be idle, check if we need to update the 6819 * lost_idle_time of clock_pelt 6820 */ 6821 update_idle_rq_clock_pelt(rq); 6822 6823 return NULL; 6824 } 6825 6826 static struct task_struct *__pick_next_task_fair(struct rq *rq) 6827 { 6828 return pick_next_task_fair(rq, NULL, NULL); 6829 } 6830 6831 /* 6832 * Account for a descheduled task: 6833 */ 6834 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) 6835 { 6836 struct sched_entity *se = &prev->se; 6837 struct cfs_rq *cfs_rq; 6838 6839 for_each_sched_entity(se) { 6840 cfs_rq = cfs_rq_of(se); 6841 put_prev_entity(cfs_rq, se); 6842 } 6843 } 6844 6845 /* 6846 * sched_yield() is very simple 6847 * 6848 * The magic of dealing with the ->skip buddy is in pick_next_entity. 6849 */ 6850 static void yield_task_fair(struct rq *rq) 6851 { 6852 struct task_struct *curr = rq->curr; 6853 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 6854 struct sched_entity *se = &curr->se; 6855 6856 /* 6857 * Are we the only task in the tree? 6858 */ 6859 if (unlikely(rq->nr_running == 1)) 6860 return; 6861 6862 clear_buddies(cfs_rq, se); 6863 6864 if (curr->policy != SCHED_BATCH) { 6865 update_rq_clock(rq); 6866 /* 6867 * Update run-time statistics of the 'current'. 6868 */ 6869 update_curr(cfs_rq); 6870 /* 6871 * Tell update_rq_clock() that we've just updated, 6872 * so we don't do microscopic update in schedule() 6873 * and double the fastpath cost. 6874 */ 6875 rq_clock_skip_update(rq); 6876 } 6877 6878 set_skip_buddy(se); 6879 } 6880 6881 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) 6882 { 6883 struct sched_entity *se = &p->se; 6884 6885 /* throttled hierarchies are not runnable */ 6886 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) 6887 return false; 6888 6889 /* Tell the scheduler that we'd really like pse to run next. */ 6890 set_next_buddy(se); 6891 6892 yield_task_fair(rq); 6893 6894 return true; 6895 } 6896 6897 #ifdef CONFIG_SMP 6898 /************************************************** 6899 * Fair scheduling class load-balancing methods. 6900 * 6901 * BASICS 6902 * 6903 * The purpose of load-balancing is to achieve the same basic fairness the 6904 * per-CPU scheduler provides, namely provide a proportional amount of compute 6905 * time to each task. This is expressed in the following equation: 6906 * 6907 * W_i,n/P_i == W_j,n/P_j for all i,j (1) 6908 * 6909 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight 6910 * W_i,0 is defined as: 6911 * 6912 * W_i,0 = \Sum_j w_i,j (2) 6913 * 6914 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight 6915 * is derived from the nice value as per sched_prio_to_weight[]. 
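 *
 * For example (illustrative): two nice-0 tasks (weight 1024 each) and one
 * nice-5 task (weight 335) runnable on CPU i give W_i,0 = 2383.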
6916 * 6917 * The weight average is an exponential decay average of the instantaneous 6918 * weight: 6919 * 6920 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) 6921 * 6922 * C_i is the compute capacity of CPU i, typically it is the 6923 * fraction of 'recent' time available for SCHED_OTHER task execution. But it 6924 * can also include other factors [XXX]. 6925 * 6926 * To achieve this balance we define a measure of imbalance which follows 6927 * directly from (1): 6928 * 6929 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) 6930 * 6931 * We them move tasks around to minimize the imbalance. In the continuous 6932 * function space it is obvious this converges, in the discrete case we get 6933 * a few fun cases generally called infeasible weight scenarios. 6934 * 6935 * [XXX expand on: 6936 * - infeasible weights; 6937 * - local vs global optima in the discrete case. ] 6938 * 6939 * 6940 * SCHED DOMAINS 6941 * 6942 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) 6943 * for all i,j solution, we create a tree of CPUs that follows the hardware 6944 * topology where each level pairs two lower groups (or better). This results 6945 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the 6946 * tree to only the first of the previous level and we decrease the frequency 6947 * of load-balance at each level inv. proportional to the number of CPUs in 6948 * the groups. 6949 * 6950 * This yields: 6951 * 6952 * log_2 n 1 n 6953 * \Sum { --- * --- * 2^i } = O(n) (5) 6954 * i = 0 2^i 2^i 6955 * `- size of each group 6956 * | | `- number of CPUs doing load-balance 6957 * | `- freq 6958 * `- sum over all levels 6959 * 6960 * Coupled with a limit on how many tasks we can migrate every balance pass, 6961 * this makes (5) the runtime complexity of the balancer. 6962 * 6963 * An important property here is that each CPU is still (indirectly) connected 6964 * to every other CPU in at most O(log n) steps: 6965 * 6966 * The adjacency matrix of the resulting graph is given by: 6967 * 6968 * log_2 n 6969 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6) 6970 * k = 0 6971 * 6972 * And you'll find that: 6973 * 6974 * A^(log_2 n)_i,j != 0 for all i,j (7) 6975 * 6976 * Showing there's indeed a path between every CPU in at most O(log n) steps. 6977 * The task movement gives a factor of O(m), giving a convergence complexity 6978 * of: 6979 * 6980 * O(nm log n), n := nr_cpus, m := nr_tasks (8) 6981 * 6982 * 6983 * WORK CONSERVING 6984 * 6985 * In order to avoid CPUs going idle while there's still work to do, new idle 6986 * balancing is more aggressive and has the newly idle CPU iterate up the domain 6987 * tree itself instead of relying on other CPUs to bring it work. 6988 * 6989 * This adds some complexity to both (5) and (8) but it reduces the total idle 6990 * time. 6991 * 6992 * [XXX more?] 6993 * 6994 * 6995 * CGROUPS 6996 * 6997 * Cgroups make a horror show out of (2), instead of a simple sum we get: 6998 * 6999 * s_k,i 7000 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9) 7001 * S_k 7002 * 7003 * Where 7004 * 7005 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) 7006 * 7007 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i. 7008 * 7009 * The big problem is S_k, its a global sum needed to compute a local (W_i) 7010 * property. 7011 * 7012 * [XXX write more on how we solve this.. _after_ merging pjt's patches that 7013 * rewrite all of this once again.] 
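 *
 * (As a simple illustration: a single cgroup of weight 1024 whose runnable
 * load is split evenly over two CPUs contributes s_k,i/S_k = 1/2 of its
 * weight, i.e. 512, to each CPU's W_i,0.)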
7014 */ 7015 7016 static unsigned long __read_mostly max_load_balance_interval = HZ/10; 7017 7018 enum fbq_type { regular, remote, all }; 7019 7020 /* 7021 * 'group_type' describes the group of CPUs at the moment of load balancing. 7022 * 7023 * The enum is ordered by pulling priority, with the group with lowest priority 7024 * first so the group_type can simply be compared when selecting the busiest 7025 * group. See update_sd_pick_busiest(). 7026 */ 7027 enum group_type { 7028 /* The group has spare capacity that can be used to run more tasks. */ 7029 group_has_spare = 0, 7030 /* 7031 * The group is fully used and the tasks don't compete for more CPU 7032 * cycles. Nevertheless, some tasks might wait before running. 7033 */ 7034 group_fully_busy, 7035 /* 7036 * SD_ASYM_CPUCAPACITY only: One task doesn't fit with CPU's capacity 7037 * and must be migrated to a more powerful CPU. 7038 */ 7039 group_misfit_task, 7040 /* 7041 * SD_ASYM_PACKING only: One local CPU with higher capacity is available, 7042 * and the task should be migrated to it instead of running on the 7043 * current CPU. 7044 */ 7045 group_asym_packing, 7046 /* 7047 * The tasks' affinity constraints previously prevented the scheduler 7048 * from balancing the load across the system. 7049 */ 7050 group_imbalanced, 7051 /* 7052 * The CPU is overloaded and can't provide expected CPU cycles to all 7053 * tasks. 7054 */ 7055 group_overloaded 7056 }; 7057 7058 enum migration_type { 7059 migrate_load = 0, 7060 migrate_util, 7061 migrate_task, 7062 migrate_misfit 7063 }; 7064 7065 #define LBF_ALL_PINNED 0x01 7066 #define LBF_NEED_BREAK 0x02 7067 #define LBF_DST_PINNED 0x04 7068 #define LBF_SOME_PINNED 0x08 7069 #define LBF_NOHZ_STATS 0x10 7070 #define LBF_NOHZ_AGAIN 0x20 7071 7072 struct lb_env { 7073 struct sched_domain *sd; 7074 7075 struct rq *src_rq; 7076 int src_cpu; 7077 7078 int dst_cpu; 7079 struct rq *dst_rq; 7080 7081 struct cpumask *dst_grpmask; 7082 int new_dst_cpu; 7083 enum cpu_idle_type idle; 7084 long imbalance; 7085 /* The set of CPUs under consideration for load-balancing */ 7086 struct cpumask *cpus; 7087 7088 unsigned int flags; 7089 7090 unsigned int loop; 7091 unsigned int loop_break; 7092 unsigned int loop_max; 7093 7094 enum fbq_type fbq_type; 7095 enum migration_type migration_type; 7096 struct list_head tasks; 7097 }; 7098 7099 /* 7100 * Is this task likely cache-hot: 7101 */ 7102 static int task_hot(struct task_struct *p, struct lb_env *env) 7103 { 7104 s64 delta; 7105 7106 lockdep_assert_held(&env->src_rq->lock); 7107 7108 if (p->sched_class != &fair_sched_class) 7109 return 0; 7110 7111 if (unlikely(task_has_idle_policy(p))) 7112 return 0; 7113 7114 /* 7115 * Buddy candidates are cache hot: 7116 */ 7117 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && 7118 (&p->se == cfs_rq_of(&p->se)->next || 7119 &p->se == cfs_rq_of(&p->se)->last)) 7120 return 1; 7121 7122 if (sysctl_sched_migration_cost == -1) 7123 return 1; 7124 if (sysctl_sched_migration_cost == 0) 7125 return 0; 7126 7127 delta = rq_clock_task(env->src_rq) - p->se.exec_start; 7128 7129 return delta < (s64)sysctl_sched_migration_cost; 7130 } 7131 7132 #ifdef CONFIG_NUMA_BALANCING 7133 /* 7134 * Returns 1, if task migration degrades locality 7135 * Returns 0, if task migration improves locality i.e migration preferred. 7136 * Returns -1, if task migration is not affected by locality. 
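 *
 * (can_migrate_task() treats 1 as cache hot, 0 as cache cold and falls back
 * to task_hot() when -1 is returned.)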
7137 */ 7138 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) 7139 { 7140 struct numa_group *numa_group = rcu_dereference(p->numa_group); 7141 unsigned long src_weight, dst_weight; 7142 int src_nid, dst_nid, dist; 7143 7144 if (!static_branch_likely(&sched_numa_balancing)) 7145 return -1; 7146 7147 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) 7148 return -1; 7149 7150 src_nid = cpu_to_node(env->src_cpu); 7151 dst_nid = cpu_to_node(env->dst_cpu); 7152 7153 if (src_nid == dst_nid) 7154 return -1; 7155 7156 /* Migrating away from the preferred node is always bad. */ 7157 if (src_nid == p->numa_preferred_nid) { 7158 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) 7159 return 1; 7160 else 7161 return -1; 7162 } 7163 7164 /* Encourage migration to the preferred node. */ 7165 if (dst_nid == p->numa_preferred_nid) 7166 return 0; 7167 7168 /* Leaving a core idle is often worse than degrading locality. */ 7169 if (env->idle == CPU_IDLE) 7170 return -1; 7171 7172 dist = node_distance(src_nid, dst_nid); 7173 if (numa_group) { 7174 src_weight = group_weight(p, src_nid, dist); 7175 dst_weight = group_weight(p, dst_nid, dist); 7176 } else { 7177 src_weight = task_weight(p, src_nid, dist); 7178 dst_weight = task_weight(p, dst_nid, dist); 7179 } 7180 7181 return dst_weight < src_weight; 7182 } 7183 7184 #else 7185 static inline int migrate_degrades_locality(struct task_struct *p, 7186 struct lb_env *env) 7187 { 7188 return -1; 7189 } 7190 #endif 7191 7192 /* 7193 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 7194 */ 7195 static 7196 int can_migrate_task(struct task_struct *p, struct lb_env *env) 7197 { 7198 int tsk_cache_hot; 7199 7200 lockdep_assert_held(&env->src_rq->lock); 7201 7202 /* 7203 * We do not migrate tasks that are: 7204 * 1) throttled_lb_pair, or 7205 * 2) cannot be migrated to this CPU due to cpus_ptr, or 7206 * 3) running (obviously), or 7207 * 4) are cache-hot on their current CPU. 7208 */ 7209 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) 7210 return 0; 7211 7212 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { 7213 int cpu; 7214 7215 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); 7216 7217 env->flags |= LBF_SOME_PINNED; 7218 7219 /* 7220 * Remember if this task can be migrated to any other CPU in 7221 * our sched_group. We may want to revisit it if we couldn't 7222 * meet load balance goals by pulling other tasks on src_cpu. 7223 * 7224 * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have 7225 * already computed one in current iteration. 7226 */ 7227 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) 7228 return 0; 7229 7230 /* Prevent to re-select dst_cpu via env's CPUs: */ 7231 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { 7232 if (cpumask_test_cpu(cpu, p->cpus_ptr)) { 7233 env->flags |= LBF_DST_PINNED; 7234 env->new_dst_cpu = cpu; 7235 break; 7236 } 7237 } 7238 7239 return 0; 7240 } 7241 7242 /* Record that we found atleast one task that could run on dst_cpu */ 7243 env->flags &= ~LBF_ALL_PINNED; 7244 7245 if (task_running(env->src_rq, p)) { 7246 schedstat_inc(p->se.statistics.nr_failed_migrations_running); 7247 return 0; 7248 } 7249 7250 /* 7251 * Aggressive migration if: 7252 * 1) destination numa is preferred 7253 * 2) task is cache cold, or 7254 * 3) too many balance attempts have failed. 
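 *
 * (Below, cases 1 and 2 correspond to tsk_cache_hot <= 0, case 3 to
 * nr_balance_failed exceeding cache_nice_tries.)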
7255 */ 7256 tsk_cache_hot = migrate_degrades_locality(p, env); 7257 if (tsk_cache_hot == -1) 7258 tsk_cache_hot = task_hot(p, env); 7259 7260 if (tsk_cache_hot <= 0 || 7261 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { 7262 if (tsk_cache_hot == 1) { 7263 schedstat_inc(env->sd->lb_hot_gained[env->idle]); 7264 schedstat_inc(p->se.statistics.nr_forced_migrations); 7265 } 7266 return 1; 7267 } 7268 7269 schedstat_inc(p->se.statistics.nr_failed_migrations_hot); 7270 return 0; 7271 } 7272 7273 /* 7274 * detach_task() -- detach the task for the migration specified in env 7275 */ 7276 static void detach_task(struct task_struct *p, struct lb_env *env) 7277 { 7278 lockdep_assert_held(&env->src_rq->lock); 7279 7280 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); 7281 set_task_cpu(p, env->dst_cpu); 7282 } 7283 7284 /* 7285 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as 7286 * part of active balancing operations within "domain". 7287 * 7288 * Returns a task if successful and NULL otherwise. 7289 */ 7290 static struct task_struct *detach_one_task(struct lb_env *env) 7291 { 7292 struct task_struct *p; 7293 7294 lockdep_assert_held(&env->src_rq->lock); 7295 7296 list_for_each_entry_reverse(p, 7297 &env->src_rq->cfs_tasks, se.group_node) { 7298 if (!can_migrate_task(p, env)) 7299 continue; 7300 7301 detach_task(p, env); 7302 7303 /* 7304 * Right now, this is only the second place where 7305 * lb_gained[env->idle] is updated (other is detach_tasks) 7306 * so we can safely collect stats here rather than 7307 * inside detach_tasks(). 7308 */ 7309 schedstat_inc(env->sd->lb_gained[env->idle]); 7310 return p; 7311 } 7312 return NULL; 7313 } 7314 7315 static const unsigned int sched_nr_migrate_break = 32; 7316 7317 /* 7318 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from 7319 * busiest_rq, as part of a balancing operation within domain "sd". 7320 * 7321 * Returns number of detached tasks if successful and 0 otherwise. 7322 */ 7323 static int detach_tasks(struct lb_env *env) 7324 { 7325 struct list_head *tasks = &env->src_rq->cfs_tasks; 7326 unsigned long util, load; 7327 struct task_struct *p; 7328 int detached = 0; 7329 7330 lockdep_assert_held(&env->src_rq->lock); 7331 7332 if (env->imbalance <= 0) 7333 return 0; 7334 7335 while (!list_empty(tasks)) { 7336 /* 7337 * We don't want to steal all, otherwise we may be treated likewise, 7338 * which could at worst lead to a livelock crash. 7339 */ 7340 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) 7341 break; 7342 7343 p = list_last_entry(tasks, struct task_struct, se.group_node); 7344 7345 env->loop++; 7346 /* We've more or less seen every task there is, call it quits */ 7347 if (env->loop > env->loop_max) 7348 break; 7349 7350 /* take a breather every nr_migrate tasks */ 7351 if (env->loop > env->loop_break) { 7352 env->loop_break += sched_nr_migrate_break; 7353 env->flags |= LBF_NEED_BREAK; 7354 break; 7355 } 7356 7357 if (!can_migrate_task(p, env)) 7358 goto next; 7359 7360 switch (env->migration_type) { 7361 case migrate_load: 7362 load = task_h_load(p); 7363 7364 if (sched_feat(LB_MIN) && 7365 load < 16 && !env->sd->nr_balance_failed) 7366 goto next; 7367 7368 /* 7369 * Make sure that we don't migrate too much load. 7370 * Nevertheless, let relax the constraint if 7371 * scheduler fails to find a good waiting task to 7372 * migrate. 
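 *
 * As a worked example (illustrative numbers): with env->imbalance = 300, a
 * task with task_h_load() = 700 is skipped here (700/2 = 350 > 300) unless
 * nr_balance_failed has grown past cache_nice_tries.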
7373 */ 7374 if (load/2 > env->imbalance && 7375 env->sd->nr_balance_failed <= env->sd->cache_nice_tries) 7376 goto next; 7377 7378 env->imbalance -= load; 7379 break; 7380 7381 case migrate_util: 7382 util = task_util_est(p); 7383 7384 if (util > env->imbalance) 7385 goto next; 7386 7387 env->imbalance -= util; 7388 break; 7389 7390 case migrate_task: 7391 env->imbalance--; 7392 break; 7393 7394 case migrate_misfit: 7395 /* This is not a misfit task */ 7396 if (task_fits_capacity(p, capacity_of(env->src_cpu))) 7397 goto next; 7398 7399 env->imbalance = 0; 7400 break; 7401 } 7402 7403 detach_task(p, env); 7404 list_add(&p->se.group_node, &env->tasks); 7405 7406 detached++; 7407 7408 #ifdef CONFIG_PREEMPTION 7409 /* 7410 * NEWIDLE balancing is a source of latency, so preemptible 7411 * kernels will stop after the first task is detached to minimize 7412 * the critical section. 7413 */ 7414 if (env->idle == CPU_NEWLY_IDLE) 7415 break; 7416 #endif 7417 7418 /* 7419 * We only want to steal up to the prescribed amount of 7420 * load/util/tasks. 7421 */ 7422 if (env->imbalance <= 0) 7423 break; 7424 7425 continue; 7426 next: 7427 list_move(&p->se.group_node, tasks); 7428 } 7429 7430 /* 7431 * Right now, this is one of only two places we collect this stat 7432 * so we can safely collect detach_one_task() stats here rather 7433 * than inside detach_one_task(). 7434 */ 7435 schedstat_add(env->sd->lb_gained[env->idle], detached); 7436 7437 return detached; 7438 } 7439 7440 /* 7441 * attach_task() -- attach the task detached by detach_task() to its new rq. 7442 */ 7443 static void attach_task(struct rq *rq, struct task_struct *p) 7444 { 7445 lockdep_assert_held(&rq->lock); 7446 7447 BUG_ON(task_rq(p) != rq); 7448 activate_task(rq, p, ENQUEUE_NOCLOCK); 7449 check_preempt_curr(rq, p, 0); 7450 } 7451 7452 /* 7453 * attach_one_task() -- attaches the task returned from detach_one_task() to 7454 * its new rq. 7455 */ 7456 static void attach_one_task(struct rq *rq, struct task_struct *p) 7457 { 7458 struct rq_flags rf; 7459 7460 rq_lock(rq, &rf); 7461 update_rq_clock(rq); 7462 attach_task(rq, p); 7463 rq_unlock(rq, &rf); 7464 } 7465 7466 /* 7467 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their 7468 * new rq. 
7469 */ 7470 static void attach_tasks(struct lb_env *env) 7471 { 7472 struct list_head *tasks = &env->tasks; 7473 struct task_struct *p; 7474 struct rq_flags rf; 7475 7476 rq_lock(env->dst_rq, &rf); 7477 update_rq_clock(env->dst_rq); 7478 7479 while (!list_empty(tasks)) { 7480 p = list_first_entry(tasks, struct task_struct, se.group_node); 7481 list_del_init(&p->se.group_node); 7482 7483 attach_task(env->dst_rq, p); 7484 } 7485 7486 rq_unlock(env->dst_rq, &rf); 7487 } 7488 7489 #ifdef CONFIG_NO_HZ_COMMON 7490 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) 7491 { 7492 if (cfs_rq->avg.load_avg) 7493 return true; 7494 7495 if (cfs_rq->avg.util_avg) 7496 return true; 7497 7498 return false; 7499 } 7500 7501 static inline bool others_have_blocked(struct rq *rq) 7502 { 7503 if (READ_ONCE(rq->avg_rt.util_avg)) 7504 return true; 7505 7506 if (READ_ONCE(rq->avg_dl.util_avg)) 7507 return true; 7508 7509 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 7510 if (READ_ONCE(rq->avg_irq.util_avg)) 7511 return true; 7512 #endif 7513 7514 return false; 7515 } 7516 7517 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) 7518 { 7519 rq->last_blocked_load_update_tick = jiffies; 7520 7521 if (!has_blocked) 7522 rq->has_blocked_load = 0; 7523 } 7524 #else 7525 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } 7526 static inline bool others_have_blocked(struct rq *rq) { return false; } 7527 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} 7528 #endif 7529 7530 static bool __update_blocked_others(struct rq *rq, bool *done) 7531 { 7532 const struct sched_class *curr_class; 7533 u64 now = rq_clock_pelt(rq); 7534 bool decayed; 7535 7536 /* 7537 * update_load_avg() can call cpufreq_update_util(). Make sure that RT, 7538 * DL and IRQ signals have been updated before updating CFS. 7539 */ 7540 curr_class = rq->curr->sched_class; 7541 7542 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | 7543 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | 7544 update_irq_load_avg(rq, 0); 7545 7546 if (others_have_blocked(rq)) 7547 *done = false; 7548 7549 return decayed; 7550 } 7551 7552 #ifdef CONFIG_FAIR_GROUP_SCHED 7553 7554 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 7555 { 7556 if (cfs_rq->load.weight) 7557 return false; 7558 7559 if (cfs_rq->avg.load_sum) 7560 return false; 7561 7562 if (cfs_rq->avg.util_sum) 7563 return false; 7564 7565 if (cfs_rq->avg.runnable_load_sum) 7566 return false; 7567 7568 return true; 7569 } 7570 7571 static bool __update_blocked_fair(struct rq *rq, bool *done) 7572 { 7573 struct cfs_rq *cfs_rq, *pos; 7574 bool decayed = false; 7575 int cpu = cpu_of(rq); 7576 7577 /* 7578 * Iterates the task_group tree in a bottom up fashion, see 7579 * list_add_leaf_cfs_rq() for details. 7580 */ 7581 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { 7582 struct sched_entity *se; 7583 7584 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { 7585 update_tg_load_avg(cfs_rq, 0); 7586 7587 if (cfs_rq == &rq->cfs) 7588 decayed = true; 7589 } 7590 7591 /* Propagate pending load changes to the parent, if any: */ 7592 se = cfs_rq->tg->se[cpu]; 7593 if (se && !skip_blocked_update(se)) 7594 update_load_avg(cfs_rq_of(se), se, 0); 7595 7596 /* 7597 * There can be a lot of idle CPU cgroups. Don't let fully 7598 * decayed cfs_rqs linger on the list. 
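 *
 * (cfs_rq_is_decayed(), defined above, only returns true once the weight and
 * all PELT sums have reached zero.)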
7599 */ 7600 if (cfs_rq_is_decayed(cfs_rq)) 7601 list_del_leaf_cfs_rq(cfs_rq); 7602 7603 /* Don't need periodic decay once load/util_avg are null */ 7604 if (cfs_rq_has_blocked(cfs_rq)) 7605 *done = false; 7606 } 7607 7608 return decayed; 7609 } 7610 7611 /* 7612 * Compute the hierarchical load factor for cfs_rq and all its ascendants. 7613 * This needs to be done in a top-down fashion because the load of a child 7614 * group is a fraction of its parents load. 7615 */ 7616 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) 7617 { 7618 struct rq *rq = rq_of(cfs_rq); 7619 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; 7620 unsigned long now = jiffies; 7621 unsigned long load; 7622 7623 if (cfs_rq->last_h_load_update == now) 7624 return; 7625 7626 WRITE_ONCE(cfs_rq->h_load_next, NULL); 7627 for_each_sched_entity(se) { 7628 cfs_rq = cfs_rq_of(se); 7629 WRITE_ONCE(cfs_rq->h_load_next, se); 7630 if (cfs_rq->last_h_load_update == now) 7631 break; 7632 } 7633 7634 if (!se) { 7635 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); 7636 cfs_rq->last_h_load_update = now; 7637 } 7638 7639 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { 7640 load = cfs_rq->h_load; 7641 load = div64_ul(load * se->avg.load_avg, 7642 cfs_rq_load_avg(cfs_rq) + 1); 7643 cfs_rq = group_cfs_rq(se); 7644 cfs_rq->h_load = load; 7645 cfs_rq->last_h_load_update = now; 7646 } 7647 } 7648 7649 static unsigned long task_h_load(struct task_struct *p) 7650 { 7651 struct cfs_rq *cfs_rq = task_cfs_rq(p); 7652 7653 update_cfs_rq_h_load(cfs_rq); 7654 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, 7655 cfs_rq_load_avg(cfs_rq) + 1); 7656 } 7657 #else 7658 static bool __update_blocked_fair(struct rq *rq, bool *done) 7659 { 7660 struct cfs_rq *cfs_rq = &rq->cfs; 7661 bool decayed; 7662 7663 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); 7664 if (cfs_rq_has_blocked(cfs_rq)) 7665 *done = false; 7666 7667 return decayed; 7668 } 7669 7670 static unsigned long task_h_load(struct task_struct *p) 7671 { 7672 return p->se.avg.load_avg; 7673 } 7674 #endif 7675 7676 static void update_blocked_averages(int cpu) 7677 { 7678 bool decayed = false, done = true; 7679 struct rq *rq = cpu_rq(cpu); 7680 struct rq_flags rf; 7681 7682 rq_lock_irqsave(rq, &rf); 7683 update_rq_clock(rq); 7684 7685 decayed |= __update_blocked_others(rq, &done); 7686 decayed |= __update_blocked_fair(rq, &done); 7687 7688 update_blocked_load_status(rq, !done); 7689 if (decayed) 7690 cpufreq_update_util(rq, 0); 7691 rq_unlock_irqrestore(rq, &rf); 7692 } 7693 7694 /********** Helpers for find_busiest_group ************************/ 7695 7696 /* 7697 * sg_lb_stats - stats of a sched_group required for load_balancing 7698 */ 7699 struct sg_lb_stats { 7700 unsigned long avg_load; /*Avg load across the CPUs of the group */ 7701 unsigned long group_load; /* Total load over the CPUs of the group */ 7702 unsigned long group_capacity; 7703 unsigned long group_util; /* Total utilization of the group */ 7704 unsigned int sum_nr_running; /* Nr of tasks running in the group */ 7705 unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */ 7706 unsigned int idle_cpus; 7707 unsigned int group_weight; 7708 enum group_type group_type; 7709 unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */ 7710 unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */ 7711 #ifdef CONFIG_NUMA_BALANCING 7712 unsigned int nr_numa_running; 7713 unsigned int nr_preferred_running; 7714 #endif 7715 }; 7716 7717 /* 
7718 * sd_lb_stats - Structure to store the statistics of a sched_domain 7719 * during load balancing. 7720 */ 7721 struct sd_lb_stats { 7722 struct sched_group *busiest; /* Busiest group in this sd */ 7723 struct sched_group *local; /* Local group in this sd */ 7724 unsigned long total_load; /* Total load of all groups in sd */ 7725 unsigned long total_capacity; /* Total capacity of all groups in sd */ 7726 unsigned long avg_load; /* Average load across all groups in sd */ 7727 unsigned int prefer_sibling; /* tasks should go to sibling first */ 7728 7729 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ 7730 struct sg_lb_stats local_stat; /* Statistics of the local group */ 7731 }; 7732 7733 static inline void init_sd_lb_stats(struct sd_lb_stats *sds) 7734 { 7735 /* 7736 * Skimp on the clearing to avoid duplicate work. We can avoid clearing 7737 * local_stat because update_sg_lb_stats() does a full clear/assignment. 7738 * We must however set busiest_stat::group_type and 7739 * busiest_stat::idle_cpus to the worst busiest group because 7740 * update_sd_pick_busiest() reads these before assignment. 7741 */ 7742 *sds = (struct sd_lb_stats){ 7743 .busiest = NULL, 7744 .local = NULL, 7745 .total_load = 0UL, 7746 .total_capacity = 0UL, 7747 .busiest_stat = { 7748 .idle_cpus = UINT_MAX, 7749 .group_type = group_has_spare, 7750 }, 7751 }; 7752 } 7753 7754 static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu) 7755 { 7756 struct rq *rq = cpu_rq(cpu); 7757 unsigned long max = arch_scale_cpu_capacity(cpu); 7758 unsigned long used, free; 7759 unsigned long irq; 7760 7761 irq = cpu_util_irq(rq); 7762 7763 if (unlikely(irq >= max)) 7764 return 1; 7765 7766 used = READ_ONCE(rq->avg_rt.util_avg); 7767 used += READ_ONCE(rq->avg_dl.util_avg); 7768 7769 if (unlikely(used >= max)) 7770 return 1; 7771 7772 free = max - used; 7773 7774 return scale_irq_capacity(free, irq, max); 7775 } 7776 7777 static void update_cpu_capacity(struct sched_domain *sd, int cpu) 7778 { 7779 unsigned long capacity = scale_rt_capacity(sd, cpu); 7780 struct sched_group *sdg = sd->groups; 7781 7782 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu); 7783 7784 if (!capacity) 7785 capacity = 1; 7786 7787 cpu_rq(cpu)->cpu_capacity = capacity; 7788 sdg->sgc->capacity = capacity; 7789 sdg->sgc->min_capacity = capacity; 7790 sdg->sgc->max_capacity = capacity; 7791 } 7792 7793 void update_group_capacity(struct sched_domain *sd, int cpu) 7794 { 7795 struct sched_domain *child = sd->child; 7796 struct sched_group *group, *sdg = sd->groups; 7797 unsigned long capacity, min_capacity, max_capacity; 7798 unsigned long interval; 7799 7800 interval = msecs_to_jiffies(sd->balance_interval); 7801 interval = clamp(interval, 1UL, max_load_balance_interval); 7802 sdg->sgc->next_update = jiffies + interval; 7803 7804 if (!child) { 7805 update_cpu_capacity(sd, cpu); 7806 return; 7807 } 7808 7809 capacity = 0; 7810 min_capacity = ULONG_MAX; 7811 max_capacity = 0; 7812 7813 if (child->flags & SD_OVERLAP) { 7814 /* 7815 * SD_OVERLAP domains cannot assume that child groups 7816 * span the current group. 7817 */ 7818 7819 for_each_cpu(cpu, sched_group_span(sdg)) { 7820 unsigned long cpu_cap = capacity_of(cpu); 7821 7822 capacity += cpu_cap; 7823 min_capacity = min(cpu_cap, min_capacity); 7824 max_capacity = max(cpu_cap, max_capacity); 7825 } 7826 } else { 7827 /* 7828 * !SD_OVERLAP domains can assume that child groups 7829 * span the current group. 
		 */

		group = child->groups;
		do {
			struct sched_group_capacity *sgc = group->sgc;

			capacity += sgc->capacity;
			min_capacity = min(sgc->min_capacity, min_capacity);
			max_capacity = max(sgc->max_capacity, max_capacity);
			group = group->next;
		} while (group != child->groups);
	}

	sdg->sgc->capacity = capacity;
	sdg->sgc->min_capacity = min_capacity;
	sdg->sgc->max_capacity = max_capacity;
}

/*
 * Check whether the capacity of the rq has been noticeably reduced by side
 * activity. The imbalance_pct is used for the threshold.
 * Return true if the capacity is reduced.
 */
static inline int
check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
{
	return ((rq->cpu_capacity * sd->imbalance_pct) <
				(rq->cpu_capacity_orig * 100));
}

/*
 * Check whether a rq has a misfit task and if it looks like we can actually
 * help that task: we can migrate the task to a CPU of higher capacity, or
 * the task's current CPU is heavily pressured.
 */
static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
{
	return rq->misfit_task_load &&
		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
		 check_cpu_capacity(rq, sd));
}

/*
 * Group imbalance indicates (and tries to solve) the problem where balancing
 * groups is inadequate due to ->cpus_ptr constraints.
 *
 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
 * Something like:
 *
 *	{ 0 1 2 3 } { 4 5 6 7 }
 *	        *     * * *
 *
 * If we were to balance group-wise we'd place two tasks in the first group and
 * two tasks in the second group. Clearly this is undesired as it will overload
 * cpu 3 and leave one of the CPUs in the second group unused.
 *
 * The current solution to this issue is detecting the skew in the first group
 * by noticing the lower domain failed to reach balance and had difficulty
 * moving tasks due to affinity constraints.
 *
 * When this is so detected, this group becomes a candidate for busiest; see
 * update_sd_pick_busiest(). And calculate_imbalance() and
 * find_busiest_group() avoid some of the usual balance conditions to allow it
 * to create an effective group imbalance.
 *
 * This is a somewhat tricky proposition since the next run might not find the
 * group imbalance and decide the groups need to be balanced again. A most
 * subtle and fragile situation.
 */

static inline int sg_imbalanced(struct sched_group *group)
{
	return group->sgc->imbalance;
}

/*
 * group_has_capacity returns true if the group has spare capacity that could
 * be used by some tasks.
 * We consider that a group has spare capacity if the number of tasks is
 * smaller than the number of CPUs or if the utilization is lower than the
 * available capacity for CFS tasks.
 * For the latter, we use a threshold to stabilize the state, to take into
 * account the variance of the tasks' load and to return true if the available
 * capacity is meaningful for the load balancer.
 * As an example, an available capacity of 1% can appear but brings no
 * benefit to the load balancer.
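 *
 * Illustrative numbers (not taken from this file): with an imbalance_pct
 * of 117 and a group_capacity of 1024, the utilization check below only
 * reports spare capacity while group_util stays under 1024 * 100 / 117,
 * i.e. roughly 875, so about 15% of the capacity has to be free.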
7917 */ 7918 static inline bool 7919 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs) 7920 { 7921 if (sgs->sum_nr_running < sgs->group_weight) 7922 return true; 7923 7924 if ((sgs->group_capacity * 100) > 7925 (sgs->group_util * imbalance_pct)) 7926 return true; 7927 7928 return false; 7929 } 7930 7931 /* 7932 * group_is_overloaded returns true if the group has more tasks than it can 7933 * handle. 7934 * group_is_overloaded is not equals to !group_has_capacity because a group 7935 * with the exact right number of tasks, has no more spare capacity but is not 7936 * overloaded so both group_has_capacity and group_is_overloaded return 7937 * false. 7938 */ 7939 static inline bool 7940 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs) 7941 { 7942 if (sgs->sum_nr_running <= sgs->group_weight) 7943 return false; 7944 7945 if ((sgs->group_capacity * 100) < 7946 (sgs->group_util * imbalance_pct)) 7947 return true; 7948 7949 return false; 7950 } 7951 7952 /* 7953 * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller 7954 * per-CPU capacity than sched_group ref. 7955 */ 7956 static inline bool 7957 group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref) 7958 { 7959 return fits_capacity(sg->sgc->min_capacity, ref->sgc->min_capacity); 7960 } 7961 7962 /* 7963 * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller 7964 * per-CPU capacity_orig than sched_group ref. 7965 */ 7966 static inline bool 7967 group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref) 7968 { 7969 return fits_capacity(sg->sgc->max_capacity, ref->sgc->max_capacity); 7970 } 7971 7972 static inline enum 7973 group_type group_classify(unsigned int imbalance_pct, 7974 struct sched_group *group, 7975 struct sg_lb_stats *sgs) 7976 { 7977 if (group_is_overloaded(imbalance_pct, sgs)) 7978 return group_overloaded; 7979 7980 if (sg_imbalanced(group)) 7981 return group_imbalanced; 7982 7983 if (sgs->group_asym_packing) 7984 return group_asym_packing; 7985 7986 if (sgs->group_misfit_task_load) 7987 return group_misfit_task; 7988 7989 if (!group_has_capacity(imbalance_pct, sgs)) 7990 return group_fully_busy; 7991 7992 return group_has_spare; 7993 } 7994 7995 static bool update_nohz_stats(struct rq *rq, bool force) 7996 { 7997 #ifdef CONFIG_NO_HZ_COMMON 7998 unsigned int cpu = rq->cpu; 7999 8000 if (!rq->has_blocked_load) 8001 return false; 8002 8003 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) 8004 return false; 8005 8006 if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) 8007 return true; 8008 8009 update_blocked_averages(cpu); 8010 8011 return rq->has_blocked_load; 8012 #else 8013 return false; 8014 #endif 8015 } 8016 8017 /** 8018 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 8019 * @env: The load balancing environment. 8020 * @group: sched_group whose statistics are to be updated. 8021 * @sgs: variable to hold the statistics for this group. 
8022 * @sg_status: Holds flag indicating the status of the sched_group 8023 */ 8024 static inline void update_sg_lb_stats(struct lb_env *env, 8025 struct sched_group *group, 8026 struct sg_lb_stats *sgs, 8027 int *sg_status) 8028 { 8029 int i, nr_running, local_group; 8030 8031 memset(sgs, 0, sizeof(*sgs)); 8032 8033 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group)); 8034 8035 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 8036 struct rq *rq = cpu_rq(i); 8037 8038 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) 8039 env->flags |= LBF_NOHZ_AGAIN; 8040 8041 sgs->group_load += cpu_load(rq); 8042 sgs->group_util += cpu_util(i); 8043 sgs->sum_h_nr_running += rq->cfs.h_nr_running; 8044 8045 nr_running = rq->nr_running; 8046 sgs->sum_nr_running += nr_running; 8047 8048 if (nr_running > 1) 8049 *sg_status |= SG_OVERLOAD; 8050 8051 if (cpu_overutilized(i)) 8052 *sg_status |= SG_OVERUTILIZED; 8053 8054 #ifdef CONFIG_NUMA_BALANCING 8055 sgs->nr_numa_running += rq->nr_numa_running; 8056 sgs->nr_preferred_running += rq->nr_preferred_running; 8057 #endif 8058 /* 8059 * No need to call idle_cpu() if nr_running is not 0 8060 */ 8061 if (!nr_running && idle_cpu(i)) { 8062 sgs->idle_cpus++; 8063 /* Idle cpu can't have misfit task */ 8064 continue; 8065 } 8066 8067 if (local_group) 8068 continue; 8069 8070 /* Check for a misfit task on the cpu */ 8071 if (env->sd->flags & SD_ASYM_CPUCAPACITY && 8072 sgs->group_misfit_task_load < rq->misfit_task_load) { 8073 sgs->group_misfit_task_load = rq->misfit_task_load; 8074 *sg_status |= SG_OVERLOAD; 8075 } 8076 } 8077 8078 /* Check if dst CPU is idle and preferred to this group */ 8079 if (env->sd->flags & SD_ASYM_PACKING && 8080 env->idle != CPU_NOT_IDLE && 8081 sgs->sum_h_nr_running && 8082 sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) { 8083 sgs->group_asym_packing = 1; 8084 } 8085 8086 sgs->group_capacity = group->sgc->capacity; 8087 8088 sgs->group_weight = group->group_weight; 8089 8090 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); 8091 8092 /* Computing avg_load makes sense only when group is overloaded */ 8093 if (sgs->group_type == group_overloaded) 8094 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / 8095 sgs->group_capacity; 8096 } 8097 8098 /** 8099 * update_sd_pick_busiest - return 1 on busiest group 8100 * @env: The load balancing environment. 8101 * @sds: sched_domain statistics 8102 * @sg: sched_group candidate to be checked for being the busiest 8103 * @sgs: sched_group statistics 8104 * 8105 * Determine if @sg is a busier group than the previously selected 8106 * busiest group. 8107 * 8108 * Return: %true if @sg is a busier group than the previously selected 8109 * busiest group. %false otherwise. 8110 */ 8111 static bool update_sd_pick_busiest(struct lb_env *env, 8112 struct sd_lb_stats *sds, 8113 struct sched_group *sg, 8114 struct sg_lb_stats *sgs) 8115 { 8116 struct sg_lb_stats *busiest = &sds->busiest_stat; 8117 8118 /* Make sure that there is at least one task to pull */ 8119 if (!sgs->sum_h_nr_running) 8120 return false; 8121 8122 /* 8123 * Don't try to pull misfit tasks we can't help. 8124 * We can use max_capacity here as reduction in capacity on some 8125 * CPUs in the group should either be possible to resolve 8126 * internally or be covered by avg_load imbalance (eventually). 
8127 */ 8128 if (sgs->group_type == group_misfit_task && 8129 (!group_smaller_max_cpu_capacity(sg, sds->local) || 8130 sds->local_stat.group_type != group_has_spare)) 8131 return false; 8132 8133 if (sgs->group_type > busiest->group_type) 8134 return true; 8135 8136 if (sgs->group_type < busiest->group_type) 8137 return false; 8138 8139 /* 8140 * The candidate and the current busiest group are the same type of 8141 * group. Let check which one is the busiest according to the type. 8142 */ 8143 8144 switch (sgs->group_type) { 8145 case group_overloaded: 8146 /* Select the overloaded group with highest avg_load. */ 8147 if (sgs->avg_load <= busiest->avg_load) 8148 return false; 8149 break; 8150 8151 case group_imbalanced: 8152 /* 8153 * Select the 1st imbalanced group as we don't have any way to 8154 * choose one more than another. 8155 */ 8156 return false; 8157 8158 case group_asym_packing: 8159 /* Prefer to move from lowest priority CPU's work */ 8160 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) 8161 return false; 8162 break; 8163 8164 case group_misfit_task: 8165 /* 8166 * If we have more than one misfit sg go with the biggest 8167 * misfit. 8168 */ 8169 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) 8170 return false; 8171 break; 8172 8173 case group_fully_busy: 8174 /* 8175 * Select the fully busy group with highest avg_load. In 8176 * theory, there is no need to pull task from such kind of 8177 * group because tasks have all compute capacity that they need 8178 * but we can still improve the overall throughput by reducing 8179 * contention when accessing shared HW resources. 8180 * 8181 * XXX for now avg_load is not computed and always 0 so we 8182 * select the 1st one. 8183 */ 8184 if (sgs->avg_load <= busiest->avg_load) 8185 return false; 8186 break; 8187 8188 case group_has_spare: 8189 /* 8190 * Select not overloaded group with lowest number of idle cpus 8191 * and highest number of running tasks. We could also compare 8192 * the spare capacity which is more stable but it can end up 8193 * that the group has less spare capacity but finally more idle 8194 * CPUs which means less opportunity to pull tasks. 8195 */ 8196 if (sgs->idle_cpus > busiest->idle_cpus) 8197 return false; 8198 else if ((sgs->idle_cpus == busiest->idle_cpus) && 8199 (sgs->sum_nr_running <= busiest->sum_nr_running)) 8200 return false; 8201 8202 break; 8203 } 8204 8205 /* 8206 * Candidate sg has no more than one task per CPU and has higher 8207 * per-CPU capacity. Migrating tasks to less capable CPUs may harm 8208 * throughput. Maximize throughput, power/energy consequences are not 8209 * considered. 
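 *
 * ("Higher" has a margin built in: group_smaller_min_cpu_capacity() relies
 *  on fits_capacity(), so the candidate is only skipped when the local
 *  group's min per-CPU capacity is below roughly 80% of the candidate's.)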
8210 */ 8211 if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && 8212 (sgs->group_type <= group_fully_busy) && 8213 (group_smaller_min_cpu_capacity(sds->local, sg))) 8214 return false; 8215 8216 return true; 8217 } 8218 8219 #ifdef CONFIG_NUMA_BALANCING 8220 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 8221 { 8222 if (sgs->sum_h_nr_running > sgs->nr_numa_running) 8223 return regular; 8224 if (sgs->sum_h_nr_running > sgs->nr_preferred_running) 8225 return remote; 8226 return all; 8227 } 8228 8229 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 8230 { 8231 if (rq->nr_running > rq->nr_numa_running) 8232 return regular; 8233 if (rq->nr_running > rq->nr_preferred_running) 8234 return remote; 8235 return all; 8236 } 8237 #else 8238 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 8239 { 8240 return all; 8241 } 8242 8243 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 8244 { 8245 return regular; 8246 } 8247 #endif /* CONFIG_NUMA_BALANCING */ 8248 8249 8250 struct sg_lb_stats; 8251 8252 /* 8253 * task_running_on_cpu - return 1 if @p is running on @cpu. 8254 */ 8255 8256 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p) 8257 { 8258 /* Task has no contribution or is new */ 8259 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 8260 return 0; 8261 8262 if (task_on_rq_queued(p)) 8263 return 1; 8264 8265 return 0; 8266 } 8267 8268 /** 8269 * idle_cpu_without - would a given CPU be idle without p ? 8270 * @cpu: the processor on which idleness is tested. 8271 * @p: task which should be ignored. 8272 * 8273 * Return: 1 if the CPU would be idle. 0 otherwise. 8274 */ 8275 static int idle_cpu_without(int cpu, struct task_struct *p) 8276 { 8277 struct rq *rq = cpu_rq(cpu); 8278 8279 if (rq->curr != rq->idle && rq->curr != p) 8280 return 0; 8281 8282 /* 8283 * rq->nr_running can't be used but an updated version without the 8284 * impact of p on cpu must be used instead. The updated nr_running 8285 * be computed and tested before calling idle_cpu_without(). 8286 */ 8287 8288 #ifdef CONFIG_SMP 8289 if (!llist_empty(&rq->wake_list)) 8290 return 0; 8291 #endif 8292 8293 return 1; 8294 } 8295 8296 /* 8297 * update_sg_wakeup_stats - Update sched_group's statistics for wakeup. 8298 * @sd: The sched_domain level to look for idlest group. 8299 * @group: sched_group whose statistics are to be updated. 8300 * @sgs: variable to hold the statistics for this group. 8301 * @p: The task for which we look for the idlest group/CPU. 
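 *
 * Statistics are gathered as if @p had already left its current CPU: the
 * cpu_load_without()/cpu_util_without() helpers and the nr_running
 * adjustments below subtract @p's contribution when it is still queued
 * there (see task_running_on_cpu()).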
8302 */ 8303 static inline void update_sg_wakeup_stats(struct sched_domain *sd, 8304 struct sched_group *group, 8305 struct sg_lb_stats *sgs, 8306 struct task_struct *p) 8307 { 8308 int i, nr_running; 8309 8310 memset(sgs, 0, sizeof(*sgs)); 8311 8312 for_each_cpu(i, sched_group_span(group)) { 8313 struct rq *rq = cpu_rq(i); 8314 unsigned int local; 8315 8316 sgs->group_load += cpu_load_without(rq, p); 8317 sgs->group_util += cpu_util_without(i, p); 8318 local = task_running_on_cpu(i, p); 8319 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; 8320 8321 nr_running = rq->nr_running - local; 8322 sgs->sum_nr_running += nr_running; 8323 8324 /* 8325 * No need to call idle_cpu_without() if nr_running is not 0 8326 */ 8327 if (!nr_running && idle_cpu_without(i, p)) 8328 sgs->idle_cpus++; 8329 8330 } 8331 8332 /* Check if task fits in the group */ 8333 if (sd->flags & SD_ASYM_CPUCAPACITY && 8334 !task_fits_capacity(p, group->sgc->max_capacity)) { 8335 sgs->group_misfit_task_load = 1; 8336 } 8337 8338 sgs->group_capacity = group->sgc->capacity; 8339 8340 sgs->group_weight = group->group_weight; 8341 8342 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); 8343 8344 /* 8345 * Computing avg_load makes sense only when group is fully busy or 8346 * overloaded 8347 */ 8348 if (sgs->group_type < group_fully_busy) 8349 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / 8350 sgs->group_capacity; 8351 } 8352 8353 static bool update_pick_idlest(struct sched_group *idlest, 8354 struct sg_lb_stats *idlest_sgs, 8355 struct sched_group *group, 8356 struct sg_lb_stats *sgs) 8357 { 8358 if (sgs->group_type < idlest_sgs->group_type) 8359 return true; 8360 8361 if (sgs->group_type > idlest_sgs->group_type) 8362 return false; 8363 8364 /* 8365 * The candidate and the current idlest group are the same type of 8366 * group. Let check which one is the idlest according to the type. 8367 */ 8368 8369 switch (sgs->group_type) { 8370 case group_overloaded: 8371 case group_fully_busy: 8372 /* Select the group with lowest avg_load. */ 8373 if (idlest_sgs->avg_load <= sgs->avg_load) 8374 return false; 8375 break; 8376 8377 case group_imbalanced: 8378 case group_asym_packing: 8379 /* Those types are not used in the slow wakeup path */ 8380 return false; 8381 8382 case group_misfit_task: 8383 /* Select group with the highest max capacity */ 8384 if (idlest->sgc->max_capacity >= group->sgc->max_capacity) 8385 return false; 8386 break; 8387 8388 case group_has_spare: 8389 /* Select group with most idle CPUs */ 8390 if (idlest_sgs->idle_cpus >= sgs->idle_cpus) 8391 return false; 8392 break; 8393 } 8394 8395 return true; 8396 } 8397 8398 /* 8399 * find_idlest_group() finds and returns the least busy CPU group within the 8400 * domain. 8401 * 8402 * Assumes p is allowed on at least one CPU in sd. 
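 *
 * As a rough illustration (assuming scale_load_down(NICE_0_LOAD) == 1024
 * and an imbalance_pct of 117), the "imbalance" margin computed below is
 * 1024 * (117 - 100) / 100 = 174, i.e. a remote group has to look better
 * than the local one by about a sixth of a nice-0 task's load before the
 * task is pushed away.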
8403 */ 8404 static struct sched_group * 8405 find_idlest_group(struct sched_domain *sd, struct task_struct *p, 8406 int this_cpu, int sd_flag) 8407 { 8408 struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups; 8409 struct sg_lb_stats local_sgs, tmp_sgs; 8410 struct sg_lb_stats *sgs; 8411 unsigned long imbalance; 8412 struct sg_lb_stats idlest_sgs = { 8413 .avg_load = UINT_MAX, 8414 .group_type = group_overloaded, 8415 }; 8416 8417 imbalance = scale_load_down(NICE_0_LOAD) * 8418 (sd->imbalance_pct-100) / 100; 8419 8420 do { 8421 int local_group; 8422 8423 /* Skip over this group if it has no CPUs allowed */ 8424 if (!cpumask_intersects(sched_group_span(group), 8425 p->cpus_ptr)) 8426 continue; 8427 8428 local_group = cpumask_test_cpu(this_cpu, 8429 sched_group_span(group)); 8430 8431 if (local_group) { 8432 sgs = &local_sgs; 8433 local = group; 8434 } else { 8435 sgs = &tmp_sgs; 8436 } 8437 8438 update_sg_wakeup_stats(sd, group, sgs, p); 8439 8440 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { 8441 idlest = group; 8442 idlest_sgs = *sgs; 8443 } 8444 8445 } while (group = group->next, group != sd->groups); 8446 8447 8448 /* There is no idlest group to push tasks to */ 8449 if (!idlest) 8450 return NULL; 8451 8452 /* The local group has been skipped because of CPU affinity */ 8453 if (!local) 8454 return idlest; 8455 8456 /* 8457 * If the local group is idler than the selected idlest group 8458 * don't try and push the task. 8459 */ 8460 if (local_sgs.group_type < idlest_sgs.group_type) 8461 return NULL; 8462 8463 /* 8464 * If the local group is busier than the selected idlest group 8465 * try and push the task. 8466 */ 8467 if (local_sgs.group_type > idlest_sgs.group_type) 8468 return idlest; 8469 8470 switch (local_sgs.group_type) { 8471 case group_overloaded: 8472 case group_fully_busy: 8473 /* 8474 * When comparing groups across NUMA domains, it's possible for 8475 * the local domain to be very lightly loaded relative to the 8476 * remote domains but "imbalance" skews the comparison making 8477 * remote CPUs look much more favourable. When considering 8478 * cross-domain, add imbalance to the load on the remote node 8479 * and consider staying local. 8480 */ 8481 8482 if ((sd->flags & SD_NUMA) && 8483 ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load)) 8484 return NULL; 8485 8486 /* 8487 * If the local group is less loaded than the selected 8488 * idlest group don't try and push any tasks. 
8489 */ 8490 if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance)) 8491 return NULL; 8492 8493 if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load) 8494 return NULL; 8495 break; 8496 8497 case group_imbalanced: 8498 case group_asym_packing: 8499 /* Those type are not used in the slow wakeup path */ 8500 return NULL; 8501 8502 case group_misfit_task: 8503 /* Select group with the highest max capacity */ 8504 if (local->sgc->max_capacity >= idlest->sgc->max_capacity) 8505 return NULL; 8506 break; 8507 8508 case group_has_spare: 8509 if (sd->flags & SD_NUMA) { 8510 #ifdef CONFIG_NUMA_BALANCING 8511 int idlest_cpu; 8512 /* 8513 * If there is spare capacity at NUMA, try to select 8514 * the preferred node 8515 */ 8516 if (cpu_to_node(this_cpu) == p->numa_preferred_nid) 8517 return NULL; 8518 8519 idlest_cpu = cpumask_first(sched_group_span(idlest)); 8520 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid) 8521 return idlest; 8522 #endif 8523 /* 8524 * Otherwise, keep the task on this node to stay close 8525 * its wakeup source and improve locality. If there is 8526 * a real need of migration, periodic load balance will 8527 * take care of it. 8528 */ 8529 if (local_sgs.idle_cpus) 8530 return NULL; 8531 } 8532 8533 /* 8534 * Select group with highest number of idle CPUs. We could also 8535 * compare the utilization which is more stable but it can end 8536 * up that the group has less spare capacity but finally more 8537 * idle CPUs which means more opportunity to run task. 8538 */ 8539 if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus) 8540 return NULL; 8541 break; 8542 } 8543 8544 return idlest; 8545 } 8546 8547 /** 8548 * update_sd_lb_stats - Update sched_domain's statistics for load balancing. 8549 * @env: The load balancing environment. 8550 * @sds: variable to hold the statistics for this sched_domain. 
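 *
 * Walks each group of the domain once, filling in its sg_lb_stats and
 * remembering the busiest candidate for find_busiest_group().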
8551 */ 8552 8553 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) 8554 { 8555 struct sched_domain *child = env->sd->child; 8556 struct sched_group *sg = env->sd->groups; 8557 struct sg_lb_stats *local = &sds->local_stat; 8558 struct sg_lb_stats tmp_sgs; 8559 int sg_status = 0; 8560 8561 #ifdef CONFIG_NO_HZ_COMMON 8562 if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) 8563 env->flags |= LBF_NOHZ_STATS; 8564 #endif 8565 8566 do { 8567 struct sg_lb_stats *sgs = &tmp_sgs; 8568 int local_group; 8569 8570 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); 8571 if (local_group) { 8572 sds->local = sg; 8573 sgs = local; 8574 8575 if (env->idle != CPU_NEWLY_IDLE || 8576 time_after_eq(jiffies, sg->sgc->next_update)) 8577 update_group_capacity(env->sd, env->dst_cpu); 8578 } 8579 8580 update_sg_lb_stats(env, sg, sgs, &sg_status); 8581 8582 if (local_group) 8583 goto next_group; 8584 8585 8586 if (update_sd_pick_busiest(env, sds, sg, sgs)) { 8587 sds->busiest = sg; 8588 sds->busiest_stat = *sgs; 8589 } 8590 8591 next_group: 8592 /* Now, start updating sd_lb_stats */ 8593 sds->total_load += sgs->group_load; 8594 sds->total_capacity += sgs->group_capacity; 8595 8596 sg = sg->next; 8597 } while (sg != env->sd->groups); 8598 8599 /* Tag domain that child domain prefers tasks go to siblings first */ 8600 sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING; 8601 8602 #ifdef CONFIG_NO_HZ_COMMON 8603 if ((env->flags & LBF_NOHZ_AGAIN) && 8604 cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) { 8605 8606 WRITE_ONCE(nohz.next_blocked, 8607 jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD)); 8608 } 8609 #endif 8610 8611 if (env->sd->flags & SD_NUMA) 8612 env->fbq_type = fbq_classify_group(&sds->busiest_stat); 8613 8614 if (!env->sd->parent) { 8615 struct root_domain *rd = env->dst_rq->rd; 8616 8617 /* update overload indicator if we are at root domain */ 8618 WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD); 8619 8620 /* Update over-utilization (tipping point, U >= 0) indicator */ 8621 WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED); 8622 trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED); 8623 } else if (sg_status & SG_OVERUTILIZED) { 8624 struct root_domain *rd = env->dst_rq->rd; 8625 8626 WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED); 8627 trace_sched_overutilized_tp(rd, SG_OVERUTILIZED); 8628 } 8629 } 8630 8631 /** 8632 * calculate_imbalance - Calculate the amount of imbalance present within the 8633 * groups of a given sched_domain during load balance. 8634 * @env: load balance environment 8635 * @sds: statistics of the sched_domain whose imbalance is to be calculated. 8636 */ 8637 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 8638 { 8639 struct sg_lb_stats *local, *busiest; 8640 8641 local = &sds->local_stat; 8642 busiest = &sds->busiest_stat; 8643 8644 if (busiest->group_type == group_misfit_task) { 8645 /* Set imbalance to allow misfit tasks to be balanced. */ 8646 env->migration_type = migrate_misfit; 8647 env->imbalance = 1; 8648 return; 8649 } 8650 8651 if (busiest->group_type == group_asym_packing) { 8652 /* 8653 * In case of asym capacity, we will try to migrate all load to 8654 * the preferred CPU. 
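 *
 * The imbalance is therefore expressed as a task count (every runnable
 * CFS task of the busiest group), not as load.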
8655 */ 8656 env->migration_type = migrate_task; 8657 env->imbalance = busiest->sum_h_nr_running; 8658 return; 8659 } 8660 8661 if (busiest->group_type == group_imbalanced) { 8662 /* 8663 * In the group_imb case we cannot rely on group-wide averages 8664 * to ensure CPU-load equilibrium, try to move any task to fix 8665 * the imbalance. The next load balance will take care of 8666 * balancing back the system. 8667 */ 8668 env->migration_type = migrate_task; 8669 env->imbalance = 1; 8670 return; 8671 } 8672 8673 /* 8674 * Try to use spare capacity of local group without overloading it or 8675 * emptying busiest. 8676 */ 8677 if (local->group_type == group_has_spare) { 8678 if (busiest->group_type > group_fully_busy) { 8679 /* 8680 * If busiest is overloaded, try to fill spare 8681 * capacity. This might end up creating spare capacity 8682 * in busiest or busiest still being overloaded but 8683 * there is no simple way to directly compute the 8684 * amount of load to migrate in order to balance the 8685 * system. 8686 */ 8687 env->migration_type = migrate_util; 8688 env->imbalance = max(local->group_capacity, local->group_util) - 8689 local->group_util; 8690 8691 /* 8692 * In some cases, the group's utilization is max or even 8693 * higher than capacity because of migrations but the 8694 * local CPU is (newly) idle. There is at least one 8695 * waiting task in this overloaded busiest group. Let's 8696 * try to pull it. 8697 */ 8698 if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) { 8699 env->migration_type = migrate_task; 8700 env->imbalance = 1; 8701 } 8702 8703 return; 8704 } 8705 8706 if (busiest->group_weight == 1 || sds->prefer_sibling) { 8707 unsigned int nr_diff = busiest->sum_nr_running; 8708 /* 8709 * When prefer sibling, evenly spread running tasks on 8710 * groups. 8711 */ 8712 env->migration_type = migrate_task; 8713 lsub_positive(&nr_diff, local->sum_nr_running); 8714 env->imbalance = nr_diff >> 1; 8715 } else { 8716 8717 /* 8718 * If there is no overload, we just want to even the number of 8719 * idle cpus. 8720 */ 8721 env->migration_type = migrate_task; 8722 env->imbalance = max_t(long, 0, (local->idle_cpus - 8723 busiest->idle_cpus) >> 1); 8724 } 8725 8726 /* Consider allowing a small imbalance between NUMA groups */ 8727 if (env->sd->flags & SD_NUMA) { 8728 unsigned int imbalance_min; 8729 8730 /* 8731 * Compute an allowed imbalance based on a simple 8732 * pair of communicating tasks that should remain 8733 * local and ignore them. 8734 * 8735 * NOTE: Generally this would have been based on 8736 * the domain size and this was evaluated. However, 8737 * the benefit is similar across a range of workloads 8738 * and machines but scaling by the domain size adds 8739 * the risk that lower domains have to be rebalanced. 8740 */ 8741 imbalance_min = 2; 8742 if (busiest->sum_nr_running <= imbalance_min) 8743 env->imbalance = 0; 8744 } 8745 8746 return; 8747 } 8748 8749 /* 8750 * Local is fully busy but has to take more load to relieve the 8751 * busiest group 8752 */ 8753 if (local->group_type < group_overloaded) { 8754 /* 8755 * Local will become overloaded so the avg_load metrics are 8756 * finally needed. 
		 */

		local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
				  local->group_capacity;

		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
				sds->total_capacity;
	}

	/*
	 * Both groups are or will become overloaded and we're trying to get all
	 * the CPUs to the average_load, so we don't want to push ourselves
	 * above the average load, nor do we wish to reduce the max loaded CPU
	 * below the average load. At the same time, we also don't want to
	 * reduce the group load below the group capacity. Thus we look for
	 * the minimum possible imbalance.
	 */
	env->migration_type = migrate_load;
	env->imbalance = min(
		(busiest->avg_load - sds->avg_load) * busiest->group_capacity,
		(sds->avg_load - local->avg_load) * local->group_capacity
	) / SCHED_CAPACITY_SCALE;
}

/******* find_busiest_group() helpers end here *********************/

/*
 * Decision matrix according to the local and busiest group type:
 *
 * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
 * has_spare       nr_idle   balanced   N/A    N/A  balanced   balanced
 * fully_busy      nr_idle   nr_idle    N/A    N/A  balanced   balanced
 * misfit_task     force     N/A        N/A    N/A  force      force
 * asym_packing    force     force      N/A    N/A  force      force
 * imbalanced      force     force      N/A    N/A  force      force
 * overloaded      force     force      N/A    N/A  force      avg_load
 *
 * N/A :      Not Applicable because already filtered while updating
 *            statistics.
 * balanced : The system is balanced for these 2 groups.
 * force :    Calculate the imbalance as load migration is probably needed.
 * avg_load : Only if imbalance is significant enough.
 * nr_idle :  dst_cpu is not busy and the number of idle CPUs is quite
 *            different in groups.
 */

/**
 * find_busiest_group - Returns the busiest group within the sched_domain
 * if there is an imbalance.
 *
 * Also calculates the amount of runnable load which should be moved
 * to restore balance.
 *
 * @env: The load balancing environment.
 *
 * Return: The busiest group if an imbalance exists, NULL otherwise.
 */
static struct sched_group *find_busiest_group(struct lb_env *env)
{
	struct sg_lb_stats *local, *busiest;
	struct sd_lb_stats sds;

	init_sd_lb_stats(&sds);

	/*
	 * Compute the various statistics relevant for load balancing at
	 * this level.
	 */
	update_sd_lb_stats(env, &sds);

	if (sched_energy_enabled()) {
		struct root_domain *rd = env->dst_rq->rd;

		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
			goto out_balanced;
	}

	local = &sds.local_stat;
	busiest = &sds.busiest_stat;

	/* There is no busy sibling group to pull tasks from */
	if (!sds.busiest)
		goto out_balanced;

	/* Misfit tasks should be dealt with regardless of the avg load */
	if (busiest->group_type == group_misfit_task)
		goto force_balance;

	/* ASYM feature bypasses nice load balance check */
	if (busiest->group_type == group_asym_packing)
		goto force_balance;

	/*
	 * If the busiest group is imbalanced the below checks don't
	 * work because they assume all things are equal, which typically
	 * isn't true due to cpus_ptr constraints and the like.
8853 */ 8854 if (busiest->group_type == group_imbalanced) 8855 goto force_balance; 8856 8857 /* 8858 * If the local group is busier than the selected busiest group 8859 * don't try and pull any tasks. 8860 */ 8861 if (local->group_type > busiest->group_type) 8862 goto out_balanced; 8863 8864 /* 8865 * When groups are overloaded, use the avg_load to ensure fairness 8866 * between tasks. 8867 */ 8868 if (local->group_type == group_overloaded) { 8869 /* 8870 * If the local group is more loaded than the selected 8871 * busiest group don't try to pull any tasks. 8872 */ 8873 if (local->avg_load >= busiest->avg_load) 8874 goto out_balanced; 8875 8876 /* XXX broken for overlapping NUMA groups */ 8877 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / 8878 sds.total_capacity; 8879 8880 /* 8881 * Don't pull any tasks if this group is already above the 8882 * domain average load. 8883 */ 8884 if (local->avg_load >= sds.avg_load) 8885 goto out_balanced; 8886 8887 /* 8888 * If the busiest group is more loaded, use imbalance_pct to be 8889 * conservative. 8890 */ 8891 if (100 * busiest->avg_load <= 8892 env->sd->imbalance_pct * local->avg_load) 8893 goto out_balanced; 8894 } 8895 8896 /* Try to move all excess tasks to child's sibling domain */ 8897 if (sds.prefer_sibling && local->group_type == group_has_spare && 8898 busiest->sum_nr_running > local->sum_nr_running + 1) 8899 goto force_balance; 8900 8901 if (busiest->group_type != group_overloaded) { 8902 if (env->idle == CPU_NOT_IDLE) 8903 /* 8904 * If the busiest group is not overloaded (and as a 8905 * result the local one too) but this CPU is already 8906 * busy, let another idle CPU try to pull task. 8907 */ 8908 goto out_balanced; 8909 8910 if (busiest->group_weight > 1 && 8911 local->idle_cpus <= (busiest->idle_cpus + 1)) 8912 /* 8913 * If the busiest group is not overloaded 8914 * and there is no imbalance between this and busiest 8915 * group wrt idle CPUs, it is balanced. The imbalance 8916 * becomes significant if the diff is greater than 1 8917 * otherwise we might end up to just move the imbalance 8918 * on another group. Of course this applies only if 8919 * there is more than 1 CPU per group. 8920 */ 8921 goto out_balanced; 8922 8923 if (busiest->sum_h_nr_running == 1) 8924 /* 8925 * busiest doesn't have any tasks waiting to run 8926 */ 8927 goto out_balanced; 8928 } 8929 8930 force_balance: 8931 /* Looks like there is an imbalance. Compute it */ 8932 calculate_imbalance(env, &sds); 8933 return env->imbalance ? sds.busiest : NULL; 8934 8935 out_balanced: 8936 env->imbalance = 0; 8937 return NULL; 8938 } 8939 8940 /* 8941 * find_busiest_queue - find the busiest runqueue among the CPUs in the group. 8942 */ 8943 static struct rq *find_busiest_queue(struct lb_env *env, 8944 struct sched_group *group) 8945 { 8946 struct rq *busiest = NULL, *rq; 8947 unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1; 8948 unsigned int busiest_nr = 0; 8949 int i; 8950 8951 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 8952 unsigned long capacity, load, util; 8953 unsigned int nr_running; 8954 enum fbq_type rt; 8955 8956 rq = cpu_rq(i); 8957 rt = fbq_classify_rq(rq); 8958 8959 /* 8960 * We classify groups/runqueues into three groups: 8961 * - regular: there are !numa tasks 8962 * - remote: there are numa tasks that run on the 'wrong' node 8963 * - all: there is no distinction 8964 * 8965 * In order to avoid migrating ideally placed numa tasks, 8966 * ignore those when there's better options. 
8967 * 8968 * If we ignore the actual busiest queue to migrate another 8969 * task, the next balance pass can still reduce the busiest 8970 * queue by moving tasks around inside the node. 8971 * 8972 * If we cannot move enough load due to this classification 8973 * the next pass will adjust the group classification and 8974 * allow migration of more tasks. 8975 * 8976 * Both cases only affect the total convergence complexity. 8977 */ 8978 if (rt > env->fbq_type) 8979 continue; 8980 8981 capacity = capacity_of(i); 8982 nr_running = rq->cfs.h_nr_running; 8983 8984 /* 8985 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could 8986 * eventually lead to active_balancing high->low capacity. 8987 * Higher per-CPU capacity is considered better than balancing 8988 * average load. 8989 */ 8990 if (env->sd->flags & SD_ASYM_CPUCAPACITY && 8991 capacity_of(env->dst_cpu) < capacity && 8992 nr_running == 1) 8993 continue; 8994 8995 switch (env->migration_type) { 8996 case migrate_load: 8997 /* 8998 * When comparing with load imbalance, use cpu_load() 8999 * which is not scaled with the CPU capacity. 9000 */ 9001 load = cpu_load(rq); 9002 9003 if (nr_running == 1 && load > env->imbalance && 9004 !check_cpu_capacity(rq, env->sd)) 9005 break; 9006 9007 /* 9008 * For the load comparisons with the other CPUs, 9009 * consider the cpu_load() scaled with the CPU 9010 * capacity, so that the load can be moved away 9011 * from the CPU that is potentially running at a 9012 * lower capacity. 9013 * 9014 * Thus we're looking for max(load_i / capacity_i), 9015 * crosswise multiplication to rid ourselves of the 9016 * division works out to: 9017 * load_i * capacity_j > load_j * capacity_i; 9018 * where j is our previous maximum. 9019 */ 9020 if (load * busiest_capacity > busiest_load * capacity) { 9021 busiest_load = load; 9022 busiest_capacity = capacity; 9023 busiest = rq; 9024 } 9025 break; 9026 9027 case migrate_util: 9028 util = cpu_util(cpu_of(rq)); 9029 9030 if (busiest_util < util) { 9031 busiest_util = util; 9032 busiest = rq; 9033 } 9034 break; 9035 9036 case migrate_task: 9037 if (busiest_nr < nr_running) { 9038 busiest_nr = nr_running; 9039 busiest = rq; 9040 } 9041 break; 9042 9043 case migrate_misfit: 9044 /* 9045 * For ASYM_CPUCAPACITY domains with misfit tasks we 9046 * simply seek the "biggest" misfit task. 9047 */ 9048 if (rq->misfit_task_load > busiest_load) { 9049 busiest_load = rq->misfit_task_load; 9050 busiest = rq; 9051 } 9052 9053 break; 9054 9055 } 9056 } 9057 9058 return busiest; 9059 } 9060 9061 /* 9062 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but 9063 * so long as it is large enough. 9064 */ 9065 #define MAX_PINNED_INTERVAL 512 9066 9067 static inline bool 9068 asym_active_balance(struct lb_env *env) 9069 { 9070 /* 9071 * ASYM_PACKING needs to force migrate tasks from busy but 9072 * lower priority CPUs in order to pack all tasks in the 9073 * highest priority CPUs. 9074 */ 9075 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && 9076 sched_asym_prefer(env->dst_cpu, env->src_cpu); 9077 } 9078 9079 static inline bool 9080 voluntary_active_balance(struct lb_env *env) 9081 { 9082 struct sched_domain *sd = env->sd; 9083 9084 if (asym_active_balance(env)) 9085 return 1; 9086 9087 /* 9088 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. 9089 * It's worth migrating the task if the src_cpu's capacity is reduced 9090 * because of other sched_class or IRQs if more capacity stays 9091 * available on dst_cpu. 
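 *
 * Illustrative numbers (not from this file): with an imbalance_pct of 117,
 * a src CPU squeezed down to a capacity of 800 against a dst CPU at 1024
 * satisfies the second test below (800 * 117 = 93600 < 1024 * 100 =
 * 102400), so moving the lone task is considered worthwhile.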
9092 */ 9093 if ((env->idle != CPU_NOT_IDLE) && 9094 (env->src_rq->cfs.h_nr_running == 1)) { 9095 if ((check_cpu_capacity(env->src_rq, sd)) && 9096 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) 9097 return 1; 9098 } 9099 9100 if (env->migration_type == migrate_misfit) 9101 return 1; 9102 9103 return 0; 9104 } 9105 9106 static int need_active_balance(struct lb_env *env) 9107 { 9108 struct sched_domain *sd = env->sd; 9109 9110 if (voluntary_active_balance(env)) 9111 return 1; 9112 9113 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); 9114 } 9115 9116 static int active_load_balance_cpu_stop(void *data); 9117 9118 static int should_we_balance(struct lb_env *env) 9119 { 9120 struct sched_group *sg = env->sd->groups; 9121 int cpu, balance_cpu = -1; 9122 9123 /* 9124 * Ensure the balancing environment is consistent; can happen 9125 * when the softirq triggers 'during' hotplug. 9126 */ 9127 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) 9128 return 0; 9129 9130 /* 9131 * In the newly idle case, we will allow all the CPUs 9132 * to do the newly idle load balance. 9133 */ 9134 if (env->idle == CPU_NEWLY_IDLE) 9135 return 1; 9136 9137 /* Try to find first idle CPU */ 9138 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { 9139 if (!idle_cpu(cpu)) 9140 continue; 9141 9142 balance_cpu = cpu; 9143 break; 9144 } 9145 9146 if (balance_cpu == -1) 9147 balance_cpu = group_balance_cpu(sg); 9148 9149 /* 9150 * First idle CPU or the first CPU(busiest) in this sched group 9151 * is eligible for doing load balancing at this and above domains. 9152 */ 9153 return balance_cpu == env->dst_cpu; 9154 } 9155 9156 /* 9157 * Check this_cpu to ensure it is balanced within domain. Attempt to move 9158 * tasks if there is an imbalance. 9159 */ 9160 static int load_balance(int this_cpu, struct rq *this_rq, 9161 struct sched_domain *sd, enum cpu_idle_type idle, 9162 int *continue_balancing) 9163 { 9164 int ld_moved, cur_ld_moved, active_balance = 0; 9165 struct sched_domain *sd_parent = sd->parent; 9166 struct sched_group *group; 9167 struct rq *busiest; 9168 struct rq_flags rf; 9169 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); 9170 9171 struct lb_env env = { 9172 .sd = sd, 9173 .dst_cpu = this_cpu, 9174 .dst_rq = this_rq, 9175 .dst_grpmask = sched_group_span(sd->groups), 9176 .idle = idle, 9177 .loop_break = sched_nr_migrate_break, 9178 .cpus = cpus, 9179 .fbq_type = all, 9180 .tasks = LIST_HEAD_INIT(env.tasks), 9181 }; 9182 9183 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); 9184 9185 schedstat_inc(sd->lb_count[idle]); 9186 9187 redo: 9188 if (!should_we_balance(&env)) { 9189 *continue_balancing = 0; 9190 goto out_balanced; 9191 } 9192 9193 group = find_busiest_group(&env); 9194 if (!group) { 9195 schedstat_inc(sd->lb_nobusyg[idle]); 9196 goto out_balanced; 9197 } 9198 9199 busiest = find_busiest_queue(&env, group); 9200 if (!busiest) { 9201 schedstat_inc(sd->lb_nobusyq[idle]); 9202 goto out_balanced; 9203 } 9204 9205 BUG_ON(busiest == env.dst_rq); 9206 9207 schedstat_add(sd->lb_imbalance[idle], env.imbalance); 9208 9209 env.src_cpu = busiest->cpu; 9210 env.src_rq = busiest; 9211 9212 ld_moved = 0; 9213 if (busiest->nr_running > 1) { 9214 /* 9215 * Attempt to move tasks. If find_busiest_group has found 9216 * an imbalance but busiest->nr_running <= 1, the group is 9217 * still unbalanced. ld_moved simply stays zero, so it is 9218 * correctly treated as an imbalance. 
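 *
 * loop_max below caps how many tasks a single attempt may scan
 * (sysctl_sched_nr_migrate, 32 by default); detach_tasks() periodically
 * sets LBF_NEED_BREAK so the busiest rq lock can be dropped, and we
 * re-enter via the more_balance label.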
9219 */ 9220 env.flags |= LBF_ALL_PINNED; 9221 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); 9222 9223 more_balance: 9224 rq_lock_irqsave(busiest, &rf); 9225 update_rq_clock(busiest); 9226 9227 /* 9228 * cur_ld_moved - load moved in current iteration 9229 * ld_moved - cumulative load moved across iterations 9230 */ 9231 cur_ld_moved = detach_tasks(&env); 9232 9233 /* 9234 * We've detached some tasks from busiest_rq. Every 9235 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely 9236 * unlock busiest->lock, and we are able to be sure 9237 * that nobody can manipulate the tasks in parallel. 9238 * See task_rq_lock() family for the details. 9239 */ 9240 9241 rq_unlock(busiest, &rf); 9242 9243 if (cur_ld_moved) { 9244 attach_tasks(&env); 9245 ld_moved += cur_ld_moved; 9246 } 9247 9248 local_irq_restore(rf.flags); 9249 9250 if (env.flags & LBF_NEED_BREAK) { 9251 env.flags &= ~LBF_NEED_BREAK; 9252 goto more_balance; 9253 } 9254 9255 /* 9256 * Revisit (affine) tasks on src_cpu that couldn't be moved to 9257 * us and move them to an alternate dst_cpu in our sched_group 9258 * where they can run. The upper limit on how many times we 9259 * iterate on same src_cpu is dependent on number of CPUs in our 9260 * sched_group. 9261 * 9262 * This changes load balance semantics a bit on who can move 9263 * load to a given_cpu. In addition to the given_cpu itself 9264 * (or a ilb_cpu acting on its behalf where given_cpu is 9265 * nohz-idle), we now have balance_cpu in a position to move 9266 * load to given_cpu. In rare situations, this may cause 9267 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding 9268 * _independently_ and at _same_ time to move some load to 9269 * given_cpu) causing exceess load to be moved to given_cpu. 9270 * This however should not happen so much in practice and 9271 * moreover subsequent load balance cycles should correct the 9272 * excess load moved. 9273 */ 9274 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { 9275 9276 /* Prevent to re-select dst_cpu via env's CPUs */ 9277 __cpumask_clear_cpu(env.dst_cpu, env.cpus); 9278 9279 env.dst_rq = cpu_rq(env.new_dst_cpu); 9280 env.dst_cpu = env.new_dst_cpu; 9281 env.flags &= ~LBF_DST_PINNED; 9282 env.loop = 0; 9283 env.loop_break = sched_nr_migrate_break; 9284 9285 /* 9286 * Go back to "more_balance" rather than "redo" since we 9287 * need to continue with same src_cpu. 9288 */ 9289 goto more_balance; 9290 } 9291 9292 /* 9293 * We failed to reach balance because of affinity. 9294 */ 9295 if (sd_parent) { 9296 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 9297 9298 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) 9299 *group_imbalance = 1; 9300 } 9301 9302 /* All tasks on this runqueue were pinned by CPU affinity */ 9303 if (unlikely(env.flags & LBF_ALL_PINNED)) { 9304 __cpumask_clear_cpu(cpu_of(busiest), cpus); 9305 /* 9306 * Attempting to continue load balancing at the current 9307 * sched_domain level only makes sense if there are 9308 * active CPUs remaining as possible busiest CPUs to 9309 * pull load from which are not contained within the 9310 * destination group that is receiving any migrated 9311 * load. 9312 */ 9313 if (!cpumask_subset(cpus, env.dst_grpmask)) { 9314 env.loop = 0; 9315 env.loop_break = sched_nr_migrate_break; 9316 goto redo; 9317 } 9318 goto out_all_pinned; 9319 } 9320 } 9321 9322 if (!ld_moved) { 9323 schedstat_inc(sd->lb_failed[idle]); 9324 /* 9325 * Increment the failure counter only on periodic balance. 
9326 * We do not want newidle balance, which can be very 9327 * frequent, pollute the failure counter causing 9328 * excessive cache_hot migrations and active balances. 9329 */ 9330 if (idle != CPU_NEWLY_IDLE) 9331 sd->nr_balance_failed++; 9332 9333 if (need_active_balance(&env)) { 9334 unsigned long flags; 9335 9336 raw_spin_lock_irqsave(&busiest->lock, flags); 9337 9338 /* 9339 * Don't kick the active_load_balance_cpu_stop, 9340 * if the curr task on busiest CPU can't be 9341 * moved to this_cpu: 9342 */ 9343 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { 9344 raw_spin_unlock_irqrestore(&busiest->lock, 9345 flags); 9346 env.flags |= LBF_ALL_PINNED; 9347 goto out_one_pinned; 9348 } 9349 9350 /* 9351 * ->active_balance synchronizes accesses to 9352 * ->active_balance_work. Once set, it's cleared 9353 * only after active load balance is finished. 9354 */ 9355 if (!busiest->active_balance) { 9356 busiest->active_balance = 1; 9357 busiest->push_cpu = this_cpu; 9358 active_balance = 1; 9359 } 9360 raw_spin_unlock_irqrestore(&busiest->lock, flags); 9361 9362 if (active_balance) { 9363 stop_one_cpu_nowait(cpu_of(busiest), 9364 active_load_balance_cpu_stop, busiest, 9365 &busiest->active_balance_work); 9366 } 9367 9368 /* We've kicked active balancing, force task migration. */ 9369 sd->nr_balance_failed = sd->cache_nice_tries+1; 9370 } 9371 } else 9372 sd->nr_balance_failed = 0; 9373 9374 if (likely(!active_balance) || voluntary_active_balance(&env)) { 9375 /* We were unbalanced, so reset the balancing interval */ 9376 sd->balance_interval = sd->min_interval; 9377 } else { 9378 /* 9379 * If we've begun active balancing, start to back off. This 9380 * case may not be covered by the all_pinned logic if there 9381 * is only 1 task on the busy runqueue (because we don't call 9382 * detach_tasks). 9383 */ 9384 if (sd->balance_interval < sd->max_interval) 9385 sd->balance_interval *= 2; 9386 } 9387 9388 goto out; 9389 9390 out_balanced: 9391 /* 9392 * We reach balance although we may have faced some affinity 9393 * constraints. Clear the imbalance flag only if other tasks got 9394 * a chance to move and fix the imbalance. 9395 */ 9396 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { 9397 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 9398 9399 if (*group_imbalance) 9400 *group_imbalance = 0; 9401 } 9402 9403 out_all_pinned: 9404 /* 9405 * We reach balance because all tasks are pinned at this level so 9406 * we can't migrate them. Let the imbalance flag set so parent level 9407 * can try to migrate them. 9408 */ 9409 schedstat_inc(sd->lb_balanced[idle]); 9410 9411 sd->nr_balance_failed = 0; 9412 9413 out_one_pinned: 9414 ld_moved = 0; 9415 9416 /* 9417 * newidle_balance() disregards balance intervals, so we could 9418 * repeatedly reach this code, which would lead to balance_interval 9419 * skyrocketting in a short amount of time. Skip the balance_interval 9420 * increase logic to avoid that. 
9421 */ 9422 if (env.idle == CPU_NEWLY_IDLE) 9423 goto out; 9424 9425 /* tune up the balancing interval */ 9426 if ((env.flags & LBF_ALL_PINNED && 9427 sd->balance_interval < MAX_PINNED_INTERVAL) || 9428 sd->balance_interval < sd->max_interval) 9429 sd->balance_interval *= 2; 9430 out: 9431 return ld_moved; 9432 } 9433 9434 static inline unsigned long 9435 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) 9436 { 9437 unsigned long interval = sd->balance_interval; 9438 9439 if (cpu_busy) 9440 interval *= sd->busy_factor; 9441 9442 /* scale ms to jiffies */ 9443 interval = msecs_to_jiffies(interval); 9444 interval = clamp(interval, 1UL, max_load_balance_interval); 9445 9446 return interval; 9447 } 9448 9449 static inline void 9450 update_next_balance(struct sched_domain *sd, unsigned long *next_balance) 9451 { 9452 unsigned long interval, next; 9453 9454 /* used by idle balance, so cpu_busy = 0 */ 9455 interval = get_sd_balance_interval(sd, 0); 9456 next = sd->last_balance + interval; 9457 9458 if (time_after(*next_balance, next)) 9459 *next_balance = next; 9460 } 9461 9462 /* 9463 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes 9464 * running tasks off the busiest CPU onto idle CPUs. It requires at 9465 * least 1 task to be running on each physical CPU where possible, and 9466 * avoids physical / logical imbalances. 9467 */ 9468 static int active_load_balance_cpu_stop(void *data) 9469 { 9470 struct rq *busiest_rq = data; 9471 int busiest_cpu = cpu_of(busiest_rq); 9472 int target_cpu = busiest_rq->push_cpu; 9473 struct rq *target_rq = cpu_rq(target_cpu); 9474 struct sched_domain *sd; 9475 struct task_struct *p = NULL; 9476 struct rq_flags rf; 9477 9478 rq_lock_irq(busiest_rq, &rf); 9479 /* 9480 * Between queueing the stop-work and running it is a hole in which 9481 * CPUs can become inactive. We should not move tasks from or to 9482 * inactive CPUs. 9483 */ 9484 if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) 9485 goto out_unlock; 9486 9487 /* Make sure the requested CPU hasn't gone down in the meantime: */ 9488 if (unlikely(busiest_cpu != smp_processor_id() || 9489 !busiest_rq->active_balance)) 9490 goto out_unlock; 9491 9492 /* Is there any task to move? */ 9493 if (busiest_rq->nr_running <= 1) 9494 goto out_unlock; 9495 9496 /* 9497 * This condition is "impossible", if it occurs 9498 * we need to fix it. Originally reported by 9499 * Bjorn Helgaas on a 128-CPU setup. 9500 */ 9501 BUG_ON(busiest_rq == target_rq); 9502 9503 /* Search for an sd spanning us and the target CPU. */ 9504 rcu_read_lock(); 9505 for_each_domain(target_cpu, sd) { 9506 if ((sd->flags & SD_LOAD_BALANCE) && 9507 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) 9508 break; 9509 } 9510 9511 if (likely(sd)) { 9512 struct lb_env env = { 9513 .sd = sd, 9514 .dst_cpu = target_cpu, 9515 .dst_rq = target_rq, 9516 .src_cpu = busiest_rq->cpu, 9517 .src_rq = busiest_rq, 9518 .idle = CPU_IDLE, 9519 /* 9520 * can_migrate_task() doesn't need to compute new_dst_cpu 9521 * for active balancing. Since we have CPU_IDLE, but no 9522 * @dst_grpmask we need to make that test go away with lying 9523 * about DST_PINNED. 9524 */ 9525 .flags = LBF_DST_PINNED, 9526 }; 9527 9528 schedstat_inc(sd->alb_count); 9529 update_rq_clock(busiest_rq); 9530 9531 p = detach_one_task(&env); 9532 if (p) { 9533 schedstat_inc(sd->alb_pushed); 9534 /* Active balancing done, reset the failure counter. 
*/ 9535 sd->nr_balance_failed = 0; 9536 } else { 9537 schedstat_inc(sd->alb_failed); 9538 } 9539 } 9540 rcu_read_unlock(); 9541 out_unlock: 9542 busiest_rq->active_balance = 0; 9543 rq_unlock(busiest_rq, &rf); 9544 9545 if (p) 9546 attach_one_task(target_rq, p); 9547 9548 local_irq_enable(); 9549 9550 return 0; 9551 } 9552 9553 static DEFINE_SPINLOCK(balancing); 9554 9555 /* 9556 * Scale the max load_balance interval with the number of CPUs in the system. 9557 * This trades load-balance latency on larger machines for less cross talk. 9558 */ 9559 void update_max_interval(void) 9560 { 9561 max_load_balance_interval = HZ*num_online_cpus()/10; 9562 } 9563 9564 /* 9565 * It checks each scheduling domain to see if it is due to be balanced, 9566 * and initiates a balancing operation if so. 9567 * 9568 * Balancing parameters are set up in init_sched_domains. 9569 */ 9570 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) 9571 { 9572 int continue_balancing = 1; 9573 int cpu = rq->cpu; 9574 int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu); 9575 unsigned long interval; 9576 struct sched_domain *sd; 9577 /* Earliest time when we have to do rebalance again */ 9578 unsigned long next_balance = jiffies + 60*HZ; 9579 int update_next_balance = 0; 9580 int need_serialize, need_decay = 0; 9581 u64 max_cost = 0; 9582 9583 rcu_read_lock(); 9584 for_each_domain(cpu, sd) { 9585 /* 9586 * Decay the newidle max times here because this is a regular 9587 * visit to all the domains. Decay ~1% per second. 9588 */ 9589 if (time_after(jiffies, sd->next_decay_max_lb_cost)) { 9590 sd->max_newidle_lb_cost = 9591 (sd->max_newidle_lb_cost * 253) / 256; 9592 sd->next_decay_max_lb_cost = jiffies + HZ; 9593 need_decay = 1; 9594 } 9595 max_cost += sd->max_newidle_lb_cost; 9596 9597 if (!(sd->flags & SD_LOAD_BALANCE)) 9598 continue; 9599 9600 /* 9601 * Stop the load balance at this level. There is another 9602 * CPU in our sched group which is doing load balancing more 9603 * actively. 9604 */ 9605 if (!continue_balancing) { 9606 if (need_decay) 9607 continue; 9608 break; 9609 } 9610 9611 interval = get_sd_balance_interval(sd, busy); 9612 9613 need_serialize = sd->flags & SD_SERIALIZE; 9614 if (need_serialize) { 9615 if (!spin_trylock(&balancing)) 9616 goto out; 9617 } 9618 9619 if (time_after_eq(jiffies, sd->last_balance + interval)) { 9620 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { 9621 /* 9622 * The LBF_DST_PINNED logic could have changed 9623 * env->dst_cpu, so we can't know our idle 9624 * state even if we migrated tasks. Update it. 9625 */ 9626 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; 9627 busy = idle != CPU_IDLE && !sched_idle_cpu(cpu); 9628 } 9629 sd->last_balance = jiffies; 9630 interval = get_sd_balance_interval(sd, busy); 9631 } 9632 if (need_serialize) 9633 spin_unlock(&balancing); 9634 out: 9635 if (time_after(next_balance, sd->last_balance + interval)) { 9636 next_balance = sd->last_balance + interval; 9637 update_next_balance = 1; 9638 } 9639 } 9640 if (need_decay) { 9641 /* 9642 * Ensure the rq-wide value also decays but keep it at a 9643 * reasonable floor to avoid funnies with rq->avg_idle. 9644 */ 9645 rq->max_idle_balance_cost = 9646 max((u64)sysctl_sched_migration_cost, max_cost); 9647 } 9648 rcu_read_unlock(); 9649 9650 /* 9651 * next_balance will be updated only when there is a need. 9652 * When the cpu is attached to null domain for ex, it will not be 9653 * updated. 
9654 */ 9655 if (likely(update_next_balance)) { 9656 rq->next_balance = next_balance; 9657 9658 #ifdef CONFIG_NO_HZ_COMMON 9659 /* 9660 * If this CPU has been elected to perform the nohz idle 9661 * balance, the other idle CPUs have already rebalanced with 9662 * nohz_idle_balance() and nohz.next_balance has been 9663 * updated accordingly. This CPU is now running the idle load 9664 * balance for itself and we need to update 9665 * nohz.next_balance accordingly. 9666 */ 9667 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) 9668 nohz.next_balance = rq->next_balance; 9669 #endif 9670 } 9671 } 9672 9673 static inline int on_null_domain(struct rq *rq) 9674 { 9675 return unlikely(!rcu_dereference_sched(rq->sd)); 9676 } 9677 9678 #ifdef CONFIG_NO_HZ_COMMON 9679 /* 9680 * Idle load balancing details: 9681 * - When one of the busy CPUs notices that idle rebalancing may be 9682 * needed, it kicks the idle load balancer, which then does idle 9683 * load balancing for all the idle CPUs. 9684 * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not 9685 * set up anywhere yet. 9686 */ 9687 9688 static inline int find_new_ilb(void) 9689 { 9690 int ilb; 9691 9692 for_each_cpu_and(ilb, nohz.idle_cpus_mask, 9693 housekeeping_cpumask(HK_FLAG_MISC)) { 9694 if (idle_cpu(ilb)) 9695 return ilb; 9696 } 9697 9698 return nr_cpu_ids; 9699 } 9700 9701 /* 9702 * Kick a CPU to do the nohz balancing, if it is time for it. We pick any 9703 * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one). 9704 */ 9705 static void kick_ilb(unsigned int flags) 9706 { 9707 int ilb_cpu; 9708 9709 nohz.next_balance++; 9710 9711 ilb_cpu = find_new_ilb(); 9712 9713 if (ilb_cpu >= nr_cpu_ids) 9714 return; 9715 9716 flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu)); 9717 if (flags & NOHZ_KICK_MASK) 9718 return; 9719 9720 /* 9721 * Use smp_send_reschedule() instead of resched_cpu(). 9722 * This way we generate a sched IPI on the target CPU which 9723 * is idle. And the softirq performing nohz idle load balance 9724 * will be run before returning from the IPI. 9725 */ 9726 smp_send_reschedule(ilb_cpu); 9727 } 9728 9729 /* 9730 * Current decision point for kicking the idle load balancer in the presence 9731 * of idle CPUs in the system. 9732 */ 9733 static void nohz_balancer_kick(struct rq *rq) 9734 { 9735 unsigned long now = jiffies; 9736 struct sched_domain_shared *sds; 9737 struct sched_domain *sd; 9738 int nr_busy, i, cpu = rq->cpu; 9739 unsigned int flags = 0; 9740 9741 if (unlikely(rq->idle_balance)) 9742 return; 9743 9744 /* 9745 * We may have recently been in ticked or tickless idle mode. At the first 9746 * busy tick after returning from idle, we will update the busy stats. 9747 */ 9748 nohz_balance_exit_idle(rq); 9749 9750 /* 9751 * None are in tickless mode and hence no need for NOHZ idle load 9752 * balancing. 9753 */ 9754 if (likely(!atomic_read(&nohz.nr_cpus))) 9755 return; 9756 9757 if (READ_ONCE(nohz.has_blocked) && 9758 time_after(now, READ_ONCE(nohz.next_blocked))) 9759 flags = NOHZ_STATS_KICK; 9760 9761 if (time_before(now, nohz.next_balance)) 9762 goto out; 9763 9764 if (rq->nr_running >= 2) { 9765 flags = NOHZ_KICK_MASK; 9766 goto out; 9767 } 9768 9769 rcu_read_lock(); 9770 9771 sd = rcu_dereference(rq->sd); 9772 if (sd) { 9773 /* 9774 * If there's a CFS task and the current CPU has reduced 9775 * capacity, kick the ILB to see if there's a better CPU to run 9776 * on.
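 * ("Reduced capacity" is what check_cpu_capacity() detects: the CPU's
 * remaining capacity, after RT/DL/IRQ pressure is subtracted, has dropped
 * below its original capacity by more than sd->imbalance_pct allows.)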
9777 */ 9778 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { 9779 flags = NOHZ_KICK_MASK; 9780 goto unlock; 9781 } 9782 } 9783 9784 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); 9785 if (sd) { 9786 /* 9787 * When ASYM_PACKING; see if there's a more preferred CPU 9788 * currently idle; in which case, kick the ILB to move tasks 9789 * around. 9790 */ 9791 for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { 9792 if (sched_asym_prefer(i, cpu)) { 9793 flags = NOHZ_KICK_MASK; 9794 goto unlock; 9795 } 9796 } 9797 } 9798 9799 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu)); 9800 if (sd) { 9801 /* 9802 * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU 9803 * to run the misfit task on. 9804 */ 9805 if (check_misfit_status(rq, sd)) { 9806 flags = NOHZ_KICK_MASK; 9807 goto unlock; 9808 } 9809 9810 /* 9811 * For asymmetric systems, we do not want to nicely balance 9812 * cache use, instead we want to embrace asymmetry and only 9813 * ensure tasks have enough CPU capacity. 9814 * 9815 * Skip the LLC logic because it's not relevant in that case. 9816 */ 9817 goto unlock; 9818 } 9819 9820 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 9821 if (sds) { 9822 /* 9823 * If there is an imbalance between LLC domains (IOW we could 9824 * increase the overall cache use), we need some less-loaded LLC 9825 * domain to pull some load. Likewise, we may need to spread 9826 * load within the current LLC domain (e.g. packed SMT cores but 9827 * other CPUs are idle). We can't really know from here how busy 9828 * the others are - so just get a nohz balance going if it looks 9829 * like this LLC domain has tasks we could move. 9830 */ 9831 nr_busy = atomic_read(&sds->nr_busy_cpus); 9832 if (nr_busy > 1) { 9833 flags = NOHZ_KICK_MASK; 9834 goto unlock; 9835 } 9836 } 9837 unlock: 9838 rcu_read_unlock(); 9839 out: 9840 if (flags) 9841 kick_ilb(flags); 9842 } 9843 9844 static void set_cpu_sd_state_busy(int cpu) 9845 { 9846 struct sched_domain *sd; 9847 9848 rcu_read_lock(); 9849 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 9850 9851 if (!sd || !sd->nohz_idle) 9852 goto unlock; 9853 sd->nohz_idle = 0; 9854 9855 atomic_inc(&sd->shared->nr_busy_cpus); 9856 unlock: 9857 rcu_read_unlock(); 9858 } 9859 9860 void nohz_balance_exit_idle(struct rq *rq) 9861 { 9862 SCHED_WARN_ON(rq != this_rq()); 9863 9864 if (likely(!rq->nohz_tick_stopped)) 9865 return; 9866 9867 rq->nohz_tick_stopped = 0; 9868 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); 9869 atomic_dec(&nohz.nr_cpus); 9870 9871 set_cpu_sd_state_busy(rq->cpu); 9872 } 9873 9874 static void set_cpu_sd_state_idle(int cpu) 9875 { 9876 struct sched_domain *sd; 9877 9878 rcu_read_lock(); 9879 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 9880 9881 if (!sd || sd->nohz_idle) 9882 goto unlock; 9883 sd->nohz_idle = 1; 9884 9885 atomic_dec(&sd->shared->nr_busy_cpus); 9886 unlock: 9887 rcu_read_unlock(); 9888 } 9889 9890 /* 9891 * This routine will record that the CPU is going idle with tick stopped. 9892 * This info will be used in performing idle load balancing in the future. 
9893 */ 9894 void nohz_balance_enter_idle(int cpu) 9895 { 9896 struct rq *rq = cpu_rq(cpu); 9897 9898 SCHED_WARN_ON(cpu != smp_processor_id()); 9899 9900 /* If this CPU is going down, then nothing needs to be done: */ 9901 if (!cpu_active(cpu)) 9902 return; 9903 9904 /* Spare idle load balancing on CPUs that don't want to be disturbed: */ 9905 if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) 9906 return; 9907 9908 /* 9909 * Can be set safely without rq->lock held 9910 * If a clear happens, it will have evaluated last additions because 9911 * rq->lock is held during the check and the clear 9912 */ 9913 rq->has_blocked_load = 1; 9914 9915 /* 9916 * The tick is still stopped but load could have been added in the 9917 * meantime. We set the nohz.has_blocked flag to trig a check of the 9918 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear 9919 * of nohz.has_blocked can only happen after checking the new load 9920 */ 9921 if (rq->nohz_tick_stopped) 9922 goto out; 9923 9924 /* If we're a completely isolated CPU, we don't play: */ 9925 if (on_null_domain(rq)) 9926 return; 9927 9928 rq->nohz_tick_stopped = 1; 9929 9930 cpumask_set_cpu(cpu, nohz.idle_cpus_mask); 9931 atomic_inc(&nohz.nr_cpus); 9932 9933 /* 9934 * Ensures that if nohz_idle_balance() fails to observe our 9935 * @idle_cpus_mask store, it must observe the @has_blocked 9936 * store. 9937 */ 9938 smp_mb__after_atomic(); 9939 9940 set_cpu_sd_state_idle(cpu); 9941 9942 out: 9943 /* 9944 * Each time a cpu enter idle, we assume that it has blocked load and 9945 * enable the periodic update of the load of idle cpus 9946 */ 9947 WRITE_ONCE(nohz.has_blocked, 1); 9948 } 9949 9950 /* 9951 * Internal function that runs load balance for all idle cpus. The load balance 9952 * can be a simple update of blocked load or a complete load balance with 9953 * tasks movement depending of flags. 9954 * The function returns false if the loop has stopped before running 9955 * through all idle CPUs. 9956 */ 9957 static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, 9958 enum cpu_idle_type idle) 9959 { 9960 /* Earliest time when we have to do rebalance again */ 9961 unsigned long now = jiffies; 9962 unsigned long next_balance = now + 60*HZ; 9963 bool has_blocked_load = false; 9964 int update_next_balance = 0; 9965 int this_cpu = this_rq->cpu; 9966 int balance_cpu; 9967 int ret = false; 9968 struct rq *rq; 9969 9970 SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); 9971 9972 /* 9973 * We assume there will be no idle load after this update and clear 9974 * the has_blocked flag. If a cpu enters idle in the mean time, it will 9975 * set the has_blocked flag and trig another update of idle load. 9976 * Because a cpu that becomes idle, is added to idle_cpus_mask before 9977 * setting the flag, we are sure to not clear the state and not 9978 * check the load of an idle cpu. 9979 */ 9980 WRITE_ONCE(nohz.has_blocked, 0); 9981 9982 /* 9983 * Ensures that if we miss the CPU, we must see the has_blocked 9984 * store from nohz_balance_enter_idle(). 9985 */ 9986 smp_mb(); 9987 9988 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { 9989 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) 9990 continue; 9991 9992 /* 9993 * If this CPU gets work to do, stop the load balancing 9994 * work being done for other CPUs. Next load 9995 * balancing owner will pick it up. 
9996 */ 9997 if (need_resched()) { 9998 has_blocked_load = true; 9999 goto abort; 10000 } 10001 10002 rq = cpu_rq(balance_cpu); 10003 10004 has_blocked_load |= update_nohz_stats(rq, true); 10005 10006 /* 10007 * If time for next balance is due, 10008 * do the balance. 10009 */ 10010 if (time_after_eq(jiffies, rq->next_balance)) { 10011 struct rq_flags rf; 10012 10013 rq_lock_irqsave(rq, &rf); 10014 update_rq_clock(rq); 10015 rq_unlock_irqrestore(rq, &rf); 10016 10017 if (flags & NOHZ_BALANCE_KICK) 10018 rebalance_domains(rq, CPU_IDLE); 10019 } 10020 10021 if (time_after(next_balance, rq->next_balance)) { 10022 next_balance = rq->next_balance; 10023 update_next_balance = 1; 10024 } 10025 } 10026 10027 /* Newly idle CPU doesn't need an update */ 10028 if (idle != CPU_NEWLY_IDLE) { 10029 update_blocked_averages(this_cpu); 10030 has_blocked_load |= this_rq->has_blocked_load; 10031 } 10032 10033 if (flags & NOHZ_BALANCE_KICK) 10034 rebalance_domains(this_rq, CPU_IDLE); 10035 10036 WRITE_ONCE(nohz.next_blocked, 10037 now + msecs_to_jiffies(LOAD_AVG_PERIOD)); 10038 10039 /* The full idle balance loop has been done */ 10040 ret = true; 10041 10042 abort: 10043 /* There is still blocked load, enable periodic update */ 10044 if (has_blocked_load) 10045 WRITE_ONCE(nohz.has_blocked, 1); 10046 10047 /* 10048 * next_balance will be updated only when there is a need. 10049 * When the CPU is attached to null domain for ex, it will not be 10050 * updated. 10051 */ 10052 if (likely(update_next_balance)) 10053 nohz.next_balance = next_balance; 10054 10055 return ret; 10056 } 10057 10058 /* 10059 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the 10060 * rebalancing for all the cpus for whom scheduler ticks are stopped. 10061 */ 10062 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) 10063 { 10064 int this_cpu = this_rq->cpu; 10065 unsigned int flags; 10066 10067 if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK)) 10068 return false; 10069 10070 if (idle != CPU_IDLE) { 10071 atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); 10072 return false; 10073 } 10074 10075 /* could be _relaxed() */ 10076 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); 10077 if (!(flags & NOHZ_KICK_MASK)) 10078 return false; 10079 10080 _nohz_idle_balance(this_rq, flags, idle); 10081 10082 return true; 10083 } 10084 10085 static void nohz_newidle_balance(struct rq *this_rq) 10086 { 10087 int this_cpu = this_rq->cpu; 10088 10089 /* 10090 * This CPU doesn't want to be disturbed by scheduler 10091 * housekeeping 10092 */ 10093 if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) 10094 return; 10095 10096 /* Will wake up very soon. No time for doing anything else*/ 10097 if (this_rq->avg_idle < sysctl_sched_migration_cost) 10098 return; 10099 10100 /* Don't need to update blocked load of idle CPUs*/ 10101 if (!READ_ONCE(nohz.has_blocked) || 10102 time_before(jiffies, READ_ONCE(nohz.next_blocked))) 10103 return; 10104 10105 raw_spin_unlock(&this_rq->lock); 10106 /* 10107 * This CPU is going to be idle and blocked load of idle CPUs 10108 * need to be updated. Run the ilb locally as it is a good 10109 * candidate for ilb instead of waking up another idle CPU. 10110 * Kick an normal ilb if we failed to do the update. 
10111 */ 10112 if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE)) 10113 kick_ilb(NOHZ_STATS_KICK); 10114 raw_spin_lock(&this_rq->lock); 10115 } 10116 10117 #else /* !CONFIG_NO_HZ_COMMON */ 10118 static inline void nohz_balancer_kick(struct rq *rq) { } 10119 10120 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) 10121 { 10122 return false; 10123 } 10124 10125 static inline void nohz_newidle_balance(struct rq *this_rq) { } 10126 #endif /* CONFIG_NO_HZ_COMMON */ 10127 10128 /* 10129 * idle_balance is called by schedule() if this_cpu is about to become 10130 * idle. Attempts to pull tasks from other CPUs. 10131 * 10132 * Returns: 10133 * < 0 - we released the lock and there are !fair tasks present 10134 * 0 - failed, no new tasks 10135 * > 0 - success, new (fair) tasks present 10136 */ 10137 int newidle_balance(struct rq *this_rq, struct rq_flags *rf) 10138 { 10139 unsigned long next_balance = jiffies + HZ; 10140 int this_cpu = this_rq->cpu; 10141 struct sched_domain *sd; 10142 int pulled_task = 0; 10143 u64 curr_cost = 0; 10144 10145 update_misfit_status(NULL, this_rq); 10146 /* 10147 * We must set idle_stamp _before_ calling idle_balance(), such that we 10148 * measure the duration of idle_balance() as idle time. 10149 */ 10150 this_rq->idle_stamp = rq_clock(this_rq); 10151 10152 /* 10153 * Do not pull tasks towards !active CPUs... 10154 */ 10155 if (!cpu_active(this_cpu)) 10156 return 0; 10157 10158 /* 10159 * This is OK, because current is on_cpu, which avoids it being picked 10160 * for load-balance and preemption/IRQs are still disabled avoiding 10161 * further scheduler activity on it and we're being very careful to 10162 * re-start the picking loop. 10163 */ 10164 rq_unpin_lock(this_rq, rf); 10165 10166 if (this_rq->avg_idle < sysctl_sched_migration_cost || 10167 !READ_ONCE(this_rq->rd->overload)) { 10168 10169 rcu_read_lock(); 10170 sd = rcu_dereference_check_sched_domain(this_rq->sd); 10171 if (sd) 10172 update_next_balance(sd, &next_balance); 10173 rcu_read_unlock(); 10174 10175 nohz_newidle_balance(this_rq); 10176 10177 goto out; 10178 } 10179 10180 raw_spin_unlock(&this_rq->lock); 10181 10182 update_blocked_averages(this_cpu); 10183 rcu_read_lock(); 10184 for_each_domain(this_cpu, sd) { 10185 int continue_balancing = 1; 10186 u64 t0, domain_cost; 10187 10188 if (!(sd->flags & SD_LOAD_BALANCE)) 10189 continue; 10190 10191 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { 10192 update_next_balance(sd, &next_balance); 10193 break; 10194 } 10195 10196 if (sd->flags & SD_BALANCE_NEWIDLE) { 10197 t0 = sched_clock_cpu(this_cpu); 10198 10199 pulled_task = load_balance(this_cpu, this_rq, 10200 sd, CPU_NEWLY_IDLE, 10201 &continue_balancing); 10202 10203 domain_cost = sched_clock_cpu(this_cpu) - t0; 10204 if (domain_cost > sd->max_newidle_lb_cost) 10205 sd->max_newidle_lb_cost = domain_cost; 10206 10207 curr_cost += domain_cost; 10208 } 10209 10210 update_next_balance(sd, &next_balance); 10211 10212 /* 10213 * Stop searching for tasks to pull if there are 10214 * now runnable tasks on this rq. 10215 */ 10216 if (pulled_task || this_rq->nr_running > 0) 10217 break; 10218 } 10219 rcu_read_unlock(); 10220 10221 raw_spin_lock(&this_rq->lock); 10222 10223 if (curr_cost > this_rq->max_idle_balance_cost) 10224 this_rq->max_idle_balance_cost = curr_cost; 10225 10226 out: 10227 /* 10228 * While browsing the domains, we released the rq lock, a task could 10229 * have been enqueued in the meantime. 
Since we're not going idle, 10230 * pretend we pulled a task. 10231 */ 10232 if (this_rq->cfs.h_nr_running && !pulled_task) 10233 pulled_task = 1; 10234 10235 /* Move the next balance forward */ 10236 if (time_after(this_rq->next_balance, next_balance)) 10237 this_rq->next_balance = next_balance; 10238 10239 /* Is there a task of a high priority class? */ 10240 if (this_rq->nr_running != this_rq->cfs.h_nr_running) 10241 pulled_task = -1; 10242 10243 if (pulled_task) 10244 this_rq->idle_stamp = 0; 10245 10246 rq_repin_lock(this_rq, rf); 10247 10248 return pulled_task; 10249 } 10250 10251 /* 10252 * run_rebalance_domains is triggered when needed from the scheduler tick. 10253 * Also triggered for nohz idle balancing (with nohz_balancing_kick set). 10254 */ 10255 static __latent_entropy void run_rebalance_domains(struct softirq_action *h) 10256 { 10257 struct rq *this_rq = this_rq(); 10258 enum cpu_idle_type idle = this_rq->idle_balance ? 10259 CPU_IDLE : CPU_NOT_IDLE; 10260 10261 /* 10262 * If this CPU has a pending nohz_balance_kick, then do the 10263 * balancing on behalf of the other idle CPUs whose ticks are 10264 * stopped. Do nohz_idle_balance *before* rebalance_domains to 10265 * give the idle CPUs a chance to load balance. Else we may 10266 * load balance only within the local sched_domain hierarchy 10267 * and abort nohz_idle_balance altogether if we pull some load. 10268 */ 10269 if (nohz_idle_balance(this_rq, idle)) 10270 return; 10271 10272 /* normal load balance */ 10273 update_blocked_averages(this_rq->cpu); 10274 rebalance_domains(this_rq, idle); 10275 } 10276 10277 /* 10278 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 10279 */ 10280 void trigger_load_balance(struct rq *rq) 10281 { 10282 /* Don't need to rebalance while attached to NULL domain */ 10283 if (unlikely(on_null_domain(rq))) 10284 return; 10285 10286 if (time_after_eq(jiffies, rq->next_balance)) 10287 raise_softirq(SCHED_SOFTIRQ); 10288 10289 nohz_balancer_kick(rq); 10290 } 10291 10292 static void rq_online_fair(struct rq *rq) 10293 { 10294 update_sysctl(); 10295 10296 update_runtime_enabled(rq); 10297 } 10298 10299 static void rq_offline_fair(struct rq *rq) 10300 { 10301 update_sysctl(); 10302 10303 /* Ensure any throttled groups are reachable by pick_next_task */ 10304 unthrottle_offline_cfs_rqs(rq); 10305 } 10306 10307 #endif /* CONFIG_SMP */ 10308 10309 /* 10310 * scheduler tick hitting a task of our scheduling class. 10311 * 10312 * NOTE: This function can be called remotely by the tick offload that 10313 * goes along full dynticks. Therefore no local assumption can be made 10314 * and everything must be accessed through the @rq and @curr passed in 10315 * parameters. 
10316 */ 10317 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) 10318 { 10319 struct cfs_rq *cfs_rq; 10320 struct sched_entity *se = &curr->se; 10321 10322 for_each_sched_entity(se) { 10323 cfs_rq = cfs_rq_of(se); 10324 entity_tick(cfs_rq, se, queued); 10325 } 10326 10327 if (static_branch_unlikely(&sched_numa_balancing)) 10328 task_tick_numa(rq, curr); 10329 10330 update_misfit_status(curr, rq); 10331 update_overutilized_status(task_rq(curr)); 10332 } 10333 10334 /* 10335 * called on fork with the child task as argument from the parent's context 10336 * - child not yet on the tasklist 10337 * - preemption disabled 10338 */ 10339 static void task_fork_fair(struct task_struct *p) 10340 { 10341 struct cfs_rq *cfs_rq; 10342 struct sched_entity *se = &p->se, *curr; 10343 struct rq *rq = this_rq(); 10344 struct rq_flags rf; 10345 10346 rq_lock(rq, &rf); 10347 update_rq_clock(rq); 10348 10349 cfs_rq = task_cfs_rq(current); 10350 curr = cfs_rq->curr; 10351 if (curr) { 10352 update_curr(cfs_rq); 10353 se->vruntime = curr->vruntime; 10354 } 10355 place_entity(cfs_rq, se, 1); 10356 10357 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { 10358 /* 10359 * Upon rescheduling, sched_class::put_prev_task() will place 10360 * 'current' within the tree based on its new key value. 10361 */ 10362 swap(curr->vruntime, se->vruntime); 10363 resched_curr(rq); 10364 } 10365 10366 se->vruntime -= cfs_rq->min_vruntime; 10367 rq_unlock(rq, &rf); 10368 } 10369 10370 /* 10371 * Priority of the task has changed. Check to see if we preempt 10372 * the current task. 10373 */ 10374 static void 10375 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) 10376 { 10377 if (!task_on_rq_queued(p)) 10378 return; 10379 10380 if (rq->cfs.nr_running == 1) 10381 return; 10382 10383 /* 10384 * Reschedule if we are currently running on this runqueue and 10385 * our priority decreased, or if we are not currently running on 10386 * this runqueue and our priority is higher than the current's 10387 */ 10388 if (rq->curr == p) { 10389 if (p->prio > oldprio) 10390 resched_curr(rq); 10391 } else 10392 check_preempt_curr(rq, p, 0); 10393 } 10394 10395 static inline bool vruntime_normalized(struct task_struct *p) 10396 { 10397 struct sched_entity *se = &p->se; 10398 10399 /* 10400 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases, 10401 * the dequeue_entity(.flags=0) will already have normalized the 10402 * vruntime. 10403 */ 10404 if (p->on_rq) 10405 return true; 10406 10407 /* 10408 * When !on_rq, vruntime of the task has usually NOT been normalized. 10409 * But there are some cases where it has already been normalized: 10410 * 10411 * - A forked child which is waiting for being woken up by 10412 * wake_up_new_task(). 10413 * - A task which has been woken up by try_to_wake_up() and 10414 * waiting for actually being woken up by sched_ttwu_pending(). 
10415 */ 10416 if (!se->sum_exec_runtime || 10417 (p->state == TASK_WAKING && p->sched_remote_wakeup)) 10418 return true; 10419 10420 return false; 10421 } 10422 10423 #ifdef CONFIG_FAIR_GROUP_SCHED 10424 /* 10425 * Propagate the changes of the sched_entity across the tg tree to make it 10426 * visible to the root 10427 */ 10428 static void propagate_entity_cfs_rq(struct sched_entity *se) 10429 { 10430 struct cfs_rq *cfs_rq; 10431 10432 /* Start to propagate at parent */ 10433 se = se->parent; 10434 10435 for_each_sched_entity(se) { 10436 cfs_rq = cfs_rq_of(se); 10437 10438 if (cfs_rq_throttled(cfs_rq)) 10439 break; 10440 10441 update_load_avg(cfs_rq, se, UPDATE_TG); 10442 } 10443 } 10444 #else 10445 static void propagate_entity_cfs_rq(struct sched_entity *se) { } 10446 #endif 10447 10448 static void detach_entity_cfs_rq(struct sched_entity *se) 10449 { 10450 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10451 10452 /* Catch up with the cfs_rq and remove our load when we leave */ 10453 update_load_avg(cfs_rq, se, 0); 10454 detach_entity_load_avg(cfs_rq, se); 10455 update_tg_load_avg(cfs_rq, false); 10456 propagate_entity_cfs_rq(se); 10457 } 10458 10459 static void attach_entity_cfs_rq(struct sched_entity *se) 10460 { 10461 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10462 10463 #ifdef CONFIG_FAIR_GROUP_SCHED 10464 /* 10465 * Since the real-depth could have been changed (only FAIR 10466 * class maintain depth value), reset depth properly. 10467 */ 10468 se->depth = se->parent ? se->parent->depth + 1 : 0; 10469 #endif 10470 10471 /* Synchronize entity with its cfs_rq */ 10472 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); 10473 attach_entity_load_avg(cfs_rq, se); 10474 update_tg_load_avg(cfs_rq, false); 10475 propagate_entity_cfs_rq(se); 10476 } 10477 10478 static void detach_task_cfs_rq(struct task_struct *p) 10479 { 10480 struct sched_entity *se = &p->se; 10481 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10482 10483 if (!vruntime_normalized(p)) { 10484 /* 10485 * Fix up our vruntime so that the current sleep doesn't 10486 * cause 'unlimited' sleep bonus. 10487 */ 10488 place_entity(cfs_rq, se, 0); 10489 se->vruntime -= cfs_rq->min_vruntime; 10490 } 10491 10492 detach_entity_cfs_rq(se); 10493 } 10494 10495 static void attach_task_cfs_rq(struct task_struct *p) 10496 { 10497 struct sched_entity *se = &p->se; 10498 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10499 10500 attach_entity_cfs_rq(se); 10501 10502 if (!vruntime_normalized(p)) 10503 se->vruntime += cfs_rq->min_vruntime; 10504 } 10505 10506 static void switched_from_fair(struct rq *rq, struct task_struct *p) 10507 { 10508 detach_task_cfs_rq(p); 10509 } 10510 10511 static void switched_to_fair(struct rq *rq, struct task_struct *p) 10512 { 10513 attach_task_cfs_rq(p); 10514 10515 if (task_on_rq_queued(p)) { 10516 /* 10517 * We were most likely switched from sched_rt, so 10518 * kick off the schedule if running, otherwise just see 10519 * if we can still preempt the current task. 10520 */ 10521 if (rq->curr == p) 10522 resched_curr(rq); 10523 else 10524 check_preempt_curr(rq, p, 0); 10525 } 10526 } 10527 10528 /* Account for a task changing its policy or group. 10529 * 10530 * This routine is mostly called to set cfs_rq->curr field when a task 10531 * migrates between groups/classes. 
10532 */ 10533 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) 10534 { 10535 struct sched_entity *se = &p->se; 10536 10537 #ifdef CONFIG_SMP 10538 if (task_on_rq_queued(p)) { 10539 /* 10540 * Move the next running task to the front of the list, so our 10541 * cfs_tasks list becomes MRU one. 10542 */ 10543 list_move(&se->group_node, &rq->cfs_tasks); 10544 } 10545 #endif 10546 10547 for_each_sched_entity(se) { 10548 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10549 10550 set_next_entity(cfs_rq, se); 10551 /* ensure bandwidth has been allocated on our new cfs_rq */ 10552 account_cfs_rq_runtime(cfs_rq, 0); 10553 } 10554 } 10555 10556 void init_cfs_rq(struct cfs_rq *cfs_rq) 10557 { 10558 cfs_rq->tasks_timeline = RB_ROOT_CACHED; 10559 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); 10560 #ifndef CONFIG_64BIT 10561 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; 10562 #endif 10563 #ifdef CONFIG_SMP 10564 raw_spin_lock_init(&cfs_rq->removed.lock); 10565 #endif 10566 } 10567 10568 #ifdef CONFIG_FAIR_GROUP_SCHED 10569 static void task_set_group_fair(struct task_struct *p) 10570 { 10571 struct sched_entity *se = &p->se; 10572 10573 set_task_rq(p, task_cpu(p)); 10574 se->depth = se->parent ? se->parent->depth + 1 : 0; 10575 } 10576 10577 static void task_move_group_fair(struct task_struct *p) 10578 { 10579 detach_task_cfs_rq(p); 10580 set_task_rq(p, task_cpu(p)); 10581 10582 #ifdef CONFIG_SMP 10583 /* Tell se's cfs_rq has been changed -- migrated */ 10584 p->se.avg.last_update_time = 0; 10585 #endif 10586 attach_task_cfs_rq(p); 10587 } 10588 10589 static void task_change_group_fair(struct task_struct *p, int type) 10590 { 10591 switch (type) { 10592 case TASK_SET_GROUP: 10593 task_set_group_fair(p); 10594 break; 10595 10596 case TASK_MOVE_GROUP: 10597 task_move_group_fair(p); 10598 break; 10599 } 10600 } 10601 10602 void free_fair_sched_group(struct task_group *tg) 10603 { 10604 int i; 10605 10606 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); 10607 10608 for_each_possible_cpu(i) { 10609 if (tg->cfs_rq) 10610 kfree(tg->cfs_rq[i]); 10611 if (tg->se) 10612 kfree(tg->se[i]); 10613 } 10614 10615 kfree(tg->cfs_rq); 10616 kfree(tg->se); 10617 } 10618 10619 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 10620 { 10621 struct sched_entity *se; 10622 struct cfs_rq *cfs_rq; 10623 int i; 10624 10625 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); 10626 if (!tg->cfs_rq) 10627 goto err; 10628 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); 10629 if (!tg->se) 10630 goto err; 10631 10632 tg->shares = NICE_0_LOAD; 10633 10634 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); 10635 10636 for_each_possible_cpu(i) { 10637 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), 10638 GFP_KERNEL, cpu_to_node(i)); 10639 if (!cfs_rq) 10640 goto err; 10641 10642 se = kzalloc_node(sizeof(struct sched_entity), 10643 GFP_KERNEL, cpu_to_node(i)); 10644 if (!se) 10645 goto err_free_rq; 10646 10647 init_cfs_rq(cfs_rq); 10648 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); 10649 init_entity_runnable_average(se); 10650 } 10651 10652 return 1; 10653 10654 err_free_rq: 10655 kfree(cfs_rq); 10656 err: 10657 return 0; 10658 } 10659 10660 void online_fair_sched_group(struct task_group *tg) 10661 { 10662 struct sched_entity *se; 10663 struct rq_flags rf; 10664 struct rq *rq; 10665 int i; 10666 10667 for_each_possible_cpu(i) { 10668 rq = cpu_rq(i); 10669 se = tg->se[i]; 10670 rq_lock_irq(rq, &rf); 10671 update_rq_clock(rq); 10672 attach_entity_cfs_rq(se); 10673 sync_throttle(tg, 
i); 10674 rq_unlock_irq(rq, &rf); 10675 } 10676 } 10677 10678 void unregister_fair_sched_group(struct task_group *tg) 10679 { 10680 unsigned long flags; 10681 struct rq *rq; 10682 int cpu; 10683 10684 for_each_possible_cpu(cpu) { 10685 if (tg->se[cpu]) 10686 remove_entity_load_avg(tg->se[cpu]); 10687 10688 /* 10689 * Only empty task groups can be destroyed; so we can speculatively 10690 * check on_list without danger of it being re-added. 10691 */ 10692 if (!tg->cfs_rq[cpu]->on_list) 10693 continue; 10694 10695 rq = cpu_rq(cpu); 10696 10697 raw_spin_lock_irqsave(&rq->lock, flags); 10698 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); 10699 raw_spin_unlock_irqrestore(&rq->lock, flags); 10700 } 10701 } 10702 10703 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 10704 struct sched_entity *se, int cpu, 10705 struct sched_entity *parent) 10706 { 10707 struct rq *rq = cpu_rq(cpu); 10708 10709 cfs_rq->tg = tg; 10710 cfs_rq->rq = rq; 10711 init_cfs_rq_runtime(cfs_rq); 10712 10713 tg->cfs_rq[cpu] = cfs_rq; 10714 tg->se[cpu] = se; 10715 10716 /* se could be NULL for root_task_group */ 10717 if (!se) 10718 return; 10719 10720 if (!parent) { 10721 se->cfs_rq = &rq->cfs; 10722 se->depth = 0; 10723 } else { 10724 se->cfs_rq = parent->my_q; 10725 se->depth = parent->depth + 1; 10726 } 10727 10728 se->my_q = cfs_rq; 10729 /* guarantee group entities always have weight */ 10730 update_load_set(&se->load, NICE_0_LOAD); 10731 se->parent = parent; 10732 } 10733 10734 static DEFINE_MUTEX(shares_mutex); 10735 10736 int sched_group_set_shares(struct task_group *tg, unsigned long shares) 10737 { 10738 int i; 10739 10740 /* 10741 * We can't change the weight of the root cgroup. 10742 */ 10743 if (!tg->se[0]) 10744 return -EINVAL; 10745 10746 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); 10747 10748 mutex_lock(&shares_mutex); 10749 if (tg->shares == shares) 10750 goto done; 10751 10752 tg->shares = shares; 10753 for_each_possible_cpu(i) { 10754 struct rq *rq = cpu_rq(i); 10755 struct sched_entity *se = tg->se[i]; 10756 struct rq_flags rf; 10757 10758 /* Propagate contribution to hierarchy */ 10759 rq_lock_irqsave(rq, &rf); 10760 update_rq_clock(rq); 10761 for_each_sched_entity(se) { 10762 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); 10763 update_cfs_group(se); 10764 } 10765 rq_unlock_irqrestore(rq, &rf); 10766 } 10767 10768 done: 10769 mutex_unlock(&shares_mutex); 10770 return 0; 10771 } 10772 #else /* CONFIG_FAIR_GROUP_SCHED */ 10773 10774 void free_fair_sched_group(struct task_group *tg) { } 10775 10776 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 10777 { 10778 return 1; 10779 } 10780 10781 void online_fair_sched_group(struct task_group *tg) { } 10782 10783 void unregister_fair_sched_group(struct task_group *tg) { } 10784 10785 #endif /* CONFIG_FAIR_GROUP_SCHED */ 10786 10787 10788 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) 10789 { 10790 struct sched_entity *se = &task->se; 10791 unsigned int rr_interval = 0; 10792 10793 /* 10794 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise 10795 * idle runqueue: 10796 */ 10797 if (rq->cfs.load.weight) 10798 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); 10799 10800 return rr_interval; 10801 } 10802 10803 /* 10804 * All the scheduling class methods: 10805 */ 10806 const struct sched_class fair_sched_class = { 10807 .next = &idle_sched_class, 10808 .enqueue_task = enqueue_task_fair, 10809 .dequeue_task = dequeue_task_fair, 10810 
.yield_task = yield_task_fair, 10811 .yield_to_task = yield_to_task_fair, 10812 10813 .check_preempt_curr = check_preempt_wakeup, 10814 10815 .pick_next_task = __pick_next_task_fair, 10816 .put_prev_task = put_prev_task_fair, 10817 .set_next_task = set_next_task_fair, 10818 10819 #ifdef CONFIG_SMP 10820 .balance = balance_fair, 10821 .select_task_rq = select_task_rq_fair, 10822 .migrate_task_rq = migrate_task_rq_fair, 10823 10824 .rq_online = rq_online_fair, 10825 .rq_offline = rq_offline_fair, 10826 10827 .task_dead = task_dead_fair, 10828 .set_cpus_allowed = set_cpus_allowed_common, 10829 #endif 10830 10831 .task_tick = task_tick_fair, 10832 .task_fork = task_fork_fair, 10833 10834 .prio_changed = prio_changed_fair, 10835 .switched_from = switched_from_fair, 10836 .switched_to = switched_to_fair, 10837 10838 .get_rr_interval = get_rr_interval_fair, 10839 10840 .update_curr = update_curr_fair, 10841 10842 #ifdef CONFIG_FAIR_GROUP_SCHED 10843 .task_change_group = task_change_group_fair, 10844 #endif 10845 10846 #ifdef CONFIG_UCLAMP_TASK 10847 .uclamp_enabled = 1, 10848 #endif 10849 }; 10850 10851 #ifdef CONFIG_SCHED_DEBUG 10852 void print_cfs_stats(struct seq_file *m, int cpu) 10853 { 10854 struct cfs_rq *cfs_rq, *pos; 10855 10856 rcu_read_lock(); 10857 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) 10858 print_cfs_rq(m, cpu, cfs_rq); 10859 rcu_read_unlock(); 10860 } 10861 10862 #ifdef CONFIG_NUMA_BALANCING 10863 void show_numa_stats(struct task_struct *p, struct seq_file *m) 10864 { 10865 int node; 10866 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0; 10867 struct numa_group *ng; 10868 10869 rcu_read_lock(); 10870 ng = rcu_dereference(p->numa_group); 10871 for_each_online_node(node) { 10872 if (p->numa_faults) { 10873 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; 10874 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; 10875 } 10876 if (ng) { 10877 gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)], 10878 gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; 10879 } 10880 print_numa_stats(m, node, tsf, tpf, gsf, gpf); 10881 } 10882 rcu_read_unlock(); 10883 } 10884 #endif /* CONFIG_NUMA_BALANCING */ 10885 #endif /* CONFIG_SCHED_DEBUG */ 10886 10887 __init void init_sched_fair_class(void) 10888 { 10889 #ifdef CONFIG_SMP 10890 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); 10891 10892 #ifdef CONFIG_NO_HZ_COMMON 10893 nohz.next_balance = jiffies; 10894 nohz.next_blocked = jiffies; 10895 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); 10896 #endif 10897 #endif /* SMP */ 10898 10899 } 10900 10901 /* 10902 * Helper functions to facilitate extracting info from tracepoints. 10903 */ 10904 10905 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq) 10906 { 10907 #ifdef CONFIG_SMP 10908 return cfs_rq ? &cfs_rq->avg : NULL; 10909 #else 10910 return NULL; 10911 #endif 10912 } 10913 EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg); 10914 10915 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len) 10916 { 10917 if (!cfs_rq) { 10918 if (str) 10919 strlcpy(str, "(null)", len); 10920 else 10921 return NULL; 10922 } 10923 10924 cfs_rq_tg_path(cfs_rq, str, len); 10925 return str; 10926 } 10927 EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path); 10928 10929 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq) 10930 { 10931 return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1; 10932 } 10933 EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu); 10934 10935 const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq) 10936 { 10937 #ifdef CONFIG_SMP 10938 return rq ? 
&rq->avg_rt : NULL; 10939 #else 10940 return NULL; 10941 #endif 10942 } 10943 EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt); 10944 10945 const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq) 10946 { 10947 #ifdef CONFIG_SMP 10948 return rq ? &rq->avg_dl : NULL; 10949 #else 10950 return NULL; 10951 #endif 10952 } 10953 EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl); 10954 10955 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq) 10956 { 10957 #if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ) 10958 return rq ? &rq->avg_irq : NULL; 10959 #else 10960 return NULL; 10961 #endif 10962 } 10963 EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq); 10964 10965 int sched_trace_rq_cpu(struct rq *rq) 10966 { 10967 return rq ? cpu_of(rq) : -1; 10968 } 10969 EXPORT_SYMBOL_GPL(sched_trace_rq_cpu); 10970 10971 const struct cpumask *sched_trace_rd_span(struct root_domain *rd) 10972 { 10973 #ifdef CONFIG_SMP 10974 return rd ? rd->span : NULL; 10975 #else 10976 return NULL; 10977 #endif 10978 } 10979 EXPORT_SYMBOL_GPL(sched_trace_rd_span); 10980
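
/*
 * A minimal sketch of how an out-of-tree module could consume the helpers
 * above. It assumes the bare pelt_cfs_tp tracepoint declared via
 * DECLARE_TRACE() in <trace/events/sched.h>, whose probes take a leading
 * void *data argument followed by the traced struct cfs_rq *; the
 * register_trace_pelt_cfs_tp() / unregister_trace_pelt_cfs_tp() names are
 * the ones DECLARE_TRACE() generates and should be checked against the
 * kernel version in use:
 *
 *	#include <linux/module.h>
 *	#include <trace/events/sched.h>
 *
 *	static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);
 *		char path[64];
 *
 *		if (!avg)
 *			return;
 *
 *		sched_trace_cfs_rq_path(cfs_rq, path, sizeof(path));
 *		trace_printk("cpu=%d path=%s util_avg=%lu\n",
 *			     sched_trace_cfs_rq_cpu(cfs_rq), path,
 *			     avg->util_avg);
 *	}
 *
 *	static int __init pelt_probe_init(void)
 *	{
 *		return register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
 *	}
 *	module_init(pelt_probe_init);
 *
 *	static void __exit pelt_probe_exit(void)
 *	{
 *		unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
 *		tracepoint_synchronize_unregister();
 *	}
 *	module_exit(pelt_probe_exit);
 *
 *	MODULE_LICENSE("GPL");
 */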