1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH) 4 * 5 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 6 * 7 * Interactivity improvements by Mike Galbraith 8 * (C) 2007 Mike Galbraith <efault@gmx.de> 9 * 10 * Various enhancements by Dmitry Adamushko. 11 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com> 12 * 13 * Group scheduling enhancements by Srivatsa Vaddagiri 14 * Copyright IBM Corporation, 2007 15 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> 16 * 17 * Scaled math optimizations by Thomas Gleixner 18 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de> 19 * 20 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra 21 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra 22 */ 23 #include "sched.h" 24 25 #include <trace/events/sched.h> 26 27 /* 28 * Targeted preemption latency for CPU-bound tasks: 29 * 30 * NOTE: this latency value is not the same as the concept of 31 * 'timeslice length' - timeslices in CFS are of variable length 32 * and have no persistent notion like in traditional, time-slice 33 * based scheduling concepts. 34 * 35 * (to see the precise effective timeslice length of your workload, 36 * run vmstat and monitor the context-switches (cs) field) 37 * 38 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) 39 */ 40 unsigned int sysctl_sched_latency = 6000000ULL; 41 static unsigned int normalized_sysctl_sched_latency = 6000000ULL; 42 43 /* 44 * The initial- and re-scaling of tunables is configurable 45 * 46 * Options are: 47 * 48 * SCHED_TUNABLESCALING_NONE - unscaled, always *1 49 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) 50 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus 51 * 52 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) 53 */ 54 enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; 55 56 /* 57 * Minimal preemption granularity for CPU-bound tasks: 58 * 59 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) 60 */ 61 unsigned int sysctl_sched_min_granularity = 750000ULL; 62 static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; 63 64 /* 65 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity 66 */ 67 static unsigned int sched_nr_latency = 8; 68 69 /* 70 * After fork, child runs first. If set to 0 (default) then 71 * parent will (try to) run first. 72 */ 73 unsigned int sysctl_sched_child_runs_first __read_mostly; 74 75 /* 76 * SCHED_OTHER wake-up granularity. 77 * 78 * This option delays the preemption effects of decoupled workloads 79 * and reduces their over-scheduling. Synchronous workloads will still 80 * have immediate wakeup/sleep latencies. 81 * 82 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) 83 */ 84 unsigned int sysctl_sched_wakeup_granularity = 1000000UL; 85 static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; 86 87 const_debug unsigned int sysctl_sched_migration_cost = 500000UL; 88 89 #ifdef CONFIG_SMP 90 /* 91 * For asym packing, by default the lower numbered CPU has higher priority. 
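 * (Clarifying note, not part of the original comment: the __weak default
 * below simply returns -cpu, so CPU 0 ends up with the highest asym-packing
 * priority; architectures may override arch_asym_cpu_priority() to express
 * their own ordering.)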
92 */ 93 int __weak arch_asym_cpu_priority(int cpu) 94 { 95 return -cpu; 96 } 97 98 /* 99 * The margin used when comparing utilization with CPU capacity: 100 * util * margin < capacity * 1024 101 * 102 * (default: ~20%) 103 */ 104 static unsigned int capacity_margin = 1280; 105 #endif 106 107 #ifdef CONFIG_CFS_BANDWIDTH 108 /* 109 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool 110 * each time a cfs_rq requests quota. 111 * 112 * Note: in the case that the slice exceeds the runtime remaining (either due 113 * to consumption or the quota being specified to be smaller than the slice) 114 * we will always only issue the remaining available time. 115 * 116 * (default: 5 msec, units: microseconds) 117 */ 118 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; 119 #endif 120 121 static inline void update_load_add(struct load_weight *lw, unsigned long inc) 122 { 123 lw->weight += inc; 124 lw->inv_weight = 0; 125 } 126 127 static inline void update_load_sub(struct load_weight *lw, unsigned long dec) 128 { 129 lw->weight -= dec; 130 lw->inv_weight = 0; 131 } 132 133 static inline void update_load_set(struct load_weight *lw, unsigned long w) 134 { 135 lw->weight = w; 136 lw->inv_weight = 0; 137 } 138 139 /* 140 * Increase the granularity value when there are more CPUs, 141 * because with more CPUs the 'effective latency' as visible 142 * to users decreases. But the relationship is not linear, 143 * so pick a second-best guess by going with the log2 of the 144 * number of CPUs. 145 * 146 * This idea comes from the SD scheduler of Con Kolivas: 147 */ 148 static unsigned int get_update_sysctl_factor(void) 149 { 150 unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8); 151 unsigned int factor; 152 153 switch (sysctl_sched_tunable_scaling) { 154 case SCHED_TUNABLESCALING_NONE: 155 factor = 1; 156 break; 157 case SCHED_TUNABLESCALING_LINEAR: 158 factor = cpus; 159 break; 160 case SCHED_TUNABLESCALING_LOG: 161 default: 162 factor = 1 + ilog2(cpus); 163 break; 164 } 165 166 return factor; 167 } 168 169 static void update_sysctl(void) 170 { 171 unsigned int factor = get_update_sysctl_factor(); 172 173 #define SET_SYSCTL(name) \ 174 (sysctl_##name = (factor) * normalized_sysctl_##name) 175 SET_SYSCTL(sched_min_granularity); 176 SET_SYSCTL(sched_latency); 177 SET_SYSCTL(sched_wakeup_granularity); 178 #undef SET_SYSCTL 179 } 180 181 void sched_init_granularity(void) 182 { 183 update_sysctl(); 184 } 185 186 #define WMULT_CONST (~0U) 187 #define WMULT_SHIFT 32 188 189 static void __update_inv_weight(struct load_weight *lw) 190 { 191 unsigned long w; 192 193 if (likely(lw->inv_weight)) 194 return; 195 196 w = scale_load_down(lw->weight); 197 198 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) 199 lw->inv_weight = 1; 200 else if (unlikely(!w)) 201 lw->inv_weight = WMULT_CONST; 202 else 203 lw->inv_weight = WMULT_CONST / w; 204 } 205 206 /* 207 * delta_exec * weight / lw.weight 208 * OR 209 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT 210 * 211 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case 212 * we're guaranteed shift stays positive because inv_weight is guaranteed to 213 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22. 214 * 215 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus 216 * weight/lw.weight <= 1, and therefore our shift will also be positive. 
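 * Illustrative example (numbers assumed, not part of the original comment):
 * with weight == NICE_0_LOAD (1024 after scale_load_down()) and a runqueue
 * weight lw->weight of 2048, __update_inv_weight() yields
 * inv_weight = (2^32 - 1) / 2048 ~= 2^21, so fact ~= 1024 * 2^21 = 2^31 and
 * the result is roughly (delta_exec * 2^31) >> 32 = delta_exec / 2, matching
 * delta_exec * weight / lw.weight with weight/lw.weight = 1/2.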
217 */ 218 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw) 219 { 220 u64 fact = scale_load_down(weight); 221 int shift = WMULT_SHIFT; 222 223 __update_inv_weight(lw); 224 225 if (unlikely(fact >> 32)) { 226 while (fact >> 32) { 227 fact >>= 1; 228 shift--; 229 } 230 } 231 232 /* hint to use a 32x32->64 mul */ 233 fact = (u64)(u32)fact * lw->inv_weight; 234 235 while (fact >> 32) { 236 fact >>= 1; 237 shift--; 238 } 239 240 return mul_u64_u32_shr(delta_exec, fact, shift); 241 } 242 243 244 const struct sched_class fair_sched_class; 245 246 /************************************************************** 247 * CFS operations on generic schedulable entities: 248 */ 249 250 #ifdef CONFIG_FAIR_GROUP_SCHED 251 252 /* cpu runqueue to which this cfs_rq is attached */ 253 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 254 { 255 return cfs_rq->rq; 256 } 257 258 static inline struct task_struct *task_of(struct sched_entity *se) 259 { 260 SCHED_WARN_ON(!entity_is_task(se)); 261 return container_of(se, struct task_struct, se); 262 } 263 264 /* Walk up scheduling entities hierarchy */ 265 #define for_each_sched_entity(se) \ 266 for (; se; se = se->parent) 267 268 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) 269 { 270 return p->se.cfs_rq; 271 } 272 273 /* runqueue on which this entity is (to be) queued */ 274 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) 275 { 276 return se->cfs_rq; 277 } 278 279 /* runqueue "owned" by this group */ 280 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) 281 { 282 return grp->my_q; 283 } 284 285 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) 286 { 287 if (!cfs_rq->on_list) { 288 struct rq *rq = rq_of(cfs_rq); 289 int cpu = cpu_of(rq); 290 /* 291 * Ensure we either appear before our parent (if already 292 * enqueued) or force our parent to appear after us when it is 293 * enqueued. The fact that we always enqueue bottom-up 294 * reduces this to two cases and a special case for the root 295 * cfs_rq. Furthermore, it also means that we will always reset 296 * tmp_alone_branch either when the branch is connected 297 * to a tree or when we reach the beg of the tree 298 */ 299 if (cfs_rq->tg->parent && 300 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { 301 /* 302 * If parent is already on the list, we add the child 303 * just before. Thanks to circular linked property of 304 * the list, this means to put the child at the tail 305 * of the list that starts by parent. 306 */ 307 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, 308 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); 309 /* 310 * The branch is now connected to its tree so we can 311 * reset tmp_alone_branch to the beginning of the 312 * list. 313 */ 314 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 315 } else if (!cfs_rq->tg->parent) { 316 /* 317 * cfs rq without parent should be put 318 * at the tail of the list. 319 */ 320 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, 321 &rq->leaf_cfs_rq_list); 322 /* 323 * We have reach the beg of a tree so we can reset 324 * tmp_alone_branch to the beginning of the list. 325 */ 326 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 327 } else { 328 /* 329 * The parent has not already been added so we want to 330 * make sure that it will be put after us. 331 * tmp_alone_branch points to the beg of the branch 332 * where we will add parent. 
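 * (Illustrative scenario, group names assumed: when a task in group /A/B is
 * enqueued and neither cfs_rq is on the list yet, B's cfs_rq lands here
 * first; the bottom-up walk then adds A's cfs_rq right after it at
 * tmp_alone_branch, preserving the child-before-parent ordering described
 * above.)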
333 */ 334 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, 335 rq->tmp_alone_branch); 336 /* 337 * update tmp_alone_branch to points to the new beg 338 * of the branch 339 */ 340 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; 341 } 342 343 cfs_rq->on_list = 1; 344 } 345 } 346 347 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) 348 { 349 if (cfs_rq->on_list) { 350 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); 351 cfs_rq->on_list = 0; 352 } 353 } 354 355 /* Iterate through all leaf cfs_rq's on a runqueue: */ 356 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ 357 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) 358 359 /* Do the two (enqueued) entities belong to the same group ? */ 360 static inline struct cfs_rq * 361 is_same_group(struct sched_entity *se, struct sched_entity *pse) 362 { 363 if (se->cfs_rq == pse->cfs_rq) 364 return se->cfs_rq; 365 366 return NULL; 367 } 368 369 static inline struct sched_entity *parent_entity(struct sched_entity *se) 370 { 371 return se->parent; 372 } 373 374 static void 375 find_matching_se(struct sched_entity **se, struct sched_entity **pse) 376 { 377 int se_depth, pse_depth; 378 379 /* 380 * preemption test can be made between sibling entities who are in the 381 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of 382 * both tasks until we find their ancestors who are siblings of common 383 * parent. 384 */ 385 386 /* First walk up until both entities are at same depth */ 387 se_depth = (*se)->depth; 388 pse_depth = (*pse)->depth; 389 390 while (se_depth > pse_depth) { 391 se_depth--; 392 *se = parent_entity(*se); 393 } 394 395 while (pse_depth > se_depth) { 396 pse_depth--; 397 *pse = parent_entity(*pse); 398 } 399 400 while (!is_same_group(*se, *pse)) { 401 *se = parent_entity(*se); 402 *pse = parent_entity(*pse); 403 } 404 } 405 406 #else /* !CONFIG_FAIR_GROUP_SCHED */ 407 408 static inline struct task_struct *task_of(struct sched_entity *se) 409 { 410 return container_of(se, struct task_struct, se); 411 } 412 413 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 414 { 415 return container_of(cfs_rq, struct rq, cfs); 416 } 417 418 419 #define for_each_sched_entity(se) \ 420 for (; se; se = NULL) 421 422 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) 423 { 424 return &task_rq(p)->cfs; 425 } 426 427 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) 428 { 429 struct task_struct *p = task_of(se); 430 struct rq *rq = task_rq(p); 431 432 return &rq->cfs; 433 } 434 435 /* runqueue "owned" by this group */ 436 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) 437 { 438 return NULL; 439 } 440 441 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) 442 { 443 } 444 445 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) 446 { 447 } 448 449 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ 450 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) 451 452 static inline struct sched_entity *parent_entity(struct sched_entity *se) 453 { 454 return NULL; 455 } 456 457 static inline void 458 find_matching_se(struct sched_entity **se, struct sched_entity **pse) 459 { 460 } 461 462 #endif /* CONFIG_FAIR_GROUP_SCHED */ 463 464 static __always_inline 465 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); 466 467 /************************************************************** 468 * Scheduling class tree data structure manipulation methods: 469 */ 470 471 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime) 472 { 473 s64 delta = (s64)(vruntime - 
max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);

	u64 vruntime = cfs_rq->min_vruntime;

	if (curr) {
		if (curr->on_rq)
			vruntime = curr->vruntime;
		else
			curr = NULL;
	}

	if (leftmost) { /* non-empty tree */
		struct sched_entity *se;
		se = rb_entry(leftmost, struct sched_entity, run_node);

		if (!curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	bool leftmost = true;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
546 */ 547 if (entity_before(se, entry)) { 548 link = &parent->rb_left; 549 } else { 550 link = &parent->rb_right; 551 leftmost = false; 552 } 553 } 554 555 rb_link_node(&se->run_node, parent, link); 556 rb_insert_color_cached(&se->run_node, 557 &cfs_rq->tasks_timeline, leftmost); 558 } 559 560 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 561 { 562 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); 563 } 564 565 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) 566 { 567 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); 568 569 if (!left) 570 return NULL; 571 572 return rb_entry(left, struct sched_entity, run_node); 573 } 574 575 static struct sched_entity *__pick_next_entity(struct sched_entity *se) 576 { 577 struct rb_node *next = rb_next(&se->run_node); 578 579 if (!next) 580 return NULL; 581 582 return rb_entry(next, struct sched_entity, run_node); 583 } 584 585 #ifdef CONFIG_SCHED_DEBUG 586 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) 587 { 588 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); 589 590 if (!last) 591 return NULL; 592 593 return rb_entry(last, struct sched_entity, run_node); 594 } 595 596 /************************************************************** 597 * Scheduling class statistics methods: 598 */ 599 600 int sched_proc_update_handler(struct ctl_table *table, int write, 601 void __user *buffer, size_t *lenp, 602 loff_t *ppos) 603 { 604 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 605 unsigned int factor = get_update_sysctl_factor(); 606 607 if (ret || !write) 608 return ret; 609 610 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency, 611 sysctl_sched_min_granularity); 612 613 #define WRT_SYSCTL(name) \ 614 (normalized_sysctl_##name = sysctl_##name / (factor)) 615 WRT_SYSCTL(sched_min_granularity); 616 WRT_SYSCTL(sched_latency); 617 WRT_SYSCTL(sched_wakeup_granularity); 618 #undef WRT_SYSCTL 619 620 return 0; 621 } 622 #endif 623 624 /* 625 * delta /= w 626 */ 627 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) 628 { 629 if (unlikely(se->load.weight != NICE_0_LOAD)) 630 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); 631 632 return delta; 633 } 634 635 /* 636 * The idea is to set a period in which each task runs once. 637 * 638 * When there are too many tasks (sched_nr_latency) we have to stretch 639 * this period because otherwise the slices get too small. 640 * 641 * p = (nr <= nl) ? l : l*nr/nl 642 */ 643 static u64 __sched_period(unsigned long nr_running) 644 { 645 if (unlikely(nr_running > sched_nr_latency)) 646 return nr_running * sysctl_sched_min_granularity; 647 else 648 return sysctl_sched_latency; 649 } 650 651 /* 652 * We calculate the wall-time slice from the period by taking a part 653 * proportional to the weight. 654 * 655 * s = p*P[w/rw] 656 */ 657 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) 658 { 659 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); 660 661 for_each_sched_entity(se) { 662 struct load_weight *load; 663 struct load_weight lw; 664 665 cfs_rq = cfs_rq_of(se); 666 load = &cfs_rq->load; 667 668 if (unlikely(!se->on_rq)) { 669 lw = cfs_rq->load; 670 671 update_load_add(&lw, se->load.weight); 672 load = &lw; 673 } 674 slice = __calc_delta(slice, se->load.weight, load); 675 } 676 return slice; 677 } 678 679 /* 680 * We calculate the vruntime slice of a to-be-inserted task. 
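 * As a rough example (unscaled default tunables assumed): with a 6 ms latency
 * target and sched_nr_latency = 8, four equally weighted tasks share a 6 ms
 * period and each gets a 1.5 ms wall-time slice from sched_slice(); with 16
 * such tasks the period stretches to 16 * 0.75 ms = 12 ms and each slice is
 * 0.75 ms. The vruntime slice below simply rescales that wall time by the
 * entity's weight: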
681 * 682 * vs = s/w 683 */ 684 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) 685 { 686 return calc_delta_fair(sched_slice(cfs_rq, se), se); 687 } 688 689 #ifdef CONFIG_SMP 690 #include "pelt.h" 691 #include "sched-pelt.h" 692 693 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu); 694 static unsigned long task_h_load(struct task_struct *p); 695 static unsigned long capacity_of(int cpu); 696 697 /* Give new sched_entity start runnable values to heavy its load in infant time */ 698 void init_entity_runnable_average(struct sched_entity *se) 699 { 700 struct sched_avg *sa = &se->avg; 701 702 memset(sa, 0, sizeof(*sa)); 703 704 /* 705 * Tasks are initialized with full load to be seen as heavy tasks until 706 * they get a chance to stabilize to their real load level. 707 * Group entities are initialized with zero load to reflect the fact that 708 * nothing has been attached to the task group yet. 709 */ 710 if (entity_is_task(se)) 711 sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight); 712 713 se->runnable_weight = se->load.weight; 714 715 /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */ 716 } 717 718 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq); 719 static void attach_entity_cfs_rq(struct sched_entity *se); 720 721 /* 722 * With new tasks being created, their initial util_avgs are extrapolated 723 * based on the cfs_rq's current util_avg: 724 * 725 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight 726 * 727 * However, in many cases, the above util_avg does not give a desired 728 * value. Moreover, the sum of the util_avgs may be divergent, such 729 * as when the series is a harmonic series. 730 * 731 * To solve this problem, we also cap the util_avg of successive tasks to 732 * only 1/2 of the left utilization budget: 733 * 734 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n 735 * 736 * where n denotes the nth task and cpu_scale the CPU capacity. 737 * 738 * For example, for a CPU with 1024 of capacity, a simplest series from 739 * the beginning would be like: 740 * 741 * task util_avg: 512, 256, 128, 64, 32, 16, 8, ... 742 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ... 743 * 744 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap) 745 * if util_avg > util_avg_cap. 746 */ 747 void post_init_entity_util_avg(struct sched_entity *se) 748 { 749 struct cfs_rq *cfs_rq = cfs_rq_of(se); 750 struct sched_avg *sa = &se->avg; 751 long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq))); 752 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; 753 754 if (cap > 0) { 755 if (cfs_rq->avg.util_avg != 0) { 756 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; 757 sa->util_avg /= (cfs_rq->avg.load_avg + 1); 758 759 if (sa->util_avg > cap) 760 sa->util_avg = cap; 761 } else { 762 sa->util_avg = cap; 763 } 764 } 765 766 if (entity_is_task(se)) { 767 struct task_struct *p = task_of(se); 768 if (p->sched_class != &fair_sched_class) { 769 /* 770 * For !fair tasks do: 771 * 772 update_cfs_rq_load_avg(now, cfs_rq); 773 attach_entity_load_avg(cfs_rq, se, 0); 774 switched_from_fair(rq, p); 775 * 776 * such that the next switched_to_fair() has the 777 * expected state. 
778 */ 779 se->avg.last_update_time = cfs_rq_clock_task(cfs_rq); 780 return; 781 } 782 } 783 784 attach_entity_cfs_rq(se); 785 } 786 787 #else /* !CONFIG_SMP */ 788 void init_entity_runnable_average(struct sched_entity *se) 789 { 790 } 791 void post_init_entity_util_avg(struct sched_entity *se) 792 { 793 } 794 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) 795 { 796 } 797 #endif /* CONFIG_SMP */ 798 799 /* 800 * Update the current task's runtime statistics. 801 */ 802 static void update_curr(struct cfs_rq *cfs_rq) 803 { 804 struct sched_entity *curr = cfs_rq->curr; 805 u64 now = rq_clock_task(rq_of(cfs_rq)); 806 u64 delta_exec; 807 808 if (unlikely(!curr)) 809 return; 810 811 delta_exec = now - curr->exec_start; 812 if (unlikely((s64)delta_exec <= 0)) 813 return; 814 815 curr->exec_start = now; 816 817 schedstat_set(curr->statistics.exec_max, 818 max(delta_exec, curr->statistics.exec_max)); 819 820 curr->sum_exec_runtime += delta_exec; 821 schedstat_add(cfs_rq->exec_clock, delta_exec); 822 823 curr->vruntime += calc_delta_fair(delta_exec, curr); 824 update_min_vruntime(cfs_rq); 825 826 if (entity_is_task(curr)) { 827 struct task_struct *curtask = task_of(curr); 828 829 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); 830 cgroup_account_cputime(curtask, delta_exec); 831 account_group_exec_runtime(curtask, delta_exec); 832 } 833 834 account_cfs_rq_runtime(cfs_rq, delta_exec); 835 } 836 837 static void update_curr_fair(struct rq *rq) 838 { 839 update_curr(cfs_rq_of(&rq->curr->se)); 840 } 841 842 static inline void 843 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) 844 { 845 u64 wait_start, prev_wait_start; 846 847 if (!schedstat_enabled()) 848 return; 849 850 wait_start = rq_clock(rq_of(cfs_rq)); 851 prev_wait_start = schedstat_val(se->statistics.wait_start); 852 853 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && 854 likely(wait_start > prev_wait_start)) 855 wait_start -= prev_wait_start; 856 857 __schedstat_set(se->statistics.wait_start, wait_start); 858 } 859 860 static inline void 861 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) 862 { 863 struct task_struct *p; 864 u64 delta; 865 866 if (!schedstat_enabled()) 867 return; 868 869 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); 870 871 if (entity_is_task(se)) { 872 p = task_of(se); 873 if (task_on_rq_migrating(p)) { 874 /* 875 * Preserve migrating task's wait time so wait_start 876 * time stamp can be adjusted to accumulate wait time 877 * prior to migration. 
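 * (Clarifying note, not part of the original comment: wait_start is
 * overwritten with the wait time accumulated so far; update_stats_wait_start()
 * on the destination runqueue then subtracts that value from the new clock,
 * so the recorded wait spans the migration.)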
878 */ 879 __schedstat_set(se->statistics.wait_start, delta); 880 return; 881 } 882 trace_sched_stat_wait(p, delta); 883 } 884 885 __schedstat_set(se->statistics.wait_max, 886 max(schedstat_val(se->statistics.wait_max), delta)); 887 __schedstat_inc(se->statistics.wait_count); 888 __schedstat_add(se->statistics.wait_sum, delta); 889 __schedstat_set(se->statistics.wait_start, 0); 890 } 891 892 static inline void 893 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) 894 { 895 struct task_struct *tsk = NULL; 896 u64 sleep_start, block_start; 897 898 if (!schedstat_enabled()) 899 return; 900 901 sleep_start = schedstat_val(se->statistics.sleep_start); 902 block_start = schedstat_val(se->statistics.block_start); 903 904 if (entity_is_task(se)) 905 tsk = task_of(se); 906 907 if (sleep_start) { 908 u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start; 909 910 if ((s64)delta < 0) 911 delta = 0; 912 913 if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) 914 __schedstat_set(se->statistics.sleep_max, delta); 915 916 __schedstat_set(se->statistics.sleep_start, 0); 917 __schedstat_add(se->statistics.sum_sleep_runtime, delta); 918 919 if (tsk) { 920 account_scheduler_latency(tsk, delta >> 10, 1); 921 trace_sched_stat_sleep(tsk, delta); 922 } 923 } 924 if (block_start) { 925 u64 delta = rq_clock(rq_of(cfs_rq)) - block_start; 926 927 if ((s64)delta < 0) 928 delta = 0; 929 930 if (unlikely(delta > schedstat_val(se->statistics.block_max))) 931 __schedstat_set(se->statistics.block_max, delta); 932 933 __schedstat_set(se->statistics.block_start, 0); 934 __schedstat_add(se->statistics.sum_sleep_runtime, delta); 935 936 if (tsk) { 937 if (tsk->in_iowait) { 938 __schedstat_add(se->statistics.iowait_sum, delta); 939 __schedstat_inc(se->statistics.iowait_count); 940 trace_sched_stat_iowait(tsk, delta); 941 } 942 943 trace_sched_stat_blocked(tsk, delta); 944 945 /* 946 * Blocking time is in units of nanosecs, so shift by 947 * 20 to get a milliseconds-range estimation of the 948 * amount of time that the task spent sleeping: 949 */ 950 if (unlikely(prof_on == SLEEP_PROFILING)) { 951 profile_hits(SLEEP_PROFILING, 952 (void *)get_wchan(tsk), 953 delta >> 20); 954 } 955 account_scheduler_latency(tsk, delta >> 10, 0); 956 } 957 } 958 } 959 960 /* 961 * Task is being enqueued - update stats: 962 */ 963 static inline void 964 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 965 { 966 if (!schedstat_enabled()) 967 return; 968 969 /* 970 * Are we enqueueing a waiting task? 
(for current tasks 971 * a dequeue/enqueue event is a NOP) 972 */ 973 if (se != cfs_rq->curr) 974 update_stats_wait_start(cfs_rq, se); 975 976 if (flags & ENQUEUE_WAKEUP) 977 update_stats_enqueue_sleeper(cfs_rq, se); 978 } 979 980 static inline void 981 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 982 { 983 984 if (!schedstat_enabled()) 985 return; 986 987 /* 988 * Mark the end of the wait period if dequeueing a 989 * waiting task: 990 */ 991 if (se != cfs_rq->curr) 992 update_stats_wait_end(cfs_rq, se); 993 994 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { 995 struct task_struct *tsk = task_of(se); 996 997 if (tsk->state & TASK_INTERRUPTIBLE) 998 __schedstat_set(se->statistics.sleep_start, 999 rq_clock(rq_of(cfs_rq))); 1000 if (tsk->state & TASK_UNINTERRUPTIBLE) 1001 __schedstat_set(se->statistics.block_start, 1002 rq_clock(rq_of(cfs_rq))); 1003 } 1004 } 1005 1006 /* 1007 * We are picking a new current task - update its stats: 1008 */ 1009 static inline void 1010 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) 1011 { 1012 /* 1013 * We are starting a new run period: 1014 */ 1015 se->exec_start = rq_clock_task(rq_of(cfs_rq)); 1016 } 1017 1018 /************************************************** 1019 * Scheduling class queueing methods: 1020 */ 1021 1022 #ifdef CONFIG_NUMA_BALANCING 1023 /* 1024 * Approximate time to scan a full NUMA task in ms. The task scan period is 1025 * calculated based on the tasks virtual memory size and 1026 * numa_balancing_scan_size. 1027 */ 1028 unsigned int sysctl_numa_balancing_scan_period_min = 1000; 1029 unsigned int sysctl_numa_balancing_scan_period_max = 60000; 1030 1031 /* Portion of address space to scan in MB */ 1032 unsigned int sysctl_numa_balancing_scan_size = 256; 1033 1034 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ 1035 unsigned int sysctl_numa_balancing_scan_delay = 1000; 1036 1037 struct numa_group { 1038 atomic_t refcount; 1039 1040 spinlock_t lock; /* nr_tasks, tasks */ 1041 int nr_tasks; 1042 pid_t gid; 1043 int active_nodes; 1044 1045 struct rcu_head rcu; 1046 unsigned long total_faults; 1047 unsigned long max_faults_cpu; 1048 /* 1049 * Faults_cpu is used to decide whether memory should move 1050 * towards the CPU. As a consequence, these stats are weighted 1051 * more by CPU use than by memory faults. 1052 */ 1053 unsigned long *faults_cpu; 1054 unsigned long faults[0]; 1055 }; 1056 1057 static inline unsigned long group_faults_priv(struct numa_group *ng); 1058 static inline unsigned long group_faults_shared(struct numa_group *ng); 1059 1060 static unsigned int task_nr_scan_windows(struct task_struct *p) 1061 { 1062 unsigned long rss = 0; 1063 unsigned long nr_scan_pages; 1064 1065 /* 1066 * Calculations based on RSS as non-present and empty pages are skipped 1067 * by the PTE scanner and NUMA hinting faults should be trapped based 1068 * on resident pages 1069 */ 1070 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT); 1071 rss = get_mm_rss(p->mm); 1072 if (!rss) 1073 rss = nr_scan_pages; 1074 1075 rss = round_up(rss, nr_scan_pages); 1076 return rss / nr_scan_pages; 1077 } 1078 1079 /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. 
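 * Worked example (values assumed, not part of the original comment): with the
 * default scan_size of 256 MB, MAX_SCAN_WINDOW / scan_size = 10 windows fit
 * under the cap, so the floor in task_scan_min() is 1000 / 10 = 100 ms; a
 * task with a 1 GB RSS spans 1024 / 256 = 4 scan windows, giving a minimum
 * scan period of max(100, 1000 / 4) = 250 ms.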
*/ 1080 #define MAX_SCAN_WINDOW 2560 1081 1082 static unsigned int task_scan_min(struct task_struct *p) 1083 { 1084 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size); 1085 unsigned int scan, floor; 1086 unsigned int windows = 1; 1087 1088 if (scan_size < MAX_SCAN_WINDOW) 1089 windows = MAX_SCAN_WINDOW / scan_size; 1090 floor = 1000 / windows; 1091 1092 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); 1093 return max_t(unsigned int, floor, scan); 1094 } 1095 1096 static unsigned int task_scan_start(struct task_struct *p) 1097 { 1098 unsigned long smin = task_scan_min(p); 1099 unsigned long period = smin; 1100 1101 /* Scale the maximum scan period with the amount of shared memory. */ 1102 if (p->numa_group) { 1103 struct numa_group *ng = p->numa_group; 1104 unsigned long shared = group_faults_shared(ng); 1105 unsigned long private = group_faults_priv(ng); 1106 1107 period *= atomic_read(&ng->refcount); 1108 period *= shared + 1; 1109 period /= private + shared + 1; 1110 } 1111 1112 return max(smin, period); 1113 } 1114 1115 static unsigned int task_scan_max(struct task_struct *p) 1116 { 1117 unsigned long smin = task_scan_min(p); 1118 unsigned long smax; 1119 1120 /* Watch for min being lower than max due to floor calculations */ 1121 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); 1122 1123 /* Scale the maximum scan period with the amount of shared memory. */ 1124 if (p->numa_group) { 1125 struct numa_group *ng = p->numa_group; 1126 unsigned long shared = group_faults_shared(ng); 1127 unsigned long private = group_faults_priv(ng); 1128 unsigned long period = smax; 1129 1130 period *= atomic_read(&ng->refcount); 1131 period *= shared + 1; 1132 period /= private + shared + 1; 1133 1134 smax = max(smax, period); 1135 } 1136 1137 return max(smin, smax); 1138 } 1139 1140 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 1141 { 1142 int mm_users = 0; 1143 struct mm_struct *mm = p->mm; 1144 1145 if (mm) { 1146 mm_users = atomic_read(&mm->mm_users); 1147 if (mm_users == 1) { 1148 mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 1149 mm->numa_scan_seq = 0; 1150 } 1151 } 1152 p->node_stamp = 0; 1153 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; 1154 p->numa_scan_period = sysctl_numa_balancing_scan_delay; 1155 p->numa_work.next = &p->numa_work; 1156 p->numa_faults = NULL; 1157 p->numa_group = NULL; 1158 p->last_task_numa_placement = 0; 1159 p->last_sum_exec_runtime = 0; 1160 1161 /* New address space, reset the preferred nid */ 1162 if (!(clone_flags & CLONE_VM)) { 1163 p->numa_preferred_nid = -1; 1164 return; 1165 } 1166 1167 /* 1168 * New thread, keep existing numa_preferred_nid which should be copied 1169 * already by arch_dup_task_struct but stagger when scans start. 
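 * (Clarifying note, not part of the original comment: the node_stamp delay
 * computed below grows with the number of existing users of the mm, so the
 * threads of one process do not all fire their first NUMA scan at the same
 * time.)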
1170 */ 1171 if (mm) { 1172 unsigned int delay; 1173 1174 delay = min_t(unsigned int, task_scan_max(current), 1175 current->numa_scan_period * mm_users * NSEC_PER_MSEC); 1176 delay += 2 * TICK_NSEC; 1177 p->node_stamp = delay; 1178 } 1179 } 1180 1181 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) 1182 { 1183 rq->nr_numa_running += (p->numa_preferred_nid != -1); 1184 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); 1185 } 1186 1187 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) 1188 { 1189 rq->nr_numa_running -= (p->numa_preferred_nid != -1); 1190 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); 1191 } 1192 1193 /* Shared or private faults. */ 1194 #define NR_NUMA_HINT_FAULT_TYPES 2 1195 1196 /* Memory and CPU locality */ 1197 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2) 1198 1199 /* Averaged statistics, and temporary buffers. */ 1200 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2) 1201 1202 pid_t task_numa_group_id(struct task_struct *p) 1203 { 1204 return p->numa_group ? p->numa_group->gid : 0; 1205 } 1206 1207 /* 1208 * The averaged statistics, shared & private, memory & CPU, 1209 * occupy the first half of the array. The second half of the 1210 * array is for current counters, which are averaged into the 1211 * first set by task_numa_placement. 1212 */ 1213 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv) 1214 { 1215 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv; 1216 } 1217 1218 static inline unsigned long task_faults(struct task_struct *p, int nid) 1219 { 1220 if (!p->numa_faults) 1221 return 0; 1222 1223 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + 1224 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; 1225 } 1226 1227 static inline unsigned long group_faults(struct task_struct *p, int nid) 1228 { 1229 if (!p->numa_group) 1230 return 0; 1231 1232 return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] + 1233 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)]; 1234 } 1235 1236 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) 1237 { 1238 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] + 1239 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)]; 1240 } 1241 1242 static inline unsigned long group_faults_priv(struct numa_group *ng) 1243 { 1244 unsigned long faults = 0; 1245 int node; 1246 1247 for_each_online_node(node) { 1248 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; 1249 } 1250 1251 return faults; 1252 } 1253 1254 static inline unsigned long group_faults_shared(struct numa_group *ng) 1255 { 1256 unsigned long faults = 0; 1257 int node; 1258 1259 for_each_online_node(node) { 1260 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)]; 1261 } 1262 1263 return faults; 1264 } 1265 1266 /* 1267 * A node triggering more than 1/3 as many NUMA faults as the maximum is 1268 * considered part of a numa group's pseudo-interleaving set. Migrations 1269 * between these nodes are slowed down, to allow things to settle down. 1270 */ 1271 #define ACTIVE_NODE_FRACTION 3 1272 1273 static bool numa_is_active_node(int nid, struct numa_group *ng) 1274 { 1275 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu; 1276 } 1277 1278 /* Handle placement on systems where not all nodes are directly connected. 
*/ 1279 static unsigned long score_nearby_nodes(struct task_struct *p, int nid, 1280 int maxdist, bool task) 1281 { 1282 unsigned long score = 0; 1283 int node; 1284 1285 /* 1286 * All nodes are directly connected, and the same distance 1287 * from each other. No need for fancy placement algorithms. 1288 */ 1289 if (sched_numa_topology_type == NUMA_DIRECT) 1290 return 0; 1291 1292 /* 1293 * This code is called for each node, introducing N^2 complexity, 1294 * which should be ok given the number of nodes rarely exceeds 8. 1295 */ 1296 for_each_online_node(node) { 1297 unsigned long faults; 1298 int dist = node_distance(nid, node); 1299 1300 /* 1301 * The furthest away nodes in the system are not interesting 1302 * for placement; nid was already counted. 1303 */ 1304 if (dist == sched_max_numa_distance || node == nid) 1305 continue; 1306 1307 /* 1308 * On systems with a backplane NUMA topology, compare groups 1309 * of nodes, and move tasks towards the group with the most 1310 * memory accesses. When comparing two nodes at distance 1311 * "hoplimit", only nodes closer by than "hoplimit" are part 1312 * of each group. Skip other nodes. 1313 */ 1314 if (sched_numa_topology_type == NUMA_BACKPLANE && 1315 dist >= maxdist) 1316 continue; 1317 1318 /* Add up the faults from nearby nodes. */ 1319 if (task) 1320 faults = task_faults(p, node); 1321 else 1322 faults = group_faults(p, node); 1323 1324 /* 1325 * On systems with a glueless mesh NUMA topology, there are 1326 * no fixed "groups of nodes". Instead, nodes that are not 1327 * directly connected bounce traffic through intermediate 1328 * nodes; a numa_group can occupy any set of nodes. 1329 * The further away a node is, the less the faults count. 1330 * This seems to result in good task placement. 1331 */ 1332 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 1333 faults *= (sched_max_numa_distance - dist); 1334 faults /= (sched_max_numa_distance - LOCAL_DISTANCE); 1335 } 1336 1337 score += faults; 1338 } 1339 1340 return score; 1341 } 1342 1343 /* 1344 * These return the fraction of accesses done by a particular task, or 1345 * task group, on a particular numa node. The group weight is given a 1346 * larger multiplier, in order to group tasks together that are almost 1347 * evenly spread out between numa nodes. 
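 * Illustrative example (numbers assumed): a task with 1000 recorded NUMA
 * faults in total, 300 of them on node N (plus whatever score_nearby_nodes()
 * contributes for N's neighbours), gets a task_weight() of roughly 300; both
 * functions below return per-mille fractions (roughly 0..1000).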
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
					int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	faults = task_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, true);

	return 1000 * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid,
					 int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_group)
		return 0;

	total_faults = p->numa_group->total_faults;

	if (!total_faults)
		return 0;

	faults = group_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, false);

	return 1000 * faults / total_faults;
}

bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
				int src_nid, int dst_cpu)
{
	struct numa_group *ng = p->numa_group;
	int dst_nid = cpu_to_node(dst_cpu);
	int last_cpupid, this_cpupid;

	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);

	/*
	 * Allow first faults or private faults to migrate immediately early in
	 * the lifetime of a task. The magic number 4 is based on waiting for
	 * two full passes of the "multi-stage node selection" test that is
	 * executed below.
	 */
	if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
	    (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
		return true;

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(p)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadratic squishes small probabilities, making it less likely
	 * we act on an unlikely task<->page relation.
	 */
	if (!cpupid_pid_unset(last_cpupid) &&
	    cpupid_to_nid(last_cpupid) != dst_nid)
		return false;

	/* Always allow migrate on private faults */
	if (cpupid_match_pid(p, last_cpupid))
		return true;

	/* A shared fault, but p->numa_group has not been set up yet. */
	if (!ng)
		return true;

	/*
	 * Destination node is much more heavily used than the source
	 * node? Allow migration.
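	 * (With ACTIVE_NODE_FRACTION == 3 this means the group records more
	 * than three times as many CPU-side faults on the destination node as
	 * on the source node.)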
1439 */ 1440 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) * 1441 ACTIVE_NODE_FRACTION) 1442 return true; 1443 1444 /* 1445 * Distribute memory according to CPU & memory use on each node, 1446 * with 3/4 hysteresis to avoid unnecessary memory migrations: 1447 * 1448 * faults_cpu(dst) 3 faults_cpu(src) 1449 * --------------- * - > --------------- 1450 * faults_mem(dst) 4 faults_mem(src) 1451 */ 1452 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 > 1453 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; 1454 } 1455 1456 static unsigned long weighted_cpuload(struct rq *rq); 1457 static unsigned long source_load(int cpu, int type); 1458 static unsigned long target_load(int cpu, int type); 1459 1460 /* Cached statistics for all CPUs within a node */ 1461 struct numa_stats { 1462 unsigned long load; 1463 1464 /* Total compute capacity of CPUs on a node */ 1465 unsigned long compute_capacity; 1466 }; 1467 1468 /* 1469 * XXX borrowed from update_sg_lb_stats 1470 */ 1471 static void update_numa_stats(struct numa_stats *ns, int nid) 1472 { 1473 int cpu; 1474 1475 memset(ns, 0, sizeof(*ns)); 1476 for_each_cpu(cpu, cpumask_of_node(nid)) { 1477 struct rq *rq = cpu_rq(cpu); 1478 1479 ns->load += weighted_cpuload(rq); 1480 ns->compute_capacity += capacity_of(cpu); 1481 } 1482 1483 } 1484 1485 struct task_numa_env { 1486 struct task_struct *p; 1487 1488 int src_cpu, src_nid; 1489 int dst_cpu, dst_nid; 1490 1491 struct numa_stats src_stats, dst_stats; 1492 1493 int imbalance_pct; 1494 int dist; 1495 1496 struct task_struct *best_task; 1497 long best_imp; 1498 int best_cpu; 1499 }; 1500 1501 static void task_numa_assign(struct task_numa_env *env, 1502 struct task_struct *p, long imp) 1503 { 1504 struct rq *rq = cpu_rq(env->dst_cpu); 1505 1506 /* Bail out if run-queue part of active NUMA balance. */ 1507 if (xchg(&rq->numa_migrate_on, 1)) 1508 return; 1509 1510 /* 1511 * Clear previous best_cpu/rq numa-migrate flag, since task now 1512 * found a better CPU to move/swap. 1513 */ 1514 if (env->best_cpu != -1) { 1515 rq = cpu_rq(env->best_cpu); 1516 WRITE_ONCE(rq->numa_migrate_on, 0); 1517 } 1518 1519 if (env->best_task) 1520 put_task_struct(env->best_task); 1521 if (p) 1522 get_task_struct(p); 1523 1524 env->best_task = p; 1525 env->best_imp = imp; 1526 env->best_cpu = env->dst_cpu; 1527 } 1528 1529 static bool load_too_imbalanced(long src_load, long dst_load, 1530 struct task_numa_env *env) 1531 { 1532 long imb, old_imb; 1533 long orig_src_load, orig_dst_load; 1534 long src_capacity, dst_capacity; 1535 1536 /* 1537 * The load is corrected for the CPU capacity available on each node. 1538 * 1539 * src_load dst_load 1540 * ------------ vs --------- 1541 * src_capacity dst_capacity 1542 */ 1543 src_capacity = env->src_stats.compute_capacity; 1544 dst_capacity = env->dst_stats.compute_capacity; 1545 1546 imb = abs(dst_load * src_capacity - src_load * dst_capacity); 1547 1548 orig_src_load = env->src_stats.load; 1549 orig_dst_load = env->dst_stats.load; 1550 1551 old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity); 1552 1553 /* Would this change make things worse? */ 1554 return (imb > old_imb); 1555 } 1556 1557 /* 1558 * Maximum NUMA importance can be 1998 (2*999); 1559 * SMALLIMP @ 30 would be close to 1998/64. 1560 * Used to deter task migration. 
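 * (Clarifying note: task_weight() and group_weight() are per-mille values,
 * so a combined task plus group differential tops out near 2 * 999;
 * 1998 / 64 ~= 31, hence the value 30 below.)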
1561 */ 1562 #define SMALLIMP 30 1563 1564 /* 1565 * This checks if the overall compute and NUMA accesses of the system would 1566 * be improved if the source tasks was migrated to the target dst_cpu taking 1567 * into account that it might be best if task running on the dst_cpu should 1568 * be exchanged with the source task 1569 */ 1570 static void task_numa_compare(struct task_numa_env *env, 1571 long taskimp, long groupimp, bool maymove) 1572 { 1573 struct rq *dst_rq = cpu_rq(env->dst_cpu); 1574 struct task_struct *cur; 1575 long src_load, dst_load; 1576 long load; 1577 long imp = env->p->numa_group ? groupimp : taskimp; 1578 long moveimp = imp; 1579 int dist = env->dist; 1580 1581 if (READ_ONCE(dst_rq->numa_migrate_on)) 1582 return; 1583 1584 rcu_read_lock(); 1585 cur = task_rcu_dereference(&dst_rq->curr); 1586 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) 1587 cur = NULL; 1588 1589 /* 1590 * Because we have preemption enabled we can get migrated around and 1591 * end try selecting ourselves (current == env->p) as a swap candidate. 1592 */ 1593 if (cur == env->p) 1594 goto unlock; 1595 1596 if (!cur) { 1597 if (maymove && moveimp >= env->best_imp) 1598 goto assign; 1599 else 1600 goto unlock; 1601 } 1602 1603 /* 1604 * "imp" is the fault differential for the source task between the 1605 * source and destination node. Calculate the total differential for 1606 * the source task and potential destination task. The more negative 1607 * the value is, the more remote accesses that would be expected to 1608 * be incurred if the tasks were swapped. 1609 */ 1610 /* Skip this swap candidate if cannot move to the source cpu */ 1611 if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) 1612 goto unlock; 1613 1614 /* 1615 * If dst and source tasks are in the same NUMA group, or not 1616 * in any group then look only at task weights. 1617 */ 1618 if (cur->numa_group == env->p->numa_group) { 1619 imp = taskimp + task_weight(cur, env->src_nid, dist) - 1620 task_weight(cur, env->dst_nid, dist); 1621 /* 1622 * Add some hysteresis to prevent swapping the 1623 * tasks within a group over tiny differences. 1624 */ 1625 if (cur->numa_group) 1626 imp -= imp / 16; 1627 } else { 1628 /* 1629 * Compare the group weights. If a task is all by itself 1630 * (not part of a group), use the task weight instead. 1631 */ 1632 if (cur->numa_group && env->p->numa_group) 1633 imp += group_weight(cur, env->src_nid, dist) - 1634 group_weight(cur, env->dst_nid, dist); 1635 else 1636 imp += task_weight(cur, env->src_nid, dist) - 1637 task_weight(cur, env->dst_nid, dist); 1638 } 1639 1640 if (maymove && moveimp > imp && moveimp > env->best_imp) { 1641 imp = moveimp; 1642 cur = NULL; 1643 goto assign; 1644 } 1645 1646 /* 1647 * If the NUMA importance is less than SMALLIMP, 1648 * task migration might only result in ping pong 1649 * of tasks and also hurt performance due to cache 1650 * misses. 1651 */ 1652 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2) 1653 goto unlock; 1654 1655 /* 1656 * In the overloaded case, try and keep the load balanced. 1657 */ 1658 load = task_h_load(env->p) - task_h_load(cur); 1659 if (!load) 1660 goto assign; 1661 1662 dst_load = env->dst_stats.load + load; 1663 src_load = env->src_stats.load - load; 1664 1665 if (load_too_imbalanced(src_load, dst_load, env)) 1666 goto unlock; 1667 1668 assign: 1669 /* 1670 * One idle CPU per node is evaluated for a task numa move. 1671 * Call select_idle_sibling to maybe find a better one. 
1672 */ 1673 if (!cur) { 1674 /* 1675 * select_idle_siblings() uses an per-CPU cpumask that 1676 * can be used from IRQ context. 1677 */ 1678 local_irq_disable(); 1679 env->dst_cpu = select_idle_sibling(env->p, env->src_cpu, 1680 env->dst_cpu); 1681 local_irq_enable(); 1682 } 1683 1684 task_numa_assign(env, cur, imp); 1685 unlock: 1686 rcu_read_unlock(); 1687 } 1688 1689 static void task_numa_find_cpu(struct task_numa_env *env, 1690 long taskimp, long groupimp) 1691 { 1692 long src_load, dst_load, load; 1693 bool maymove = false; 1694 int cpu; 1695 1696 load = task_h_load(env->p); 1697 dst_load = env->dst_stats.load + load; 1698 src_load = env->src_stats.load - load; 1699 1700 /* 1701 * If the improvement from just moving env->p direction is better 1702 * than swapping tasks around, check if a move is possible. 1703 */ 1704 maymove = !load_too_imbalanced(src_load, dst_load, env); 1705 1706 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { 1707 /* Skip this CPU if the source task cannot migrate */ 1708 if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed)) 1709 continue; 1710 1711 env->dst_cpu = cpu; 1712 task_numa_compare(env, taskimp, groupimp, maymove); 1713 } 1714 } 1715 1716 static int task_numa_migrate(struct task_struct *p) 1717 { 1718 struct task_numa_env env = { 1719 .p = p, 1720 1721 .src_cpu = task_cpu(p), 1722 .src_nid = task_node(p), 1723 1724 .imbalance_pct = 112, 1725 1726 .best_task = NULL, 1727 .best_imp = 0, 1728 .best_cpu = -1, 1729 }; 1730 struct sched_domain *sd; 1731 struct rq *best_rq; 1732 unsigned long taskweight, groupweight; 1733 int nid, ret, dist; 1734 long taskimp, groupimp; 1735 1736 /* 1737 * Pick the lowest SD_NUMA domain, as that would have the smallest 1738 * imbalance and would be the first to start moving tasks about. 1739 * 1740 * And we want to avoid any moving of tasks about, as that would create 1741 * random movement of tasks -- counter the numa conditions we're trying 1742 * to satisfy here. 1743 */ 1744 rcu_read_lock(); 1745 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); 1746 if (sd) 1747 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; 1748 rcu_read_unlock(); 1749 1750 /* 1751 * Cpusets can break the scheduler domain tree into smaller 1752 * balance domains, some of which do not cross NUMA boundaries. 1753 * Tasks that are "trapped" in such domains cannot be migrated 1754 * elsewhere, so there is no point in (re)trying. 1755 */ 1756 if (unlikely(!sd)) { 1757 sched_setnuma(p, task_node(p)); 1758 return -EINVAL; 1759 } 1760 1761 env.dst_nid = p->numa_preferred_nid; 1762 dist = env.dist = node_distance(env.src_nid, env.dst_nid); 1763 taskweight = task_weight(p, env.src_nid, dist); 1764 groupweight = group_weight(p, env.src_nid, dist); 1765 update_numa_stats(&env.src_stats, env.src_nid); 1766 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; 1767 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; 1768 update_numa_stats(&env.dst_stats, env.dst_nid); 1769 1770 /* Try to find a spot on the preferred nid. */ 1771 task_numa_find_cpu(&env, taskimp, groupimp); 1772 1773 /* 1774 * Look at other nodes in these cases: 1775 * - there is no space available on the preferred_nid 1776 * - the task is part of a numa_group that is interleaved across 1777 * multiple NUMA nodes; in order to better consolidate the group, 1778 * we need to check other locations. 
 */
	if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
		for_each_online_node(nid) {
			if (nid == env.src_nid || nid == p->numa_preferred_nid)
				continue;

			dist = node_distance(env.src_nid, env.dst_nid);
			if (sched_numa_topology_type == NUMA_BACKPLANE &&
						dist != env.dist) {
				taskweight = task_weight(p, env.src_nid, dist);
				groupweight = group_weight(p, env.src_nid, dist);
			}

			/* Only consider nodes where both task and group benefit */
			taskimp = task_weight(p, nid, dist) - taskweight;
			groupimp = group_weight(p, nid, dist) - groupweight;
			if (taskimp < 0 && groupimp < 0)
				continue;

			env.dist = dist;
			env.dst_nid = nid;
			update_numa_stats(&env.dst_stats, env.dst_nid);
			task_numa_find_cpu(&env, taskimp, groupimp);
		}
	}

	/*
	 * If the task is part of a workload that spans multiple NUMA nodes,
	 * and is migrating into one of the workload's active nodes, remember
	 * this node as the task's preferred numa node, so the workload can
	 * settle down.
	 * A task that migrated to a second choice node will be better off
	 * trying for a better one later. Do not set the preferred node here.
	 */
	if (p->numa_group) {
		if (env.best_cpu == -1)
			nid = env.src_nid;
		else
			nid = cpu_to_node(env.best_cpu);

		if (nid != p->numa_preferred_nid)
			sched_setnuma(p, nid);
	}

	/* No better CPU than the current one was found. */
	if (env.best_cpu == -1)
		return -EAGAIN;

	best_rq = cpu_rq(env.best_cpu);
	if (env.best_task == NULL) {
		ret = migrate_task_to(p, env.best_cpu);
		WRITE_ONCE(best_rq->numa_migrate_on, 0);
		if (ret != 0)
			trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
		return ret;
	}

	ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
	WRITE_ONCE(best_rq->numa_migrate_on, 0);

	if (ret != 0)
		trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
	put_task_struct(env.best_task);
	return ret;
}

/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
	unsigned long interval = HZ;

	/* This task has no NUMA fault statistics yet */
	if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
		return;

	/* Periodically retry migrating the task to the preferred node */
	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
	p->numa_migrate_retry = jiffies + interval;

	/* Success if task is already running on preferred CPU */
	if (task_node(p) == p->numa_preferred_nid)
		return;

	/* Otherwise, try migrate to a CPU on the preferred node */
	task_numa_migrate(p);
}

/*
 * Find out how many nodes the workload is actively running on. Do this by
 * tracking the nodes from which NUMA hinting faults are triggered. This can
 * be different from the set of nodes where the workload's memory is currently
 * located.
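 * For example (numbers assumed): if the busiest node has 900 CPU-side faults
 * for the group, every node with more than 900 / ACTIVE_NODE_FRACTION = 300
 * such faults is counted as active below.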
1871 */ 1872 static void numa_group_count_active_nodes(struct numa_group *numa_group) 1873 { 1874 unsigned long faults, max_faults = 0; 1875 int nid, active_nodes = 0; 1876 1877 for_each_online_node(nid) { 1878 faults = group_faults_cpu(numa_group, nid); 1879 if (faults > max_faults) 1880 max_faults = faults; 1881 } 1882 1883 for_each_online_node(nid) { 1884 faults = group_faults_cpu(numa_group, nid); 1885 if (faults * ACTIVE_NODE_FRACTION > max_faults) 1886 active_nodes++; 1887 } 1888 1889 numa_group->max_faults_cpu = max_faults; 1890 numa_group->active_nodes = active_nodes; 1891 } 1892 1893 /* 1894 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS 1895 * increments. The more local the fault statistics are, the higher the scan 1896 * period will be for the next scan window. If local/(local+remote) ratio is 1897 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) 1898 * the scan period will decrease. Aim for 70% local accesses. 1899 */ 1900 #define NUMA_PERIOD_SLOTS 10 1901 #define NUMA_PERIOD_THRESHOLD 7 1902 1903 /* 1904 * Increase the scan period (slow down scanning) if the majority of 1905 * our memory is already on our local node, or if the majority of 1906 * the page accesses are shared with other processes. 1907 * Otherwise, decrease the scan period. 1908 */ 1909 static void update_task_scan_period(struct task_struct *p, 1910 unsigned long shared, unsigned long private) 1911 { 1912 unsigned int period_slot; 1913 int lr_ratio, ps_ratio; 1914 int diff; 1915 1916 unsigned long remote = p->numa_faults_locality[0]; 1917 unsigned long local = p->numa_faults_locality[1]; 1918 1919 /* 1920 * If there were no record hinting faults then either the task is 1921 * completely idle or all activity is areas that are not of interest 1922 * to automatic numa balancing. Related to that, if there were failed 1923 * migration then it implies we are migrating too quickly or the local 1924 * node is overloaded. In either case, scan slower 1925 */ 1926 if (local + shared == 0 || p->numa_faults_locality[2]) { 1927 p->numa_scan_period = min(p->numa_scan_period_max, 1928 p->numa_scan_period << 1); 1929 1930 p->mm->numa_next_scan = jiffies + 1931 msecs_to_jiffies(p->numa_scan_period); 1932 1933 return; 1934 } 1935 1936 /* 1937 * Prepare to scale scan period relative to the current period. 1938 * == NUMA_PERIOD_THRESHOLD scan period stays the same 1939 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster) 1940 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower) 1941 */ 1942 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); 1943 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); 1944 ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared); 1945 1946 if (ps_ratio >= NUMA_PERIOD_THRESHOLD) { 1947 /* 1948 * Most memory accesses are local. There is no need to 1949 * do fast NUMA scanning, since memory is already local. 1950 */ 1951 int slot = ps_ratio - NUMA_PERIOD_THRESHOLD; 1952 if (!slot) 1953 slot = 1; 1954 diff = slot * period_slot; 1955 } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) { 1956 /* 1957 * Most memory accesses are shared with other tasks. 1958 * There is no point in continuing fast NUMA scanning, 1959 * since other tasks may just move the memory elsewhere. 
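		 * For instance (numbers assumed): with a 1000 ms scan period,
		 * period_slot is 100 ms; an lr_ratio of 9 gives
		 * slot = 9 - 7 = 2, so the period is increased by 200 ms,
		 * i.e. scanning slows down by roughly 20% (subject to the
		 * clamp below).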
1960 */ 1961 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD; 1962 if (!slot) 1963 slot = 1; 1964 diff = slot * period_slot; 1965 } else { 1966 /* 1967 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS, 1968 * yet they are not on the local NUMA node. Speed up 1969 * NUMA scanning to get the memory moved over. 1970 */ 1971 int ratio = max(lr_ratio, ps_ratio); 1972 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; 1973 } 1974 1975 p->numa_scan_period = clamp(p->numa_scan_period + diff, 1976 task_scan_min(p), task_scan_max(p)); 1977 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 1978 } 1979 1980 /* 1981 * Get the fraction of time the task has been running since the last 1982 * NUMA placement cycle. The scheduler keeps similar statistics, but 1983 * decays those on a 32ms period, which is orders of magnitude off 1984 * from the dozens-of-seconds NUMA balancing period. Use the scheduler 1985 * stats only if the task is so new there are no NUMA statistics yet. 1986 */ 1987 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) 1988 { 1989 u64 runtime, delta, now; 1990 /* Use the start of this time slice to avoid calculations. */ 1991 now = p->se.exec_start; 1992 runtime = p->se.sum_exec_runtime; 1993 1994 if (p->last_task_numa_placement) { 1995 delta = runtime - p->last_sum_exec_runtime; 1996 *period = now - p->last_task_numa_placement; 1997 } else { 1998 delta = p->se.avg.load_sum; 1999 *period = LOAD_AVG_MAX; 2000 } 2001 2002 p->last_sum_exec_runtime = runtime; 2003 p->last_task_numa_placement = now; 2004 2005 return delta; 2006 } 2007 2008 /* 2009 * Determine the preferred nid for a task in a numa_group. This needs to 2010 * be done in a way that produces consistent results with group_weight, 2011 * otherwise workloads might not converge. 2012 */ 2013 static int preferred_group_nid(struct task_struct *p, int nid) 2014 { 2015 nodemask_t nodes; 2016 int dist; 2017 2018 /* Direct connections between all NUMA nodes. */ 2019 if (sched_numa_topology_type == NUMA_DIRECT) 2020 return nid; 2021 2022 /* 2023 * On a system with glueless mesh NUMA topology, group_weight 2024 * scores nodes according to the number of NUMA hinting faults on 2025 * both the node itself, and on nearby nodes. 2026 */ 2027 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 2028 unsigned long score, max_score = 0; 2029 int node, max_node = nid; 2030 2031 dist = sched_max_numa_distance; 2032 2033 for_each_online_node(node) { 2034 score = group_weight(p, node, dist); 2035 if (score > max_score) { 2036 max_score = score; 2037 max_node = node; 2038 } 2039 } 2040 return max_node; 2041 } 2042 2043 /* 2044 * Finding the preferred nid in a system with NUMA backplane 2045 * interconnect topology is more involved. The goal is to locate 2046 * tasks from numa_groups near each other in the system, and 2047 * untangle workloads from different sides of the system. This requires 2048 * searching down the hierarchy of node groups, recursively searching 2049 * inside the highest scoring group of nodes. The nodemask tricks 2050 * keep the complexity of the search down. 2051 */ 2052 nodes = node_online_map; 2053 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { 2054 unsigned long max_faults = 0; 2055 nodemask_t max_group = NODE_MASK_NONE; 2056 int a, b; 2057 2058 /* Are there nodes at this distance from each other? 
*/ 2059 if (!find_numa_distance(dist)) 2060 continue; 2061 2062 for_each_node_mask(a, nodes) { 2063 unsigned long faults = 0; 2064 nodemask_t this_group; 2065 nodes_clear(this_group); 2066 2067 /* Sum group's NUMA faults; includes a==b case. */ 2068 for_each_node_mask(b, nodes) { 2069 if (node_distance(a, b) < dist) { 2070 faults += group_faults(p, b); 2071 node_set(b, this_group); 2072 node_clear(b, nodes); 2073 } 2074 } 2075 2076 /* Remember the top group. */ 2077 if (faults > max_faults) { 2078 max_faults = faults; 2079 max_group = this_group; 2080 /* 2081 * subtle: at the smallest distance there is 2082 * just one node left in each "group", the 2083 * winner is the preferred nid. 2084 */ 2085 nid = a; 2086 } 2087 } 2088 /* Next round, evaluate the nodes within max_group. */ 2089 if (!max_faults) 2090 break; 2091 nodes = max_group; 2092 } 2093 return nid; 2094 } 2095 2096 static void task_numa_placement(struct task_struct *p) 2097 { 2098 int seq, nid, max_nid = -1; 2099 unsigned long max_faults = 0; 2100 unsigned long fault_types[2] = { 0, 0 }; 2101 unsigned long total_faults; 2102 u64 runtime, period; 2103 spinlock_t *group_lock = NULL; 2104 2105 /* 2106 * The p->mm->numa_scan_seq field gets updated without 2107 * exclusive access. Use READ_ONCE() here to ensure 2108 * that the field is read in a single access: 2109 */ 2110 seq = READ_ONCE(p->mm->numa_scan_seq); 2111 if (p->numa_scan_seq == seq) 2112 return; 2113 p->numa_scan_seq = seq; 2114 p->numa_scan_period_max = task_scan_max(p); 2115 2116 total_faults = p->numa_faults_locality[0] + 2117 p->numa_faults_locality[1]; 2118 runtime = numa_get_avg_runtime(p, &period); 2119 2120 /* If the task is part of a group prevent parallel updates to group stats */ 2121 if (p->numa_group) { 2122 group_lock = &p->numa_group->lock; 2123 spin_lock_irq(group_lock); 2124 } 2125 2126 /* Find the node with the highest number of faults */ 2127 for_each_online_node(nid) { 2128 /* Keep track of the offsets in numa_faults array */ 2129 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx; 2130 unsigned long faults = 0, group_faults = 0; 2131 int priv; 2132 2133 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { 2134 long diff, f_diff, f_weight; 2135 2136 mem_idx = task_faults_idx(NUMA_MEM, nid, priv); 2137 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv); 2138 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv); 2139 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv); 2140 2141 /* Decay existing window, copy faults since last scan */ 2142 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; 2143 fault_types[priv] += p->numa_faults[membuf_idx]; 2144 p->numa_faults[membuf_idx] = 0; 2145 2146 /* 2147 * Normalize the faults_from, so all tasks in a group 2148 * count according to CPU use, instead of by the raw 2149 * number of faults. Tasks with little runtime have 2150 * little over-all impact on throughput, and thus their 2151 * faults are less important. 
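			 *
			 * As a sketch of the fixed point math below (not an
			 * exact identity): f_weight ~= (runtime / period) in
			 * 16-bit fixed point, scaled by the fraction of this
			 * window's faults that were handled from this node's
			 * CPUs, numa_faults[cpubuf_idx] / total_faults.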
2152 */ 2153 f_weight = div64_u64(runtime << 16, period + 1); 2154 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / 2155 (total_faults + 1); 2156 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; 2157 p->numa_faults[cpubuf_idx] = 0; 2158 2159 p->numa_faults[mem_idx] += diff; 2160 p->numa_faults[cpu_idx] += f_diff; 2161 faults += p->numa_faults[mem_idx]; 2162 p->total_numa_faults += diff; 2163 if (p->numa_group) { 2164 /* 2165 * safe because we can only change our own group 2166 * 2167 * mem_idx represents the offset for a given 2168 * nid and priv in a specific region because it 2169 * is at the beginning of the numa_faults array. 2170 */ 2171 p->numa_group->faults[mem_idx] += diff; 2172 p->numa_group->faults_cpu[mem_idx] += f_diff; 2173 p->numa_group->total_faults += diff; 2174 group_faults += p->numa_group->faults[mem_idx]; 2175 } 2176 } 2177 2178 if (!p->numa_group) { 2179 if (faults > max_faults) { 2180 max_faults = faults; 2181 max_nid = nid; 2182 } 2183 } else if (group_faults > max_faults) { 2184 max_faults = group_faults; 2185 max_nid = nid; 2186 } 2187 } 2188 2189 if (p->numa_group) { 2190 numa_group_count_active_nodes(p->numa_group); 2191 spin_unlock_irq(group_lock); 2192 max_nid = preferred_group_nid(p, max_nid); 2193 } 2194 2195 if (max_faults) { 2196 /* Set the new preferred node */ 2197 if (max_nid != p->numa_preferred_nid) 2198 sched_setnuma(p, max_nid); 2199 } 2200 2201 update_task_scan_period(p, fault_types[0], fault_types[1]); 2202 } 2203 2204 static inline int get_numa_group(struct numa_group *grp) 2205 { 2206 return atomic_inc_not_zero(&grp->refcount); 2207 } 2208 2209 static inline void put_numa_group(struct numa_group *grp) 2210 { 2211 if (atomic_dec_and_test(&grp->refcount)) 2212 kfree_rcu(grp, rcu); 2213 } 2214 2215 static void task_numa_group(struct task_struct *p, int cpupid, int flags, 2216 int *priv) 2217 { 2218 struct numa_group *grp, *my_grp; 2219 struct task_struct *tsk; 2220 bool join = false; 2221 int cpu = cpupid_to_cpu(cpupid); 2222 int i; 2223 2224 if (unlikely(!p->numa_group)) { 2225 unsigned int size = sizeof(struct numa_group) + 2226 4*nr_node_ids*sizeof(unsigned long); 2227 2228 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 2229 if (!grp) 2230 return; 2231 2232 atomic_set(&grp->refcount, 1); 2233 grp->active_nodes = 1; 2234 grp->max_faults_cpu = 0; 2235 spin_lock_init(&grp->lock); 2236 grp->gid = p->pid; 2237 /* Second half of the array tracks nids where faults happen */ 2238 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES * 2239 nr_node_ids; 2240 2241 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2242 grp->faults[i] = p->numa_faults[i]; 2243 2244 grp->total_faults = p->total_numa_faults; 2245 2246 grp->nr_tasks++; 2247 rcu_assign_pointer(p->numa_group, grp); 2248 } 2249 2250 rcu_read_lock(); 2251 tsk = READ_ONCE(cpu_rq(cpu)->curr); 2252 2253 if (!cpupid_match_pid(tsk, cpupid)) 2254 goto no_join; 2255 2256 grp = rcu_dereference(tsk->numa_group); 2257 if (!grp) 2258 goto no_join; 2259 2260 my_grp = p->numa_group; 2261 if (grp == my_grp) 2262 goto no_join; 2263 2264 /* 2265 * Only join the other group if its bigger; if we're the bigger group, 2266 * the other task will join us. 2267 */ 2268 if (my_grp->nr_tasks > grp->nr_tasks) 2269 goto no_join; 2270 2271 /* 2272 * Tie-break on the grp address. 2273 */ 2274 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) 2275 goto no_join; 2276 2277 /* Always join threads in the same process. 
*/ 2278 if (tsk->mm == current->mm) 2279 join = true; 2280 2281 /* Simple filter to avoid false positives due to PID collisions */ 2282 if (flags & TNF_SHARED) 2283 join = true; 2284 2285 /* Update priv based on whether false sharing was detected */ 2286 *priv = !join; 2287 2288 if (join && !get_numa_group(grp)) 2289 goto no_join; 2290 2291 rcu_read_unlock(); 2292 2293 if (!join) 2294 return; 2295 2296 BUG_ON(irqs_disabled()); 2297 double_lock_irq(&my_grp->lock, &grp->lock); 2298 2299 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { 2300 my_grp->faults[i] -= p->numa_faults[i]; 2301 grp->faults[i] += p->numa_faults[i]; 2302 } 2303 my_grp->total_faults -= p->total_numa_faults; 2304 grp->total_faults += p->total_numa_faults; 2305 2306 my_grp->nr_tasks--; 2307 grp->nr_tasks++; 2308 2309 spin_unlock(&my_grp->lock); 2310 spin_unlock_irq(&grp->lock); 2311 2312 rcu_assign_pointer(p->numa_group, grp); 2313 2314 put_numa_group(my_grp); 2315 return; 2316 2317 no_join: 2318 rcu_read_unlock(); 2319 return; 2320 } 2321 2322 void task_numa_free(struct task_struct *p) 2323 { 2324 struct numa_group *grp = p->numa_group; 2325 void *numa_faults = p->numa_faults; 2326 unsigned long flags; 2327 int i; 2328 2329 if (grp) { 2330 spin_lock_irqsave(&grp->lock, flags); 2331 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2332 grp->faults[i] -= p->numa_faults[i]; 2333 grp->total_faults -= p->total_numa_faults; 2334 2335 grp->nr_tasks--; 2336 spin_unlock_irqrestore(&grp->lock, flags); 2337 RCU_INIT_POINTER(p->numa_group, NULL); 2338 put_numa_group(grp); 2339 } 2340 2341 p->numa_faults = NULL; 2342 kfree(numa_faults); 2343 } 2344 2345 /* 2346 * Got a PROT_NONE fault for a page on @node. 2347 */ 2348 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) 2349 { 2350 struct task_struct *p = current; 2351 bool migrated = flags & TNF_MIGRATED; 2352 int cpu_node = task_node(current); 2353 int local = !!(flags & TNF_FAULT_LOCAL); 2354 struct numa_group *ng; 2355 int priv; 2356 2357 if (!static_branch_likely(&sched_numa_balancing)) 2358 return; 2359 2360 /* for example, ksmd faulting in a user's mm */ 2361 if (!p->mm) 2362 return; 2363 2364 /* Allocate buffer to track faults on a per-node basis */ 2365 if (unlikely(!p->numa_faults)) { 2366 int size = sizeof(*p->numa_faults) * 2367 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; 2368 2369 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); 2370 if (!p->numa_faults) 2371 return; 2372 2373 p->total_numa_faults = 0; 2374 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2375 } 2376 2377 /* 2378 * First accesses are treated as private, otherwise consider accesses 2379 * to be private if the accessing pid has not changed 2380 */ 2381 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { 2382 priv = 1; 2383 } else { 2384 priv = cpupid_match_pid(p, last_cpupid); 2385 if (!priv && !(flags & TNF_NO_GROUP)) 2386 task_numa_group(p, last_cpupid, flags, &priv); 2387 } 2388 2389 /* 2390 * If a workload spans multiple NUMA nodes, a shared fault that 2391 * occurs wholly within the set of nodes that the workload is 2392 * actively using should be counted as local. This allows the 2393 * scan rate to slow down when a workload has settled down. 
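 *
 * Purely illustrative example: a numa_group with active_nodes == 2 covering
 * nodes 0 and 1; a shared fault taken from a CPU on node 0 against memory on
 * node 1 is still accounted in numa_faults_locality[1], so
 * update_task_scan_period() keeps stretching the scan period rather than
 * treating the access as remote.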
2394 */ 2395 ng = p->numa_group; 2396 if (!priv && !local && ng && ng->active_nodes > 1 && 2397 numa_is_active_node(cpu_node, ng) && 2398 numa_is_active_node(mem_node, ng)) 2399 local = 1; 2400 2401 /* 2402 * Retry to migrate task to preferred node periodically, in case it 2403 * previously failed, or the scheduler moved us. 2404 */ 2405 if (time_after(jiffies, p->numa_migrate_retry)) { 2406 task_numa_placement(p); 2407 numa_migrate_preferred(p); 2408 } 2409 2410 if (migrated) 2411 p->numa_pages_migrated += pages; 2412 if (flags & TNF_MIGRATE_FAIL) 2413 p->numa_faults_locality[2] += pages; 2414 2415 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; 2416 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; 2417 p->numa_faults_locality[local] += pages; 2418 } 2419 2420 static void reset_ptenuma_scan(struct task_struct *p) 2421 { 2422 /* 2423 * We only did a read acquisition of the mmap sem, so 2424 * p->mm->numa_scan_seq is written to without exclusive access 2425 * and the update is not guaranteed to be atomic. That's not 2426 * much of an issue though, since this is just used for 2427 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not 2428 * expensive, to avoid any form of compiler optimizations: 2429 */ 2430 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); 2431 p->mm->numa_scan_offset = 0; 2432 } 2433 2434 /* 2435 * The expensive part of numa migration is done from task_work context. 2436 * Triggered from task_tick_numa(). 2437 */ 2438 void task_numa_work(struct callback_head *work) 2439 { 2440 unsigned long migrate, next_scan, now = jiffies; 2441 struct task_struct *p = current; 2442 struct mm_struct *mm = p->mm; 2443 u64 runtime = p->se.sum_exec_runtime; 2444 struct vm_area_struct *vma; 2445 unsigned long start, end; 2446 unsigned long nr_pte_updates = 0; 2447 long pages, virtpages; 2448 2449 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); 2450 2451 work->next = work; /* protect against double add */ 2452 /* 2453 * Who cares about NUMA placement when they're dying. 2454 * 2455 * NOTE: make sure not to dereference p->mm before this check, 2456 * exit_task_work() happens _after_ exit_mm() so we could be called 2457 * without p->mm even though we still had it when we enqueued this 2458 * work. 2459 */ 2460 if (p->flags & PF_EXITING) 2461 return; 2462 2463 if (!mm->numa_next_scan) { 2464 mm->numa_next_scan = now + 2465 msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2466 } 2467 2468 /* 2469 * Enforce maximal scan/migration frequency.. 2470 */ 2471 migrate = mm->numa_next_scan; 2472 if (time_before(now, migrate)) 2473 return; 2474 2475 if (p->numa_scan_period == 0) { 2476 p->numa_scan_period_max = task_scan_max(p); 2477 p->numa_scan_period = task_scan_start(p); 2478 } 2479 2480 next_scan = now + msecs_to_jiffies(p->numa_scan_period); 2481 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) 2482 return; 2483 2484 /* 2485 * Delay this task enough that another task of this mm will likely win 2486 * the next time around. 
2487 */ 2488 p->node_stamp += 2 * TICK_NSEC; 2489 2490 start = mm->numa_scan_offset; 2491 pages = sysctl_numa_balancing_scan_size; 2492 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ 2493 virtpages = pages * 8; /* Scan up to this much virtual space */ 2494 if (!pages) 2495 return; 2496 2497 2498 if (!down_read_trylock(&mm->mmap_sem)) 2499 return; 2500 vma = find_vma(mm, start); 2501 if (!vma) { 2502 reset_ptenuma_scan(p); 2503 start = 0; 2504 vma = mm->mmap; 2505 } 2506 for (; vma; vma = vma->vm_next) { 2507 if (!vma_migratable(vma) || !vma_policy_mof(vma) || 2508 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { 2509 continue; 2510 } 2511 2512 /* 2513 * Shared library pages mapped by multiple processes are not 2514 * migrated as it is expected they are cache replicated. Avoid 2515 * hinting faults in read-only file-backed mappings or the vdso 2516 * as migrating the pages will be of marginal benefit. 2517 */ 2518 if (!vma->vm_mm || 2519 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 2520 continue; 2521 2522 /* 2523 * Skip inaccessible VMAs to avoid any confusion between 2524 * PROT_NONE and NUMA hinting ptes 2525 */ 2526 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) 2527 continue; 2528 2529 do { 2530 start = max(start, vma->vm_start); 2531 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 2532 end = min(end, vma->vm_end); 2533 nr_pte_updates = change_prot_numa(vma, start, end); 2534 2535 /* 2536 * Try to scan sysctl_numa_balancing_size worth of 2537 * hpages that have at least one present PTE that 2538 * is not already pte-numa. If the VMA contains 2539 * areas that are unused or already full of prot_numa 2540 * PTEs, scan up to virtpages, to skip through those 2541 * areas faster. 2542 */ 2543 if (nr_pte_updates) 2544 pages -= (end - start) >> PAGE_SHIFT; 2545 virtpages -= (end - start) >> PAGE_SHIFT; 2546 2547 start = end; 2548 if (pages <= 0 || virtpages <= 0) 2549 goto out; 2550 2551 cond_resched(); 2552 } while (end != vma->vm_end); 2553 } 2554 2555 out: 2556 /* 2557 * It is possible to reach the end of the VMA list but the last few 2558 * VMAs are not guaranteed to the vma_migratable. If they are not, we 2559 * would find the !migratable VMA on the next scan but not reset the 2560 * scanner to the start so check it now. 2561 */ 2562 if (vma) 2563 mm->numa_scan_offset = start; 2564 else 2565 reset_ptenuma_scan(p); 2566 up_read(&mm->mmap_sem); 2567 2568 /* 2569 * Make sure tasks use at least 32x as much time to run other code 2570 * than they used here, to limit NUMA PTE scanning overhead to 3% max. 2571 * Usually update_task_scan_period slows down scanning enough; on an 2572 * overloaded system we need to limit overhead on a per task basis. 2573 */ 2574 if (unlikely(p->se.sum_exec_runtime != runtime)) { 2575 u64 diff = p->se.sum_exec_runtime - runtime; 2576 p->node_stamp += 32 * diff; 2577 } 2578 } 2579 2580 /* 2581 * Drive the periodic memory faults.. 2582 */ 2583 void task_tick_numa(struct rq *rq, struct task_struct *curr) 2584 { 2585 struct callback_head *work = &curr->numa_work; 2586 u64 period, now; 2587 2588 /* 2589 * We don't care about NUMA placement if we don't have memory. 2590 */ 2591 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) 2592 return; 2593 2594 /* 2595 * Using runtime rather than walltime has the dual advantage that 2596 * we (mostly) drive the selection from busy threads and that the 2597 * task needs to have done some actual work before we bother with 2598 * NUMA placement. 
2599 */ 2600 now = curr->se.sum_exec_runtime; 2601 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; 2602 2603 if (now > curr->node_stamp + period) { 2604 if (!curr->node_stamp) 2605 curr->numa_scan_period = task_scan_start(curr); 2606 curr->node_stamp += period; 2607 2608 if (!time_before(jiffies, curr->mm->numa_next_scan)) { 2609 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */ 2610 task_work_add(curr, work, true); 2611 } 2612 } 2613 } 2614 2615 static void update_scan_period(struct task_struct *p, int new_cpu) 2616 { 2617 int src_nid = cpu_to_node(task_cpu(p)); 2618 int dst_nid = cpu_to_node(new_cpu); 2619 2620 if (!static_branch_likely(&sched_numa_balancing)) 2621 return; 2622 2623 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) 2624 return; 2625 2626 if (src_nid == dst_nid) 2627 return; 2628 2629 /* 2630 * Allow resets if faults have been trapped before one scan 2631 * has completed. This is most likely due to a new task that 2632 * is pulled cross-node due to wakeups or load balancing. 2633 */ 2634 if (p->numa_scan_seq) { 2635 /* 2636 * Avoid scan adjustments if moving to the preferred 2637 * node or if the task was not previously running on 2638 * the preferred node. 2639 */ 2640 if (dst_nid == p->numa_preferred_nid || 2641 (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid)) 2642 return; 2643 } 2644 2645 p->numa_scan_period = task_scan_start(p); 2646 } 2647 2648 #else 2649 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2650 { 2651 } 2652 2653 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) 2654 { 2655 } 2656 2657 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) 2658 { 2659 } 2660 2661 static inline void update_scan_period(struct task_struct *p, int new_cpu) 2662 { 2663 } 2664 2665 #endif /* CONFIG_NUMA_BALANCING */ 2666 2667 static void 2668 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2669 { 2670 update_load_add(&cfs_rq->load, se->load.weight); 2671 if (!parent_entity(se)) 2672 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); 2673 #ifdef CONFIG_SMP 2674 if (entity_is_task(se)) { 2675 struct rq *rq = rq_of(cfs_rq); 2676 2677 account_numa_enqueue(rq, task_of(se)); 2678 list_add(&se->group_node, &rq->cfs_tasks); 2679 } 2680 #endif 2681 cfs_rq->nr_running++; 2682 } 2683 2684 static void 2685 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2686 { 2687 update_load_sub(&cfs_rq->load, se->load.weight); 2688 if (!parent_entity(se)) 2689 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); 2690 #ifdef CONFIG_SMP 2691 if (entity_is_task(se)) { 2692 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); 2693 list_del_init(&se->group_node); 2694 } 2695 #endif 2696 cfs_rq->nr_running--; 2697 } 2698 2699 /* 2700 * Signed add and clamp on underflow. 2701 * 2702 * Explicitly do a load-store to ensure the intermediate value never hits 2703 * memory. This allows lockless observations without ever seeing the negative 2704 * values. 2705 */ 2706 #define add_positive(_ptr, _val) do { \ 2707 typeof(_ptr) ptr = (_ptr); \ 2708 typeof(_val) val = (_val); \ 2709 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 2710 \ 2711 res = var + val; \ 2712 \ 2713 if (val < 0 && res > var) \ 2714 res = 0; \ 2715 \ 2716 WRITE_ONCE(*ptr, res); \ 2717 } while (0) 2718 2719 /* 2720 * Unsigned subtract and clamp on underflow. 2721 * 2722 * Explicitly do a load-store to ensure the intermediate value never hits 2723 * memory. 
This allows lockless observations without ever seeing the negative 2724 * values. 2725 */ 2726 #define sub_positive(_ptr, _val) do { \ 2727 typeof(_ptr) ptr = (_ptr); \ 2728 typeof(*ptr) val = (_val); \ 2729 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 2730 res = var - val; \ 2731 if (res > var) \ 2732 res = 0; \ 2733 WRITE_ONCE(*ptr, res); \ 2734 } while (0) 2735 2736 /* 2737 * Remove and clamp on negative, from a local variable. 2738 * 2739 * A variant of sub_positive(), which does not use explicit load-store 2740 * and is thus optimized for local variable updates. 2741 */ 2742 #define lsub_positive(_ptr, _val) do { \ 2743 typeof(_ptr) ptr = (_ptr); \ 2744 *ptr -= min_t(typeof(*ptr), *ptr, _val); \ 2745 } while (0) 2746 2747 #ifdef CONFIG_SMP 2748 static inline void 2749 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 2750 { 2751 cfs_rq->runnable_weight += se->runnable_weight; 2752 2753 cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg; 2754 cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum; 2755 } 2756 2757 static inline void 2758 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 2759 { 2760 cfs_rq->runnable_weight -= se->runnable_weight; 2761 2762 sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg); 2763 sub_positive(&cfs_rq->avg.runnable_load_sum, 2764 se_runnable(se) * se->avg.runnable_load_sum); 2765 } 2766 2767 static inline void 2768 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 2769 { 2770 cfs_rq->avg.load_avg += se->avg.load_avg; 2771 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; 2772 } 2773 2774 static inline void 2775 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 2776 { 2777 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); 2778 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); 2779 } 2780 #else 2781 static inline void 2782 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 2783 static inline void 2784 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 2785 static inline void 2786 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 2787 static inline void 2788 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } 2789 #endif 2790 2791 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, 2792 unsigned long weight, unsigned long runnable) 2793 { 2794 if (se->on_rq) { 2795 /* commit outstanding execution time */ 2796 if (cfs_rq->curr == se) 2797 update_curr(cfs_rq); 2798 account_entity_dequeue(cfs_rq, se); 2799 dequeue_runnable_load_avg(cfs_rq, se); 2800 } 2801 dequeue_load_avg(cfs_rq, se); 2802 2803 se->runnable_weight = runnable; 2804 update_load_set(&se->load, weight); 2805 2806 #ifdef CONFIG_SMP 2807 do { 2808 u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib; 2809 2810 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); 2811 se->avg.runnable_load_avg = 2812 div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider); 2813 } while (0); 2814 #endif 2815 2816 enqueue_load_avg(cfs_rq, se); 2817 if (se->on_rq) { 2818 account_entity_enqueue(cfs_rq, se); 2819 enqueue_runnable_load_avg(cfs_rq, se); 2820 } 2821 } 2822 2823 void reweight_task(struct task_struct *p, int prio) 2824 { 2825 struct sched_entity *se = &p->se; 2826 struct cfs_rq *cfs_rq = cfs_rq_of(se); 2827 struct load_weight *load = &se->load; 2828 unsigned long weight = scale_load(sched_prio_to_weight[prio]); 
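	/*
	 * Note: prio indexes sched_prio_to_weight[] / sched_prio_to_wmult[];
	 * e.g. a nice-0 task maps to index 20 and weight 1024, i.e.
	 * NICE_0_LOAD once run through scale_load().
	 */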
2829
2830 	reweight_entity(cfs_rq, se, weight, weight);
2831 	load->inv_weight = sched_prio_to_wmult[prio];
2832 }
2833
2834 #ifdef CONFIG_FAIR_GROUP_SCHED
2835 #ifdef CONFIG_SMP
2836 /*
2837  * All this does is approximate the hierarchical proportion which includes that
2838  * global sum we all love to hate.
2839  *
2840  * That is, the weight of a group entity is the proportional share of the
2841  * group weight based on the group runqueue weights. That is:
2842  *
2843  *                     tg->weight * grq->load.weight
2844  *   ge->load.weight = -----------------------------               (1)
2845  *                       \Sum grq->load.weight
2846  *
2847  * Now, because computing that sum is prohibitively expensive (been there,
2848  * done that) we approximate it with this average stuff. The average
2849  * moves slower and therefore the approximation is cheaper and more stable.
2850  *
2851  * So instead of the above, we substitute:
2852  *
2853  *   grq->load.weight -> grq->avg.load_avg                         (2)
2854  *
2855  * which yields the following:
2856  *
2857  *                     tg->weight * grq->avg.load_avg
2858  *   ge->load.weight = ------------------------------              (3)
2859  *                             tg->load_avg
2860  *
2861  * Where: tg->load_avg ~= \Sum grq->avg.load_avg
2862  *
2863  * That is shares_avg, and it is right (given the approximation (2)).
2864  *
2865  * The problem with it is that because the average is slow -- it was designed
2866  * to be exactly that of course -- this leads to transients in boundary
2867  * conditions. Specifically, the case where the group was idle and we start
2868  * one task. It takes time for our CPU's grq->avg.load_avg to build up,
2869  * yielding bad latency etc.
2870  *
2871  * Now, in that special case (1) reduces to:
2872  *
2873  *                     tg->weight * grq->load.weight
2874  *   ge->load.weight = ----------------------------- = tg->weight   (4)
2875  *                         grq->load.weight
2876  *
2877  * That is, the sum collapses because all other CPUs are idle; the UP scenario.
2878  *
2879  * So what we do is modify our approximation (3) to approach (4) in the (near)
2880  * UP case, like:
2881  *
2882  *   ge->load.weight =
2883  *
2884  *              tg->weight * grq->load.weight
2885  *     ---------------------------------------------------         (5)
2886  *     tg->load_avg - grq->avg.load_avg + grq->load.weight
2887  *
2888  * But because grq->load.weight can drop to 0, resulting in a divide by zero,
2889  * we need to use grq->avg.load_avg as its lower bound, which then gives:
2890  *
2891  *
2892  *                     tg->weight * grq->load.weight
2893  *   ge->load.weight = -----------------------------               (6)
2894  *                             tg_load_avg'
2895  *
2896  * Where:
2897  *
2898  *   tg_load_avg' = tg->load_avg - grq->avg.load_avg +
2899  *                  max(grq->load.weight, grq->avg.load_avg)
2900  *
2901  * And that is shares_weight and is icky. In the (near) UP case it approaches
2902  * (4) while in the normal case it approaches (3). It consistently
2903  * overestimates the ge->load.weight and therefore:
2904  *
2905  *   \Sum ge->load.weight >= tg->weight
2906  *
2907  * hence icky!
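 *
 * A rough feel for the numbers (illustrative only): with tg->weight = 1024,
 * tg->load_avg = 1000, and a grq that just received its first runnable task,
 * so grq->load.weight = 1024 while grq->avg.load_avg is still only 100,
 * (3) gives 1024 * 100 / 1000 ~= 102, whereas (6) uses
 * tg_load_avg' = 1000 - 100 + 1024 = 1924 and gives
 * 1024 * 1024 / 1924 ~= 545, much closer to the UP answer (4).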
2908 */ 2909 static long calc_group_shares(struct cfs_rq *cfs_rq) 2910 { 2911 long tg_weight, tg_shares, load, shares; 2912 struct task_group *tg = cfs_rq->tg; 2913 2914 tg_shares = READ_ONCE(tg->shares); 2915 2916 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); 2917 2918 tg_weight = atomic_long_read(&tg->load_avg); 2919 2920 /* Ensure tg_weight >= load */ 2921 tg_weight -= cfs_rq->tg_load_avg_contrib; 2922 tg_weight += load; 2923 2924 shares = (tg_shares * load); 2925 if (tg_weight) 2926 shares /= tg_weight; 2927 2928 /* 2929 * MIN_SHARES has to be unscaled here to support per-CPU partitioning 2930 * of a group with small tg->shares value. It is a floor value which is 2931 * assigned as a minimum load.weight to the sched_entity representing 2932 * the group on a CPU. 2933 * 2934 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 2935 * on an 8-core system with 8 tasks each runnable on one CPU shares has 2936 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In 2937 * case no task is runnable on a CPU MIN_SHARES=2 should be returned 2938 * instead of 0. 2939 */ 2940 return clamp_t(long, shares, MIN_SHARES, tg_shares); 2941 } 2942 2943 /* 2944 * This calculates the effective runnable weight for a group entity based on 2945 * the group entity weight calculated above. 2946 * 2947 * Because of the above approximation (2), our group entity weight is 2948 * an load_avg based ratio (3). This means that it includes blocked load and 2949 * does not represent the runnable weight. 2950 * 2951 * Approximate the group entity's runnable weight per ratio from the group 2952 * runqueue: 2953 * 2954 * grq->avg.runnable_load_avg 2955 * ge->runnable_weight = ge->load.weight * -------------------------- (7) 2956 * grq->avg.load_avg 2957 * 2958 * However, analogous to above, since the avg numbers are slow, this leads to 2959 * transients in the from-idle case. Instead we use: 2960 * 2961 * ge->runnable_weight = ge->load.weight * 2962 * 2963 * max(grq->avg.runnable_load_avg, grq->runnable_weight) 2964 * ----------------------------------------------------- (8) 2965 * max(grq->avg.load_avg, grq->load.weight) 2966 * 2967 * Where these max() serve both to use the 'instant' values to fix the slow 2968 * from-idle and avoid the /0 on to-idle, similar to (6). 2969 */ 2970 static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares) 2971 { 2972 long runnable, load_avg; 2973 2974 load_avg = max(cfs_rq->avg.load_avg, 2975 scale_load_down(cfs_rq->load.weight)); 2976 2977 runnable = max(cfs_rq->avg.runnable_load_avg, 2978 scale_load_down(cfs_rq->runnable_weight)); 2979 2980 runnable *= shares; 2981 if (load_avg) 2982 runnable /= load_avg; 2983 2984 return clamp_t(long, runnable, MIN_SHARES, shares); 2985 } 2986 #endif /* CONFIG_SMP */ 2987 2988 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); 2989 2990 /* 2991 * Recomputes the group entity based on the current state of its group 2992 * runqueue. 
2993 */ 2994 static void update_cfs_group(struct sched_entity *se) 2995 { 2996 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 2997 long shares, runnable; 2998 2999 if (!gcfs_rq) 3000 return; 3001 3002 if (throttled_hierarchy(gcfs_rq)) 3003 return; 3004 3005 #ifndef CONFIG_SMP 3006 runnable = shares = READ_ONCE(gcfs_rq->tg->shares); 3007 3008 if (likely(se->load.weight == shares)) 3009 return; 3010 #else 3011 shares = calc_group_shares(gcfs_rq); 3012 runnable = calc_group_runnable(gcfs_rq, shares); 3013 #endif 3014 3015 reweight_entity(cfs_rq_of(se), se, shares, runnable); 3016 } 3017 3018 #else /* CONFIG_FAIR_GROUP_SCHED */ 3019 static inline void update_cfs_group(struct sched_entity *se) 3020 { 3021 } 3022 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3023 3024 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) 3025 { 3026 struct rq *rq = rq_of(cfs_rq); 3027 3028 if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) { 3029 /* 3030 * There are a few boundary cases this might miss but it should 3031 * get called often enough that that should (hopefully) not be 3032 * a real problem. 3033 * 3034 * It will not get called when we go idle, because the idle 3035 * thread is a different class (!fair), nor will the utilization 3036 * number include things like RT tasks. 3037 * 3038 * As is, the util number is not freq-invariant (we'd have to 3039 * implement arch_scale_freq_capacity() for that). 3040 * 3041 * See cpu_util(). 3042 */ 3043 cpufreq_update_util(rq, flags); 3044 } 3045 } 3046 3047 #ifdef CONFIG_SMP 3048 #ifdef CONFIG_FAIR_GROUP_SCHED 3049 /** 3050 * update_tg_load_avg - update the tg's load avg 3051 * @cfs_rq: the cfs_rq whose avg changed 3052 * @force: update regardless of how small the difference 3053 * 3054 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. 3055 * However, because tg->load_avg is a global value there are performance 3056 * considerations. 3057 * 3058 * In order to avoid having to look at the other cfs_rq's, we use a 3059 * differential update where we store the last value we propagated. This in 3060 * turn allows skipping updates if the differential is 'small'. 3061 * 3062 * Updating tg's load_avg is necessary before update_cfs_share(). 3063 */ 3064 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) 3065 { 3066 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; 3067 3068 /* 3069 * No need to update load_avg for root_task_group as it is not used. 3070 */ 3071 if (cfs_rq->tg == &root_task_group) 3072 return; 3073 3074 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { 3075 atomic_long_add(delta, &cfs_rq->tg->load_avg); 3076 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; 3077 } 3078 } 3079 3080 /* 3081 * Called within set_task_rq() right before setting a task's CPU. The 3082 * caller only guarantees p->pi_lock is held; no other assumptions, 3083 * including the state of rq->lock, should be made. 3084 */ 3085 void set_task_rq_fair(struct sched_entity *se, 3086 struct cfs_rq *prev, struct cfs_rq *next) 3087 { 3088 u64 p_last_update_time; 3089 u64 n_last_update_time; 3090 3091 if (!sched_feat(ATTACH_AGE_LOAD)) 3092 return; 3093 3094 /* 3095 * We are supposed to update the task to "current" time, then its up to 3096 * date and ready to go to new CPU/cfs_rq. But we have difficulty in 3097 * getting what current time is, so simply throw away the out-of-date 3098 * time. This will result in the wakee task is less decayed, but giving 3099 * the wakee more load sounds not bad. 
3100 */ 3101 if (!(se->avg.last_update_time && prev)) 3102 return; 3103 3104 #ifndef CONFIG_64BIT 3105 { 3106 u64 p_last_update_time_copy; 3107 u64 n_last_update_time_copy; 3108 3109 do { 3110 p_last_update_time_copy = prev->load_last_update_time_copy; 3111 n_last_update_time_copy = next->load_last_update_time_copy; 3112 3113 smp_rmb(); 3114 3115 p_last_update_time = prev->avg.last_update_time; 3116 n_last_update_time = next->avg.last_update_time; 3117 3118 } while (p_last_update_time != p_last_update_time_copy || 3119 n_last_update_time != n_last_update_time_copy); 3120 } 3121 #else 3122 p_last_update_time = prev->avg.last_update_time; 3123 n_last_update_time = next->avg.last_update_time; 3124 #endif 3125 __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se); 3126 se->avg.last_update_time = n_last_update_time; 3127 } 3128 3129 3130 /* 3131 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to 3132 * propagate its contribution. The key to this propagation is the invariant 3133 * that for each group: 3134 * 3135 * ge->avg == grq->avg (1) 3136 * 3137 * _IFF_ we look at the pure running and runnable sums. Because they 3138 * represent the very same entity, just at different points in the hierarchy. 3139 * 3140 * Per the above update_tg_cfs_util() is trivial and simply copies the running 3141 * sum over (but still wrong, because the group entity and group rq do not have 3142 * their PELT windows aligned). 3143 * 3144 * However, update_tg_cfs_runnable() is more complex. So we have: 3145 * 3146 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2) 3147 * 3148 * And since, like util, the runnable part should be directly transferable, 3149 * the following would _appear_ to be the straight forward approach: 3150 * 3151 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3) 3152 * 3153 * And per (1) we have: 3154 * 3155 * ge->avg.runnable_avg == grq->avg.runnable_avg 3156 * 3157 * Which gives: 3158 * 3159 * ge->load.weight * grq->avg.load_avg 3160 * ge->avg.load_avg = ----------------------------------- (4) 3161 * grq->load.weight 3162 * 3163 * Except that is wrong! 3164 * 3165 * Because while for entities historical weight is not important and we 3166 * really only care about our future and therefore can consider a pure 3167 * runnable sum, runqueues can NOT do this. 3168 * 3169 * We specifically want runqueues to have a load_avg that includes 3170 * historical weights. Those represent the blocked load, the load we expect 3171 * to (shortly) return to us. This only works by keeping the weights as 3172 * integral part of the sum. We therefore cannot decompose as per (3). 3173 * 3174 * Another reason this doesn't work is that runnable isn't a 0-sum entity. 3175 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the 3176 * rq itself is runnable anywhere between 2/3 and 1 depending on how the 3177 * runnable section of these tasks overlap (or not). If they were to perfectly 3178 * align the rq as a whole would be runnable 2/3 of the time. If however we 3179 * always have at least 1 runnable task, the rq as a whole is always runnable. 3180 * 3181 * So we'll have to approximate.. :/ 3182 * 3183 * Given the constraint: 3184 * 3185 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX 3186 * 3187 * We can construct a rule that adds runnable to a rq by assuming minimal 3188 * overlap. 
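 *
 * ("Minimal overlap" meaning the added runnable time is assumed to stack on
 * top of what the rq already has; update_tg_cfs_runnable() below implements
 * this by adding the propagated sum to the group entity's own load_sum and
 * clipping the result at LOAD_AVG_MAX.)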
3189 * 3190 * On removal, we'll assume each task is equally runnable; which yields: 3191 * 3192 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight 3193 * 3194 * XXX: only do this for the part of runnable > running ? 3195 * 3196 */ 3197 3198 static inline void 3199 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3200 { 3201 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; 3202 3203 /* Nothing to update */ 3204 if (!delta) 3205 return; 3206 3207 /* 3208 * The relation between sum and avg is: 3209 * 3210 * LOAD_AVG_MAX - 1024 + sa->period_contrib 3211 * 3212 * however, the PELT windows are not aligned between grq and gse. 3213 */ 3214 3215 /* Set new sched_entity's utilization */ 3216 se->avg.util_avg = gcfs_rq->avg.util_avg; 3217 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX; 3218 3219 /* Update parent cfs_rq utilization */ 3220 add_positive(&cfs_rq->avg.util_avg, delta); 3221 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX; 3222 } 3223 3224 static inline void 3225 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) 3226 { 3227 long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; 3228 unsigned long runnable_load_avg, load_avg; 3229 u64 runnable_load_sum, load_sum = 0; 3230 s64 delta_sum; 3231 3232 if (!runnable_sum) 3233 return; 3234 3235 gcfs_rq->prop_runnable_sum = 0; 3236 3237 if (runnable_sum >= 0) { 3238 /* 3239 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until 3240 * the CPU is saturated running == runnable. 3241 */ 3242 runnable_sum += se->avg.load_sum; 3243 runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX); 3244 } else { 3245 /* 3246 * Estimate the new unweighted runnable_sum of the gcfs_rq by 3247 * assuming all tasks are equally runnable. 
3248 */ 3249 if (scale_load_down(gcfs_rq->load.weight)) { 3250 load_sum = div_s64(gcfs_rq->avg.load_sum, 3251 scale_load_down(gcfs_rq->load.weight)); 3252 } 3253 3254 /* But make sure to not inflate se's runnable */ 3255 runnable_sum = min(se->avg.load_sum, load_sum); 3256 } 3257 3258 /* 3259 * runnable_sum can't be lower than running_sum 3260 * As running sum is scale with CPU capacity wehreas the runnable sum 3261 * is not we rescale running_sum 1st 3262 */ 3263 running_sum = se->avg.util_sum / 3264 arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq))); 3265 runnable_sum = max(runnable_sum, running_sum); 3266 3267 load_sum = (s64)se_weight(se) * runnable_sum; 3268 load_avg = div_s64(load_sum, LOAD_AVG_MAX); 3269 3270 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; 3271 delta_avg = load_avg - se->avg.load_avg; 3272 3273 se->avg.load_sum = runnable_sum; 3274 se->avg.load_avg = load_avg; 3275 add_positive(&cfs_rq->avg.load_avg, delta_avg); 3276 add_positive(&cfs_rq->avg.load_sum, delta_sum); 3277 3278 runnable_load_sum = (s64)se_runnable(se) * runnable_sum; 3279 runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX); 3280 delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum; 3281 delta_avg = runnable_load_avg - se->avg.runnable_load_avg; 3282 3283 se->avg.runnable_load_sum = runnable_sum; 3284 se->avg.runnable_load_avg = runnable_load_avg; 3285 3286 if (se->on_rq) { 3287 add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg); 3288 add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum); 3289 } 3290 } 3291 3292 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) 3293 { 3294 cfs_rq->propagate = 1; 3295 cfs_rq->prop_runnable_sum += runnable_sum; 3296 } 3297 3298 /* Update task and its cfs_rq load average */ 3299 static inline int propagate_entity_load_avg(struct sched_entity *se) 3300 { 3301 struct cfs_rq *cfs_rq, *gcfs_rq; 3302 3303 if (entity_is_task(se)) 3304 return 0; 3305 3306 gcfs_rq = group_cfs_rq(se); 3307 if (!gcfs_rq->propagate) 3308 return 0; 3309 3310 gcfs_rq->propagate = 0; 3311 3312 cfs_rq = cfs_rq_of(se); 3313 3314 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); 3315 3316 update_tg_cfs_util(cfs_rq, se, gcfs_rq); 3317 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); 3318 3319 return 1; 3320 } 3321 3322 /* 3323 * Check if we need to update the load and the utilization of a blocked 3324 * group_entity: 3325 */ 3326 static inline bool skip_blocked_update(struct sched_entity *se) 3327 { 3328 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3329 3330 /* 3331 * If sched_entity still have not zero load or utilization, we have to 3332 * decay it: 3333 */ 3334 if (se->avg.load_avg || se->avg.util_avg) 3335 return false; 3336 3337 /* 3338 * If there is a pending propagation, we have to update the load and 3339 * the utilization of the sched_entity: 3340 */ 3341 if (gcfs_rq->propagate) 3342 return false; 3343 3344 /* 3345 * Otherwise, the load and the utilization of the sched_entity is 3346 * already zero and there is no pending propagation, so it will be a 3347 * waste of time to try to decay it: 3348 */ 3349 return true; 3350 } 3351 3352 #else /* CONFIG_FAIR_GROUP_SCHED */ 3353 3354 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} 3355 3356 static inline int propagate_entity_load_avg(struct sched_entity *se) 3357 { 3358 return 0; 3359 } 3360 3361 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} 3362 3363 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3364 3365 /** 
3366 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages 3367 * @now: current time, as per cfs_rq_clock_task() 3368 * @cfs_rq: cfs_rq to update 3369 * 3370 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) 3371 * avg. The immediate corollary is that all (fair) tasks must be attached, see 3372 * post_init_entity_util_avg(). 3373 * 3374 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example. 3375 * 3376 * Returns true if the load decayed or we removed load. 3377 * 3378 * Since both these conditions indicate a changed cfs_rq->avg.load we should 3379 * call update_tg_load_avg() when this function returns true. 3380 */ 3381 static inline int 3382 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 3383 { 3384 unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0; 3385 struct sched_avg *sa = &cfs_rq->avg; 3386 int decayed = 0; 3387 3388 if (cfs_rq->removed.nr) { 3389 unsigned long r; 3390 u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib; 3391 3392 raw_spin_lock(&cfs_rq->removed.lock); 3393 swap(cfs_rq->removed.util_avg, removed_util); 3394 swap(cfs_rq->removed.load_avg, removed_load); 3395 swap(cfs_rq->removed.runnable_sum, removed_runnable_sum); 3396 cfs_rq->removed.nr = 0; 3397 raw_spin_unlock(&cfs_rq->removed.lock); 3398 3399 r = removed_load; 3400 sub_positive(&sa->load_avg, r); 3401 sub_positive(&sa->load_sum, r * divider); 3402 3403 r = removed_util; 3404 sub_positive(&sa->util_avg, r); 3405 sub_positive(&sa->util_sum, r * divider); 3406 3407 add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum); 3408 3409 decayed = 1; 3410 } 3411 3412 decayed |= __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq); 3413 3414 #ifndef CONFIG_64BIT 3415 smp_wmb(); 3416 cfs_rq->load_last_update_time_copy = sa->last_update_time; 3417 #endif 3418 3419 if (decayed) 3420 cfs_rq_util_change(cfs_rq, 0); 3421 3422 return decayed; 3423 } 3424 3425 /** 3426 * attach_entity_load_avg - attach this entity to its cfs_rq load avg 3427 * @cfs_rq: cfs_rq to attach to 3428 * @se: sched_entity to attach 3429 * @flags: migration hints 3430 * 3431 * Must call update_cfs_rq_load_avg() before this, since we rely on 3432 * cfs_rq->avg.last_update_time being current. 3433 */ 3434 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3435 { 3436 u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib; 3437 3438 /* 3439 * When we attach the @se to the @cfs_rq, we must align the decay 3440 * window because without that, really weird and wonderful things can 3441 * happen. 3442 * 3443 * XXX illustrate 3444 */ 3445 se->avg.last_update_time = cfs_rq->avg.last_update_time; 3446 se->avg.period_contrib = cfs_rq->avg.period_contrib; 3447 3448 /* 3449 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new 3450 * period_contrib. This isn't strictly correct, but since we're 3451 * entirely outside of the PELT hierarchy, nobody cares if we truncate 3452 * _sum a little. 
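 *
 * (A reading of the PELT convention rather than a new rule: divider is the
 * maximum _sum attainable for the current period_contrib,
 * LOAD_AVG_MAX - 1024 + avg.period_contrib, so multiplying an _avg by it
 * reconstructs a _sum consistent with the cfs_rq's current PELT window.)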
3453 */ 3454 se->avg.util_sum = se->avg.util_avg * divider; 3455 3456 se->avg.load_sum = divider; 3457 if (se_weight(se)) { 3458 se->avg.load_sum = 3459 div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); 3460 } 3461 3462 se->avg.runnable_load_sum = se->avg.load_sum; 3463 3464 enqueue_load_avg(cfs_rq, se); 3465 cfs_rq->avg.util_avg += se->avg.util_avg; 3466 cfs_rq->avg.util_sum += se->avg.util_sum; 3467 3468 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); 3469 3470 cfs_rq_util_change(cfs_rq, flags); 3471 } 3472 3473 /** 3474 * detach_entity_load_avg - detach this entity from its cfs_rq load avg 3475 * @cfs_rq: cfs_rq to detach from 3476 * @se: sched_entity to detach 3477 * 3478 * Must call update_cfs_rq_load_avg() before this, since we rely on 3479 * cfs_rq->avg.last_update_time being current. 3480 */ 3481 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3482 { 3483 dequeue_load_avg(cfs_rq, se); 3484 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); 3485 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); 3486 3487 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); 3488 3489 cfs_rq_util_change(cfs_rq, 0); 3490 } 3491 3492 /* 3493 * Optional action to be done while updating the load average 3494 */ 3495 #define UPDATE_TG 0x1 3496 #define SKIP_AGE_LOAD 0x2 3497 #define DO_ATTACH 0x4 3498 3499 /* Update task and its cfs_rq load average */ 3500 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3501 { 3502 u64 now = cfs_rq_clock_task(cfs_rq); 3503 struct rq *rq = rq_of(cfs_rq); 3504 int cpu = cpu_of(rq); 3505 int decayed; 3506 3507 /* 3508 * Track task load average for carrying it to new CPU after migrated, and 3509 * track group sched_entity load average for task_h_load calc in migration 3510 */ 3511 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) 3512 __update_load_avg_se(now, cpu, cfs_rq, se); 3513 3514 decayed = update_cfs_rq_load_avg(now, cfs_rq); 3515 decayed |= propagate_entity_load_avg(se); 3516 3517 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { 3518 3519 /* 3520 * DO_ATTACH means we're here from enqueue_entity(). 3521 * !last_update_time means we've passed through 3522 * migrate_task_rq_fair() indicating we migrated. 3523 * 3524 * IOW we're enqueueing a task on a new CPU. 3525 */ 3526 attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION); 3527 update_tg_load_avg(cfs_rq, 0); 3528 3529 } else if (decayed && (flags & UPDATE_TG)) 3530 update_tg_load_avg(cfs_rq, 0); 3531 } 3532 3533 #ifndef CONFIG_64BIT 3534 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3535 { 3536 u64 last_update_time_copy; 3537 u64 last_update_time; 3538 3539 do { 3540 last_update_time_copy = cfs_rq->load_last_update_time_copy; 3541 smp_rmb(); 3542 last_update_time = cfs_rq->avg.last_update_time; 3543 } while (last_update_time != last_update_time_copy); 3544 3545 return last_update_time; 3546 } 3547 #else 3548 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3549 { 3550 return cfs_rq->avg.last_update_time; 3551 } 3552 #endif 3553 3554 /* 3555 * Synchronize entity load avg of dequeued entity without locking 3556 * the previous rq. 
3557 */ 3558 void sync_entity_load_avg(struct sched_entity *se) 3559 { 3560 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3561 u64 last_update_time; 3562 3563 last_update_time = cfs_rq_last_update_time(cfs_rq); 3564 __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se); 3565 } 3566 3567 /* 3568 * Task first catches up with cfs_rq, and then subtract 3569 * itself from the cfs_rq (task must be off the queue now). 3570 */ 3571 void remove_entity_load_avg(struct sched_entity *se) 3572 { 3573 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3574 unsigned long flags; 3575 3576 /* 3577 * tasks cannot exit without having gone through wake_up_new_task() -> 3578 * post_init_entity_util_avg() which will have added things to the 3579 * cfs_rq, so we can remove unconditionally. 3580 * 3581 * Similarly for groups, they will have passed through 3582 * post_init_entity_util_avg() before unregister_sched_fair_group() 3583 * calls this. 3584 */ 3585 3586 sync_entity_load_avg(se); 3587 3588 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); 3589 ++cfs_rq->removed.nr; 3590 cfs_rq->removed.util_avg += se->avg.util_avg; 3591 cfs_rq->removed.load_avg += se->avg.load_avg; 3592 cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */ 3593 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); 3594 } 3595 3596 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq) 3597 { 3598 return cfs_rq->avg.runnable_load_avg; 3599 } 3600 3601 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) 3602 { 3603 return cfs_rq->avg.load_avg; 3604 } 3605 3606 static int idle_balance(struct rq *this_rq, struct rq_flags *rf); 3607 3608 static inline unsigned long task_util(struct task_struct *p) 3609 { 3610 return READ_ONCE(p->se.avg.util_avg); 3611 } 3612 3613 static inline unsigned long _task_util_est(struct task_struct *p) 3614 { 3615 struct util_est ue = READ_ONCE(p->se.avg.util_est); 3616 3617 return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED); 3618 } 3619 3620 static inline unsigned long task_util_est(struct task_struct *p) 3621 { 3622 return max(task_util(p), _task_util_est(p)); 3623 } 3624 3625 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, 3626 struct task_struct *p) 3627 { 3628 unsigned int enqueued; 3629 3630 if (!sched_feat(UTIL_EST)) 3631 return; 3632 3633 /* Update root cfs_rq's estimated utilization */ 3634 enqueued = cfs_rq->avg.util_est.enqueued; 3635 enqueued += _task_util_est(p); 3636 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); 3637 } 3638 3639 /* 3640 * Check if a (signed) value is within a specified (unsigned) margin, 3641 * based on the observation that: 3642 * 3643 * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1) 3644 * 3645 * NOTE: this only works when value + maring < INT_MAX. 3646 */ 3647 static inline bool within_margin(int value, int margin) 3648 { 3649 return ((unsigned int)(value + margin - 1) < (2 * margin - 1)); 3650 } 3651 3652 static void 3653 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) 3654 { 3655 long last_ewma_diff; 3656 struct util_est ue; 3657 3658 if (!sched_feat(UTIL_EST)) 3659 return; 3660 3661 /* Update root cfs_rq's estimated utilization */ 3662 ue.enqueued = cfs_rq->avg.util_est.enqueued; 3663 ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p)); 3664 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); 3665 3666 /* 3667 * Skip update of task's estimated utilization when the task has not 3668 * yet completed an activation, e.g. being migrated. 
3669 */ 3670 if (!task_sleep) 3671 return; 3672 3673 /* 3674 * If the PELT values haven't changed since enqueue time, 3675 * skip the util_est update. 3676 */ 3677 ue = p->se.avg.util_est; 3678 if (ue.enqueued & UTIL_AVG_UNCHANGED) 3679 return; 3680 3681 /* 3682 * Skip update of task's estimated utilization when its EWMA is 3683 * already ~1% close to its last activation value. 3684 */ 3685 ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED); 3686 last_ewma_diff = ue.enqueued - ue.ewma; 3687 if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100))) 3688 return; 3689 3690 /* 3691 * Update Task's estimated utilization 3692 * 3693 * When *p completes an activation we can consolidate another sample 3694 * of the task size. This is done by storing the current PELT value 3695 * as ue.enqueued and by using this value to update the Exponential 3696 * Weighted Moving Average (EWMA): 3697 * 3698 * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1) 3699 * = w * task_util(p) + ewma(t-1) - w * ewma(t-1) 3700 * = w * (task_util(p) - ewma(t-1)) + ewma(t-1) 3701 * = w * ( last_ewma_diff ) + ewma(t-1) 3702 * = w * (last_ewma_diff + ewma(t-1) / w) 3703 * 3704 * Where 'w' is the weight of new samples, which is configured to be 3705 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT) 3706 */ 3707 ue.ewma <<= UTIL_EST_WEIGHT_SHIFT; 3708 ue.ewma += last_ewma_diff; 3709 ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; 3710 WRITE_ONCE(p->se.avg.util_est, ue); 3711 } 3712 3713 static inline int task_fits_capacity(struct task_struct *p, long capacity) 3714 { 3715 return capacity * 1024 > task_util_est(p) * capacity_margin; 3716 } 3717 3718 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) 3719 { 3720 if (!static_branch_unlikely(&sched_asym_cpucapacity)) 3721 return; 3722 3723 if (!p) { 3724 rq->misfit_task_load = 0; 3725 return; 3726 } 3727 3728 if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { 3729 rq->misfit_task_load = 0; 3730 return; 3731 } 3732 3733 rq->misfit_task_load = task_h_load(p); 3734 } 3735 3736 #else /* CONFIG_SMP */ 3737 3738 #define UPDATE_TG 0x0 3739 #define SKIP_AGE_LOAD 0x0 3740 #define DO_ATTACH 0x0 3741 3742 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) 3743 { 3744 cfs_rq_util_change(cfs_rq, 0); 3745 } 3746 3747 static inline void remove_entity_load_avg(struct sched_entity *se) {} 3748 3749 static inline void 3750 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {} 3751 static inline void 3752 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3753 3754 static inline int idle_balance(struct rq *rq, struct rq_flags *rf) 3755 { 3756 return 0; 3757 } 3758 3759 static inline void 3760 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} 3761 3762 static inline void 3763 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, 3764 bool task_sleep) {} 3765 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} 3766 3767 #endif /* CONFIG_SMP */ 3768 3769 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) 3770 { 3771 #ifdef CONFIG_SCHED_DEBUG 3772 s64 d = se->vruntime - cfs_rq->min_vruntime; 3773 3774 if (d < 0) 3775 d = -d; 3776 3777 if (d > 3*sysctl_sched_latency) 3778 schedstat_inc(cfs_rq->nr_spread_over); 3779 #endif 3780 } 3781 3782 static void 3783 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) 3784 { 3785 u64 vruntime = cfs_rq->min_vruntime; 3786 3787 /* 3788 * The 
'current' period is already promised to the current tasks, 3789 * however the extra weight of the new task will slow them down a 3790 * little, place the new task so that it fits in the slot that 3791 * stays open at the end. 3792 */ 3793 if (initial && sched_feat(START_DEBIT)) 3794 vruntime += sched_vslice(cfs_rq, se); 3795 3796 /* sleeps up to a single latency don't count. */ 3797 if (!initial) { 3798 unsigned long thresh = sysctl_sched_latency; 3799 3800 /* 3801 * Halve their sleep time's effect, to allow 3802 * for a gentler effect of sleepers: 3803 */ 3804 if (sched_feat(GENTLE_FAIR_SLEEPERS)) 3805 thresh >>= 1; 3806 3807 vruntime -= thresh; 3808 } 3809 3810 /* ensure we never gain time by being placed backwards. */ 3811 se->vruntime = max_vruntime(se->vruntime, vruntime); 3812 } 3813 3814 static void check_enqueue_throttle(struct cfs_rq *cfs_rq); 3815 3816 static inline void check_schedstat_required(void) 3817 { 3818 #ifdef CONFIG_SCHEDSTATS 3819 if (schedstat_enabled()) 3820 return; 3821 3822 /* Force schedstat enabled if a dependent tracepoint is active */ 3823 if (trace_sched_stat_wait_enabled() || 3824 trace_sched_stat_sleep_enabled() || 3825 trace_sched_stat_iowait_enabled() || 3826 trace_sched_stat_blocked_enabled() || 3827 trace_sched_stat_runtime_enabled()) { 3828 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " 3829 "stat_blocked and stat_runtime require the " 3830 "kernel parameter schedstats=enable or " 3831 "kernel.sched_schedstats=1\n"); 3832 } 3833 #endif 3834 } 3835 3836 3837 /* 3838 * MIGRATION 3839 * 3840 * dequeue 3841 * update_curr() 3842 * update_min_vruntime() 3843 * vruntime -= min_vruntime 3844 * 3845 * enqueue 3846 * update_curr() 3847 * update_min_vruntime() 3848 * vruntime += min_vruntime 3849 * 3850 * this way the vruntime transition between RQs is done when both 3851 * min_vruntime are up-to-date. 3852 * 3853 * WAKEUP (remote) 3854 * 3855 * ->migrate_task_rq_fair() (p->state == TASK_WAKING) 3856 * vruntime -= min_vruntime 3857 * 3858 * enqueue 3859 * update_curr() 3860 * update_min_vruntime() 3861 * vruntime += min_vruntime 3862 * 3863 * this way we don't have the most up-to-date min_vruntime on the originating 3864 * CPU and an up-to-date min_vruntime on the destination CPU. 3865 */ 3866 3867 static void 3868 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3869 { 3870 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); 3871 bool curr = cfs_rq->curr == se; 3872 3873 /* 3874 * If we're the current task, we must renormalise before calling 3875 * update_curr(). 3876 */ 3877 if (renorm && curr) 3878 se->vruntime += cfs_rq->min_vruntime; 3879 3880 update_curr(cfs_rq); 3881 3882 /* 3883 * Otherwise, renormalise after, such that we're placed at the current 3884 * moment in time, instead of some random moment in the past. Being 3885 * placed in the past could significantly boost this task to the 3886 * fairness detriment of existing tasks. 3887 */ 3888 if (renorm && !curr) 3889 se->vruntime += cfs_rq->min_vruntime; 3890 3891 /* 3892 * When enqueuing a sched_entity, we must: 3893 * - Update loads to have both entity and cfs_rq synced with now. 
3894 * - Add its load to cfs_rq->runnable_avg 3895 * - For group_entity, update its weight to reflect the new share of 3896 * its group cfs_rq 3897 * - Add its new weight to cfs_rq->load.weight 3898 */ 3899 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); 3900 update_cfs_group(se); 3901 enqueue_runnable_load_avg(cfs_rq, se); 3902 account_entity_enqueue(cfs_rq, se); 3903 3904 if (flags & ENQUEUE_WAKEUP) 3905 place_entity(cfs_rq, se, 0); 3906 3907 check_schedstat_required(); 3908 update_stats_enqueue(cfs_rq, se, flags); 3909 check_spread(cfs_rq, se); 3910 if (!curr) 3911 __enqueue_entity(cfs_rq, se); 3912 se->on_rq = 1; 3913 3914 if (cfs_rq->nr_running == 1) { 3915 list_add_leaf_cfs_rq(cfs_rq); 3916 check_enqueue_throttle(cfs_rq); 3917 } 3918 } 3919 3920 static void __clear_buddies_last(struct sched_entity *se) 3921 { 3922 for_each_sched_entity(se) { 3923 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3924 if (cfs_rq->last != se) 3925 break; 3926 3927 cfs_rq->last = NULL; 3928 } 3929 } 3930 3931 static void __clear_buddies_next(struct sched_entity *se) 3932 { 3933 for_each_sched_entity(se) { 3934 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3935 if (cfs_rq->next != se) 3936 break; 3937 3938 cfs_rq->next = NULL; 3939 } 3940 } 3941 3942 static void __clear_buddies_skip(struct sched_entity *se) 3943 { 3944 for_each_sched_entity(se) { 3945 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3946 if (cfs_rq->skip != se) 3947 break; 3948 3949 cfs_rq->skip = NULL; 3950 } 3951 } 3952 3953 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) 3954 { 3955 if (cfs_rq->last == se) 3956 __clear_buddies_last(se); 3957 3958 if (cfs_rq->next == se) 3959 __clear_buddies_next(se); 3960 3961 if (cfs_rq->skip == se) 3962 __clear_buddies_skip(se); 3963 } 3964 3965 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); 3966 3967 static void 3968 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3969 { 3970 /* 3971 * Update run-time statistics of the 'current'. 3972 */ 3973 update_curr(cfs_rq); 3974 3975 /* 3976 * When dequeuing a sched_entity, we must: 3977 * - Update loads to have both entity and cfs_rq synced with now. 3978 * - Subtract its load from the cfs_rq->runnable_avg. 3979 * - Subtract its previous weight from cfs_rq->load.weight. 3980 * - For group entity, update its weight to reflect the new share 3981 * of its group cfs_rq. 3982 */ 3983 update_load_avg(cfs_rq, se, UPDATE_TG); 3984 dequeue_runnable_load_avg(cfs_rq, se); 3985 3986 update_stats_dequeue(cfs_rq, se, flags); 3987 3988 clear_buddies(cfs_rq, se); 3989 3990 if (se != cfs_rq->curr) 3991 __dequeue_entity(cfs_rq, se); 3992 se->on_rq = 0; 3993 account_entity_dequeue(cfs_rq, se); 3994 3995 /* 3996 * Normalize after update_curr(); which will also have moved 3997 * min_vruntime if @se is the one holding it back. But before doing 3998 * update_min_vruntime() again, which will discount @se's position and 3999 * can move min_vruntime forward still more. 4000 */ 4001 if (!(flags & DEQUEUE_SLEEP)) 4002 se->vruntime -= cfs_rq->min_vruntime; 4003 4004 /* return excess runtime on last dequeue */ 4005 return_cfs_rq_runtime(cfs_rq); 4006 4007 update_cfs_group(se); 4008 4009 /* 4010 * Now advance min_vruntime if @se was the entity holding it back, 4011 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be 4012 * put back on, and if we advance min_vruntime, we'll be placed back 4013 * further than we started -- ie. we'll be penalized. 
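 * (A sketch of that case: a priority or nice change dequeues with DEQUEUE_SAVE
 * but without DEQUEUE_MOVE and immediately re-enqueues the task on the same
 * cfs_rq, so min_vruntime must not be advanced past it in between.)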
4014 */ 4015 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) 4016 update_min_vruntime(cfs_rq); 4017 } 4018 4019 /* 4020 * Preempt the current task with a newly woken task if needed: 4021 */ 4022 static void 4023 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) 4024 { 4025 unsigned long ideal_runtime, delta_exec; 4026 struct sched_entity *se; 4027 s64 delta; 4028 4029 ideal_runtime = sched_slice(cfs_rq, curr); 4030 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; 4031 if (delta_exec > ideal_runtime) { 4032 resched_curr(rq_of(cfs_rq)); 4033 /* 4034 * The current task ran long enough, ensure it doesn't get 4035 * re-elected due to buddy favours. 4036 */ 4037 clear_buddies(cfs_rq, curr); 4038 return; 4039 } 4040 4041 /* 4042 * Ensure that a task that missed wakeup preemption by a 4043 * narrow margin doesn't have to wait for a full slice. 4044 * This also mitigates buddy induced latencies under load. 4045 */ 4046 if (delta_exec < sysctl_sched_min_granularity) 4047 return; 4048 4049 se = __pick_first_entity(cfs_rq); 4050 delta = curr->vruntime - se->vruntime; 4051 4052 if (delta < 0) 4053 return; 4054 4055 if (delta > ideal_runtime) 4056 resched_curr(rq_of(cfs_rq)); 4057 } 4058 4059 static void 4060 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 4061 { 4062 /* 'current' is not kept within the tree. */ 4063 if (se->on_rq) { 4064 /* 4065 * Any task has to be enqueued before it get to execute on 4066 * a CPU. So account for the time it spent waiting on the 4067 * runqueue. 4068 */ 4069 update_stats_wait_end(cfs_rq, se); 4070 __dequeue_entity(cfs_rq, se); 4071 update_load_avg(cfs_rq, se, UPDATE_TG); 4072 } 4073 4074 update_stats_curr_start(cfs_rq, se); 4075 cfs_rq->curr = se; 4076 4077 /* 4078 * Track our maximum slice length, if the CPU's load is at 4079 * least twice that of our own weight (i.e. dont track it 4080 * when there are only lesser-weight tasks around): 4081 */ 4082 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { 4083 schedstat_set(se->statistics.slice_max, 4084 max((u64)schedstat_val(se->statistics.slice_max), 4085 se->sum_exec_runtime - se->prev_sum_exec_runtime)); 4086 } 4087 4088 se->prev_sum_exec_runtime = se->sum_exec_runtime; 4089 } 4090 4091 static int 4092 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); 4093 4094 /* 4095 * Pick the next process, keeping these things in mind, in this order: 4096 * 1) keep things fair between processes/task groups 4097 * 2) pick the "next" process, since someone really wants that to run 4098 * 3) pick the "last" process, for cache locality 4099 * 4) do not run the "skip" process, if something else is available 4100 */ 4101 static struct sched_entity * 4102 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) 4103 { 4104 struct sched_entity *left = __pick_first_entity(cfs_rq); 4105 struct sched_entity *se; 4106 4107 /* 4108 * If curr is set we have to see if its left of the leftmost entity 4109 * still in the tree, provided there was anything in the tree at all. 4110 */ 4111 if (!left || (curr && entity_before(curr, left))) 4112 left = curr; 4113 4114 se = left; /* ideally we run the leftmost entity */ 4115 4116 /* 4117 * Avoid running the skip buddy, if running something else can 4118 * be done without getting too unfair. 
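 * ('Too unfair' is bounded by wakeup_preempt_entity() below: the runner-up is
 * only chosen when the leftmost entity does not lead it by more than the
 * scaled wakeup granularity.)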
4119 */ 4120 if (cfs_rq->skip == se) { 4121 struct sched_entity *second; 4122 4123 if (se == curr) { 4124 second = __pick_first_entity(cfs_rq); 4125 } else { 4126 second = __pick_next_entity(se); 4127 if (!second || (curr && entity_before(curr, second))) 4128 second = curr; 4129 } 4130 4131 if (second && wakeup_preempt_entity(second, left) < 1) 4132 se = second; 4133 } 4134 4135 /* 4136 * Prefer last buddy, try to return the CPU to a preempted task. 4137 */ 4138 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) 4139 se = cfs_rq->last; 4140 4141 /* 4142 * Someone really wants this to run. If it's not unfair, run it. 4143 */ 4144 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) 4145 se = cfs_rq->next; 4146 4147 clear_buddies(cfs_rq, se); 4148 4149 return se; 4150 } 4151 4152 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); 4153 4154 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) 4155 { 4156 /* 4157 * If still on the runqueue then deactivate_task() 4158 * was not called and update_curr() has to be done: 4159 */ 4160 if (prev->on_rq) 4161 update_curr(cfs_rq); 4162 4163 /* throttle cfs_rqs exceeding runtime */ 4164 check_cfs_rq_runtime(cfs_rq); 4165 4166 check_spread(cfs_rq, prev); 4167 4168 if (prev->on_rq) { 4169 update_stats_wait_start(cfs_rq, prev); 4170 /* Put 'current' back into the tree. */ 4171 __enqueue_entity(cfs_rq, prev); 4172 /* in !on_rq case, update occurred at dequeue */ 4173 update_load_avg(cfs_rq, prev, 0); 4174 } 4175 cfs_rq->curr = NULL; 4176 } 4177 4178 static void 4179 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) 4180 { 4181 /* 4182 * Update run-time statistics of the 'current'. 4183 */ 4184 update_curr(cfs_rq); 4185 4186 /* 4187 * Ensure that runnable average is periodically updated. 4188 */ 4189 update_load_avg(cfs_rq, curr, UPDATE_TG); 4190 update_cfs_group(curr); 4191 4192 #ifdef CONFIG_SCHED_HRTICK 4193 /* 4194 * queued ticks are scheduled to match the slice, so don't bother 4195 * validating it and just reschedule. 4196 */ 4197 if (queued) { 4198 resched_curr(rq_of(cfs_rq)); 4199 return; 4200 } 4201 /* 4202 * don't let the period tick interfere with the hrtick preemption 4203 */ 4204 if (!sched_feat(DOUBLE_TICK) && 4205 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) 4206 return; 4207 #endif 4208 4209 if (cfs_rq->nr_running > 1) 4210 check_preempt_tick(cfs_rq, curr); 4211 } 4212 4213 4214 /************************************************** 4215 * CFS bandwidth control machinery 4216 */ 4217 4218 #ifdef CONFIG_CFS_BANDWIDTH 4219 4220 #ifdef CONFIG_JUMP_LABEL 4221 static struct static_key __cfs_bandwidth_used; 4222 4223 static inline bool cfs_bandwidth_used(void) 4224 { 4225 return static_key_false(&__cfs_bandwidth_used); 4226 } 4227 4228 void cfs_bandwidth_usage_inc(void) 4229 { 4230 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used); 4231 } 4232 4233 void cfs_bandwidth_usage_dec(void) 4234 { 4235 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); 4236 } 4237 #else /* CONFIG_JUMP_LABEL */ 4238 static bool cfs_bandwidth_used(void) 4239 { 4240 return true; 4241 } 4242 4243 void cfs_bandwidth_usage_inc(void) {} 4244 void cfs_bandwidth_usage_dec(void) {} 4245 #endif /* CONFIG_JUMP_LABEL */ 4246 4247 /* 4248 * default period for cfs group bandwidth. 
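 * Each period the group's runtime pool is refilled up to its quota; a quota
 * of 25 msec against the default 100 msec period, for example, caps the group
 * at 25% of one CPU.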
4249 * default: 0.1s, units: nanoseconds 4250 */ 4251 static inline u64 default_cfs_period(void) 4252 { 4253 return 100000000ULL; 4254 } 4255 4256 static inline u64 sched_cfs_bandwidth_slice(void) 4257 { 4258 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; 4259 } 4260 4261 /* 4262 * Replenish runtime according to assigned quota and update expiration time. 4263 * We use sched_clock_cpu directly instead of rq->clock to avoid adding 4264 * additional synchronization around rq->lock. 4265 * 4266 * requires cfs_b->lock 4267 */ 4268 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) 4269 { 4270 u64 now; 4271 4272 if (cfs_b->quota == RUNTIME_INF) 4273 return; 4274 4275 now = sched_clock_cpu(smp_processor_id()); 4276 cfs_b->runtime = cfs_b->quota; 4277 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); 4278 cfs_b->expires_seq++; 4279 } 4280 4281 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 4282 { 4283 return &tg->cfs_bandwidth; 4284 } 4285 4286 /* rq->clock_task normalized against any time this cfs_rq has spent throttled */ 4287 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) 4288 { 4289 if (unlikely(cfs_rq->throttle_count)) 4290 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time; 4291 4292 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; 4293 } 4294 4295 /* returns 0 on failure to allocate runtime */ 4296 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4297 { 4298 struct task_group *tg = cfs_rq->tg; 4299 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); 4300 u64 amount = 0, min_amount, expires; 4301 int expires_seq; 4302 4303 /* note: this is a positive sum as runtime_remaining <= 0 */ 4304 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; 4305 4306 raw_spin_lock(&cfs_b->lock); 4307 if (cfs_b->quota == RUNTIME_INF) 4308 amount = min_amount; 4309 else { 4310 start_cfs_bandwidth(cfs_b); 4311 4312 if (cfs_b->runtime > 0) { 4313 amount = min(cfs_b->runtime, min_amount); 4314 cfs_b->runtime -= amount; 4315 cfs_b->idle = 0; 4316 } 4317 } 4318 expires_seq = cfs_b->expires_seq; 4319 expires = cfs_b->runtime_expires; 4320 raw_spin_unlock(&cfs_b->lock); 4321 4322 cfs_rq->runtime_remaining += amount; 4323 /* 4324 * we may have advanced our local expiration to account for allowed 4325 * spread between our sched_clock and the one on which runtime was 4326 * issued. 4327 */ 4328 if (cfs_rq->expires_seq != expires_seq) { 4329 cfs_rq->expires_seq = expires_seq; 4330 cfs_rq->runtime_expires = expires; 4331 } 4332 4333 return cfs_rq->runtime_remaining > 0; 4334 } 4335 4336 /* 4337 * Note: This depends on the synchronization provided by sched_clock and the 4338 * fact that rq->clock snapshots this value. 4339 */ 4340 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4341 { 4342 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4343 4344 /* if the deadline is ahead of our clock, nothing to do */ 4345 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0)) 4346 return; 4347 4348 if (cfs_rq->runtime_remaining < 0) 4349 return; 4350 4351 /* 4352 * If the local deadline has passed we have to consider the 4353 * possibility that our sched_clock is 'fast' and the global deadline 4354 * has not truly expired. 4355 * 4356 * Fortunately we can determine whether this is the case by checking 4357 * whether the global deadline (cfs_b->expires_seq) has advanced.
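 * If the sequence numbers still match, the global pool has not been refilled
 * yet, so the apparent expiry is only local clock drift and the local deadline
 * is extended; if they differ, a new period has begun and the stale local
 * runtime is dropped.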
4358 */ 4359 if (cfs_rq->expires_seq == cfs_b->expires_seq) { 4360 /* extend local deadline, drift is bounded above by 2 ticks */ 4361 cfs_rq->runtime_expires += TICK_NSEC; 4362 } else { 4363 /* global deadline is ahead, expiration has passed */ 4364 cfs_rq->runtime_remaining = 0; 4365 } 4366 } 4367 4368 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4369 { 4370 /* dock delta_exec before expiring quota (as it could span periods) */ 4371 cfs_rq->runtime_remaining -= delta_exec; 4372 expire_cfs_rq_runtime(cfs_rq); 4373 4374 if (likely(cfs_rq->runtime_remaining > 0)) 4375 return; 4376 4377 /* 4378 * if we're unable to extend our runtime we resched so that the active 4379 * hierarchy can be throttled 4380 */ 4381 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) 4382 resched_curr(rq_of(cfs_rq)); 4383 } 4384 4385 static __always_inline 4386 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4387 { 4388 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 4389 return; 4390 4391 __account_cfs_rq_runtime(cfs_rq, delta_exec); 4392 } 4393 4394 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 4395 { 4396 return cfs_bandwidth_used() && cfs_rq->throttled; 4397 } 4398 4399 /* check whether cfs_rq, or any parent, is throttled */ 4400 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 4401 { 4402 return cfs_bandwidth_used() && cfs_rq->throttle_count; 4403 } 4404 4405 /* 4406 * Ensure that neither of the group entities corresponding to src_cpu or 4407 * dest_cpu are members of a throttled hierarchy when performing group 4408 * load-balance operations. 4409 */ 4410 static inline int throttled_lb_pair(struct task_group *tg, 4411 int src_cpu, int dest_cpu) 4412 { 4413 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; 4414 4415 src_cfs_rq = tg->cfs_rq[src_cpu]; 4416 dest_cfs_rq = tg->cfs_rq[dest_cpu]; 4417 4418 return throttled_hierarchy(src_cfs_rq) || 4419 throttled_hierarchy(dest_cfs_rq); 4420 } 4421 4422 static int tg_unthrottle_up(struct task_group *tg, void *data) 4423 { 4424 struct rq *rq = data; 4425 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4426 4427 cfs_rq->throttle_count--; 4428 if (!cfs_rq->throttle_count) { 4429 /* adjust cfs_rq_clock_task() */ 4430 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - 4431 cfs_rq->throttled_clock_task; 4432 } 4433 4434 return 0; 4435 } 4436 4437 static int tg_throttle_down(struct task_group *tg, void *data) 4438 { 4439 struct rq *rq = data; 4440 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4441 4442 /* group is entering throttled state, stop time */ 4443 if (!cfs_rq->throttle_count) 4444 cfs_rq->throttled_clock_task = rq_clock_task(rq); 4445 cfs_rq->throttle_count++; 4446 4447 return 0; 4448 } 4449 4450 static void throttle_cfs_rq(struct cfs_rq *cfs_rq) 4451 { 4452 struct rq *rq = rq_of(cfs_rq); 4453 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4454 struct sched_entity *se; 4455 long task_delta, dequeue = 1; 4456 bool empty; 4457 4458 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; 4459 4460 /* freeze hierarchy runnable averages while throttled */ 4461 rcu_read_lock(); 4462 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); 4463 rcu_read_unlock(); 4464 4465 task_delta = cfs_rq->h_nr_running; 4466 for_each_sched_entity(se) { 4467 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4468 /* throttled entity or throttle-on-deactivate */ 4469 if (!se->on_rq) 4470 break; 4471 4472 if (dequeue) 4473 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); 4474 qcfs_rq->h_nr_running -= 
task_delta; 4475 4476 if (qcfs_rq->load.weight) 4477 dequeue = 0; 4478 } 4479 4480 if (!se) 4481 sub_nr_running(rq, task_delta); 4482 4483 cfs_rq->throttled = 1; 4484 cfs_rq->throttled_clock = rq_clock(rq); 4485 raw_spin_lock(&cfs_b->lock); 4486 empty = list_empty(&cfs_b->throttled_cfs_rq); 4487 4488 /* 4489 * Add to the _head_ of the list, so that an already-started 4490 * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is 4491 * not running add to the tail so that later runqueues don't get starved. 4492 */ 4493 if (cfs_b->distribute_running) 4494 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); 4495 else 4496 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); 4497 4498 /* 4499 * If we're the first throttled task, make sure the bandwidth 4500 * timer is running. 4501 */ 4502 if (empty) 4503 start_cfs_bandwidth(cfs_b); 4504 4505 raw_spin_unlock(&cfs_b->lock); 4506 } 4507 4508 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) 4509 { 4510 struct rq *rq = rq_of(cfs_rq); 4511 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4512 struct sched_entity *se; 4513 int enqueue = 1; 4514 long task_delta; 4515 4516 se = cfs_rq->tg->se[cpu_of(rq)]; 4517 4518 cfs_rq->throttled = 0; 4519 4520 update_rq_clock(rq); 4521 4522 raw_spin_lock(&cfs_b->lock); 4523 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; 4524 list_del_rcu(&cfs_rq->throttled_list); 4525 raw_spin_unlock(&cfs_b->lock); 4526 4527 /* update hierarchical throttle state */ 4528 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); 4529 4530 if (!cfs_rq->load.weight) 4531 return; 4532 4533 task_delta = cfs_rq->h_nr_running; 4534 for_each_sched_entity(se) { 4535 if (se->on_rq) 4536 enqueue = 0; 4537 4538 cfs_rq = cfs_rq_of(se); 4539 if (enqueue) 4540 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); 4541 cfs_rq->h_nr_running += task_delta; 4542 4543 if (cfs_rq_throttled(cfs_rq)) 4544 break; 4545 } 4546 4547 if (!se) 4548 add_nr_running(rq, task_delta); 4549 4550 /* Determine whether we need to wake up potentially idle CPU: */ 4551 if (rq->curr == rq->idle && rq->cfs.nr_running) 4552 resched_curr(rq); 4553 } 4554 4555 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, 4556 u64 remaining, u64 expires) 4557 { 4558 struct cfs_rq *cfs_rq; 4559 u64 runtime; 4560 u64 starting_runtime = remaining; 4561 4562 rcu_read_lock(); 4563 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, 4564 throttled_list) { 4565 struct rq *rq = rq_of(cfs_rq); 4566 struct rq_flags rf; 4567 4568 rq_lock(rq, &rf); 4569 if (!cfs_rq_throttled(cfs_rq)) 4570 goto next; 4571 4572 runtime = -cfs_rq->runtime_remaining + 1; 4573 if (runtime > remaining) 4574 runtime = remaining; 4575 remaining -= runtime; 4576 4577 cfs_rq->runtime_remaining += runtime; 4578 cfs_rq->runtime_expires = expires; 4579 4580 /* we check whether we're throttled above */ 4581 if (cfs_rq->runtime_remaining > 0) 4582 unthrottle_cfs_rq(cfs_rq); 4583 4584 next: 4585 rq_unlock(rq, &rf); 4586 4587 if (!remaining) 4588 break; 4589 } 4590 rcu_read_unlock(); 4591 4592 return starting_runtime - remaining; 4593 } 4594 4595 /* 4596 * Responsible for refilling a task_group's bandwidth and unthrottling its 4597 * cfs_rqs as appropriate. If there has been no activity within the last 4598 * period the timer is deactivated until scheduling resumes; cfs_b->idle is 4599 * used to track this state. 
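 * Returns 1 when the period timer can be deactivated (no bandwidth constraint,
 * or an idle period with nothing throttled), 0 when it must run again for the
 * next period.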
4600 */ 4601 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) 4602 { 4603 u64 runtime, runtime_expires; 4604 int throttled; 4605 4606 /* no need to continue the timer with no bandwidth constraint */ 4607 if (cfs_b->quota == RUNTIME_INF) 4608 goto out_deactivate; 4609 4610 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 4611 cfs_b->nr_periods += overrun; 4612 4613 /* 4614 * idle depends on !throttled (for the case of a large deficit), and if 4615 * we're going inactive then everything else can be deferred 4616 */ 4617 if (cfs_b->idle && !throttled) 4618 goto out_deactivate; 4619 4620 __refill_cfs_bandwidth_runtime(cfs_b); 4621 4622 if (!throttled) { 4623 /* mark as potentially idle for the upcoming period */ 4624 cfs_b->idle = 1; 4625 return 0; 4626 } 4627 4628 /* account preceding periods in which throttling occurred */ 4629 cfs_b->nr_throttled += overrun; 4630 4631 runtime_expires = cfs_b->runtime_expires; 4632 4633 /* 4634 * This check is repeated as we are holding onto the new bandwidth while 4635 * we unthrottle. This can potentially race with an unthrottled group 4636 * trying to acquire new bandwidth from the global pool. This can result 4637 * in us over-using our runtime if it is all used during this loop, but 4638 * only by limited amounts in that extreme case. 4639 */ 4640 while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { 4641 runtime = cfs_b->runtime; 4642 cfs_b->distribute_running = 1; 4643 raw_spin_unlock(&cfs_b->lock); 4644 /* we can't nest cfs_b->lock while distributing bandwidth */ 4645 runtime = distribute_cfs_runtime(cfs_b, runtime, 4646 runtime_expires); 4647 raw_spin_lock(&cfs_b->lock); 4648 4649 cfs_b->distribute_running = 0; 4650 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 4651 4652 lsub_positive(&cfs_b->runtime, runtime); 4653 } 4654 4655 /* 4656 * While we are ensured activity in the period following an 4657 * unthrottle, this also covers the case in which the new bandwidth is 4658 * insufficient to cover the existing bandwidth deficit. (Forcing the 4659 * timer to remain active while there are any throttled entities.) 4660 */ 4661 cfs_b->idle = 0; 4662 4663 return 0; 4664 4665 out_deactivate: 4666 return 1; 4667 } 4668 4669 /* a cfs_rq won't donate quota below this amount */ 4670 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; 4671 /* minimum remaining period time to redistribute slack quota */ 4672 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; 4673 /* how long we wait to gather additional slack before distributing */ 4674 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; 4675 4676 /* 4677 * Are we near the end of the current quota period? 4678 * 4679 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the 4680 * hrtimer base being cleared by hrtimer_start. In the case of 4681 * migrate_hrtimers, base is never cleared, so we are fine. 4682 */ 4683 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) 4684 { 4685 struct hrtimer *refresh_timer = &cfs_b->period_timer; 4686 u64 remaining; 4687 4688 /* if the call-back is running a quota refresh is already occurring */ 4689 if (hrtimer_callback_running(refresh_timer)) 4690 return 1; 4691 4692 /* is a quota refresh about to occur? 
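 * (i.e. will the period timer fire within min_expire ns of now?)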
*/ 4693 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); 4694 if (remaining < min_expire) 4695 return 1; 4696 4697 return 0; 4698 } 4699 4700 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) 4701 { 4702 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; 4703 4704 /* if there's a quota refresh soon don't bother with slack */ 4705 if (runtime_refresh_within(cfs_b, min_left)) 4706 return; 4707 4708 hrtimer_start(&cfs_b->slack_timer, 4709 ns_to_ktime(cfs_bandwidth_slack_period), 4710 HRTIMER_MODE_REL); 4711 } 4712 4713 /* we know any runtime found here is valid as update_curr() precedes return */ 4714 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4715 { 4716 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4717 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; 4718 4719 if (slack_runtime <= 0) 4720 return; 4721 4722 raw_spin_lock(&cfs_b->lock); 4723 if (cfs_b->quota != RUNTIME_INF && 4724 cfs_rq->runtime_expires == cfs_b->runtime_expires) { 4725 cfs_b->runtime += slack_runtime; 4726 4727 /* we are under rq->lock, defer unthrottling using a timer */ 4728 if (cfs_b->runtime > sched_cfs_bandwidth_slice() && 4729 !list_empty(&cfs_b->throttled_cfs_rq)) 4730 start_cfs_slack_bandwidth(cfs_b); 4731 } 4732 raw_spin_unlock(&cfs_b->lock); 4733 4734 /* even if it's not valid for return we don't want to try again */ 4735 cfs_rq->runtime_remaining -= slack_runtime; 4736 } 4737 4738 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4739 { 4740 if (!cfs_bandwidth_used()) 4741 return; 4742 4743 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) 4744 return; 4745 4746 __return_cfs_rq_runtime(cfs_rq); 4747 } 4748 4749 /* 4750 * This is done with a timer (instead of inline with bandwidth return) since 4751 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. 4752 */ 4753 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) 4754 { 4755 u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); 4756 u64 expires; 4757 4758 /* confirm we're still not at a refresh boundary */ 4759 raw_spin_lock(&cfs_b->lock); 4760 if (cfs_b->distribute_running) { 4761 raw_spin_unlock(&cfs_b->lock); 4762 return; 4763 } 4764 4765 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { 4766 raw_spin_unlock(&cfs_b->lock); 4767 return; 4768 } 4769 4770 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) 4771 runtime = cfs_b->runtime; 4772 4773 expires = cfs_b->runtime_expires; 4774 if (runtime) 4775 cfs_b->distribute_running = 1; 4776 4777 raw_spin_unlock(&cfs_b->lock); 4778 4779 if (!runtime) 4780 return; 4781 4782 runtime = distribute_cfs_runtime(cfs_b, runtime, expires); 4783 4784 raw_spin_lock(&cfs_b->lock); 4785 if (expires == cfs_b->runtime_expires) 4786 lsub_positive(&cfs_b->runtime, runtime); 4787 cfs_b->distribute_running = 0; 4788 raw_spin_unlock(&cfs_b->lock); 4789 } 4790 4791 /* 4792 * When a group wakes up we want to make sure that its quota is not already 4793 * expired/exceeded, otherwise it may be allowed to steal additional ticks of 4794 * runtime as update_curr() throttling can not not trigger until it's on-rq. 
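 * To close that window, the zero-delta account_cfs_rq_runtime() call below
 * re-checks the quota at enqueue time and throttles the cfs_rq immediately if
 * its remaining runtime is already non-positive.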
4795 */ 4796 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) 4797 { 4798 if (!cfs_bandwidth_used()) 4799 return; 4800 4801 /* an active group must be handled by the update_curr()->put() path */ 4802 if (!cfs_rq->runtime_enabled || cfs_rq->curr) 4803 return; 4804 4805 /* ensure the group is not already throttled */ 4806 if (cfs_rq_throttled(cfs_rq)) 4807 return; 4808 4809 /* update runtime allocation */ 4810 account_cfs_rq_runtime(cfs_rq, 0); 4811 if (cfs_rq->runtime_remaining <= 0) 4812 throttle_cfs_rq(cfs_rq); 4813 } 4814 4815 static void sync_throttle(struct task_group *tg, int cpu) 4816 { 4817 struct cfs_rq *pcfs_rq, *cfs_rq; 4818 4819 if (!cfs_bandwidth_used()) 4820 return; 4821 4822 if (!tg->parent) 4823 return; 4824 4825 cfs_rq = tg->cfs_rq[cpu]; 4826 pcfs_rq = tg->parent->cfs_rq[cpu]; 4827 4828 cfs_rq->throttle_count = pcfs_rq->throttle_count; 4829 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); 4830 } 4831 4832 /* conditionally throttle active cfs_rq's from put_prev_entity() */ 4833 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4834 { 4835 if (!cfs_bandwidth_used()) 4836 return false; 4837 4838 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) 4839 return false; 4840 4841 /* 4842 * it's possible for a throttled entity to be forced into a running 4843 * state (e.g. set_curr_task), in this case we're finished. 4844 */ 4845 if (cfs_rq_throttled(cfs_rq)) 4846 return true; 4847 4848 throttle_cfs_rq(cfs_rq); 4849 return true; 4850 } 4851 4852 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) 4853 { 4854 struct cfs_bandwidth *cfs_b = 4855 container_of(timer, struct cfs_bandwidth, slack_timer); 4856 4857 do_sched_cfs_slack_timer(cfs_b); 4858 4859 return HRTIMER_NORESTART; 4860 } 4861 4862 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) 4863 { 4864 struct cfs_bandwidth *cfs_b = 4865 container_of(timer, struct cfs_bandwidth, period_timer); 4866 int overrun; 4867 int idle = 0; 4868 4869 raw_spin_lock(&cfs_b->lock); 4870 for (;;) { 4871 overrun = hrtimer_forward_now(timer, cfs_b->period); 4872 if (!overrun) 4873 break; 4874 4875 idle = do_sched_cfs_period_timer(cfs_b, overrun); 4876 } 4877 if (idle) 4878 cfs_b->period_active = 0; 4879 raw_spin_unlock(&cfs_b->lock); 4880 4881 return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; 4882 } 4883 4884 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4885 { 4886 raw_spin_lock_init(&cfs_b->lock); 4887 cfs_b->runtime = 0; 4888 cfs_b->quota = RUNTIME_INF; 4889 cfs_b->period = ns_to_ktime(default_cfs_period()); 4890 4891 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); 4892 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 4893 cfs_b->period_timer.function = sched_cfs_period_timer; 4894 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4895 cfs_b->slack_timer.function = sched_cfs_slack_timer; 4896 cfs_b->distribute_running = 0; 4897 } 4898 4899 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4900 { 4901 cfs_rq->runtime_enabled = 0; 4902 INIT_LIST_HEAD(&cfs_rq->throttled_list); 4903 } 4904 4905 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4906 { 4907 u64 overrun; 4908 4909 lockdep_assert_held(&cfs_b->lock); 4910 4911 if (cfs_b->period_active) 4912 return; 4913 4914 cfs_b->period_active = 1; 4915 overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); 4916 cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period); 4917 cfs_b->expires_seq++; 4918 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); 4919 } 4920 4921 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4922 { 4923 /* init_cfs_bandwidth() was not called */ 4924 if (!cfs_b->throttled_cfs_rq.next) 4925 return; 4926 4927 hrtimer_cancel(&cfs_b->period_timer); 4928 hrtimer_cancel(&cfs_b->slack_timer); 4929 } 4930 4931 /* 4932 * Both these CPU hotplug callbacks race against unregister_fair_sched_group() 4933 * 4934 * The race is harmless, since modifying bandwidth settings of unhooked group 4935 * bits doesn't do much. 4936 */ 4937 4938 /* cpu online calback */ 4939 static void __maybe_unused update_runtime_enabled(struct rq *rq) 4940 { 4941 struct task_group *tg; 4942 4943 lockdep_assert_held(&rq->lock); 4944 4945 rcu_read_lock(); 4946 list_for_each_entry_rcu(tg, &task_groups, list) { 4947 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 4948 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4949 4950 raw_spin_lock(&cfs_b->lock); 4951 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; 4952 raw_spin_unlock(&cfs_b->lock); 4953 } 4954 rcu_read_unlock(); 4955 } 4956 4957 /* cpu offline callback */ 4958 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) 4959 { 4960 struct task_group *tg; 4961 4962 lockdep_assert_held(&rq->lock); 4963 4964 rcu_read_lock(); 4965 list_for_each_entry_rcu(tg, &task_groups, list) { 4966 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4967 4968 if (!cfs_rq->runtime_enabled) 4969 continue; 4970 4971 /* 4972 * clock_task is not advancing so we just need to make sure 4973 * there's some valid quota amount 4974 */ 4975 cfs_rq->runtime_remaining = 1; 4976 /* 4977 * Offline rq is schedulable till CPU is completely disabled 4978 * in take_cpu_down(), so we prevent new cfs throttling here. 
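 * Anything already throttled is unthrottled below so that its tasks can be
 * migrated off the outgoing CPU.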
4979 */ 4980 cfs_rq->runtime_enabled = 0; 4981 4982 if (cfs_rq_throttled(cfs_rq)) 4983 unthrottle_cfs_rq(cfs_rq); 4984 } 4985 rcu_read_unlock(); 4986 } 4987 4988 #else /* CONFIG_CFS_BANDWIDTH */ 4989 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) 4990 { 4991 return rq_clock_task(rq_of(cfs_rq)); 4992 } 4993 4994 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} 4995 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } 4996 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 4997 static inline void sync_throttle(struct task_group *tg, int cpu) {} 4998 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 4999 5000 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 5001 { 5002 return 0; 5003 } 5004 5005 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 5006 { 5007 return 0; 5008 } 5009 5010 static inline int throttled_lb_pair(struct task_group *tg, 5011 int src_cpu, int dest_cpu) 5012 { 5013 return 0; 5014 } 5015 5016 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5017 5018 #ifdef CONFIG_FAIR_GROUP_SCHED 5019 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 5020 #endif 5021 5022 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 5023 { 5024 return NULL; 5025 } 5026 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 5027 static inline void update_runtime_enabled(struct rq *rq) {} 5028 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} 5029 5030 #endif /* CONFIG_CFS_BANDWIDTH */ 5031 5032 /************************************************** 5033 * CFS operations on tasks: 5034 */ 5035 5036 #ifdef CONFIG_SCHED_HRTICK 5037 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) 5038 { 5039 struct sched_entity *se = &p->se; 5040 struct cfs_rq *cfs_rq = cfs_rq_of(se); 5041 5042 SCHED_WARN_ON(task_rq(p) != rq); 5043 5044 if (rq->cfs.h_nr_running > 1) { 5045 u64 slice = sched_slice(cfs_rq, se); 5046 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; 5047 s64 delta = slice - ran; 5048 5049 if (delta < 0) { 5050 if (rq->curr == p) 5051 resched_curr(rq); 5052 return; 5053 } 5054 hrtick_start(rq, delta); 5055 } 5056 } 5057 5058 /* 5059 * called from enqueue/dequeue and updates the hrtick when the 5060 * current task is from our class and nr_running is low enough 5061 * to matter. 
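 * 'Low enough' means fewer runnable tasks than sched_nr_latency; beyond that
 * the slice bottoms out at the minimum granularity and the regular tick is
 * sufficient.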
5062 */ 5063 static void hrtick_update(struct rq *rq) 5064 { 5065 struct task_struct *curr = rq->curr; 5066 5067 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) 5068 return; 5069 5070 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) 5071 hrtick_start_fair(rq, curr); 5072 } 5073 #else /* !CONFIG_SCHED_HRTICK */ 5074 static inline void 5075 hrtick_start_fair(struct rq *rq, struct task_struct *p) 5076 { 5077 } 5078 5079 static inline void hrtick_update(struct rq *rq) 5080 { 5081 } 5082 #endif 5083 5084 #ifdef CONFIG_SMP 5085 static inline unsigned long cpu_util(int cpu); 5086 static unsigned long capacity_of(int cpu); 5087 5088 static inline bool cpu_overutilized(int cpu) 5089 { 5090 return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin); 5091 } 5092 5093 static inline void update_overutilized_status(struct rq *rq) 5094 { 5095 if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) 5096 WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); 5097 } 5098 #else 5099 static inline void update_overutilized_status(struct rq *rq) { } 5100 #endif 5101 5102 /* 5103 * The enqueue_task method is called before nr_running is 5104 * increased. Here we update the fair scheduling stats and 5105 * then put the task into the rbtree: 5106 */ 5107 static void 5108 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) 5109 { 5110 struct cfs_rq *cfs_rq; 5111 struct sched_entity *se = &p->se; 5112 5113 /* 5114 * The code below (indirectly) updates schedutil which looks at 5115 * the cfs_rq utilization to select a frequency. 5116 * Let's add the task's estimated utilization to the cfs_rq's 5117 * estimated utilization, before we update schedutil. 5118 */ 5119 util_est_enqueue(&rq->cfs, p); 5120 5121 /* 5122 * If in_iowait is set, the code below may not trigger any cpufreq 5123 * utilization updates, so do it here explicitly with the IOWAIT flag 5124 * passed. 5125 */ 5126 if (p->in_iowait) 5127 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); 5128 5129 for_each_sched_entity(se) { 5130 if (se->on_rq) 5131 break; 5132 cfs_rq = cfs_rq_of(se); 5133 enqueue_entity(cfs_rq, se, flags); 5134 5135 /* 5136 * end evaluation on encountering a throttled cfs_rq 5137 * 5138 * note: in the case of encountering a throttled cfs_rq we will 5139 * post the final h_nr_running increment below. 5140 */ 5141 if (cfs_rq_throttled(cfs_rq)) 5142 break; 5143 cfs_rq->h_nr_running++; 5144 5145 flags = ENQUEUE_WAKEUP; 5146 } 5147 5148 for_each_sched_entity(se) { 5149 cfs_rq = cfs_rq_of(se); 5150 cfs_rq->h_nr_running++; 5151 5152 if (cfs_rq_throttled(cfs_rq)) 5153 break; 5154 5155 update_load_avg(cfs_rq, se, UPDATE_TG); 5156 update_cfs_group(se); 5157 } 5158 5159 if (!se) { 5160 add_nr_running(rq, 1); 5161 /* 5162 * Since new tasks are assigned an initial util_avg equal to 5163 * half of the spare capacity of their CPU, tiny tasks have the 5164 * ability to cross the overutilized threshold, which will 5165 * result in the load balancer ruining all the task placement 5166 * done by EAS. As a way to mitigate that effect, do not account 5167 * for the first enqueue operation of new tasks during the 5168 * overutilized flag detection. 5169 * 5170 * A better way of solving this problem would be to wait for 5171 * the PELT signals of tasks to converge before taking them 5172 * into account, but that is not straightforward to implement, 5173 * and the following generally works well enough in practice. 
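 * Hence the ENQUEUE_WAKEUP check below: only wakeups of tasks that have run
 * before feed the overutilized detection, the very first enqueue of a freshly
 * forked task does not.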
5174 */ 5175 if (flags & ENQUEUE_WAKEUP) 5176 update_overutilized_status(rq); 5177 5178 } 5179 5180 hrtick_update(rq); 5181 } 5182 5183 static void set_next_buddy(struct sched_entity *se); 5184 5185 /* 5186 * The dequeue_task method is called before nr_running is 5187 * decreased. We remove the task from the rbtree and 5188 * update the fair scheduling stats: 5189 */ 5190 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) 5191 { 5192 struct cfs_rq *cfs_rq; 5193 struct sched_entity *se = &p->se; 5194 int task_sleep = flags & DEQUEUE_SLEEP; 5195 5196 for_each_sched_entity(se) { 5197 cfs_rq = cfs_rq_of(se); 5198 dequeue_entity(cfs_rq, se, flags); 5199 5200 /* 5201 * end evaluation on encountering a throttled cfs_rq 5202 * 5203 * note: in the case of encountering a throttled cfs_rq we will 5204 * post the final h_nr_running decrement below. 5205 */ 5206 if (cfs_rq_throttled(cfs_rq)) 5207 break; 5208 cfs_rq->h_nr_running--; 5209 5210 /* Don't dequeue parent if it has other entities besides us */ 5211 if (cfs_rq->load.weight) { 5212 /* Avoid re-evaluating load for this entity: */ 5213 se = parent_entity(se); 5214 /* 5215 * Bias pick_next to pick a task from this cfs_rq, as 5216 * p is sleeping when it is within its sched_slice. 5217 */ 5218 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) 5219 set_next_buddy(se); 5220 break; 5221 } 5222 flags |= DEQUEUE_SLEEP; 5223 } 5224 5225 for_each_sched_entity(se) { 5226 cfs_rq = cfs_rq_of(se); 5227 cfs_rq->h_nr_running--; 5228 5229 if (cfs_rq_throttled(cfs_rq)) 5230 break; 5231 5232 update_load_avg(cfs_rq, se, UPDATE_TG); 5233 update_cfs_group(se); 5234 } 5235 5236 if (!se) 5237 sub_nr_running(rq, 1); 5238 5239 util_est_dequeue(&rq->cfs, p, task_sleep); 5240 hrtick_update(rq); 5241 } 5242 5243 #ifdef CONFIG_SMP 5244 5245 /* Working cpumask for: load_balance, load_balance_newidle. */ 5246 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); 5247 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask); 5248 5249 #ifdef CONFIG_NO_HZ_COMMON 5250 /* 5251 * per rq 'load' arrray crap; XXX kill this. 5252 */ 5253 5254 /* 5255 * The exact cpuload calculated at every tick would be: 5256 * 5257 * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load 5258 * 5259 * If a CPU misses updates for n ticks (as it was idle) and update gets 5260 * called on the n+1-th tick when CPU may be busy, then we have: 5261 * 5262 * load_n = (1 - 1/2^i)^n * load_0 5263 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load 5264 * 5265 * decay_load_missed() below does efficient calculation of 5266 * 5267 * load' = (1 - 1/2^i)^n * load 5268 * 5269 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors. 5270 * This allows us to precompute the above in said factors, thereby allowing the 5271 * reduction of an arbitrary n in O(log_2 n) steps. (See also 5272 * fixed_power_int()) 5273 * 5274 * The calculation is approximated on a 128 point scale. 5275 */ 5276 #define DEGRADE_SHIFT 7 5277 5278 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; 5279 static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { 5280 { 0, 0, 0, 0, 0, 0, 0, 0 }, 5281 { 64, 32, 8, 0, 0, 0, 0, 0 }, 5282 { 96, 72, 40, 12, 1, 0, 0, 0 }, 5283 { 112, 98, 75, 43, 15, 1, 0, 0 }, 5284 { 120, 112, 98, 76, 45, 16, 2, 0 } 5285 }; 5286 5287 /* 5288 * Update cpu_load for any missed ticks, due to tickless idle. The backlog 5289 * would be when CPU is idle and so we just decay the old load without 5290 * adding any new load. 
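 * For instance, for idx = 2 the per-tick factor is 3/4 (~96/128); a CPU that
 * missed 3 ticks decays an old load of 1024 via the 2^0 and 2^1 factors:
 * (1024 * 96) >> 7 = 768, then (768 * 72) >> 7 = 432, matching
 * (3/4)^3 * 1024 = 432.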
5291 */ 5292 static unsigned long 5293 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) 5294 { 5295 int j = 0; 5296 5297 if (!missed_updates) 5298 return load; 5299 5300 if (missed_updates >= degrade_zero_ticks[idx]) 5301 return 0; 5302 5303 if (idx == 1) 5304 return load >> missed_updates; 5305 5306 while (missed_updates) { 5307 if (missed_updates % 2) 5308 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; 5309 5310 missed_updates >>= 1; 5311 j++; 5312 } 5313 return load; 5314 } 5315 5316 static struct { 5317 cpumask_var_t idle_cpus_mask; 5318 atomic_t nr_cpus; 5319 int has_blocked; /* Idle CPUS has blocked load */ 5320 unsigned long next_balance; /* in jiffy units */ 5321 unsigned long next_blocked; /* Next update of blocked load in jiffies */ 5322 } nohz ____cacheline_aligned; 5323 5324 #endif /* CONFIG_NO_HZ_COMMON */ 5325 5326 /** 5327 * __cpu_load_update - update the rq->cpu_load[] statistics 5328 * @this_rq: The rq to update statistics for 5329 * @this_load: The current load 5330 * @pending_updates: The number of missed updates 5331 * 5332 * Update rq->cpu_load[] statistics. This function is usually called every 5333 * scheduler tick (TICK_NSEC). 5334 * 5335 * This function computes a decaying average: 5336 * 5337 * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load 5338 * 5339 * Because of NOHZ it might not get called on every tick which gives need for 5340 * the @pending_updates argument. 5341 * 5342 * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1 5343 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load 5344 * = A * (A * load[i]_n-2 + B) + B 5345 * = A * (A * (A * load[i]_n-3 + B) + B) + B 5346 * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B 5347 * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B 5348 * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B 5349 * = (1 - 1/2^i)^n * (load[i]_0 - load) + load 5350 * 5351 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as 5352 * any change in load would have resulted in the tick being turned back on. 5353 * 5354 * For regular NOHZ, this reduces to: 5355 * 5356 * load[i]_n = (1 - 1/2^i)^n * load[i]_0 5357 * 5358 * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra 5359 * term. 5360 */ 5361 static void cpu_load_update(struct rq *this_rq, unsigned long this_load, 5362 unsigned long pending_updates) 5363 { 5364 unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0]; 5365 int i, scale; 5366 5367 this_rq->nr_load_updates++; 5368 5369 /* Update our load: */ 5370 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ 5371 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { 5372 unsigned long old_load, new_load; 5373 5374 /* scale is effectively 1 << i now, and >> i divides by scale */ 5375 5376 old_load = this_rq->cpu_load[i]; 5377 #ifdef CONFIG_NO_HZ_COMMON 5378 old_load = decay_load_missed(old_load, pending_updates - 1, i); 5379 if (tickless_load) { 5380 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i); 5381 /* 5382 * old_load can never be a negative value because a 5383 * decayed tickless_load cannot be greater than the 5384 * original tickless_load. 5385 */ 5386 old_load += tickless_load; 5387 } 5388 #endif 5389 new_load = this_load; 5390 /* 5391 * Round up the averaging division if load is increasing. This 5392 * prevents us from getting stuck on 9 if the load is 10, for 5393 * example. 
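 * E.g. with i = 1 (scale = 2), old_load = 9 and new_load = 10: without the
 * adjustment (9 + 10) >> 1 stays at 9 forever, while adding scale - 1 gives
 * (9 + 11) >> 1 = 10.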
5394 */ 5395 if (new_load > old_load) 5396 new_load += scale - 1; 5397 5398 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; 5399 } 5400 } 5401 5402 /* Used instead of source_load when we know the type == 0 */ 5403 static unsigned long weighted_cpuload(struct rq *rq) 5404 { 5405 return cfs_rq_runnable_load_avg(&rq->cfs); 5406 } 5407 5408 #ifdef CONFIG_NO_HZ_COMMON 5409 /* 5410 * There is no sane way to deal with nohz on smp when using jiffies because the 5411 * CPU doing the jiffies update might drift wrt the CPU doing the jiffy reading 5412 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}. 5413 * 5414 * Therefore we need to avoid the delta approach from the regular tick when 5415 * possible since that would seriously skew the load calculation. This is why we 5416 * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on 5417 * jiffies deltas for updates happening while in nohz mode (idle ticks, idle 5418 * loop exit, nohz_idle_balance, nohz full exit...) 5419 * 5420 * This means we might still be one tick off for nohz periods. 5421 */ 5422 5423 static void cpu_load_update_nohz(struct rq *this_rq, 5424 unsigned long curr_jiffies, 5425 unsigned long load) 5426 { 5427 unsigned long pending_updates; 5428 5429 pending_updates = curr_jiffies - this_rq->last_load_update_tick; 5430 if (pending_updates) { 5431 this_rq->last_load_update_tick = curr_jiffies; 5432 /* 5433 * In the regular NOHZ case, we were idle, this means load 0. 5434 * In the NOHZ_FULL case, we were non-idle, we should consider 5435 * its weighted load. 5436 */ 5437 cpu_load_update(this_rq, load, pending_updates); 5438 } 5439 } 5440 5441 /* 5442 * Called from nohz_idle_balance() to update the load ratings before doing the 5443 * idle balance. 5444 */ 5445 static void cpu_load_update_idle(struct rq *this_rq) 5446 { 5447 /* 5448 * bail if there's load or we're actually up-to-date. 5449 */ 5450 if (weighted_cpuload(this_rq)) 5451 return; 5452 5453 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0); 5454 } 5455 5456 /* 5457 * Record CPU load on nohz entry so we know the tickless load to account 5458 * on nohz exit. cpu_load[0] happens then to be updated more frequently 5459 * than other cpu_load[idx] but it should be fine as cpu_load readers 5460 * shouldn't rely into synchronized cpu_load[*] updates. 5461 */ 5462 void cpu_load_update_nohz_start(void) 5463 { 5464 struct rq *this_rq = this_rq(); 5465 5466 /* 5467 * This is all lockless but should be fine. If weighted_cpuload changes 5468 * concurrently we'll exit nohz. And cpu_load write can race with 5469 * cpu_load_update_idle() but both updater would be writing the same. 5470 */ 5471 this_rq->cpu_load[0] = weighted_cpuload(this_rq); 5472 } 5473 5474 /* 5475 * Account the tickless load in the end of a nohz frame. 
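 * Called when the tick is restarted: the jiffies elapsed since
 * last_load_update_tick give the number of missed updates to decay over, with
 * the current weighted_cpuload() folded in as the tickless load.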
5476 */ 5477 void cpu_load_update_nohz_stop(void) 5478 { 5479 unsigned long curr_jiffies = READ_ONCE(jiffies); 5480 struct rq *this_rq = this_rq(); 5481 unsigned long load; 5482 struct rq_flags rf; 5483 5484 if (curr_jiffies == this_rq->last_load_update_tick) 5485 return; 5486 5487 load = weighted_cpuload(this_rq); 5488 rq_lock(this_rq, &rf); 5489 update_rq_clock(this_rq); 5490 cpu_load_update_nohz(this_rq, curr_jiffies, load); 5491 rq_unlock(this_rq, &rf); 5492 } 5493 #else /* !CONFIG_NO_HZ_COMMON */ 5494 static inline void cpu_load_update_nohz(struct rq *this_rq, 5495 unsigned long curr_jiffies, 5496 unsigned long load) { } 5497 #endif /* CONFIG_NO_HZ_COMMON */ 5498 5499 static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load) 5500 { 5501 #ifdef CONFIG_NO_HZ_COMMON 5502 /* See the mess around cpu_load_update_nohz(). */ 5503 this_rq->last_load_update_tick = READ_ONCE(jiffies); 5504 #endif 5505 cpu_load_update(this_rq, load, 1); 5506 } 5507 5508 /* 5509 * Called from scheduler_tick() 5510 */ 5511 void cpu_load_update_active(struct rq *this_rq) 5512 { 5513 unsigned long load = weighted_cpuload(this_rq); 5514 5515 if (tick_nohz_tick_stopped()) 5516 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load); 5517 else 5518 cpu_load_update_periodic(this_rq, load); 5519 } 5520 5521 /* 5522 * Return a low guess at the load of a migration-source CPU weighted 5523 * according to the scheduling class and "nice" value. 5524 * 5525 * We want to under-estimate the load of migration sources, to 5526 * balance conservatively. 5527 */ 5528 static unsigned long source_load(int cpu, int type) 5529 { 5530 struct rq *rq = cpu_rq(cpu); 5531 unsigned long total = weighted_cpuload(rq); 5532 5533 if (type == 0 || !sched_feat(LB_BIAS)) 5534 return total; 5535 5536 return min(rq->cpu_load[type-1], total); 5537 } 5538 5539 /* 5540 * Return a high guess at the load of a migration-target CPU weighted 5541 * according to the scheduling class and "nice" value. 5542 */ 5543 static unsigned long target_load(int cpu, int type) 5544 { 5545 struct rq *rq = cpu_rq(cpu); 5546 unsigned long total = weighted_cpuload(rq); 5547 5548 if (type == 0 || !sched_feat(LB_BIAS)) 5549 return total; 5550 5551 return max(rq->cpu_load[type-1], total); 5552 } 5553 5554 static unsigned long capacity_of(int cpu) 5555 { 5556 return cpu_rq(cpu)->cpu_capacity; 5557 } 5558 5559 static unsigned long capacity_orig_of(int cpu) 5560 { 5561 return cpu_rq(cpu)->cpu_capacity_orig; 5562 } 5563 5564 static unsigned long cpu_avg_load_per_task(int cpu) 5565 { 5566 struct rq *rq = cpu_rq(cpu); 5567 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); 5568 unsigned long load_avg = weighted_cpuload(rq); 5569 5570 if (nr_running) 5571 return load_avg / nr_running; 5572 5573 return 0; 5574 } 5575 5576 static void record_wakee(struct task_struct *p) 5577 { 5578 /* 5579 * Only decay a single time; tasks that have less then 1 wakeup per 5580 * jiffy will not have built up many flips. 5581 */ 5582 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { 5583 current->wakee_flips >>= 1; 5584 current->wakee_flip_decay_ts = jiffies; 5585 } 5586 5587 if (current->last_wakee != p) { 5588 current->last_wakee = p; 5589 current->wakee_flips++; 5590 } 5591 } 5592 5593 /* 5594 * Detect M:N waker/wakee relationships via a switching-frequency heuristic. 5595 * 5596 * A waker of many should wake a different task than the one last awakened 5597 * at a frequency roughly N times higher than one of its wakees. 
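 * (Think of a dispatcher handing work to a pool of workers: record_wakee()
 * above bumps the dispatcher's wakee_flips on nearly every wakeup, while each
 * worker's counter grows far more slowly.)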
5598 * 5599 * In order to determine whether we should let the load spread vs consolidating 5600 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one 5601 * partner, and a factor of lls_size higher frequency in the other. 5602 * 5603 * With both conditions met, we can be relatively sure that the relationship is 5604 * non-monogamous, with partner count exceeding socket size. 5605 * 5606 * Waker/wakee being client/server, worker/dispatcher, interrupt source or 5607 * whatever is irrelevant, spread criteria is apparent partner count exceeds 5608 * socket size. 5609 */ 5610 static int wake_wide(struct task_struct *p) 5611 { 5612 unsigned int master = current->wakee_flips; 5613 unsigned int slave = p->wakee_flips; 5614 int factor = this_cpu_read(sd_llc_size); 5615 5616 if (master < slave) 5617 swap(master, slave); 5618 if (slave < factor || master < slave * factor) 5619 return 0; 5620 return 1; 5621 } 5622 5623 /* 5624 * The purpose of wake_affine() is to quickly determine on which CPU we can run 5625 * soonest. For the purpose of speed we only consider the waking and previous 5626 * CPU. 5627 * 5628 * wake_affine_idle() - only considers 'now', it check if the waking CPU is 5629 * cache-affine and is (or will be) idle. 5630 * 5631 * wake_affine_weight() - considers the weight to reflect the average 5632 * scheduling latency of the CPUs. This seems to work 5633 * for the overloaded case. 5634 */ 5635 static int 5636 wake_affine_idle(int this_cpu, int prev_cpu, int sync) 5637 { 5638 /* 5639 * If this_cpu is idle, it implies the wakeup is from interrupt 5640 * context. Only allow the move if cache is shared. Otherwise an 5641 * interrupt intensive workload could force all tasks onto one 5642 * node depending on the IO topology or IRQ affinity settings. 5643 * 5644 * If the prev_cpu is idle and cache affine then avoid a migration. 5645 * There is no guarantee that the cache hot data from an interrupt 5646 * is more important than cache hot data on the prev_cpu and from 5647 * a cpufreq perspective, it's better to have higher utilisation 5648 * on one CPU. 5649 */ 5650 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) 5651 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; 5652 5653 if (sync && cpu_rq(this_cpu)->nr_running == 1) 5654 return this_cpu; 5655 5656 return nr_cpumask_bits; 5657 } 5658 5659 static int 5660 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, 5661 int this_cpu, int prev_cpu, int sync) 5662 { 5663 s64 this_eff_load, prev_eff_load; 5664 unsigned long task_load; 5665 5666 this_eff_load = target_load(this_cpu, sd->wake_idx); 5667 5668 if (sync) { 5669 unsigned long current_load = task_h_load(current); 5670 5671 if (current_load > this_eff_load) 5672 return this_cpu; 5673 5674 this_eff_load -= current_load; 5675 } 5676 5677 task_load = task_h_load(p); 5678 5679 this_eff_load += task_load; 5680 if (sched_feat(WA_BIAS)) 5681 this_eff_load *= 100; 5682 this_eff_load *= capacity_of(prev_cpu); 5683 5684 prev_eff_load = source_load(prev_cpu, sd->wake_idx); 5685 prev_eff_load -= task_load; 5686 if (sched_feat(WA_BIAS)) 5687 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; 5688 prev_eff_load *= capacity_of(this_cpu); 5689 5690 /* 5691 * If sync, adjust the weight of prev_eff_load such that if 5692 * prev_eff == this_eff that select_idle_sibling() will consider 5693 * stacking the wakee on top of the waker if no other CPU is 5694 * idle. 
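 * (The increment below makes the this_eff_load < prev_eff_load comparison
 * succeed on an exact tie, favouring this_cpu.)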
5695 */ 5696 if (sync) 5697 prev_eff_load += 1; 5698 5699 return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; 5700 } 5701 5702 static int wake_affine(struct sched_domain *sd, struct task_struct *p, 5703 int this_cpu, int prev_cpu, int sync) 5704 { 5705 int target = nr_cpumask_bits; 5706 5707 if (sched_feat(WA_IDLE)) 5708 target = wake_affine_idle(this_cpu, prev_cpu, sync); 5709 5710 if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits) 5711 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); 5712 5713 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); 5714 if (target == nr_cpumask_bits) 5715 return prev_cpu; 5716 5717 schedstat_inc(sd->ttwu_move_affine); 5718 schedstat_inc(p->se.statistics.nr_wakeups_affine); 5719 return target; 5720 } 5721 5722 static unsigned long cpu_util_without(int cpu, struct task_struct *p); 5723 5724 static unsigned long capacity_spare_without(int cpu, struct task_struct *p) 5725 { 5726 return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0); 5727 } 5728 5729 /* 5730 * find_idlest_group finds and returns the least busy CPU group within the 5731 * domain. 5732 * 5733 * Assumes p is allowed on at least one CPU in sd. 5734 */ 5735 static struct sched_group * 5736 find_idlest_group(struct sched_domain *sd, struct task_struct *p, 5737 int this_cpu, int sd_flag) 5738 { 5739 struct sched_group *idlest = NULL, *group = sd->groups; 5740 struct sched_group *most_spare_sg = NULL; 5741 unsigned long min_runnable_load = ULONG_MAX; 5742 unsigned long this_runnable_load = ULONG_MAX; 5743 unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX; 5744 unsigned long most_spare = 0, this_spare = 0; 5745 int load_idx = sd->forkexec_idx; 5746 int imbalance_scale = 100 + (sd->imbalance_pct-100)/2; 5747 unsigned long imbalance = scale_load_down(NICE_0_LOAD) * 5748 (sd->imbalance_pct-100) / 100; 5749 5750 if (sd_flag & SD_BALANCE_WAKE) 5751 load_idx = sd->wake_idx; 5752 5753 do { 5754 unsigned long load, avg_load, runnable_load; 5755 unsigned long spare_cap, max_spare_cap; 5756 int local_group; 5757 int i; 5758 5759 /* Skip over this group if it has no CPUs allowed */ 5760 if (!cpumask_intersects(sched_group_span(group), 5761 &p->cpus_allowed)) 5762 continue; 5763 5764 local_group = cpumask_test_cpu(this_cpu, 5765 sched_group_span(group)); 5766 5767 /* 5768 * Tally up the load of all CPUs in the group and find 5769 * the group containing the CPU with most spare capacity. 
5770 */ 5771 avg_load = 0; 5772 runnable_load = 0; 5773 max_spare_cap = 0; 5774 5775 for_each_cpu(i, sched_group_span(group)) { 5776 /* Bias balancing toward CPUs of our domain */ 5777 if (local_group) 5778 load = source_load(i, load_idx); 5779 else 5780 load = target_load(i, load_idx); 5781 5782 runnable_load += load; 5783 5784 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs); 5785 5786 spare_cap = capacity_spare_without(i, p); 5787 5788 if (spare_cap > max_spare_cap) 5789 max_spare_cap = spare_cap; 5790 } 5791 5792 /* Adjust by relative CPU capacity of the group */ 5793 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / 5794 group->sgc->capacity; 5795 runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) / 5796 group->sgc->capacity; 5797 5798 if (local_group) { 5799 this_runnable_load = runnable_load; 5800 this_avg_load = avg_load; 5801 this_spare = max_spare_cap; 5802 } else { 5803 if (min_runnable_load > (runnable_load + imbalance)) { 5804 /* 5805 * The runnable load is significantly smaller 5806 * so we can pick this new CPU: 5807 */ 5808 min_runnable_load = runnable_load; 5809 min_avg_load = avg_load; 5810 idlest = group; 5811 } else if ((runnable_load < (min_runnable_load + imbalance)) && 5812 (100*min_avg_load > imbalance_scale*avg_load)) { 5813 /* 5814 * The runnable loads are close so take the 5815 * blocked load into account through avg_load: 5816 */ 5817 min_avg_load = avg_load; 5818 idlest = group; 5819 } 5820 5821 if (most_spare < max_spare_cap) { 5822 most_spare = max_spare_cap; 5823 most_spare_sg = group; 5824 } 5825 } 5826 } while (group = group->next, group != sd->groups); 5827 5828 /* 5829 * The cross-over point between using spare capacity or least load 5830 * is too conservative for high utilization tasks on partially 5831 * utilized systems if we require spare_capacity > task_util(p), 5832 * so we allow for some task stuffing by using 5833 * spare_capacity > task_util(p)/2. 5834 * 5835 * Spare capacity can't be used for fork because the utilization has 5836 * not been set yet, we must first select a rq to compute the initial 5837 * utilization. 5838 */ 5839 if (sd_flag & SD_BALANCE_FORK) 5840 goto skip_spare; 5841 5842 if (this_spare > task_util(p) / 2 && 5843 imbalance_scale*this_spare > 100*most_spare) 5844 return NULL; 5845 5846 if (most_spare > task_util(p) / 2) 5847 return most_spare_sg; 5848 5849 skip_spare: 5850 if (!idlest) 5851 return NULL; 5852 5853 /* 5854 * When comparing groups across NUMA domains, it's possible for the 5855 * local domain to be very lightly loaded relative to the remote 5856 * domains but "imbalance" skews the comparison making remote CPUs 5857 * look much more favourable. When considering cross-domain, add 5858 * imbalance to the runnable load on the remote node and consider 5859 * staying local. 5860 */ 5861 if ((sd->flags & SD_NUMA) && 5862 min_runnable_load + imbalance >= this_runnable_load) 5863 return NULL; 5864 5865 if (min_runnable_load > (this_runnable_load + imbalance)) 5866 return NULL; 5867 5868 if ((this_runnable_load < (min_runnable_load + imbalance)) && 5869 (100*this_avg_load < imbalance_scale*min_avg_load)) 5870 return NULL; 5871 5872 return idlest; 5873 } 5874 5875 /* 5876 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group. 
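 *
 * Preference order below: among idle CPUs, pick the one whose idle state
 * has the smallest exit latency, breaking ties in favour of the most
 * recently idled CPU (its cache is likely warmer); if no allowed CPU is
 * idle, fall back to the least loaded one.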
5877 */ 5878 static int 5879 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) 5880 { 5881 unsigned long load, min_load = ULONG_MAX; 5882 unsigned int min_exit_latency = UINT_MAX; 5883 u64 latest_idle_timestamp = 0; 5884 int least_loaded_cpu = this_cpu; 5885 int shallowest_idle_cpu = -1; 5886 int i; 5887 5888 /* Check if we have any choice: */ 5889 if (group->group_weight == 1) 5890 return cpumask_first(sched_group_span(group)); 5891 5892 /* Traverse only the allowed CPUs */ 5893 for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) { 5894 if (available_idle_cpu(i)) { 5895 struct rq *rq = cpu_rq(i); 5896 struct cpuidle_state *idle = idle_get_state(rq); 5897 if (idle && idle->exit_latency < min_exit_latency) { 5898 /* 5899 * We give priority to a CPU whose idle state 5900 * has the smallest exit latency irrespective 5901 * of any idle timestamp. 5902 */ 5903 min_exit_latency = idle->exit_latency; 5904 latest_idle_timestamp = rq->idle_stamp; 5905 shallowest_idle_cpu = i; 5906 } else if ((!idle || idle->exit_latency == min_exit_latency) && 5907 rq->idle_stamp > latest_idle_timestamp) { 5908 /* 5909 * If equal or no active idle state, then 5910 * the most recently idled CPU might have 5911 * a warmer cache. 5912 */ 5913 latest_idle_timestamp = rq->idle_stamp; 5914 shallowest_idle_cpu = i; 5915 } 5916 } else if (shallowest_idle_cpu == -1) { 5917 load = weighted_cpuload(cpu_rq(i)); 5918 if (load < min_load) { 5919 min_load = load; 5920 least_loaded_cpu = i; 5921 } 5922 } 5923 } 5924 5925 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; 5926 } 5927 5928 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, 5929 int cpu, int prev_cpu, int sd_flag) 5930 { 5931 int new_cpu = cpu; 5932 5933 if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed)) 5934 return prev_cpu; 5935 5936 /* 5937 * We need task's util for capacity_spare_without, sync it up to 5938 * prev_cpu's last_update_time. 
5939 */ 5940 if (!(sd_flag & SD_BALANCE_FORK)) 5941 sync_entity_load_avg(&p->se); 5942 5943 while (sd) { 5944 struct sched_group *group; 5945 struct sched_domain *tmp; 5946 int weight; 5947 5948 if (!(sd->flags & sd_flag)) { 5949 sd = sd->child; 5950 continue; 5951 } 5952 5953 group = find_idlest_group(sd, p, cpu, sd_flag); 5954 if (!group) { 5955 sd = sd->child; 5956 continue; 5957 } 5958 5959 new_cpu = find_idlest_group_cpu(group, p, cpu); 5960 if (new_cpu == cpu) { 5961 /* Now try balancing at a lower domain level of 'cpu': */ 5962 sd = sd->child; 5963 continue; 5964 } 5965 5966 /* Now try balancing at a lower domain level of 'new_cpu': */ 5967 cpu = new_cpu; 5968 weight = sd->span_weight; 5969 sd = NULL; 5970 for_each_domain(cpu, tmp) { 5971 if (weight <= tmp->span_weight) 5972 break; 5973 if (tmp->flags & sd_flag) 5974 sd = tmp; 5975 } 5976 } 5977 5978 return new_cpu; 5979 } 5980 5981 #ifdef CONFIG_SCHED_SMT 5982 DEFINE_STATIC_KEY_FALSE(sched_smt_present); 5983 5984 static inline void set_idle_cores(int cpu, int val) 5985 { 5986 struct sched_domain_shared *sds; 5987 5988 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5989 if (sds) 5990 WRITE_ONCE(sds->has_idle_cores, val); 5991 } 5992 5993 static inline bool test_idle_cores(int cpu, bool def) 5994 { 5995 struct sched_domain_shared *sds; 5996 5997 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5998 if (sds) 5999 return READ_ONCE(sds->has_idle_cores); 6000 6001 return def; 6002 } 6003 6004 /* 6005 * Scans the local SMT mask to see if the entire core is idle, and records this 6006 * information in sd_llc_shared->has_idle_cores. 6007 * 6008 * Since SMT siblings share all cache levels, inspecting this limited remote 6009 * state should be fairly cheap. 6010 */ 6011 void __update_idle_core(struct rq *rq) 6012 { 6013 int core = cpu_of(rq); 6014 int cpu; 6015 6016 rcu_read_lock(); 6017 if (test_idle_cores(core, true)) 6018 goto unlock; 6019 6020 for_each_cpu(cpu, cpu_smt_mask(core)) { 6021 if (cpu == core) 6022 continue; 6023 6024 if (!available_idle_cpu(cpu)) 6025 goto unlock; 6026 } 6027 6028 set_idle_cores(core, 1); 6029 unlock: 6030 rcu_read_unlock(); 6031 } 6032 6033 /* 6034 * Scan the entire LLC domain for idle cores; this dynamically switches off if 6035 * there are no idle cores left in the system; tracked through 6036 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. 6037 */ 6038 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) 6039 { 6040 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); 6041 int core, cpu; 6042 6043 if (!static_branch_likely(&sched_smt_present)) 6044 return -1; 6045 6046 if (!test_idle_cores(target, false)) 6047 return -1; 6048 6049 cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); 6050 6051 for_each_cpu_wrap(core, cpus, target) { 6052 bool idle = true; 6053 6054 for_each_cpu(cpu, cpu_smt_mask(core)) { 6055 cpumask_clear_cpu(cpu, cpus); 6056 if (!available_idle_cpu(cpu)) 6057 idle = false; 6058 } 6059 6060 if (idle) 6061 return core; 6062 } 6063 6064 /* 6065 * Failed to find an idle core; stop looking for one. 6066 */ 6067 set_idle_cores(target, 0); 6068 6069 return -1; 6070 } 6071 6072 /* 6073 * Scan the local SMT mask for idle CPUs. 
6074 */ 6075 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 6076 { 6077 int cpu; 6078 6079 if (!static_branch_likely(&sched_smt_present)) 6080 return -1; 6081 6082 for_each_cpu(cpu, cpu_smt_mask(target)) { 6083 if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 6084 continue; 6085 if (available_idle_cpu(cpu)) 6086 return cpu; 6087 } 6088 6089 return -1; 6090 } 6091 6092 #else /* CONFIG_SCHED_SMT */ 6093 6094 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) 6095 { 6096 return -1; 6097 } 6098 6099 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 6100 { 6101 return -1; 6102 } 6103 6104 #endif /* CONFIG_SCHED_SMT */ 6105 6106 /* 6107 * Scan the LLC domain for idle CPUs; this is dynamically regulated by 6108 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the 6109 * average idle time for this rq (as found in rq->avg_idle). 6110 */ 6111 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) 6112 { 6113 struct sched_domain *this_sd; 6114 u64 avg_cost, avg_idle; 6115 u64 time, cost; 6116 s64 delta; 6117 int cpu, nr = INT_MAX; 6118 6119 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); 6120 if (!this_sd) 6121 return -1; 6122 6123 /* 6124 * Due to large variance we need a large fuzz factor; hackbench in 6125 * particularly is sensitive here. 6126 */ 6127 avg_idle = this_rq()->avg_idle / 512; 6128 avg_cost = this_sd->avg_scan_cost + 1; 6129 6130 if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost) 6131 return -1; 6132 6133 if (sched_feat(SIS_PROP)) { 6134 u64 span_avg = sd->span_weight * avg_idle; 6135 if (span_avg > 4*avg_cost) 6136 nr = div_u64(span_avg, avg_cost); 6137 else 6138 nr = 4; 6139 } 6140 6141 time = local_clock(); 6142 6143 for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { 6144 if (!--nr) 6145 return -1; 6146 if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 6147 continue; 6148 if (available_idle_cpu(cpu)) 6149 break; 6150 } 6151 6152 time = local_clock() - time; 6153 cost = this_sd->avg_scan_cost; 6154 delta = (s64)(time - cost) / 8; 6155 this_sd->avg_scan_cost += delta; 6156 6157 return cpu; 6158 } 6159 6160 /* 6161 * Try and locate an idle core/thread in the LLC cache domain. 
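 *
 * The search below is ordered by expected cheapness: the target itself if
 * it is idle, then an idle cache-affine prev CPU, then a recently used
 * CPU, and finally a scan of the LLC domain for an idle core, an idle
 * CPU, or an idle SMT sibling of the target.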
6162 */ 6163 static int select_idle_sibling(struct task_struct *p, int prev, int target) 6164 { 6165 struct sched_domain *sd; 6166 int i, recent_used_cpu; 6167 6168 if (available_idle_cpu(target)) 6169 return target; 6170 6171 /* 6172 * If the previous CPU is cache affine and idle, don't be stupid: 6173 */ 6174 if (prev != target && cpus_share_cache(prev, target) && available_idle_cpu(prev)) 6175 return prev; 6176 6177 /* Check a recently used CPU as a potential idle candidate: */ 6178 recent_used_cpu = p->recent_used_cpu; 6179 if (recent_used_cpu != prev && 6180 recent_used_cpu != target && 6181 cpus_share_cache(recent_used_cpu, target) && 6182 available_idle_cpu(recent_used_cpu) && 6183 cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) { 6184 /* 6185 * Replace recent_used_cpu with prev as it is a potential 6186 * candidate for the next wake: 6187 */ 6188 p->recent_used_cpu = prev; 6189 return recent_used_cpu; 6190 } 6191 6192 sd = rcu_dereference(per_cpu(sd_llc, target)); 6193 if (!sd) 6194 return target; 6195 6196 i = select_idle_core(p, sd, target); 6197 if ((unsigned)i < nr_cpumask_bits) 6198 return i; 6199 6200 i = select_idle_cpu(p, sd, target); 6201 if ((unsigned)i < nr_cpumask_bits) 6202 return i; 6203 6204 i = select_idle_smt(p, sd, target); 6205 if ((unsigned)i < nr_cpumask_bits) 6206 return i; 6207 6208 return target; 6209 } 6210 6211 /** 6212 * Amount of capacity of a CPU that is (estimated to be) used by CFS tasks 6213 * @cpu: the CPU to get the utilization of 6214 * 6215 * The unit of the return value must be the one of capacity so we can compare 6216 * the utilization with the capacity of the CPU that is available for CFS task 6217 * (ie cpu_capacity). 6218 * 6219 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the 6220 * recent utilization of currently non-runnable tasks on a CPU. It represents 6221 * the amount of utilization of a CPU in the range [0..capacity_orig] where 6222 * capacity_orig is the cpu_capacity available at the highest frequency 6223 * (arch_scale_freq_capacity()). 6224 * The utilization of a CPU converges towards a sum equal to or less than the 6225 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is 6226 * the running time on this CPU scaled by capacity_curr. 6227 * 6228 * The estimated utilization of a CPU is defined to be the maximum between its 6229 * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks 6230 * currently RUNNABLE on that CPU. 6231 * This allows to properly represent the expected utilization of a CPU which 6232 * has just got a big task running since a long sleep period. At the same time 6233 * however it preserves the benefits of the "blocked utilization" in 6234 * describing the potential for other tasks waking up on the same CPU. 6235 * 6236 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even 6237 * higher than capacity_orig because of unfortunate rounding in 6238 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until 6239 * the average stabilizes with the new running time. We need to check that the 6240 * utilization stays within the range of [0..capacity_orig] and cap it if 6241 * necessary. Without utilization capping, a group could be seen as overloaded 6242 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of 6243 * available capacity. 
We allow utilization to overshoot capacity_curr (but not 6244 * capacity_orig) as it useful for predicting the capacity required after task 6245 * migrations (scheduler-driven DVFS). 6246 * 6247 * Return: the (estimated) utilization for the specified CPU 6248 */ 6249 static inline unsigned long cpu_util(int cpu) 6250 { 6251 struct cfs_rq *cfs_rq; 6252 unsigned int util; 6253 6254 cfs_rq = &cpu_rq(cpu)->cfs; 6255 util = READ_ONCE(cfs_rq->avg.util_avg); 6256 6257 if (sched_feat(UTIL_EST)) 6258 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); 6259 6260 return min_t(unsigned long, util, capacity_orig_of(cpu)); 6261 } 6262 6263 /* 6264 * cpu_util_without: compute cpu utilization without any contributions from *p 6265 * @cpu: the CPU which utilization is requested 6266 * @p: the task which utilization should be discounted 6267 * 6268 * The utilization of a CPU is defined by the utilization of tasks currently 6269 * enqueued on that CPU as well as tasks which are currently sleeping after an 6270 * execution on that CPU. 6271 * 6272 * This method returns the utilization of the specified CPU by discounting the 6273 * utilization of the specified task, whenever the task is currently 6274 * contributing to the CPU utilization. 6275 */ 6276 static unsigned long cpu_util_without(int cpu, struct task_struct *p) 6277 { 6278 struct cfs_rq *cfs_rq; 6279 unsigned int util; 6280 6281 /* Task has no contribution or is new */ 6282 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) 6283 return cpu_util(cpu); 6284 6285 cfs_rq = &cpu_rq(cpu)->cfs; 6286 util = READ_ONCE(cfs_rq->avg.util_avg); 6287 6288 /* Discount task's util from CPU's util */ 6289 lsub_positive(&util, task_util(p)); 6290 6291 /* 6292 * Covered cases: 6293 * 6294 * a) if *p is the only task sleeping on this CPU, then: 6295 * cpu_util (== task_util) > util_est (== 0) 6296 * and thus we return: 6297 * cpu_util_without = (cpu_util - task_util) = 0 6298 * 6299 * b) if other tasks are SLEEPING on this CPU, which is now exiting 6300 * IDLE, then: 6301 * cpu_util >= task_util 6302 * cpu_util > util_est (== 0) 6303 * and thus we discount *p's blocked utilization to return: 6304 * cpu_util_without = (cpu_util - task_util) >= 0 6305 * 6306 * c) if other tasks are RUNNABLE on that CPU and 6307 * util_est > cpu_util 6308 * then we use util_est since it returns a more restrictive 6309 * estimation of the spare capacity on that CPU, by just 6310 * considering the expected utilization of tasks already 6311 * runnable on that CPU. 6312 * 6313 * Cases a) and b) are covered by the above code, while case c) is 6314 * covered by the following code when estimated utilization is 6315 * enabled. 6316 */ 6317 if (sched_feat(UTIL_EST)) { 6318 unsigned int estimated = 6319 READ_ONCE(cfs_rq->avg.util_est.enqueued); 6320 6321 /* 6322 * Despite the following checks we still have a small window 6323 * for a possible race, when an execl's select_task_rq_fair() 6324 * races with LB's detach_task(): 6325 * 6326 * detach_task() 6327 * p->on_rq = TASK_ON_RQ_MIGRATING; 6328 * ---------------------------------- A 6329 * deactivate_task() \ 6330 * dequeue_task() + RaceTime 6331 * util_est_dequeue() / 6332 * ---------------------------------- B 6333 * 6334 * The additional check on "current == p" it's required to 6335 * properly fix the execl regression and it helps in further 6336 * reducing the chances for the above race. 
6337 */ 6338 if (unlikely(task_on_rq_queued(p) || current == p)) 6339 lsub_positive(&estimated, _task_util_est(p)); 6340 6341 util = max(util, estimated); 6342 } 6343 6344 /* 6345 * Utilization (estimated) can exceed the CPU capacity, thus let's 6346 * clamp to the maximum CPU capacity to ensure consistency with 6347 * the cpu_util call. 6348 */ 6349 return min_t(unsigned long, util, capacity_orig_of(cpu)); 6350 } 6351 6352 /* 6353 * Disable WAKE_AFFINE in the case where task @p doesn't fit in the 6354 * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu. 6355 * 6356 * In that case WAKE_AFFINE doesn't make sense and we'll let 6357 * BALANCE_WAKE sort things out. 6358 */ 6359 static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) 6360 { 6361 long min_cap, max_cap; 6362 6363 if (!static_branch_unlikely(&sched_asym_cpucapacity)) 6364 return 0; 6365 6366 min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu)); 6367 max_cap = cpu_rq(cpu)->rd->max_cpu_capacity; 6368 6369 /* Minimum capacity is close to max, no need to abort wake_affine */ 6370 if (max_cap - min_cap < max_cap >> 3) 6371 return 0; 6372 6373 /* Bring task utilization in sync with prev_cpu */ 6374 sync_entity_load_avg(&p->se); 6375 6376 return !task_fits_capacity(p, min_cap); 6377 } 6378 6379 /* 6380 * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued) 6381 * to @dst_cpu. 6382 */ 6383 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) 6384 { 6385 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; 6386 unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); 6387 6388 /* 6389 * If @p migrates from @cpu to another, remove its contribution. Or, 6390 * if @p migrates from another CPU to @cpu, add its contribution. In 6391 * the other cases, @cpu is not impacted by the migration, so the 6392 * util_avg should already be correct. 6393 */ 6394 if (task_cpu(p) == cpu && dst_cpu != cpu) 6395 sub_positive(&util, task_util(p)); 6396 else if (task_cpu(p) != cpu && dst_cpu == cpu) 6397 util += task_util(p); 6398 6399 if (sched_feat(UTIL_EST)) { 6400 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); 6401 6402 /* 6403 * During wake-up, the task isn't enqueued yet and doesn't 6404 * appear in the cfs_rq->avg.util_est.enqueued of any rq, 6405 * so just add it (if needed) to "simulate" what will be 6406 * cpu_util() after the task has been enqueued. 6407 */ 6408 if (dst_cpu == cpu) 6409 util_est += _task_util_est(p); 6410 6411 util = max(util, util_est); 6412 } 6413 6414 return min(util, capacity_orig_of(cpu)); 6415 } 6416 6417 /* 6418 * compute_energy(): Estimates the energy that would be consumed if @p was 6419 * migrated to @dst_cpu. compute_energy() predicts what will be the utilization 6420 * landscape of the * CPUs after the task migration, and uses the Energy Model 6421 * to compute what would be the energy if we decided to actually migrate that 6422 * task. 6423 */ 6424 static long 6425 compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) 6426 { 6427 long util, max_util, sum_util, energy = 0; 6428 int cpu; 6429 6430 for (; pd; pd = pd->next) { 6431 max_util = sum_util = 0; 6432 /* 6433 * The capacity state of CPUs of the current rd can be driven by 6434 * CPUs of another rd if they belong to the same performance 6435 * domain. So, account for the utilization of these CPUs too 6436 * by masking pd with cpu_online_mask instead of the rd span. 
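 * The resulting max_util is what (roughly speaking) determines the
 * performance state the domain will run at, while sum_util scales the
 * cost of that state by how busy the domain is; both are handed to
 * em_pd_energy() below.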
6437 * 6438 * If an entire performance domain is outside of the current rd, 6439 * it will not appear in its pd list and will not be accounted 6440 * by compute_energy(). 6441 */ 6442 for_each_cpu_and(cpu, perf_domain_span(pd), cpu_online_mask) { 6443 util = cpu_util_next(cpu, p, dst_cpu); 6444 util = schedutil_energy_util(cpu, util); 6445 max_util = max(util, max_util); 6446 sum_util += util; 6447 } 6448 6449 energy += em_pd_energy(pd->em_pd, max_util, sum_util); 6450 } 6451 6452 return energy; 6453 } 6454 6455 /* 6456 * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the 6457 * waking task. find_energy_efficient_cpu() looks for the CPU with maximum 6458 * spare capacity in each performance domain and uses it as a potential 6459 * candidate to execute the task. Then, it uses the Energy Model to figure 6460 * out which of the CPU candidates is the most energy-efficient. 6461 * 6462 * The rationale for this heuristic is as follows. In a performance domain, 6463 * all the most energy efficient CPU candidates (according to the Energy 6464 * Model) are those for which we'll request a low frequency. When there are 6465 * several CPUs for which the frequency request will be the same, we don't 6466 * have enough data to break the tie between them, because the Energy Model 6467 * only includes active power costs. With this model, if we assume that 6468 * frequency requests follow utilization (e.g. using schedutil), the CPU with 6469 * the maximum spare capacity in a performance domain is guaranteed to be among 6470 * the best candidates of the performance domain. 6471 * 6472 * In practice, it could be preferable from an energy standpoint to pack 6473 * small tasks on a CPU in order to let other CPUs go in deeper idle states, 6474 * but that could also hurt our chances to go cluster idle, and we have no 6475 * ways to tell with the current Energy Model if this is actually a good 6476 * idea or not. So, find_energy_efficient_cpu() basically favors 6477 * cluster-packing, and spreading inside a cluster. That should at least be 6478 * a good thing for latency, and this is consistent with the idea that most 6479 * of the energy savings of EAS come from the asymmetry of the system, and 6480 * not so much from breaking the tie between identical CPUs. That's also the 6481 * reason why EAS is enabled in the topology code only for systems where 6482 * SD_ASYM_CPUCAPACITY is set. 6483 * 6484 * NOTE: Forkees are not accepted in the energy-aware wake-up path because 6485 * they don't have any useful utilization data yet and it's not possible to 6486 * forecast their impact on energy consumption. Consequently, they will be 6487 * placed by find_idlest_cpu() on the least loaded CPU, which might turn out 6488 * to be energy-inefficient in some use-cases. The alternative would be to 6489 * bias new tasks towards specific types of CPUs first, or to try to infer 6490 * their util_avg from the parent task, but those heuristics could hurt 6491 * other use-cases too. So, until someone finds a better way to solve this, 6492 * let's keep things simple by re-using the existing slow path. 
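 *
 * Roughly, the procedure below is: for each performance domain spanned by
 * the lowest SD_ASYM_CPUCAPACITY domain covering this_cpu and prev_cpu,
 * always evaluate prev_cpu as a candidate and otherwise pick the fitting
 * CPU with the maximum spare capacity; estimate each candidate with
 * compute_energy() and return the cheapest one only if it saves at least
 * ~6% relative to prev_cpu.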
6493 */ 6494 6495 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) 6496 { 6497 unsigned long prev_energy = ULONG_MAX, best_energy = ULONG_MAX; 6498 struct root_domain *rd = cpu_rq(smp_processor_id())->rd; 6499 int cpu, best_energy_cpu = prev_cpu; 6500 struct perf_domain *head, *pd; 6501 unsigned long cpu_cap, util; 6502 struct sched_domain *sd; 6503 6504 rcu_read_lock(); 6505 pd = rcu_dereference(rd->pd); 6506 if (!pd || READ_ONCE(rd->overutilized)) 6507 goto fail; 6508 head = pd; 6509 6510 /* 6511 * Energy-aware wake-up happens on the lowest sched_domain starting 6512 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu. 6513 */ 6514 sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity)); 6515 while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) 6516 sd = sd->parent; 6517 if (!sd) 6518 goto fail; 6519 6520 sync_entity_load_avg(&p->se); 6521 if (!task_util_est(p)) 6522 goto unlock; 6523 6524 for (; pd; pd = pd->next) { 6525 unsigned long cur_energy, spare_cap, max_spare_cap = 0; 6526 int max_spare_cap_cpu = -1; 6527 6528 for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { 6529 if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 6530 continue; 6531 6532 /* Skip CPUs that will be overutilized. */ 6533 util = cpu_util_next(cpu, p, cpu); 6534 cpu_cap = capacity_of(cpu); 6535 if (cpu_cap * 1024 < util * capacity_margin) 6536 continue; 6537 6538 /* Always use prev_cpu as a candidate. */ 6539 if (cpu == prev_cpu) { 6540 prev_energy = compute_energy(p, prev_cpu, head); 6541 best_energy = min(best_energy, prev_energy); 6542 continue; 6543 } 6544 6545 /* 6546 * Find the CPU with the maximum spare capacity in 6547 * the performance domain 6548 */ 6549 spare_cap = cpu_cap - util; 6550 if (spare_cap > max_spare_cap) { 6551 max_spare_cap = spare_cap; 6552 max_spare_cap_cpu = cpu; 6553 } 6554 } 6555 6556 /* Evaluate the energy impact of using this CPU. */ 6557 if (max_spare_cap_cpu >= 0) { 6558 cur_energy = compute_energy(p, max_spare_cap_cpu, head); 6559 if (cur_energy < best_energy) { 6560 best_energy = cur_energy; 6561 best_energy_cpu = max_spare_cap_cpu; 6562 } 6563 } 6564 } 6565 unlock: 6566 rcu_read_unlock(); 6567 6568 /* 6569 * Pick the best CPU if prev_cpu cannot be used, or if it saves at 6570 * least 6% of the energy used by prev_cpu. 6571 */ 6572 if (prev_energy == ULONG_MAX) 6573 return best_energy_cpu; 6574 6575 if ((prev_energy - best_energy) > (prev_energy >> 4)) 6576 return best_energy_cpu; 6577 6578 return prev_cpu; 6579 6580 fail: 6581 rcu_read_unlock(); 6582 6583 return -1; 6584 } 6585 6586 /* 6587 * select_task_rq_fair: Select target runqueue for the waking task in domains 6588 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, 6589 * SD_BALANCE_FORK, or SD_BALANCE_EXEC. 6590 * 6591 * Balances load by selecting the idlest CPU in the idlest group, or under 6592 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. 6593 * 6594 * Returns the target CPU number. 6595 * 6596 * preempt must be disabled. 
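 *
 * Roughly: plain wakeups take the fast path (wake_affine() plus
 * select_idle_sibling()); fork/exec balancing, or a wakeup for which a
 * domain with the requested balance flag was kept instead of a
 * wake-affine target, takes the slow path via find_idlest_cpu().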
6597 */ 6598 static int 6599 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) 6600 { 6601 struct sched_domain *tmp, *sd = NULL; 6602 int cpu = smp_processor_id(); 6603 int new_cpu = prev_cpu; 6604 int want_affine = 0; 6605 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); 6606 6607 if (sd_flag & SD_BALANCE_WAKE) { 6608 record_wakee(p); 6609 6610 if (static_branch_unlikely(&sched_energy_present)) { 6611 new_cpu = find_energy_efficient_cpu(p, prev_cpu); 6612 if (new_cpu >= 0) 6613 return new_cpu; 6614 new_cpu = prev_cpu; 6615 } 6616 6617 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) && 6618 cpumask_test_cpu(cpu, &p->cpus_allowed); 6619 } 6620 6621 rcu_read_lock(); 6622 for_each_domain(cpu, tmp) { 6623 if (!(tmp->flags & SD_LOAD_BALANCE)) 6624 break; 6625 6626 /* 6627 * If both 'cpu' and 'prev_cpu' are part of this domain, 6628 * cpu is a valid SD_WAKE_AFFINE target. 6629 */ 6630 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && 6631 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { 6632 if (cpu != prev_cpu) 6633 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); 6634 6635 sd = NULL; /* Prefer wake_affine over balance flags */ 6636 break; 6637 } 6638 6639 if (tmp->flags & sd_flag) 6640 sd = tmp; 6641 else if (!want_affine) 6642 break; 6643 } 6644 6645 if (unlikely(sd)) { 6646 /* Slow path */ 6647 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); 6648 } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ 6649 /* Fast path */ 6650 6651 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); 6652 6653 if (want_affine) 6654 current->recent_used_cpu = cpu; 6655 } 6656 rcu_read_unlock(); 6657 6658 return new_cpu; 6659 } 6660 6661 static void detach_entity_cfs_rq(struct sched_entity *se); 6662 6663 /* 6664 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and 6665 * cfs_rq_of(p) references at time of call are still valid and identify the 6666 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. 6667 */ 6668 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) 6669 { 6670 /* 6671 * As blocked tasks retain absolute vruntime the migration needs to 6672 * deal with this by subtracting the old and adding the new 6673 * min_vruntime -- the latter is done by enqueue_entity() when placing 6674 * the task on the new runqueue. 6675 */ 6676 if (p->state == TASK_WAKING) { 6677 struct sched_entity *se = &p->se; 6678 struct cfs_rq *cfs_rq = cfs_rq_of(se); 6679 u64 min_vruntime; 6680 6681 #ifndef CONFIG_64BIT 6682 u64 min_vruntime_copy; 6683 6684 do { 6685 min_vruntime_copy = cfs_rq->min_vruntime_copy; 6686 smp_rmb(); 6687 min_vruntime = cfs_rq->min_vruntime; 6688 } while (min_vruntime != min_vruntime_copy); 6689 #else 6690 min_vruntime = cfs_rq->min_vruntime; 6691 #endif 6692 6693 se->vruntime -= min_vruntime; 6694 } 6695 6696 if (p->on_rq == TASK_ON_RQ_MIGRATING) { 6697 /* 6698 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' 6699 * rq->lock and can modify state directly. 6700 */ 6701 lockdep_assert_held(&task_rq(p)->lock); 6702 detach_entity_cfs_rq(&p->se); 6703 6704 } else { 6705 /* 6706 * We are supposed to update the task to "current" time, then 6707 * its up to date and ready to go to new CPU/cfs_rq. But we 6708 * have difficulty in getting what current time is, so simply 6709 * throw away the out-of-date time. This will result in the 6710 * wakee task is less decayed, but giving the wakee more load 6711 * sounds not bad. 
6712 */ 6713 remove_entity_load_avg(&p->se); 6714 } 6715 6716 /* Tell new CPU we are migrated */ 6717 p->se.avg.last_update_time = 0; 6718 6719 /* We have migrated, no longer consider this task hot */ 6720 p->se.exec_start = 0; 6721 6722 update_scan_period(p, new_cpu); 6723 } 6724 6725 static void task_dead_fair(struct task_struct *p) 6726 { 6727 remove_entity_load_avg(&p->se); 6728 } 6729 #endif /* CONFIG_SMP */ 6730 6731 static unsigned long wakeup_gran(struct sched_entity *se) 6732 { 6733 unsigned long gran = sysctl_sched_wakeup_granularity; 6734 6735 /* 6736 * Since it's curr running now, convert the gran from real-time 6737 * to virtual-time in its units. 6738 * 6739 * By using 'se' instead of 'curr' we penalize light tasks, so 6740 * they get preempted easier. That is, if 'se' < 'curr' then 6741 * the resulting gran will be larger, therefore penalizing the 6742 * lighter, if otoh 'se' > 'curr' then the resulting gran will 6743 * be smaller, again penalizing the lighter task. 6744 * 6745 * This is especially important for buddies when the leftmost 6746 * task is higher priority than the buddy. 6747 */ 6748 return calc_delta_fair(gran, se); 6749 } 6750 6751 /* 6752 * Should 'se' preempt 'curr'. 6753 * 6754 * |s1 6755 * |s2 6756 * |s3 6757 * g 6758 * |<--->|c 6759 * 6760 * w(c, s1) = -1 6761 * w(c, s2) = 0 6762 * w(c, s3) = 1 6763 * 6764 */ 6765 static int 6766 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) 6767 { 6768 s64 gran, vdiff = curr->vruntime - se->vruntime; 6769 6770 if (vdiff <= 0) 6771 return -1; 6772 6773 gran = wakeup_gran(se); 6774 if (vdiff > gran) 6775 return 1; 6776 6777 return 0; 6778 } 6779 6780 static void set_last_buddy(struct sched_entity *se) 6781 { 6782 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) 6783 return; 6784 6785 for_each_sched_entity(se) { 6786 if (SCHED_WARN_ON(!se->on_rq)) 6787 return; 6788 cfs_rq_of(se)->last = se; 6789 } 6790 } 6791 6792 static void set_next_buddy(struct sched_entity *se) 6793 { 6794 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) 6795 return; 6796 6797 for_each_sched_entity(se) { 6798 if (SCHED_WARN_ON(!se->on_rq)) 6799 return; 6800 cfs_rq_of(se)->next = se; 6801 } 6802 } 6803 6804 static void set_skip_buddy(struct sched_entity *se) 6805 { 6806 for_each_sched_entity(se) 6807 cfs_rq_of(se)->skip = se; 6808 } 6809 6810 /* 6811 * Preempt the current task with a newly woken task if needed: 6812 */ 6813 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 6814 { 6815 struct task_struct *curr = rq->curr; 6816 struct sched_entity *se = &curr->se, *pse = &p->se; 6817 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 6818 int scale = cfs_rq->nr_running >= sched_nr_latency; 6819 int next_buddy_marked = 0; 6820 6821 if (unlikely(se == pse)) 6822 return; 6823 6824 /* 6825 * This is possible from callers such as attach_tasks(), in which we 6826 * unconditionally check_preempt_curr() after an enqueue (which may have 6827 * led to a throttle). This both saves work and prevents false 6828 * next-buddy nomination below. 6829 */ 6830 if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) 6831 return; 6832 6833 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { 6834 set_next_buddy(pse); 6835 next_buddy_marked = 1; 6836 } 6837 6838 /* 6839 * We can come here with TIF_NEED_RESCHED already set from new task 6840 * wake up path. 6841 * 6842 * Note: this also catches the edge-case of curr being in a throttled 6843 * group (e.g.
via set_curr_task), since update_curr() (in the 6844 * enqueue of curr) will have resulted in resched being set. This 6845 * prevents us from potentially nominating it as a false LAST_BUDDY 6846 * below. 6847 */ 6848 if (test_tsk_need_resched(curr)) 6849 return; 6850 6851 /* Idle tasks are by definition preempted by non-idle tasks. */ 6852 if (unlikely(task_has_idle_policy(curr)) && 6853 likely(!task_has_idle_policy(p))) 6854 goto preempt; 6855 6856 /* 6857 * Batch and idle tasks do not preempt non-idle tasks (their preemption 6858 * is driven by the tick): 6859 */ 6860 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) 6861 return; 6862 6863 find_matching_se(&se, &pse); 6864 update_curr(cfs_rq_of(se)); 6865 BUG_ON(!pse); 6866 if (wakeup_preempt_entity(se, pse) == 1) { 6867 /* 6868 * Bias pick_next to pick the sched entity that is 6869 * triggering this preemption. 6870 */ 6871 if (!next_buddy_marked) 6872 set_next_buddy(pse); 6873 goto preempt; 6874 } 6875 6876 return; 6877 6878 preempt: 6879 resched_curr(rq); 6880 /* 6881 * Only set the backward buddy when the current task is still 6882 * on the rq. This can happen when a wakeup gets interleaved 6883 * with schedule on the ->pre_schedule() or idle_balance() 6884 * point, either of which can * drop the rq lock. 6885 * 6886 * Also, during early boot the idle thread is in the fair class, 6887 * for obvious reasons its a bad idea to schedule back to it. 6888 */ 6889 if (unlikely(!se->on_rq || curr == rq->idle)) 6890 return; 6891 6892 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) 6893 set_last_buddy(se); 6894 } 6895 6896 static struct task_struct * 6897 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6898 { 6899 struct cfs_rq *cfs_rq = &rq->cfs; 6900 struct sched_entity *se; 6901 struct task_struct *p; 6902 int new_tasks; 6903 6904 again: 6905 if (!cfs_rq->nr_running) 6906 goto idle; 6907 6908 #ifdef CONFIG_FAIR_GROUP_SCHED 6909 if (prev->sched_class != &fair_sched_class) 6910 goto simple; 6911 6912 /* 6913 * Because of the set_next_buddy() in dequeue_task_fair() it is rather 6914 * likely that a next task is from the same cgroup as the current. 6915 * 6916 * Therefore attempt to avoid putting and setting the entire cgroup 6917 * hierarchy, only change the part that actually changes. 6918 */ 6919 6920 do { 6921 struct sched_entity *curr = cfs_rq->curr; 6922 6923 /* 6924 * Since we got here without doing put_prev_entity() we also 6925 * have to consider cfs_rq->curr. If it is still a runnable 6926 * entity, update_curr() will update its vruntime, otherwise 6927 * forget we've ever seen it. 6928 */ 6929 if (curr) { 6930 if (curr->on_rq) 6931 update_curr(cfs_rq); 6932 else 6933 curr = NULL; 6934 6935 /* 6936 * This call to check_cfs_rq_runtime() will do the 6937 * throttle and dequeue its entity in the parent(s). 6938 * Therefore the nr_running test will indeed 6939 * be correct. 6940 */ 6941 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { 6942 cfs_rq = &rq->cfs; 6943 6944 if (!cfs_rq->nr_running) 6945 goto idle; 6946 6947 goto simple; 6948 } 6949 } 6950 6951 se = pick_next_entity(cfs_rq, curr); 6952 cfs_rq = group_cfs_rq(se); 6953 } while (cfs_rq); 6954 6955 p = task_of(se); 6956 6957 /* 6958 * Since we haven't yet done put_prev_entity and if the selected task 6959 * is a different task than we started out with, try and touch the 6960 * least amount of cfs_rqs. 
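 *
 * The loop below walks prev's and the picked task's entity hierarchies
 * upwards in lock-step, putting prev's entities and setting the new ones
 * level by level, and stops at the first cfs_rq the two have in common.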
6961 */ 6962 if (prev != p) { 6963 struct sched_entity *pse = &prev->se; 6964 6965 while (!(cfs_rq = is_same_group(se, pse))) { 6966 int se_depth = se->depth; 6967 int pse_depth = pse->depth; 6968 6969 if (se_depth <= pse_depth) { 6970 put_prev_entity(cfs_rq_of(pse), pse); 6971 pse = parent_entity(pse); 6972 } 6973 if (se_depth >= pse_depth) { 6974 set_next_entity(cfs_rq_of(se), se); 6975 se = parent_entity(se); 6976 } 6977 } 6978 6979 put_prev_entity(cfs_rq, pse); 6980 set_next_entity(cfs_rq, se); 6981 } 6982 6983 goto done; 6984 simple: 6985 #endif 6986 6987 put_prev_task(rq, prev); 6988 6989 do { 6990 se = pick_next_entity(cfs_rq, NULL); 6991 set_next_entity(cfs_rq, se); 6992 cfs_rq = group_cfs_rq(se); 6993 } while (cfs_rq); 6994 6995 p = task_of(se); 6996 6997 done: __maybe_unused; 6998 #ifdef CONFIG_SMP 6999 /* 7000 * Move the next running task to the front of 7001 * the list, so our cfs_tasks list becomes MRU 7002 * one. 7003 */ 7004 list_move(&p->se.group_node, &rq->cfs_tasks); 7005 #endif 7006 7007 if (hrtick_enabled(rq)) 7008 hrtick_start_fair(rq, p); 7009 7010 update_misfit_status(p, rq); 7011 7012 return p; 7013 7014 idle: 7015 update_misfit_status(NULL, rq); 7016 new_tasks = idle_balance(rq, rf); 7017 7018 /* 7019 * Because idle_balance() releases (and re-acquires) rq->lock, it is 7020 * possible for any higher priority task to appear. In that case we 7021 * must re-start the pick_next_entity() loop. 7022 */ 7023 if (new_tasks < 0) 7024 return RETRY_TASK; 7025 7026 if (new_tasks > 0) 7027 goto again; 7028 7029 return NULL; 7030 } 7031 7032 /* 7033 * Account for a descheduled task: 7034 */ 7035 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) 7036 { 7037 struct sched_entity *se = &prev->se; 7038 struct cfs_rq *cfs_rq; 7039 7040 for_each_sched_entity(se) { 7041 cfs_rq = cfs_rq_of(se); 7042 put_prev_entity(cfs_rq, se); 7043 } 7044 } 7045 7046 /* 7047 * sched_yield() is very simple 7048 * 7049 * The magic of dealing with the ->skip buddy is in pick_next_entity. 7050 */ 7051 static void yield_task_fair(struct rq *rq) 7052 { 7053 struct task_struct *curr = rq->curr; 7054 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 7055 struct sched_entity *se = &curr->se; 7056 7057 /* 7058 * Are we the only task in the tree? 7059 */ 7060 if (unlikely(rq->nr_running == 1)) 7061 return; 7062 7063 clear_buddies(cfs_rq, se); 7064 7065 if (curr->policy != SCHED_BATCH) { 7066 update_rq_clock(rq); 7067 /* 7068 * Update run-time statistics of the 'current'. 7069 */ 7070 update_curr(cfs_rq); 7071 /* 7072 * Tell update_rq_clock() that we've just updated, 7073 * so we don't do microscopic update in schedule() 7074 * and double the fastpath cost. 7075 */ 7076 rq_clock_skip_update(rq); 7077 } 7078 7079 set_skip_buddy(se); 7080 } 7081 7082 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) 7083 { 7084 struct sched_entity *se = &p->se; 7085 7086 /* throttled hierarchies are not runnable */ 7087 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) 7088 return false; 7089 7090 /* Tell the scheduler that we'd really like pse to run next. */ 7091 set_next_buddy(se); 7092 7093 yield_task_fair(rq); 7094 7095 return true; 7096 } 7097 7098 #ifdef CONFIG_SMP 7099 /************************************************** 7100 * Fair scheduling class load-balancing methods. 
7101 * 7102 * BASICS 7103 * 7104 * The purpose of load-balancing is to achieve the same basic fairness the 7105 * per-CPU scheduler provides, namely provide a proportional amount of compute 7106 * time to each task. This is expressed in the following equation: 7107 * 7108 * W_i,n/P_i == W_j,n/P_j for all i,j (1) 7109 * 7110 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight 7111 * W_i,0 is defined as: 7112 * 7113 * W_i,0 = \Sum_j w_i,j (2) 7114 * 7115 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight 7116 * is derived from the nice value as per sched_prio_to_weight[]. 7117 * 7118 * The weight average is an exponential decay average of the instantaneous 7119 * weight: 7120 * 7121 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) 7122 * 7123 * C_i is the compute capacity of CPU i, typically it is the 7124 * fraction of 'recent' time available for SCHED_OTHER task execution. But it 7125 * can also include other factors [XXX]. 7126 * 7127 * To achieve this balance we define a measure of imbalance which follows 7128 * directly from (1): 7129 * 7130 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) 7131 * 7132 * We them move tasks around to minimize the imbalance. In the continuous 7133 * function space it is obvious this converges, in the discrete case we get 7134 * a few fun cases generally called infeasible weight scenarios. 7135 * 7136 * [XXX expand on: 7137 * - infeasible weights; 7138 * - local vs global optima in the discrete case. ] 7139 * 7140 * 7141 * SCHED DOMAINS 7142 * 7143 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) 7144 * for all i,j solution, we create a tree of CPUs that follows the hardware 7145 * topology where each level pairs two lower groups (or better). This results 7146 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the 7147 * tree to only the first of the previous level and we decrease the frequency 7148 * of load-balance at each level inv. proportional to the number of CPUs in 7149 * the groups. 7150 * 7151 * This yields: 7152 * 7153 * log_2 n 1 n 7154 * \Sum { --- * --- * 2^i } = O(n) (5) 7155 * i = 0 2^i 2^i 7156 * `- size of each group 7157 * | | `- number of CPUs doing load-balance 7158 * | `- freq 7159 * `- sum over all levels 7160 * 7161 * Coupled with a limit on how many tasks we can migrate every balance pass, 7162 * this makes (5) the runtime complexity of the balancer. 7163 * 7164 * An important property here is that each CPU is still (indirectly) connected 7165 * to every other CPU in at most O(log n) steps: 7166 * 7167 * The adjacency matrix of the resulting graph is given by: 7168 * 7169 * log_2 n 7170 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6) 7171 * k = 0 7172 * 7173 * And you'll find that: 7174 * 7175 * A^(log_2 n)_i,j != 0 for all i,j (7) 7176 * 7177 * Showing there's indeed a path between every CPU in at most O(log n) steps. 7178 * The task movement gives a factor of O(m), giving a convergence complexity 7179 * of: 7180 * 7181 * O(nm log n), n := nr_cpus, m := nr_tasks (8) 7182 * 7183 * 7184 * WORK CONSERVING 7185 * 7186 * In order to avoid CPUs going idle while there's still work to do, new idle 7187 * balancing is more aggressive and has the newly idle CPU iterate up the domain 7188 * tree itself instead of relying on other CPUs to bring it work. 7189 * 7190 * This adds some complexity to both (5) and (8) but it reduces the total idle 7191 * time. 7192 * 7193 * [XXX more?] 
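 *
 * As a concrete instance of (5): with n = 8 the per-level terms are
 * 8 + 4 + 2 + 1, bounded by 2n, which is how the sum stays O(n) even
 * though there are log_2 n levels.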
7194 * 7195 * 7196 * CGROUPS 7197 * 7198 * Cgroups make a horror show out of (2), instead of a simple sum we get: 7199 * 7200 * s_k,i 7201 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9) 7202 * S_k 7203 * 7204 * Where 7205 * 7206 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) 7207 * 7208 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i. 7209 * 7210 * The big problem is S_k, its a global sum needed to compute a local (W_i) 7211 * property. 7212 * 7213 * [XXX write more on how we solve this.. _after_ merging pjt's patches that 7214 * rewrite all of this once again.] 7215 */ 7216 7217 static unsigned long __read_mostly max_load_balance_interval = HZ/10; 7218 7219 enum fbq_type { regular, remote, all }; 7220 7221 enum group_type { 7222 group_other = 0, 7223 group_misfit_task, 7224 group_imbalanced, 7225 group_overloaded, 7226 }; 7227 7228 #define LBF_ALL_PINNED 0x01 7229 #define LBF_NEED_BREAK 0x02 7230 #define LBF_DST_PINNED 0x04 7231 #define LBF_SOME_PINNED 0x08 7232 #define LBF_NOHZ_STATS 0x10 7233 #define LBF_NOHZ_AGAIN 0x20 7234 7235 struct lb_env { 7236 struct sched_domain *sd; 7237 7238 struct rq *src_rq; 7239 int src_cpu; 7240 7241 int dst_cpu; 7242 struct rq *dst_rq; 7243 7244 struct cpumask *dst_grpmask; 7245 int new_dst_cpu; 7246 enum cpu_idle_type idle; 7247 long imbalance; 7248 /* The set of CPUs under consideration for load-balancing */ 7249 struct cpumask *cpus; 7250 7251 unsigned int flags; 7252 7253 unsigned int loop; 7254 unsigned int loop_break; 7255 unsigned int loop_max; 7256 7257 enum fbq_type fbq_type; 7258 enum group_type src_grp_type; 7259 struct list_head tasks; 7260 }; 7261 7262 /* 7263 * Is this task likely cache-hot: 7264 */ 7265 static int task_hot(struct task_struct *p, struct lb_env *env) 7266 { 7267 s64 delta; 7268 7269 lockdep_assert_held(&env->src_rq->lock); 7270 7271 if (p->sched_class != &fair_sched_class) 7272 return 0; 7273 7274 if (unlikely(task_has_idle_policy(p))) 7275 return 0; 7276 7277 /* 7278 * Buddy candidates are cache hot: 7279 */ 7280 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && 7281 (&p->se == cfs_rq_of(&p->se)->next || 7282 &p->se == cfs_rq_of(&p->se)->last)) 7283 return 1; 7284 7285 if (sysctl_sched_migration_cost == -1) 7286 return 1; 7287 if (sysctl_sched_migration_cost == 0) 7288 return 0; 7289 7290 delta = rq_clock_task(env->src_rq) - p->se.exec_start; 7291 7292 return delta < (s64)sysctl_sched_migration_cost; 7293 } 7294 7295 #ifdef CONFIG_NUMA_BALANCING 7296 /* 7297 * Returns 1, if task migration degrades locality 7298 * Returns 0, if task migration improves locality i.e migration preferred. 7299 * Returns -1, if task migration is not affected by locality. 7300 */ 7301 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) 7302 { 7303 struct numa_group *numa_group = rcu_dereference(p->numa_group); 7304 unsigned long src_weight, dst_weight; 7305 int src_nid, dst_nid, dist; 7306 7307 if (!static_branch_likely(&sched_numa_balancing)) 7308 return -1; 7309 7310 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) 7311 return -1; 7312 7313 src_nid = cpu_to_node(env->src_cpu); 7314 dst_nid = cpu_to_node(env->dst_cpu); 7315 7316 if (src_nid == dst_nid) 7317 return -1; 7318 7319 /* Migrating away from the preferred node is always bad. */ 7320 if (src_nid == p->numa_preferred_nid) { 7321 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) 7322 return 1; 7323 else 7324 return -1; 7325 } 7326 7327 /* Encourage migration to the preferred node. 
*/ 7328 if (dst_nid == p->numa_preferred_nid) 7329 return 0; 7330 7331 /* Leaving a core idle is often worse than degrading locality. */ 7332 if (env->idle == CPU_IDLE) 7333 return -1; 7334 7335 dist = node_distance(src_nid, dst_nid); 7336 if (numa_group) { 7337 src_weight = group_weight(p, src_nid, dist); 7338 dst_weight = group_weight(p, dst_nid, dist); 7339 } else { 7340 src_weight = task_weight(p, src_nid, dist); 7341 dst_weight = task_weight(p, dst_nid, dist); 7342 } 7343 7344 return dst_weight < src_weight; 7345 } 7346 7347 #else 7348 static inline int migrate_degrades_locality(struct task_struct *p, 7349 struct lb_env *env) 7350 { 7351 return -1; 7352 } 7353 #endif 7354 7355 /* 7356 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 7357 */ 7358 static 7359 int can_migrate_task(struct task_struct *p, struct lb_env *env) 7360 { 7361 int tsk_cache_hot; 7362 7363 lockdep_assert_held(&env->src_rq->lock); 7364 7365 /* 7366 * We do not migrate tasks that are: 7367 * 1) throttled_lb_pair, or 7368 * 2) cannot be migrated to this CPU due to cpus_allowed, or 7369 * 3) running (obviously), or 7370 * 4) are cache-hot on their current CPU. 7371 */ 7372 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) 7373 return 0; 7374 7375 if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { 7376 int cpu; 7377 7378 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); 7379 7380 env->flags |= LBF_SOME_PINNED; 7381 7382 /* 7383 * Remember if this task can be migrated to any other CPU in 7384 * our sched_group. We may want to revisit it if we couldn't 7385 * meet load balance goals by pulling other tasks on src_cpu. 7386 * 7387 * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have 7388 * already computed one in current iteration. 7389 */ 7390 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) 7391 return 0; 7392 7393 /* Prevent to re-select dst_cpu via env's CPUs: */ 7394 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { 7395 if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { 7396 env->flags |= LBF_DST_PINNED; 7397 env->new_dst_cpu = cpu; 7398 break; 7399 } 7400 } 7401 7402 return 0; 7403 } 7404 7405 /* Record that we found atleast one task that could run on dst_cpu */ 7406 env->flags &= ~LBF_ALL_PINNED; 7407 7408 if (task_running(env->src_rq, p)) { 7409 schedstat_inc(p->se.statistics.nr_failed_migrations_running); 7410 return 0; 7411 } 7412 7413 /* 7414 * Aggressive migration if: 7415 * 1) destination numa is preferred 7416 * 2) task is cache cold, or 7417 * 3) too many balance attempts have failed. 
7418 */ 7419 tsk_cache_hot = migrate_degrades_locality(p, env); 7420 if (tsk_cache_hot == -1) 7421 tsk_cache_hot = task_hot(p, env); 7422 7423 if (tsk_cache_hot <= 0 || 7424 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { 7425 if (tsk_cache_hot == 1) { 7426 schedstat_inc(env->sd->lb_hot_gained[env->idle]); 7427 schedstat_inc(p->se.statistics.nr_forced_migrations); 7428 } 7429 return 1; 7430 } 7431 7432 schedstat_inc(p->se.statistics.nr_failed_migrations_hot); 7433 return 0; 7434 } 7435 7436 /* 7437 * detach_task() -- detach the task for the migration specified in env 7438 */ 7439 static void detach_task(struct task_struct *p, struct lb_env *env) 7440 { 7441 lockdep_assert_held(&env->src_rq->lock); 7442 7443 p->on_rq = TASK_ON_RQ_MIGRATING; 7444 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); 7445 set_task_cpu(p, env->dst_cpu); 7446 } 7447 7448 /* 7449 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as 7450 * part of active balancing operations within "domain". 7451 * 7452 * Returns a task if successful and NULL otherwise. 7453 */ 7454 static struct task_struct *detach_one_task(struct lb_env *env) 7455 { 7456 struct task_struct *p; 7457 7458 lockdep_assert_held(&env->src_rq->lock); 7459 7460 list_for_each_entry_reverse(p, 7461 &env->src_rq->cfs_tasks, se.group_node) { 7462 if (!can_migrate_task(p, env)) 7463 continue; 7464 7465 detach_task(p, env); 7466 7467 /* 7468 * Right now, this is only the second place where 7469 * lb_gained[env->idle] is updated (other is detach_tasks) 7470 * so we can safely collect stats here rather than 7471 * inside detach_tasks(). 7472 */ 7473 schedstat_inc(env->sd->lb_gained[env->idle]); 7474 return p; 7475 } 7476 return NULL; 7477 } 7478 7479 static const unsigned int sched_nr_migrate_break = 32; 7480 7481 /* 7482 * detach_tasks() -- tries to detach up to imbalance weighted load from 7483 * busiest_rq, as part of a balancing operation within domain "sd". 7484 * 7485 * Returns number of detached tasks if successful and 0 otherwise. 7486 */ 7487 static int detach_tasks(struct lb_env *env) 7488 { 7489 struct list_head *tasks = &env->src_rq->cfs_tasks; 7490 struct task_struct *p; 7491 unsigned long load; 7492 int detached = 0; 7493 7494 lockdep_assert_held(&env->src_rq->lock); 7495 7496 if (env->imbalance <= 0) 7497 return 0; 7498 7499 while (!list_empty(tasks)) { 7500 /* 7501 * We don't want to steal all, otherwise we may be treated likewise, 7502 * which could at worst lead to a livelock crash. 
7503 */ 7504 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) 7505 break; 7506 7507 p = list_last_entry(tasks, struct task_struct, se.group_node); 7508 7509 env->loop++; 7510 /* We've more or less seen every task there is, call it quits */ 7511 if (env->loop > env->loop_max) 7512 break; 7513 7514 /* take a breather every nr_migrate tasks */ 7515 if (env->loop > env->loop_break) { 7516 env->loop_break += sched_nr_migrate_break; 7517 env->flags |= LBF_NEED_BREAK; 7518 break; 7519 } 7520 7521 if (!can_migrate_task(p, env)) 7522 goto next; 7523 7524 load = task_h_load(p); 7525 7526 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) 7527 goto next; 7528 7529 if ((load / 2) > env->imbalance) 7530 goto next; 7531 7532 detach_task(p, env); 7533 list_add(&p->se.group_node, &env->tasks); 7534 7535 detached++; 7536 env->imbalance -= load; 7537 7538 #ifdef CONFIG_PREEMPT 7539 /* 7540 * NEWIDLE balancing is a source of latency, so preemptible 7541 * kernels will stop after the first task is detached to minimize 7542 * the critical section. 7543 */ 7544 if (env->idle == CPU_NEWLY_IDLE) 7545 break; 7546 #endif 7547 7548 /* 7549 * We only want to steal up to the prescribed amount of 7550 * weighted load. 7551 */ 7552 if (env->imbalance <= 0) 7553 break; 7554 7555 continue; 7556 next: 7557 list_move(&p->se.group_node, tasks); 7558 } 7559 7560 /* 7561 * Right now, this is one of only two places we collect this stat 7562 * so we can safely collect detach_one_task() stats here rather 7563 * than inside detach_one_task(). 7564 */ 7565 schedstat_add(env->sd->lb_gained[env->idle], detached); 7566 7567 return detached; 7568 } 7569 7570 /* 7571 * attach_task() -- attach the task detached by detach_task() to its new rq. 7572 */ 7573 static void attach_task(struct rq *rq, struct task_struct *p) 7574 { 7575 lockdep_assert_held(&rq->lock); 7576 7577 BUG_ON(task_rq(p) != rq); 7578 activate_task(rq, p, ENQUEUE_NOCLOCK); 7579 p->on_rq = TASK_ON_RQ_QUEUED; 7580 check_preempt_curr(rq, p, 0); 7581 } 7582 7583 /* 7584 * attach_one_task() -- attaches the task returned from detach_one_task() to 7585 * its new rq. 7586 */ 7587 static void attach_one_task(struct rq *rq, struct task_struct *p) 7588 { 7589 struct rq_flags rf; 7590 7591 rq_lock(rq, &rf); 7592 update_rq_clock(rq); 7593 attach_task(rq, p); 7594 rq_unlock(rq, &rf); 7595 } 7596 7597 /* 7598 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their 7599 * new rq. 
7600 */ 7601 static void attach_tasks(struct lb_env *env) 7602 { 7603 struct list_head *tasks = &env->tasks; 7604 struct task_struct *p; 7605 struct rq_flags rf; 7606 7607 rq_lock(env->dst_rq, &rf); 7608 update_rq_clock(env->dst_rq); 7609 7610 while (!list_empty(tasks)) { 7611 p = list_first_entry(tasks, struct task_struct, se.group_node); 7612 list_del_init(&p->se.group_node); 7613 7614 attach_task(env->dst_rq, p); 7615 } 7616 7617 rq_unlock(env->dst_rq, &rf); 7618 } 7619 7620 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) 7621 { 7622 if (cfs_rq->avg.load_avg) 7623 return true; 7624 7625 if (cfs_rq->avg.util_avg) 7626 return true; 7627 7628 return false; 7629 } 7630 7631 static inline bool others_have_blocked(struct rq *rq) 7632 { 7633 if (READ_ONCE(rq->avg_rt.util_avg)) 7634 return true; 7635 7636 if (READ_ONCE(rq->avg_dl.util_avg)) 7637 return true; 7638 7639 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 7640 if (READ_ONCE(rq->avg_irq.util_avg)) 7641 return true; 7642 #endif 7643 7644 return false; 7645 } 7646 7647 #ifdef CONFIG_FAIR_GROUP_SCHED 7648 7649 static void update_blocked_averages(int cpu) 7650 { 7651 struct rq *rq = cpu_rq(cpu); 7652 struct cfs_rq *cfs_rq; 7653 const struct sched_class *curr_class; 7654 struct rq_flags rf; 7655 bool done = true; 7656 7657 rq_lock_irqsave(rq, &rf); 7658 update_rq_clock(rq); 7659 7660 /* 7661 * Iterates the task_group tree in a bottom up fashion, see 7662 * list_add_leaf_cfs_rq() for details. 7663 */ 7664 for_each_leaf_cfs_rq(rq, cfs_rq) { 7665 struct sched_entity *se; 7666 7667 /* throttled entities do not contribute to load */ 7668 if (throttled_hierarchy(cfs_rq)) 7669 continue; 7670 7671 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq)) 7672 update_tg_load_avg(cfs_rq, 0); 7673 7674 /* Propagate pending load changes to the parent, if any: */ 7675 se = cfs_rq->tg->se[cpu]; 7676 if (se && !skip_blocked_update(se)) 7677 update_load_avg(cfs_rq_of(se), se, 0); 7678 7679 /* Don't need periodic decay once load/util_avg are null */ 7680 if (cfs_rq_has_blocked(cfs_rq)) 7681 done = false; 7682 } 7683 7684 curr_class = rq->curr->sched_class; 7685 update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class); 7686 update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class); 7687 update_irq_load_avg(rq, 0); 7688 /* Don't need periodic decay once load/util_avg are null */ 7689 if (others_have_blocked(rq)) 7690 done = false; 7691 7692 #ifdef CONFIG_NO_HZ_COMMON 7693 rq->last_blocked_load_update_tick = jiffies; 7694 if (done) 7695 rq->has_blocked_load = 0; 7696 #endif 7697 rq_unlock_irqrestore(rq, &rf); 7698 } 7699 7700 /* 7701 * Compute the hierarchical load factor for cfs_rq and all its ascendants. 7702 * This needs to be done in a top-down fashion because the load of a child 7703 * group is a fraction of its parents load. 
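 *
 * Illustrative example (made-up numbers): if a group entity contributes
 * half of its parent cfs_rq's load, and that parent entity in turn
 * contributes a quarter of the root cfs_rq's load, the child cfs_rq's
 * h_load works out to roughly 1/8 of the root's load; task_h_load()
 * then scales a task's load_avg by that same fraction.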
7704 */ 7705 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) 7706 { 7707 struct rq *rq = rq_of(cfs_rq); 7708 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; 7709 unsigned long now = jiffies; 7710 unsigned long load; 7711 7712 if (cfs_rq->last_h_load_update == now) 7713 return; 7714 7715 cfs_rq->h_load_next = NULL; 7716 for_each_sched_entity(se) { 7717 cfs_rq = cfs_rq_of(se); 7718 cfs_rq->h_load_next = se; 7719 if (cfs_rq->last_h_load_update == now) 7720 break; 7721 } 7722 7723 if (!se) { 7724 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); 7725 cfs_rq->last_h_load_update = now; 7726 } 7727 7728 while ((se = cfs_rq->h_load_next) != NULL) { 7729 load = cfs_rq->h_load; 7730 load = div64_ul(load * se->avg.load_avg, 7731 cfs_rq_load_avg(cfs_rq) + 1); 7732 cfs_rq = group_cfs_rq(se); 7733 cfs_rq->h_load = load; 7734 cfs_rq->last_h_load_update = now; 7735 } 7736 } 7737 7738 static unsigned long task_h_load(struct task_struct *p) 7739 { 7740 struct cfs_rq *cfs_rq = task_cfs_rq(p); 7741 7742 update_cfs_rq_h_load(cfs_rq); 7743 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, 7744 cfs_rq_load_avg(cfs_rq) + 1); 7745 } 7746 #else 7747 static inline void update_blocked_averages(int cpu) 7748 { 7749 struct rq *rq = cpu_rq(cpu); 7750 struct cfs_rq *cfs_rq = &rq->cfs; 7751 const struct sched_class *curr_class; 7752 struct rq_flags rf; 7753 7754 rq_lock_irqsave(rq, &rf); 7755 update_rq_clock(rq); 7756 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); 7757 7758 curr_class = rq->curr->sched_class; 7759 update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class); 7760 update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class); 7761 update_irq_load_avg(rq, 0); 7762 #ifdef CONFIG_NO_HZ_COMMON 7763 rq->last_blocked_load_update_tick = jiffies; 7764 if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq)) 7765 rq->has_blocked_load = 0; 7766 #endif 7767 rq_unlock_irqrestore(rq, &rf); 7768 } 7769 7770 static unsigned long task_h_load(struct task_struct *p) 7771 { 7772 return p->se.avg.load_avg; 7773 } 7774 #endif 7775 7776 /********** Helpers for find_busiest_group ************************/ 7777 7778 /* 7779 * sg_lb_stats - stats of a sched_group required for load_balancing 7780 */ 7781 struct sg_lb_stats { 7782 unsigned long avg_load; /*Avg load across the CPUs of the group */ 7783 unsigned long group_load; /* Total load over the CPUs of the group */ 7784 unsigned long sum_weighted_load; /* Weighted load of group's tasks */ 7785 unsigned long load_per_task; 7786 unsigned long group_capacity; 7787 unsigned long group_util; /* Total utilization of the group */ 7788 unsigned int sum_nr_running; /* Nr tasks running in the group */ 7789 unsigned int idle_cpus; 7790 unsigned int group_weight; 7791 enum group_type group_type; 7792 int group_no_capacity; 7793 unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */ 7794 #ifdef CONFIG_NUMA_BALANCING 7795 unsigned int nr_numa_running; 7796 unsigned int nr_preferred_running; 7797 #endif 7798 }; 7799 7800 /* 7801 * sd_lb_stats - Structure to store the statistics of a sched_domain 7802 * during load balancing. 
7803 */ 7804 struct sd_lb_stats { 7805 struct sched_group *busiest; /* Busiest group in this sd */ 7806 struct sched_group *local; /* Local group in this sd */ 7807 unsigned long total_running; 7808 unsigned long total_load; /* Total load of all groups in sd */ 7809 unsigned long total_capacity; /* Total capacity of all groups in sd */ 7810 unsigned long avg_load; /* Average load across all groups in sd */ 7811 7812 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ 7813 struct sg_lb_stats local_stat; /* Statistics of the local group */ 7814 }; 7815 7816 static inline void init_sd_lb_stats(struct sd_lb_stats *sds) 7817 { 7818 /* 7819 * Skimp on the clearing to avoid duplicate work. We can avoid clearing 7820 * local_stat because update_sg_lb_stats() does a full clear/assignment. 7821 * We must however clear busiest_stat::avg_load because 7822 * update_sd_pick_busiest() reads this before assignment. 7823 */ 7824 *sds = (struct sd_lb_stats){ 7825 .busiest = NULL, 7826 .local = NULL, 7827 .total_running = 0UL, 7828 .total_load = 0UL, 7829 .total_capacity = 0UL, 7830 .busiest_stat = { 7831 .avg_load = 0UL, 7832 .sum_nr_running = 0, 7833 .group_type = group_other, 7834 }, 7835 }; 7836 } 7837 7838 /** 7839 * get_sd_load_idx - Obtain the load index for a given sched domain. 7840 * @sd: The sched_domain whose load_idx is to be obtained. 7841 * @idle: The idle status of the CPU for whose sd load_idx is obtained. 7842 * 7843 * Return: The load index. 7844 */ 7845 static inline int get_sd_load_idx(struct sched_domain *sd, 7846 enum cpu_idle_type idle) 7847 { 7848 int load_idx; 7849 7850 switch (idle) { 7851 case CPU_NOT_IDLE: 7852 load_idx = sd->busy_idx; 7853 break; 7854 7855 case CPU_NEWLY_IDLE: 7856 load_idx = sd->newidle_idx; 7857 break; 7858 default: 7859 load_idx = sd->idle_idx; 7860 break; 7861 } 7862 7863 return load_idx; 7864 } 7865 7866 static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu) 7867 { 7868 struct rq *rq = cpu_rq(cpu); 7869 unsigned long max = arch_scale_cpu_capacity(sd, cpu); 7870 unsigned long used, free; 7871 unsigned long irq; 7872 7873 irq = cpu_util_irq(rq); 7874 7875 if (unlikely(irq >= max)) 7876 return 1; 7877 7878 used = READ_ONCE(rq->avg_rt.util_avg); 7879 used += READ_ONCE(rq->avg_dl.util_avg); 7880 7881 if (unlikely(used >= max)) 7882 return 1; 7883 7884 free = max - used; 7885 7886 return scale_irq_capacity(free, irq, max); 7887 } 7888 7889 static void update_cpu_capacity(struct sched_domain *sd, int cpu) 7890 { 7891 unsigned long capacity = scale_rt_capacity(sd, cpu); 7892 struct sched_group *sdg = sd->groups; 7893 7894 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu); 7895 7896 if (!capacity) 7897 capacity = 1; 7898 7899 cpu_rq(cpu)->cpu_capacity = capacity; 7900 sdg->sgc->capacity = capacity; 7901 sdg->sgc->min_capacity = capacity; 7902 sdg->sgc->max_capacity = capacity; 7903 } 7904 7905 void update_group_capacity(struct sched_domain *sd, int cpu) 7906 { 7907 struct sched_domain *child = sd->child; 7908 struct sched_group *group, *sdg = sd->groups; 7909 unsigned long capacity, min_capacity, max_capacity; 7910 unsigned long interval; 7911 7912 interval = msecs_to_jiffies(sd->balance_interval); 7913 interval = clamp(interval, 1UL, max_load_balance_interval); 7914 sdg->sgc->next_update = jiffies + interval; 7915 7916 if (!child) { 7917 update_cpu_capacity(sd, cpu); 7918 return; 7919 } 7920 7921 capacity = 0; 7922 min_capacity = ULONG_MAX; 7923 max_capacity = 0; 7924 7925 if (child->flags & SD_OVERLAP) { 7926 /* 
* SD_OVERLAP domains cannot assume that child groups 7928 * span the current group. 7929 */ 7930 7931 for_each_cpu(cpu, sched_group_span(sdg)) { 7932 struct sched_group_capacity *sgc; 7933 struct rq *rq = cpu_rq(cpu); 7934 7935 /* 7936 * build_sched_domains() -> init_sched_groups_capacity() 7937 * gets here before we've attached the domains to the 7938 * runqueues. 7939 * 7940 * Use capacity_of(), which is set irrespective of domains 7941 * in update_cpu_capacity(). 7942 * 7943 * This avoids capacity from being 0 and 7944 * causing divide-by-zero issues on boot. 7945 */ 7946 if (unlikely(!rq->sd)) { 7947 capacity += capacity_of(cpu); 7948 } else { 7949 sgc = rq->sd->groups->sgc; 7950 capacity += sgc->capacity; 7951 } 7952 7953 min_capacity = min(capacity, min_capacity); 7954 max_capacity = max(capacity, max_capacity); 7955 } 7956 } else { 7957 /* 7958 * !SD_OVERLAP domains can assume that child groups 7959 * span the current group. 7960 */ 7961 7962 group = child->groups; 7963 do { 7964 struct sched_group_capacity *sgc = group->sgc; 7965 7966 capacity += sgc->capacity; 7967 min_capacity = min(sgc->min_capacity, min_capacity); 7968 max_capacity = max(sgc->max_capacity, max_capacity); 7969 group = group->next; 7970 } while (group != child->groups); 7971 } 7972 7973 sdg->sgc->capacity = capacity; 7974 sdg->sgc->min_capacity = min_capacity; 7975 sdg->sgc->max_capacity = max_capacity; 7976 } 7977 7978 /* 7979 * Check whether the capacity of the rq has been noticeably reduced by side 7980 * activity. The imbalance_pct is used for the threshold. 7981 * Return true if the capacity is reduced 7982 */ 7983 static inline int 7984 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) 7985 { 7986 return ((rq->cpu_capacity * sd->imbalance_pct) < 7987 (rq->cpu_capacity_orig * 100)); 7988 } 7989 7990 /* 7991 * Group imbalance indicates (and tries to solve) the problem where balancing 7992 * groups is inadequate due to ->cpus_allowed constraints. 7993 * 7994 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a 7995 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. 7996 * Something like: 7997 * 7998 * { 0 1 2 3 } { 4 5 6 7 } 7999 * * * * * 8000 * 8001 * If we were to balance group-wise we'd place two tasks in the first group and 8002 * two tasks in the second group. Clearly this is undesired as it will overload 8003 * cpu 3 and leave one of the CPUs in the second group unused. 8004 * 8005 * The current solution to this issue is detecting the skew in the first group 8006 * by noticing the lower domain failed to reach balance and had difficulty 8007 * moving tasks due to affinity constraints. 8008 * 8009 * When this is so detected, this group becomes a candidate for busiest; see 8010 * update_sd_pick_busiest(). And calculate_imbalance() and 8011 * find_busiest_group() avoid some of the usual balance conditions to allow it 8012 * to create an effective group imbalance. 8013 * 8014 * This is a somewhat tricky proposition since the next run might not find the 8015 * group imbalance and decide the groups need to be balanced again. A most 8016 * subtle and fragile situation. 8017 */ 8018 8019 static inline int sg_imbalanced(struct sched_group *group) 8020 { 8021 return group->sgc->imbalance; 8022 } 8023 8024 /* 8025 * group_has_capacity returns true if the group has spare capacity that could 8026 * be used by some tasks.
8027 * We consider that a group has spare capacity if the number of tasks is 8028 * smaller than the number of CPUs or if the utilization is lower than the 8029 * available capacity for CFS tasks. 8030 * For the latter, we use a threshold to stabilize the state, to take into 8031 * account the variance of the tasks' load and to return true if the available 8032 * capacity is meaningful for the load balancer. 8033 * As an example, an available capacity of 1% can appear but it doesn't bring 8034 * any benefit for the load balance. 8035 */ 8036 static inline bool 8037 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs) 8038 { 8039 if (sgs->sum_nr_running < sgs->group_weight) 8040 return true; 8041 8042 if ((sgs->group_capacity * 100) > 8043 (sgs->group_util * env->sd->imbalance_pct)) 8044 return true; 8045 8046 return false; 8047 } 8048 8049 /* 8050 * group_is_overloaded returns true if the group has more tasks than it can 8051 * handle. 8052 * group_is_overloaded is not equal to !group_has_capacity because a group 8053 * with the exact right number of tasks has no more spare capacity but is not 8054 * overloaded, so both group_has_capacity and group_is_overloaded return 8055 * false. 8056 */ 8057 static inline bool 8058 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) 8059 { 8060 if (sgs->sum_nr_running <= sgs->group_weight) 8061 return false; 8062 8063 if ((sgs->group_capacity * 100) < 8064 (sgs->group_util * env->sd->imbalance_pct)) 8065 return true; 8066 8067 return false; 8068 } 8069 8070 /* 8071 * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller 8072 * per-CPU capacity than sched_group ref. 8073 */ 8074 static inline bool 8075 group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref) 8076 { 8077 return sg->sgc->min_capacity * capacity_margin < 8078 ref->sgc->min_capacity * 1024; 8079 } 8080 8081 /* 8082 * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller 8083 * per-CPU capacity_orig than sched_group ref. 8084 */ 8085 static inline bool 8086 group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref) 8087 { 8088 return sg->sgc->max_capacity * capacity_margin < 8089 ref->sgc->max_capacity * 1024; 8090 } 8091 8092 static inline enum 8093 group_type group_classify(struct sched_group *group, 8094 struct sg_lb_stats *sgs) 8095 { 8096 if (sgs->group_no_capacity) 8097 return group_overloaded; 8098 8099 if (sg_imbalanced(group)) 8100 return group_imbalanced; 8101 8102 if (sgs->group_misfit_task_load) 8103 return group_misfit_task; 8104 8105 return group_other; 8106 } 8107 8108 static bool update_nohz_stats(struct rq *rq, bool force) 8109 { 8110 #ifdef CONFIG_NO_HZ_COMMON 8111 unsigned int cpu = rq->cpu; 8112 8113 if (!rq->has_blocked_load) 8114 return false; 8115 8116 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) 8117 return false; 8118 8119 if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) 8120 return true; 8121 8122 update_blocked_averages(cpu); 8123 8124 return rq->has_blocked_load; 8125 #else 8126 return false; 8127 #endif 8128 } 8129 8130 /** 8131 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 8132 * @env: The load balancing environment. 8133 * @group: sched_group whose statistics are to be updated. 8134 * @sgs: variable to hold the statistics for this group.
8135 * @sg_status: Holds flag indicating the status of the sched_group 8136 */ 8137 static inline void update_sg_lb_stats(struct lb_env *env, 8138 struct sched_group *group, 8139 struct sg_lb_stats *sgs, 8140 int *sg_status) 8141 { 8142 int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group)); 8143 int load_idx = get_sd_load_idx(env->sd, env->idle); 8144 unsigned long load; 8145 int i, nr_running; 8146 8147 memset(sgs, 0, sizeof(*sgs)); 8148 8149 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 8150 struct rq *rq = cpu_rq(i); 8151 8152 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) 8153 env->flags |= LBF_NOHZ_AGAIN; 8154 8155 /* Bias balancing toward CPUs of our domain: */ 8156 if (local_group) 8157 load = target_load(i, load_idx); 8158 else 8159 load = source_load(i, load_idx); 8160 8161 sgs->group_load += load; 8162 sgs->group_util += cpu_util(i); 8163 sgs->sum_nr_running += rq->cfs.h_nr_running; 8164 8165 nr_running = rq->nr_running; 8166 if (nr_running > 1) 8167 *sg_status |= SG_OVERLOAD; 8168 8169 if (cpu_overutilized(i)) 8170 *sg_status |= SG_OVERUTILIZED; 8171 8172 #ifdef CONFIG_NUMA_BALANCING 8173 sgs->nr_numa_running += rq->nr_numa_running; 8174 sgs->nr_preferred_running += rq->nr_preferred_running; 8175 #endif 8176 sgs->sum_weighted_load += weighted_cpuload(rq); 8177 /* 8178 * No need to call idle_cpu() if nr_running is not 0 8179 */ 8180 if (!nr_running && idle_cpu(i)) 8181 sgs->idle_cpus++; 8182 8183 if (env->sd->flags & SD_ASYM_CPUCAPACITY && 8184 sgs->group_misfit_task_load < rq->misfit_task_load) { 8185 sgs->group_misfit_task_load = rq->misfit_task_load; 8186 *sg_status |= SG_OVERLOAD; 8187 } 8188 } 8189 8190 /* Adjust by relative CPU capacity of the group */ 8191 sgs->group_capacity = group->sgc->capacity; 8192 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity; 8193 8194 if (sgs->sum_nr_running) 8195 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; 8196 8197 sgs->group_weight = group->group_weight; 8198 8199 sgs->group_no_capacity = group_is_overloaded(env, sgs); 8200 sgs->group_type = group_classify(group, sgs); 8201 } 8202 8203 /** 8204 * update_sd_pick_busiest - return 1 on busiest group 8205 * @env: The load balancing environment. 8206 * @sds: sched_domain statistics 8207 * @sg: sched_group candidate to be checked for being the busiest 8208 * @sgs: sched_group statistics 8209 * 8210 * Determine if @sg is a busier group than the previously selected 8211 * busiest group. 8212 * 8213 * Return: %true if @sg is a busier group than the previously selected 8214 * busiest group. %false otherwise. 8215 */ 8216 static bool update_sd_pick_busiest(struct lb_env *env, 8217 struct sd_lb_stats *sds, 8218 struct sched_group *sg, 8219 struct sg_lb_stats *sgs) 8220 { 8221 struct sg_lb_stats *busiest = &sds->busiest_stat; 8222 8223 /* 8224 * Don't try to pull misfit tasks we can't help. 8225 * We can use max_capacity here as reduction in capacity on some 8226 * CPUs in the group should either be possible to resolve 8227 * internally or be covered by avg_load imbalance (eventually). 
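 *
 * (Rough illustration: with the default capacity_margin of 1280,
 * group_smaller_max_cpu_capacity(sg, ref) only holds when sg's
 * max_capacity is below about 80% of ref's, i.e. sg->max * 1280 <
 * ref->max * 1024.)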
8228 */ 8229 if (sgs->group_type == group_misfit_task && 8230 (!group_smaller_max_cpu_capacity(sg, sds->local) || 8231 !group_has_capacity(env, &sds->local_stat))) 8232 return false; 8233 8234 if (sgs->group_type > busiest->group_type) 8235 return true; 8236 8237 if (sgs->group_type < busiest->group_type) 8238 return false; 8239 8240 if (sgs->avg_load <= busiest->avg_load) 8241 return false; 8242 8243 if (!(env->sd->flags & SD_ASYM_CPUCAPACITY)) 8244 goto asym_packing; 8245 8246 /* 8247 * Candidate sg has no more than one task per CPU and 8248 * has higher per-CPU capacity. Migrating tasks to less 8249 * capable CPUs may harm throughput. Maximize throughput, 8250 * power/energy consequences are not considered. 8251 */ 8252 if (sgs->sum_nr_running <= sgs->group_weight && 8253 group_smaller_min_cpu_capacity(sds->local, sg)) 8254 return false; 8255 8256 /* 8257 * If we have more than one misfit sg go with the biggest misfit. 8258 */ 8259 if (sgs->group_type == group_misfit_task && 8260 sgs->group_misfit_task_load < busiest->group_misfit_task_load) 8261 return false; 8262 8263 asym_packing: 8264 /* This is the busiest node in its class. */ 8265 if (!(env->sd->flags & SD_ASYM_PACKING)) 8266 return true; 8267 8268 /* No ASYM_PACKING if target CPU is already busy */ 8269 if (env->idle == CPU_NOT_IDLE) 8270 return true; 8271 /* 8272 * ASYM_PACKING needs to move all the work to the highest 8273 * priority CPUs in the group, therefore mark all groups 8274 * of lower priority than ourself as busy. 8275 */ 8276 if (sgs->sum_nr_running && 8277 sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) { 8278 if (!sds->busiest) 8279 return true; 8280 8281 /* Prefer to move from lowest priority CPU's work */ 8282 if (sched_asym_prefer(sds->busiest->asym_prefer_cpu, 8283 sg->asym_prefer_cpu)) 8284 return true; 8285 } 8286 8287 return false; 8288 } 8289 8290 #ifdef CONFIG_NUMA_BALANCING 8291 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 8292 { 8293 if (sgs->sum_nr_running > sgs->nr_numa_running) 8294 return regular; 8295 if (sgs->sum_nr_running > sgs->nr_preferred_running) 8296 return remote; 8297 return all; 8298 } 8299 8300 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 8301 { 8302 if (rq->nr_running > rq->nr_numa_running) 8303 return regular; 8304 if (rq->nr_running > rq->nr_preferred_running) 8305 return remote; 8306 return all; 8307 } 8308 #else 8309 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 8310 { 8311 return all; 8312 } 8313 8314 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 8315 { 8316 return regular; 8317 } 8318 #endif /* CONFIG_NUMA_BALANCING */ 8319 8320 /** 8321 * update_sd_lb_stats - Update sched_domain's statistics for load balancing. 8322 * @env: The load balancing environment. 8323 * @sds: variable to hold the statistics for this sched_domain.
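 *
 * Walks every sched_group in @env->sd, filling in per-group statistics
 * via update_sg_lb_stats() and remembering the busiest group as judged
 * by update_sd_pick_busiest().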
8324 */ 8325 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) 8326 { 8327 struct sched_domain *child = env->sd->child; 8328 struct sched_group *sg = env->sd->groups; 8329 struct sg_lb_stats *local = &sds->local_stat; 8330 struct sg_lb_stats tmp_sgs; 8331 bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING; 8332 int sg_status = 0; 8333 8334 #ifdef CONFIG_NO_HZ_COMMON 8335 if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) 8336 env->flags |= LBF_NOHZ_STATS; 8337 #endif 8338 8339 do { 8340 struct sg_lb_stats *sgs = &tmp_sgs; 8341 int local_group; 8342 8343 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); 8344 if (local_group) { 8345 sds->local = sg; 8346 sgs = local; 8347 8348 if (env->idle != CPU_NEWLY_IDLE || 8349 time_after_eq(jiffies, sg->sgc->next_update)) 8350 update_group_capacity(env->sd, env->dst_cpu); 8351 } 8352 8353 update_sg_lb_stats(env, sg, sgs, &sg_status); 8354 8355 if (local_group) 8356 goto next_group; 8357 8358 /* 8359 * In case the child domain prefers tasks go to siblings 8360 * first, lower the sg capacity so that we'll try 8361 * and move all the excess tasks away. We lower the capacity 8362 * of a group only if the local group has the capacity to fit 8363 * these excess tasks. The extra check prevents the case where 8364 * you always pull from the heaviest group when it is already 8365 * under-utilized (possible when a large weight task outweighs 8366 * the tasks on the system). 8367 */ 8368 if (prefer_sibling && sds->local && 8369 group_has_capacity(env, local) && 8370 (sgs->sum_nr_running > local->sum_nr_running + 1)) { 8371 sgs->group_no_capacity = 1; 8372 sgs->group_type = group_classify(sg, sgs); 8373 } 8374 8375 if (update_sd_pick_busiest(env, sds, sg, sgs)) { 8376 sds->busiest = sg; 8377 sds->busiest_stat = *sgs; 8378 } 8379 8380 next_group: 8381 /* Now, start updating sd_lb_stats */ 8382 sds->total_running += sgs->sum_nr_running; 8383 sds->total_load += sgs->group_load; 8384 sds->total_capacity += sgs->group_capacity; 8385 8386 sg = sg->next; 8387 } while (sg != env->sd->groups); 8388 8389 #ifdef CONFIG_NO_HZ_COMMON 8390 if ((env->flags & LBF_NOHZ_AGAIN) && 8391 cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) { 8392 8393 WRITE_ONCE(nohz.next_blocked, 8394 jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD)); 8395 } 8396 #endif 8397 8398 if (env->sd->flags & SD_NUMA) 8399 env->fbq_type = fbq_classify_group(&sds->busiest_stat); 8400 8401 if (!env->sd->parent) { 8402 struct root_domain *rd = env->dst_rq->rd; 8403 8404 /* update overload indicator if we are at root domain */ 8405 WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD); 8406 8407 /* Update over-utilization (tipping point, U >= 0) indicator */ 8408 WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED); 8409 } else if (sg_status & SG_OVERUTILIZED) { 8410 WRITE_ONCE(env->dst_rq->rd->overutilized, SG_OVERUTILIZED); 8411 } 8412 } 8413 8414 /** 8415 * check_asym_packing - Check to see if the group is packed into the 8416 * sched domain. 8417 * 8418 * This is primarily intended to be used at the sibling level. Some 8419 * cores like POWER7 prefer to use lower numbered SMT threads. In the 8420 * case of POWER7, it can move to lower SMT modes only when higher 8421 * threads are idle. When in lower SMT modes, the threads will 8422 * perform better since they share less core resources. Hence when we 8423 * have idle threads, we want them to be the higher ones. 8424 * 8425 * This packing function is run on idle threads.
It checks to see if 8426 * the busiest CPU in this domain (core in the P7 case) has a higher 8427 * CPU number than the packing function is being run on. Here we are 8428 * assuming a lower CPU number will be equivalent to a lower SMT thread 8429 * number. 8430 * 8431 * Return: 1 when packing is required and a task should be moved to 8432 * this CPU. The amount of the imbalance is returned in env->imbalance. 8433 * 8434 * @env: The load balancing environment. 8435 * @sds: Statistics of the sched_domain which is to be packed 8436 */ 8437 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) 8438 { 8439 int busiest_cpu; 8440 8441 if (!(env->sd->flags & SD_ASYM_PACKING)) 8442 return 0; 8443 8444 if (env->idle == CPU_NOT_IDLE) 8445 return 0; 8446 8447 if (!sds->busiest) 8448 return 0; 8449 8450 busiest_cpu = sds->busiest->asym_prefer_cpu; 8451 if (sched_asym_prefer(busiest_cpu, env->dst_cpu)) 8452 return 0; 8453 8454 env->imbalance = DIV_ROUND_CLOSEST( 8455 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity, 8456 SCHED_CAPACITY_SCALE); 8457 8458 return 1; 8459 } 8460 8461 /** 8462 * fix_small_imbalance - Calculate the minor imbalance that exists 8463 * amongst the groups of a sched_domain, during 8464 * load balancing. 8465 * @env: The load balancing environment. 8466 * @sds: Statistics of the sched_domain whose imbalance is to be calculated. 8467 */ 8468 static inline 8469 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 8470 { 8471 unsigned long tmp, capa_now = 0, capa_move = 0; 8472 unsigned int imbn = 2; 8473 unsigned long scaled_busy_load_per_task; 8474 struct sg_lb_stats *local, *busiest; 8475 8476 local = &sds->local_stat; 8477 busiest = &sds->busiest_stat; 8478 8479 if (!local->sum_nr_running) 8480 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu); 8481 else if (busiest->load_per_task > local->load_per_task) 8482 imbn = 1; 8483 8484 scaled_busy_load_per_task = 8485 (busiest->load_per_task * SCHED_CAPACITY_SCALE) / 8486 busiest->group_capacity; 8487 8488 if (busiest->avg_load + scaled_busy_load_per_task >= 8489 local->avg_load + (scaled_busy_load_per_task * imbn)) { 8490 env->imbalance = busiest->load_per_task; 8491 return; 8492 } 8493 8494 /* 8495 * OK, we don't have enough imbalance to justify moving tasks, 8496 * however we may be able to increase total CPU capacity used by 8497 * moving them.
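 * The code below therefore compares the capacity used now (capa_now)
 * with the capacity that would be used after moving one task
 * (capa_move), both scaled by SCHED_CAPACITY_SCALE, and only sets an
 * imbalance when the move is a net win.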
8498 */ 8499 8500 capa_now += busiest->group_capacity * 8501 min(busiest->load_per_task, busiest->avg_load); 8502 capa_now += local->group_capacity * 8503 min(local->load_per_task, local->avg_load); 8504 capa_now /= SCHED_CAPACITY_SCALE; 8505 8506 /* Amount of load we'd subtract */ 8507 if (busiest->avg_load > scaled_busy_load_per_task) { 8508 capa_move += busiest->group_capacity * 8509 min(busiest->load_per_task, 8510 busiest->avg_load - scaled_busy_load_per_task); 8511 } 8512 8513 /* Amount of load we'd add */ 8514 if (busiest->avg_load * busiest->group_capacity < 8515 busiest->load_per_task * SCHED_CAPACITY_SCALE) { 8516 tmp = (busiest->avg_load * busiest->group_capacity) / 8517 local->group_capacity; 8518 } else { 8519 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / 8520 local->group_capacity; 8521 } 8522 capa_move += local->group_capacity * 8523 min(local->load_per_task, local->avg_load + tmp); 8524 capa_move /= SCHED_CAPACITY_SCALE; 8525 8526 /* Move if we gain throughput */ 8527 if (capa_move > capa_now) 8528 env->imbalance = busiest->load_per_task; 8529 } 8530 8531 /** 8532 * calculate_imbalance - Calculate the amount of imbalance present within the 8533 * groups of a given sched_domain during load balance. 8534 * @env: load balance environment 8535 * @sds: statistics of the sched_domain whose imbalance is to be calculated. 8536 */ 8537 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 8538 { 8539 unsigned long max_pull, load_above_capacity = ~0UL; 8540 struct sg_lb_stats *local, *busiest; 8541 8542 local = &sds->local_stat; 8543 busiest = &sds->busiest_stat; 8544 8545 if (busiest->group_type == group_imbalanced) { 8546 /* 8547 * In the group_imb case we cannot rely on group-wide averages 8548 * to ensure CPU-load equilibrium, look at wider averages. XXX 8549 */ 8550 busiest->load_per_task = 8551 min(busiest->load_per_task, sds->avg_load); 8552 } 8553 8554 /* 8555 * Avg load of busiest sg can be less and avg load of local sg can 8556 * be greater than avg load across all sgs of sd because avg load 8557 * factors in sg capacity and sgs with smaller group_type are 8558 * skipped when updating the busiest sg: 8559 */ 8560 if (busiest->group_type != group_misfit_task && 8561 (busiest->avg_load <= sds->avg_load || 8562 local->avg_load >= sds->avg_load)) { 8563 env->imbalance = 0; 8564 return fix_small_imbalance(env, sds); 8565 } 8566 8567 /* 8568 * If there aren't any idle CPUs, avoid creating some. 8569 */ 8570 if (busiest->group_type == group_overloaded && 8571 local->group_type == group_overloaded) { 8572 load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE; 8573 if (load_above_capacity > busiest->group_capacity) { 8574 load_above_capacity -= busiest->group_capacity; 8575 load_above_capacity *= scale_load_down(NICE_0_LOAD); 8576 load_above_capacity /= busiest->group_capacity; 8577 } else 8578 load_above_capacity = ~0UL; 8579 } 8580 8581 /* 8582 * We're trying to get all the CPUs to the average_load, so we don't 8583 * want to push ourselves above the average load, nor do we wish to 8584 * reduce the max loaded CPU below the average load. At the same time, 8585 * we also don't want to reduce the group load below the group 8586 * capacity. Thus we look for the minimum possible imbalance. 
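 *
 * For example (illustrative numbers only): with sds->avg_load = 1000,
 * busiest->avg_load = 1400, local->avg_load = 600, both group
 * capacities at SCHED_CAPACITY_SCALE and load_above_capacity not
 * capping it, max_pull is 400 and the resulting imbalance is
 * min(400, 400) = 400.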
8587 */ 8588 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity); 8589 8590 /* How much load to actually move to equalise the imbalance */ 8591 env->imbalance = min( 8592 max_pull * busiest->group_capacity, 8593 (sds->avg_load - local->avg_load) * local->group_capacity 8594 ) / SCHED_CAPACITY_SCALE; 8595 8596 /* Boost imbalance to allow a misfit task to be balanced. */ 8597 if (busiest->group_type == group_misfit_task) { 8598 env->imbalance = max_t(long, env->imbalance, 8599 busiest->group_misfit_task_load); 8600 } 8601 8602 /* 8603 * if *imbalance is less than the average load per runnable task, 8604 * there is no guarantee that any tasks will be moved, so we'll have 8605 * a think about bumping its value to force at least one task to be 8606 * moved 8607 */ 8608 if (env->imbalance < busiest->load_per_task) 8609 return fix_small_imbalance(env, sds); 8610 } 8611 8612 /******* find_busiest_group() helpers end here *********************/ 8613 8614 /** 8615 * find_busiest_group - Returns the busiest group within the sched_domain 8616 * if there is an imbalance. 8617 * 8618 * Also calculates the amount of weighted load which should be moved 8619 * to restore balance. 8620 * 8621 * @env: The load balancing environment. 8622 * 8623 * Return: The busiest group if an imbalance exists. 8624 */ 8625 static struct sched_group *find_busiest_group(struct lb_env *env) 8626 { 8627 struct sg_lb_stats *local, *busiest; 8628 struct sd_lb_stats sds; 8629 8630 init_sd_lb_stats(&sds); 8631 8632 /* 8633 * Compute the various statistics relevant for load balancing at 8634 * this level. 8635 */ 8636 update_sd_lb_stats(env, &sds); 8637 8638 if (static_branch_unlikely(&sched_energy_present)) { 8639 struct root_domain *rd = env->dst_rq->rd; 8640 8641 if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) 8642 goto out_balanced; 8643 } 8644 8645 local = &sds.local_stat; 8646 busiest = &sds.busiest_stat; 8647 8648 /* ASYM feature bypasses nice load balance check */ 8649 if (check_asym_packing(env, &sds)) 8650 return sds.busiest; 8651 8652 /* There is no busy sibling group to pull tasks from */ 8653 if (!sds.busiest || busiest->sum_nr_running == 0) 8654 goto out_balanced; 8655 8656 /* XXX broken for overlapping NUMA groups */ 8657 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load) 8658 / sds.total_capacity; 8659 8660 /* 8661 * If the busiest group is imbalanced the below checks don't 8662 * work because they assume all things are equal, which typically 8663 * isn't true due to cpus_allowed constraints and the like. 8664 */ 8665 if (busiest->group_type == group_imbalanced) 8666 goto force_balance; 8667 8668 /* 8669 * When dst_cpu is idle, prevent SMP nice and/or asymmetric group 8670 * capacities from resulting in underutilization due to avg_load. 8671 */ 8672 if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) && 8673 busiest->group_no_capacity) 8674 goto force_balance; 8675 8676 /* Misfit tasks should be dealt with regardless of the avg load */ 8677 if (busiest->group_type == group_misfit_task) 8678 goto force_balance; 8679 8680 /* 8681 * If the local group is busier than the selected busiest group 8682 * don't try and pull any tasks. 8683 */ 8684 if (local->avg_load >= busiest->avg_load) 8685 goto out_balanced; 8686 8687 /* 8688 * Don't pull any tasks if this group is already above the domain 8689 * average load. 8690 */ 8691 if (local->avg_load >= sds.avg_load) 8692 goto out_balanced; 8693 8694 if (env->idle == CPU_IDLE) { 8695 /* 8696 * This CPU is idle.
If the busiest group is not overloaded 8697 * and there is no imbalance between this and busiest group 8698 * wrt idle CPUs, it is balanced. The imbalance becomes 8699 * significant if the diff is greater than 1, otherwise we 8700 * might end up just moving the imbalance to another group 8701 */ 8702 if ((busiest->group_type != group_overloaded) && 8703 (local->idle_cpus <= (busiest->idle_cpus + 1))) 8704 goto out_balanced; 8705 } else { 8706 /* 8707 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use 8708 * imbalance_pct to be conservative. 8709 */ 8710 if (100 * busiest->avg_load <= 8711 env->sd->imbalance_pct * local->avg_load) 8712 goto out_balanced; 8713 } 8714 8715 force_balance: 8716 /* Looks like there is an imbalance. Compute it */ 8717 env->src_grp_type = busiest->group_type; 8718 calculate_imbalance(env, &sds); 8719 return env->imbalance ? sds.busiest : NULL; 8720 8721 out_balanced: 8722 env->imbalance = 0; 8723 return NULL; 8724 } 8725 8726 /* 8727 * find_busiest_queue - find the busiest runqueue among the CPUs in the group. 8728 */ 8729 static struct rq *find_busiest_queue(struct lb_env *env, 8730 struct sched_group *group) 8731 { 8732 struct rq *busiest = NULL, *rq; 8733 unsigned long busiest_load = 0, busiest_capacity = 1; 8734 int i; 8735 8736 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 8737 unsigned long capacity, wl; 8738 enum fbq_type rt; 8739 8740 rq = cpu_rq(i); 8741 rt = fbq_classify_rq(rq); 8742 8743 /* 8744 * We classify groups/runqueues into three groups: 8745 * - regular: there are !numa tasks 8746 * - remote: there are numa tasks that run on the 'wrong' node 8747 * - all: there is no distinction 8748 * 8749 * In order to avoid migrating ideally placed numa tasks, 8750 * ignore those when there are better options. 8751 * 8752 * If we ignore the actual busiest queue to migrate another 8753 * task, the next balance pass can still reduce the busiest 8754 * queue by moving tasks around inside the node. 8755 * 8756 * If we cannot move enough load due to this classification 8757 * the next pass will adjust the group classification and 8758 * allow migration of more tasks. 8759 * 8760 * Both cases only affect the total convergence complexity. 8761 */ 8762 if (rt > env->fbq_type) 8763 continue; 8764 8765 /* 8766 * For ASYM_CPUCAPACITY domains with misfit tasks we simply 8767 * seek the "biggest" misfit task. 8768 */ 8769 if (env->src_grp_type == group_misfit_task) { 8770 if (rq->misfit_task_load > busiest_load) { 8771 busiest_load = rq->misfit_task_load; 8772 busiest = rq; 8773 } 8774 8775 continue; 8776 } 8777 8778 capacity = capacity_of(i); 8779 8780 /* 8781 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could 8782 * eventually lead to active_balancing high->low capacity. 8783 * Higher per-CPU capacity is considered better than balancing 8784 * average load. 8785 */ 8786 if (env->sd->flags & SD_ASYM_CPUCAPACITY && 8787 capacity_of(env->dst_cpu) < capacity && 8788 rq->nr_running == 1) 8789 continue; 8790 8791 wl = weighted_cpuload(rq); 8792 8793 /* 8794 * When comparing with imbalance, use weighted_cpuload() 8795 * which is not scaled with the CPU capacity. 8796 */ 8797 8798 if (rq->nr_running == 1 && wl > env->imbalance && 8799 !check_cpu_capacity(rq, env->sd)) 8800 continue; 8801 8802 /* 8803 * For the load comparisons with the other CPUs, consider 8804 * the weighted_cpuload() scaled with the CPU capacity, so 8805 * that the load can be moved away from the CPU that is 8806 * potentially running at a lower capacity.
8807 * 8808 * Thus we're looking for max(wl_i / capacity_i), crosswise 8809 * multiplication to rid ourselves of the division works out 8810 * to: wl_i * capacity_j > wl_j * capacity_i; where j is 8811 * our previous maximum. 8812 */ 8813 if (wl * busiest_capacity > busiest_load * capacity) { 8814 busiest_load = wl; 8815 busiest_capacity = capacity; 8816 busiest = rq; 8817 } 8818 } 8819 8820 return busiest; 8821 } 8822 8823 /* 8824 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but 8825 * so long as it is large enough. 8826 */ 8827 #define MAX_PINNED_INTERVAL 512 8828 8829 static int need_active_balance(struct lb_env *env) 8830 { 8831 struct sched_domain *sd = env->sd; 8832 8833 if (env->idle == CPU_NEWLY_IDLE) { 8834 8835 /* 8836 * ASYM_PACKING needs to force migrate tasks from busy but 8837 * lower priority CPUs in order to pack all tasks in the 8838 * highest priority CPUs. 8839 */ 8840 if ((sd->flags & SD_ASYM_PACKING) && 8841 sched_asym_prefer(env->dst_cpu, env->src_cpu)) 8842 return 1; 8843 } 8844 8845 /* 8846 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. 8847 * It's worth migrating the task if the src_cpu's capacity is reduced 8848 * because of other sched_class or IRQs if more capacity stays 8849 * available on dst_cpu. 8850 */ 8851 if ((env->idle != CPU_NOT_IDLE) && 8852 (env->src_rq->cfs.h_nr_running == 1)) { 8853 if ((check_cpu_capacity(env->src_rq, sd)) && 8854 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) 8855 return 1; 8856 } 8857 8858 if (env->src_grp_type == group_misfit_task) 8859 return 1; 8860 8861 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); 8862 } 8863 8864 static int active_load_balance_cpu_stop(void *data); 8865 8866 static int should_we_balance(struct lb_env *env) 8867 { 8868 struct sched_group *sg = env->sd->groups; 8869 int cpu, balance_cpu = -1; 8870 8871 /* 8872 * Ensure the balancing environment is consistent; can happen 8873 * when the softirq triggers 'during' hotplug. 8874 */ 8875 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) 8876 return 0; 8877 8878 /* 8879 * In the newly idle case, we will allow all the CPUs 8880 * to do the newly idle load balance. 8881 */ 8882 if (env->idle == CPU_NEWLY_IDLE) 8883 return 1; 8884 8885 /* Try to find first idle CPU */ 8886 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { 8887 if (!idle_cpu(cpu)) 8888 continue; 8889 8890 balance_cpu = cpu; 8891 break; 8892 } 8893 8894 if (balance_cpu == -1) 8895 balance_cpu = group_balance_cpu(sg); 8896 8897 /* 8898 * First idle CPU or the first CPU(busiest) in this sched group 8899 * is eligible for doing load balancing at this and above domains. 8900 */ 8901 return balance_cpu == env->dst_cpu; 8902 } 8903 8904 /* 8905 * Check this_cpu to ensure it is balanced within domain. Attempt to move 8906 * tasks if there is an imbalance. 
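 * Returns the number of tasks moved; *continue_balancing is cleared when
 * another CPU in the group should be doing the balancing instead of us.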
8907 */ 8908 static int load_balance(int this_cpu, struct rq *this_rq, 8909 struct sched_domain *sd, enum cpu_idle_type idle, 8910 int *continue_balancing) 8911 { 8912 int ld_moved, cur_ld_moved, active_balance = 0; 8913 struct sched_domain *sd_parent = sd->parent; 8914 struct sched_group *group; 8915 struct rq *busiest; 8916 struct rq_flags rf; 8917 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); 8918 8919 struct lb_env env = { 8920 .sd = sd, 8921 .dst_cpu = this_cpu, 8922 .dst_rq = this_rq, 8923 .dst_grpmask = sched_group_span(sd->groups), 8924 .idle = idle, 8925 .loop_break = sched_nr_migrate_break, 8926 .cpus = cpus, 8927 .fbq_type = all, 8928 .tasks = LIST_HEAD_INIT(env.tasks), 8929 }; 8930 8931 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); 8932 8933 schedstat_inc(sd->lb_count[idle]); 8934 8935 redo: 8936 if (!should_we_balance(&env)) { 8937 *continue_balancing = 0; 8938 goto out_balanced; 8939 } 8940 8941 group = find_busiest_group(&env); 8942 if (!group) { 8943 schedstat_inc(sd->lb_nobusyg[idle]); 8944 goto out_balanced; 8945 } 8946 8947 busiest = find_busiest_queue(&env, group); 8948 if (!busiest) { 8949 schedstat_inc(sd->lb_nobusyq[idle]); 8950 goto out_balanced; 8951 } 8952 8953 BUG_ON(busiest == env.dst_rq); 8954 8955 schedstat_add(sd->lb_imbalance[idle], env.imbalance); 8956 8957 env.src_cpu = busiest->cpu; 8958 env.src_rq = busiest; 8959 8960 ld_moved = 0; 8961 if (busiest->nr_running > 1) { 8962 /* 8963 * Attempt to move tasks. If find_busiest_group has found 8964 * an imbalance but busiest->nr_running <= 1, the group is 8965 * still unbalanced. ld_moved simply stays zero, so it is 8966 * correctly treated as an imbalance. 8967 */ 8968 env.flags |= LBF_ALL_PINNED; 8969 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); 8970 8971 more_balance: 8972 rq_lock_irqsave(busiest, &rf); 8973 update_rq_clock(busiest); 8974 8975 /* 8976 * cur_ld_moved - load moved in current iteration 8977 * ld_moved - cumulative load moved across iterations 8978 */ 8979 cur_ld_moved = detach_tasks(&env); 8980 8981 /* 8982 * We've detached some tasks from busiest_rq. Every 8983 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely 8984 * unlock busiest->lock, and we are able to be sure 8985 * that nobody can manipulate the tasks in parallel. 8986 * See task_rq_lock() family for the details. 8987 */ 8988 8989 rq_unlock(busiest, &rf); 8990 8991 if (cur_ld_moved) { 8992 attach_tasks(&env); 8993 ld_moved += cur_ld_moved; 8994 } 8995 8996 local_irq_restore(rf.flags); 8997 8998 if (env.flags & LBF_NEED_BREAK) { 8999 env.flags &= ~LBF_NEED_BREAK; 9000 goto more_balance; 9001 } 9002 9003 /* 9004 * Revisit (affine) tasks on src_cpu that couldn't be moved to 9005 * us and move them to an alternate dst_cpu in our sched_group 9006 * where they can run. The upper limit on how many times we 9007 * iterate on the same src_cpu is dependent on the number of CPUs in our 9008 * sched_group. 9009 * 9010 * This changes load balance semantics a bit on who can move 9011 * load to a given_cpu. In addition to the given_cpu itself 9012 * (or an ilb_cpu acting on its behalf where given_cpu is 9013 * nohz-idle), we now have balance_cpu in a position to move 9014 * load to given_cpu. In rare situations, this may cause 9015 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding 9016 * _independently_ and at the _same_ time to move some load to 9017 * given_cpu) causing excess load to be moved to given_cpu.
9018 * This however should not happen so much in practice and 9019 * moreover subsequent load balance cycles should correct the 9020 * excess load moved. 9021 */ 9022 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { 9023 9024 /* Prevent to re-select dst_cpu via env's CPUs */ 9025 cpumask_clear_cpu(env.dst_cpu, env.cpus); 9026 9027 env.dst_rq = cpu_rq(env.new_dst_cpu); 9028 env.dst_cpu = env.new_dst_cpu; 9029 env.flags &= ~LBF_DST_PINNED; 9030 env.loop = 0; 9031 env.loop_break = sched_nr_migrate_break; 9032 9033 /* 9034 * Go back to "more_balance" rather than "redo" since we 9035 * need to continue with same src_cpu. 9036 */ 9037 goto more_balance; 9038 } 9039 9040 /* 9041 * We failed to reach balance because of affinity. 9042 */ 9043 if (sd_parent) { 9044 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 9045 9046 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) 9047 *group_imbalance = 1; 9048 } 9049 9050 /* All tasks on this runqueue were pinned by CPU affinity */ 9051 if (unlikely(env.flags & LBF_ALL_PINNED)) { 9052 cpumask_clear_cpu(cpu_of(busiest), cpus); 9053 /* 9054 * Attempting to continue load balancing at the current 9055 * sched_domain level only makes sense if there are 9056 * active CPUs remaining as possible busiest CPUs to 9057 * pull load from which are not contained within the 9058 * destination group that is receiving any migrated 9059 * load. 9060 */ 9061 if (!cpumask_subset(cpus, env.dst_grpmask)) { 9062 env.loop = 0; 9063 env.loop_break = sched_nr_migrate_break; 9064 goto redo; 9065 } 9066 goto out_all_pinned; 9067 } 9068 } 9069 9070 if (!ld_moved) { 9071 schedstat_inc(sd->lb_failed[idle]); 9072 /* 9073 * Increment the failure counter only on periodic balance. 9074 * We do not want newidle balance, which can be very 9075 * frequent, pollute the failure counter causing 9076 * excessive cache_hot migrations and active balances. 9077 */ 9078 if (idle != CPU_NEWLY_IDLE) 9079 sd->nr_balance_failed++; 9080 9081 if (need_active_balance(&env)) { 9082 unsigned long flags; 9083 9084 raw_spin_lock_irqsave(&busiest->lock, flags); 9085 9086 /* 9087 * Don't kick the active_load_balance_cpu_stop, 9088 * if the curr task on busiest CPU can't be 9089 * moved to this_cpu: 9090 */ 9091 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { 9092 raw_spin_unlock_irqrestore(&busiest->lock, 9093 flags); 9094 env.flags |= LBF_ALL_PINNED; 9095 goto out_one_pinned; 9096 } 9097 9098 /* 9099 * ->active_balance synchronizes accesses to 9100 * ->active_balance_work. Once set, it's cleared 9101 * only after active load balance is finished. 9102 */ 9103 if (!busiest->active_balance) { 9104 busiest->active_balance = 1; 9105 busiest->push_cpu = this_cpu; 9106 active_balance = 1; 9107 } 9108 raw_spin_unlock_irqrestore(&busiest->lock, flags); 9109 9110 if (active_balance) { 9111 stop_one_cpu_nowait(cpu_of(busiest), 9112 active_load_balance_cpu_stop, busiest, 9113 &busiest->active_balance_work); 9114 } 9115 9116 /* We've kicked active balancing, force task migration. */ 9117 sd->nr_balance_failed = sd->cache_nice_tries+1; 9118 } 9119 } else 9120 sd->nr_balance_failed = 0; 9121 9122 if (likely(!active_balance)) { 9123 /* We were unbalanced, so reset the balancing interval */ 9124 sd->balance_interval = sd->min_interval; 9125 } else { 9126 /* 9127 * If we've begun active balancing, start to back off. This 9128 * case may not be covered by the all_pinned logic if there 9129 * is only 1 task on the busy runqueue (because we don't call 9130 * detach_tasks). 
9131 */ 9132 if (sd->balance_interval < sd->max_interval) 9133 sd->balance_interval *= 2; 9134 } 9135 9136 goto out; 9137 9138 out_balanced: 9139 /* 9140 * We reach balance although we may have faced some affinity 9141 * constraints. Clear the imbalance flag if it was set. 9142 */ 9143 if (sd_parent) { 9144 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 9145 9146 if (*group_imbalance) 9147 *group_imbalance = 0; 9148 } 9149 9150 out_all_pinned: 9151 /* 9152 * We reach balance because all tasks are pinned at this level so 9153 * we can't migrate them. Leave the imbalance flag set so the parent level 9154 * can try to migrate them. 9155 */ 9156 schedstat_inc(sd->lb_balanced[idle]); 9157 9158 sd->nr_balance_failed = 0; 9159 9160 out_one_pinned: 9161 ld_moved = 0; 9162 9163 /* 9164 * idle_balance() disregards balance intervals, so we could repeatedly 9165 * reach this code, which would lead to balance_interval skyrocketing 9166 * in a short amount of time. Skip the balance_interval increase logic 9167 * to avoid that. 9168 */ 9169 if (env.idle == CPU_NEWLY_IDLE) 9170 goto out; 9171 9172 /* tune up the balancing interval */ 9173 if ((env.flags & LBF_ALL_PINNED && 9174 sd->balance_interval < MAX_PINNED_INTERVAL) || 9175 sd->balance_interval < sd->max_interval) 9176 sd->balance_interval *= 2; 9177 out: 9178 return ld_moved; 9179 } 9180 9181 static inline unsigned long 9182 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) 9183 { 9184 unsigned long interval = sd->balance_interval; 9185 9186 if (cpu_busy) 9187 interval *= sd->busy_factor; 9188 9189 /* scale ms to jiffies */ 9190 interval = msecs_to_jiffies(interval); 9191 interval = clamp(interval, 1UL, max_load_balance_interval); 9192 9193 return interval; 9194 } 9195 9196 static inline void 9197 update_next_balance(struct sched_domain *sd, unsigned long *next_balance) 9198 { 9199 unsigned long interval, next; 9200 9201 /* used by idle balance, so cpu_busy = 0 */ 9202 interval = get_sd_balance_interval(sd, 0); 9203 next = sd->last_balance + interval; 9204 9205 if (time_after(*next_balance, next)) 9206 *next_balance = next; 9207 } 9208 9209 /* 9210 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes 9211 * running tasks off the busiest CPU onto idle CPUs. It requires at 9212 * least 1 task to be running on each physical CPU where possible, and 9213 * avoids physical / logical imbalances. 9214 */ 9215 static int active_load_balance_cpu_stop(void *data) 9216 { 9217 struct rq *busiest_rq = data; 9218 int busiest_cpu = cpu_of(busiest_rq); 9219 int target_cpu = busiest_rq->push_cpu; 9220 struct rq *target_rq = cpu_rq(target_cpu); 9221 struct sched_domain *sd; 9222 struct task_struct *p = NULL; 9223 struct rq_flags rf; 9224 9225 rq_lock_irq(busiest_rq, &rf); 9226 /* 9227 * Between queueing the stop-work and running it is a hole in which 9228 * CPUs can become inactive. We should not move tasks from or to 9229 * inactive CPUs. 9230 */ 9231 if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) 9232 goto out_unlock; 9233 9234 /* Make sure the requested CPU hasn't gone down in the meantime: */ 9235 if (unlikely(busiest_cpu != smp_processor_id() || 9236 !busiest_rq->active_balance)) 9237 goto out_unlock; 9238 9239 /* Is there any task to move? */ 9240 if (busiest_rq->nr_running <= 1) 9241 goto out_unlock; 9242 9243 /* 9244 * This condition is "impossible", if it occurs 9245 * we need to fix it. Originally reported by 9246 * Bjorn Helgaas on a 128-CPU setup.
9247 */ 9248 BUG_ON(busiest_rq == target_rq); 9249 9250 /* Search for an sd spanning us and the target CPU. */ 9251 rcu_read_lock(); 9252 for_each_domain(target_cpu, sd) { 9253 if ((sd->flags & SD_LOAD_BALANCE) && 9254 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) 9255 break; 9256 } 9257 9258 if (likely(sd)) { 9259 struct lb_env env = { 9260 .sd = sd, 9261 .dst_cpu = target_cpu, 9262 .dst_rq = target_rq, 9263 .src_cpu = busiest_rq->cpu, 9264 .src_rq = busiest_rq, 9265 .idle = CPU_IDLE, 9266 /* 9267 * can_migrate_task() doesn't need to compute new_dst_cpu 9268 * for active balancing. Since we have CPU_IDLE, but no 9269 * @dst_grpmask we need to make that test go away with lying 9270 * about DST_PINNED. 9271 */ 9272 .flags = LBF_DST_PINNED, 9273 }; 9274 9275 schedstat_inc(sd->alb_count); 9276 update_rq_clock(busiest_rq); 9277 9278 p = detach_one_task(&env); 9279 if (p) { 9280 schedstat_inc(sd->alb_pushed); 9281 /* Active balancing done, reset the failure counter. */ 9282 sd->nr_balance_failed = 0; 9283 } else { 9284 schedstat_inc(sd->alb_failed); 9285 } 9286 } 9287 rcu_read_unlock(); 9288 out_unlock: 9289 busiest_rq->active_balance = 0; 9290 rq_unlock(busiest_rq, &rf); 9291 9292 if (p) 9293 attach_one_task(target_rq, p); 9294 9295 local_irq_enable(); 9296 9297 return 0; 9298 } 9299 9300 static DEFINE_SPINLOCK(balancing); 9301 9302 /* 9303 * Scale the max load_balance interval with the number of CPUs in the system. 9304 * This trades load-balance latency on larger machines for less cross talk. 9305 */ 9306 void update_max_interval(void) 9307 { 9308 max_load_balance_interval = HZ*num_online_cpus()/10; 9309 } 9310 9311 /* 9312 * It checks each scheduling domain to see if it is due to be balanced, 9313 * and initiates a balancing operation if so. 9314 * 9315 * Balancing parameters are set up in init_sched_domains. 9316 */ 9317 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) 9318 { 9319 int continue_balancing = 1; 9320 int cpu = rq->cpu; 9321 unsigned long interval; 9322 struct sched_domain *sd; 9323 /* Earliest time when we have to do rebalance again */ 9324 unsigned long next_balance = jiffies + 60*HZ; 9325 int update_next_balance = 0; 9326 int need_serialize, need_decay = 0; 9327 u64 max_cost = 0; 9328 9329 rcu_read_lock(); 9330 for_each_domain(cpu, sd) { 9331 /* 9332 * Decay the newidle max times here because this is a regular 9333 * visit to all the domains. Decay ~1% per second. 9334 */ 9335 if (time_after(jiffies, sd->next_decay_max_lb_cost)) { 9336 sd->max_newidle_lb_cost = 9337 (sd->max_newidle_lb_cost * 253) / 256; 9338 sd->next_decay_max_lb_cost = jiffies + HZ; 9339 need_decay = 1; 9340 } 9341 max_cost += sd->max_newidle_lb_cost; 9342 9343 if (!(sd->flags & SD_LOAD_BALANCE)) 9344 continue; 9345 9346 /* 9347 * Stop the load balance at this level. There is another 9348 * CPU in our sched group which is doing load balancing more 9349 * actively. 9350 */ 9351 if (!continue_balancing) { 9352 if (need_decay) 9353 continue; 9354 break; 9355 } 9356 9357 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); 9358 9359 need_serialize = sd->flags & SD_SERIALIZE; 9360 if (need_serialize) { 9361 if (!spin_trylock(&balancing)) 9362 goto out; 9363 } 9364 9365 if (time_after_eq(jiffies, sd->last_balance + interval)) { 9366 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { 9367 /* 9368 * The LBF_DST_PINNED logic could have changed 9369 * env->dst_cpu, so we can't know our idle 9370 * state even if we migrated tasks. Update it. 
9371 */ 9372 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; 9373 } 9374 sd->last_balance = jiffies; 9375 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); 9376 } 9377 if (need_serialize) 9378 spin_unlock(&balancing); 9379 out: 9380 if (time_after(next_balance, sd->last_balance + interval)) { 9381 next_balance = sd->last_balance + interval; 9382 update_next_balance = 1; 9383 } 9384 } 9385 if (need_decay) { 9386 /* 9387 * Ensure the rq-wide value also decays but keep it at a 9388 * reasonable floor to avoid funnies with rq->avg_idle. 9389 */ 9390 rq->max_idle_balance_cost = 9391 max((u64)sysctl_sched_migration_cost, max_cost); 9392 } 9393 rcu_read_unlock(); 9394 9395 /* 9396 * next_balance will be updated only when there is a need. 9397 * When the CPU is attached to the null domain, for example, it will not be 9398 * updated. 9399 */ 9400 if (likely(update_next_balance)) { 9401 rq->next_balance = next_balance; 9402 9403 #ifdef CONFIG_NO_HZ_COMMON 9404 /* 9405 * If this CPU has been elected to perform the nohz idle 9406 * balance, the other idle CPUs have already rebalanced with 9407 * nohz_idle_balance() and nohz.next_balance has been 9408 * updated accordingly. This CPU is now running the idle load 9409 * balance for itself and we need to update the 9410 * nohz.next_balance accordingly. 9411 */ 9412 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) 9413 nohz.next_balance = rq->next_balance; 9414 #endif 9415 } 9416 } 9417 9418 static inline int on_null_domain(struct rq *rq) 9419 { 9420 return unlikely(!rcu_dereference_sched(rq->sd)); 9421 } 9422 9423 #ifdef CONFIG_NO_HZ_COMMON 9424 /* 9425 * idle load balancing details 9426 * - When one of the busy CPUs notices that there may be an idle rebalancing 9427 * needed, it will kick the idle load balancer, which then does idle 9428 * load balancing for all the idle CPUs. 9429 */ 9430 9431 static inline int find_new_ilb(void) 9432 { 9433 int ilb = cpumask_first(nohz.idle_cpus_mask); 9434 9435 if (ilb < nr_cpu_ids && idle_cpu(ilb)) 9436 return ilb; 9437 9438 return nr_cpu_ids; 9439 } 9440 9441 /* 9442 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the 9443 * nohz_load_balancer CPU (if there is one) otherwise fall back to any idle 9444 * CPU (if there is one). 9445 */ 9446 static void kick_ilb(unsigned int flags) 9447 { 9448 int ilb_cpu; 9449 9450 nohz.next_balance++; 9451 9452 ilb_cpu = find_new_ilb(); 9453 9454 if (ilb_cpu >= nr_cpu_ids) 9455 return; 9456 9457 flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu)); 9458 if (flags & NOHZ_KICK_MASK) 9459 return; 9460 9461 /* 9462 * Use smp_send_reschedule() instead of resched_cpu(). 9463 * This way we generate a sched IPI on the target CPU which 9464 * is idle. And the softirq performing nohz idle load balance 9465 * will be run before returning from the IPI. 9466 */ 9467 smp_send_reschedule(ilb_cpu); 9468 } 9469 9470 /* 9471 * Current heuristic for kicking the idle load balancer in the presence 9472 * of an idle CPU in the system. 9473 * - This rq has more than one task. 9474 * - This rq has at least one CFS task and the capacity of the CPU is 9475 * significantly reduced because of RT tasks or IRQs. 9476 * - At parent of LLC scheduler domain level, this CPU's scheduler group has 9477 * multiple busy CPUs. 9478 * - For SD_ASYM_PACKING, if the lower numbered CPUs in the scheduler 9479 * domain span are idle.
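 * Any one of these conditions is enough to trigger a kick; independently,
 * a lighter NOHZ_STATS_KICK can be requested once the blocked load of the
 * idle CPUs has gone stale, even when none of the above hold.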
9480 */ 9481 static void nohz_balancer_kick(struct rq *rq) 9482 { 9483 unsigned long now = jiffies; 9484 struct sched_domain_shared *sds; 9485 struct sched_domain *sd; 9486 int nr_busy, i, cpu = rq->cpu; 9487 unsigned int flags = 0; 9488 9489 if (unlikely(rq->idle_balance)) 9490 return; 9491 9492 /* 9493 * We may be recently in ticked or tickless idle mode. At the first 9494 * busy tick after returning from idle, we will update the busy stats. 9495 */ 9496 nohz_balance_exit_idle(rq); 9497 9498 /* 9499 * None are in tickless mode and hence no need for NOHZ idle load 9500 * balancing. 9501 */ 9502 if (likely(!atomic_read(&nohz.nr_cpus))) 9503 return; 9504 9505 if (READ_ONCE(nohz.has_blocked) && 9506 time_after(now, READ_ONCE(nohz.next_blocked))) 9507 flags = NOHZ_STATS_KICK; 9508 9509 if (time_before(now, nohz.next_balance)) 9510 goto out; 9511 9512 if (rq->nr_running >= 2 || rq->misfit_task_load) { 9513 flags = NOHZ_KICK_MASK; 9514 goto out; 9515 } 9516 9517 rcu_read_lock(); 9518 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 9519 if (sds) { 9520 /* 9521 * XXX: write a coherent comment on why we do this. 9522 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com 9523 */ 9524 nr_busy = atomic_read(&sds->nr_busy_cpus); 9525 if (nr_busy > 1) { 9526 flags = NOHZ_KICK_MASK; 9527 goto unlock; 9528 } 9529 9530 } 9531 9532 sd = rcu_dereference(rq->sd); 9533 if (sd) { 9534 if ((rq->cfs.h_nr_running >= 1) && 9535 check_cpu_capacity(rq, sd)) { 9536 flags = NOHZ_KICK_MASK; 9537 goto unlock; 9538 } 9539 } 9540 9541 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); 9542 if (sd) { 9543 for_each_cpu(i, sched_domain_span(sd)) { 9544 if (i == cpu || 9545 !cpumask_test_cpu(i, nohz.idle_cpus_mask)) 9546 continue; 9547 9548 if (sched_asym_prefer(i, cpu)) { 9549 flags = NOHZ_KICK_MASK; 9550 goto unlock; 9551 } 9552 } 9553 } 9554 unlock: 9555 rcu_read_unlock(); 9556 out: 9557 if (flags) 9558 kick_ilb(flags); 9559 } 9560 9561 static void set_cpu_sd_state_busy(int cpu) 9562 { 9563 struct sched_domain *sd; 9564 9565 rcu_read_lock(); 9566 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 9567 9568 if (!sd || !sd->nohz_idle) 9569 goto unlock; 9570 sd->nohz_idle = 0; 9571 9572 atomic_inc(&sd->shared->nr_busy_cpus); 9573 unlock: 9574 rcu_read_unlock(); 9575 } 9576 9577 void nohz_balance_exit_idle(struct rq *rq) 9578 { 9579 SCHED_WARN_ON(rq != this_rq()); 9580 9581 if (likely(!rq->nohz_tick_stopped)) 9582 return; 9583 9584 rq->nohz_tick_stopped = 0; 9585 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); 9586 atomic_dec(&nohz.nr_cpus); 9587 9588 set_cpu_sd_state_busy(rq->cpu); 9589 } 9590 9591 static void set_cpu_sd_state_idle(int cpu) 9592 { 9593 struct sched_domain *sd; 9594 9595 rcu_read_lock(); 9596 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 9597 9598 if (!sd || sd->nohz_idle) 9599 goto unlock; 9600 sd->nohz_idle = 1; 9601 9602 atomic_dec(&sd->shared->nr_busy_cpus); 9603 unlock: 9604 rcu_read_unlock(); 9605 } 9606 9607 /* 9608 * This routine will record that the CPU is going idle with tick stopped. 9609 * This info will be used in performing idle load balancing in the future. 
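 *
 * The counterpart is nohz_balance_exit_idle() above: it runs on the
 * first busy tick and takes the CPU back out of nohz.idle_cpus_mask.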
9610 */ 9611 void nohz_balance_enter_idle(int cpu) 9612 { 9613 struct rq *rq = cpu_rq(cpu); 9614 9615 SCHED_WARN_ON(cpu != smp_processor_id()); 9616 9617 /* If this CPU is going down, then nothing needs to be done: */ 9618 if (!cpu_active(cpu)) 9619 return; 9620 9621 /* Spare idle load balancing on CPUs that don't want to be disturbed: */ 9622 if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) 9623 return; 9624 9625 /* 9626 * Can be set safely without rq->lock held 9627 * If a clear happens, it will have evaluated last additions because 9628 * rq->lock is held during the check and the clear 9629 */ 9630 rq->has_blocked_load = 1; 9631 9632 /* 9633 * The tick is still stopped but load could have been added in the 9634 * meantime. We set the nohz.has_blocked flag to trig a check of the 9635 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear 9636 * of nohz.has_blocked can only happen after checking the new load 9637 */ 9638 if (rq->nohz_tick_stopped) 9639 goto out; 9640 9641 /* If we're a completely isolated CPU, we don't play: */ 9642 if (on_null_domain(rq)) 9643 return; 9644 9645 rq->nohz_tick_stopped = 1; 9646 9647 cpumask_set_cpu(cpu, nohz.idle_cpus_mask); 9648 atomic_inc(&nohz.nr_cpus); 9649 9650 /* 9651 * Ensures that if nohz_idle_balance() fails to observe our 9652 * @idle_cpus_mask store, it must observe the @has_blocked 9653 * store. 9654 */ 9655 smp_mb__after_atomic(); 9656 9657 set_cpu_sd_state_idle(cpu); 9658 9659 out: 9660 /* 9661 * Each time a cpu enter idle, we assume that it has blocked load and 9662 * enable the periodic update of the load of idle cpus 9663 */ 9664 WRITE_ONCE(nohz.has_blocked, 1); 9665 } 9666 9667 /* 9668 * Internal function that runs load balance for all idle cpus. The load balance 9669 * can be a simple update of blocked load or a complete load balance with 9670 * tasks movement depending of flags. 9671 * The function returns false if the loop has stopped before running 9672 * through all idle CPUs. 9673 */ 9674 static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, 9675 enum cpu_idle_type idle) 9676 { 9677 /* Earliest time when we have to do rebalance again */ 9678 unsigned long now = jiffies; 9679 unsigned long next_balance = now + 60*HZ; 9680 bool has_blocked_load = false; 9681 int update_next_balance = 0; 9682 int this_cpu = this_rq->cpu; 9683 int balance_cpu; 9684 int ret = false; 9685 struct rq *rq; 9686 9687 SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); 9688 9689 /* 9690 * We assume there will be no idle load after this update and clear 9691 * the has_blocked flag. If a cpu enters idle in the mean time, it will 9692 * set the has_blocked flag and trig another update of idle load. 9693 * Because a cpu that becomes idle, is added to idle_cpus_mask before 9694 * setting the flag, we are sure to not clear the state and not 9695 * check the load of an idle cpu. 9696 */ 9697 WRITE_ONCE(nohz.has_blocked, 0); 9698 9699 /* 9700 * Ensures that if we miss the CPU, we must see the has_blocked 9701 * store from nohz_balance_enter_idle(). 9702 */ 9703 smp_mb(); 9704 9705 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { 9706 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) 9707 continue; 9708 9709 /* 9710 * If this CPU gets work to do, stop the load balancing 9711 * work being done for other CPUs. Next load 9712 * balancing owner will pick it up. 
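 *
 * Bailing out keeps has_blocked_load set, so the abort path below
 * re-arms nohz.has_blocked and a later ilb will retry the CPUs we
 * did not get to.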
9713 */ 9714 if (need_resched()) { 9715 has_blocked_load = true; 9716 goto abort; 9717 } 9718 9719 rq = cpu_rq(balance_cpu); 9720 9721 has_blocked_load |= update_nohz_stats(rq, true); 9722 9723 /* 9724 * If time for next balance is due, 9725 * do the balance. 9726 */ 9727 if (time_after_eq(jiffies, rq->next_balance)) { 9728 struct rq_flags rf; 9729 9730 rq_lock_irqsave(rq, &rf); 9731 update_rq_clock(rq); 9732 cpu_load_update_idle(rq); 9733 rq_unlock_irqrestore(rq, &rf); 9734 9735 if (flags & NOHZ_BALANCE_KICK) 9736 rebalance_domains(rq, CPU_IDLE); 9737 } 9738 9739 if (time_after(next_balance, rq->next_balance)) { 9740 next_balance = rq->next_balance; 9741 update_next_balance = 1; 9742 } 9743 } 9744 9745 /* Newly idle CPU doesn't need an update */ 9746 if (idle != CPU_NEWLY_IDLE) { 9747 update_blocked_averages(this_cpu); 9748 has_blocked_load |= this_rq->has_blocked_load; 9749 } 9750 9751 if (flags & NOHZ_BALANCE_KICK) 9752 rebalance_domains(this_rq, CPU_IDLE); 9753 9754 WRITE_ONCE(nohz.next_blocked, 9755 now + msecs_to_jiffies(LOAD_AVG_PERIOD)); 9756 9757 /* The full idle balance loop has been done */ 9758 ret = true; 9759 9760 abort: 9761 /* There is still blocked load, enable periodic update */ 9762 if (has_blocked_load) 9763 WRITE_ONCE(nohz.has_blocked, 1); 9764 9765 /* 9766 * next_balance will be updated only when there is a need. 9767 * When the CPU is attached to null domain for ex, it will not be 9768 * updated. 9769 */ 9770 if (likely(update_next_balance)) 9771 nohz.next_balance = next_balance; 9772 9773 return ret; 9774 } 9775 9776 /* 9777 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the 9778 * rebalancing for all the cpus for whom scheduler ticks are stopped. 9779 */ 9780 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) 9781 { 9782 int this_cpu = this_rq->cpu; 9783 unsigned int flags; 9784 9785 if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK)) 9786 return false; 9787 9788 if (idle != CPU_IDLE) { 9789 atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); 9790 return false; 9791 } 9792 9793 /* could be _relaxed() */ 9794 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); 9795 if (!(flags & NOHZ_KICK_MASK)) 9796 return false; 9797 9798 _nohz_idle_balance(this_rq, flags, idle); 9799 9800 return true; 9801 } 9802 9803 static void nohz_newidle_balance(struct rq *this_rq) 9804 { 9805 int this_cpu = this_rq->cpu; 9806 9807 /* 9808 * This CPU doesn't want to be disturbed by scheduler 9809 * housekeeping 9810 */ 9811 if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) 9812 return; 9813 9814 /* Will wake up very soon. No time for doing anything else*/ 9815 if (this_rq->avg_idle < sysctl_sched_migration_cost) 9816 return; 9817 9818 /* Don't need to update blocked load of idle CPUs*/ 9819 if (!READ_ONCE(nohz.has_blocked) || 9820 time_before(jiffies, READ_ONCE(nohz.next_blocked))) 9821 return; 9822 9823 raw_spin_unlock(&this_rq->lock); 9824 /* 9825 * This CPU is going to be idle and blocked load of idle CPUs 9826 * need to be updated. Run the ilb locally as it is a good 9827 * candidate for ilb instead of waking up another idle CPU. 9828 * Kick an normal ilb if we failed to do the update. 
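 *
 * Note that rq->lock was dropped just above, so this stats pass runs
 * without this runqueue's lock; it is re-acquired right afterwards.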
9829 */ 9830 if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE)) 9831 kick_ilb(NOHZ_STATS_KICK); 9832 raw_spin_lock(&this_rq->lock); 9833 } 9834 9835 #else /* !CONFIG_NO_HZ_COMMON */ 9836 static inline void nohz_balancer_kick(struct rq *rq) { } 9837 9838 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) 9839 { 9840 return false; 9841 } 9842 9843 static inline void nohz_newidle_balance(struct rq *this_rq) { } 9844 #endif /* CONFIG_NO_HZ_COMMON */ 9845 9846 /* 9847 * idle_balance is called by schedule() if this_cpu is about to become 9848 * idle. Attempts to pull tasks from other CPUs. 9849 */ 9850 static int idle_balance(struct rq *this_rq, struct rq_flags *rf) 9851 { 9852 unsigned long next_balance = jiffies + HZ; 9853 int this_cpu = this_rq->cpu; 9854 struct sched_domain *sd; 9855 int pulled_task = 0; 9856 u64 curr_cost = 0; 9857 9858 /* 9859 * We must set idle_stamp _before_ calling idle_balance(), such that we 9860 * measure the duration of idle_balance() as idle time. 9861 */ 9862 this_rq->idle_stamp = rq_clock(this_rq); 9863 9864 /* 9865 * Do not pull tasks towards !active CPUs... 9866 */ 9867 if (!cpu_active(this_cpu)) 9868 return 0; 9869 9870 /* 9871 * This is OK, because current is on_cpu, which avoids it being picked 9872 * for load-balance and preemption/IRQs are still disabled avoiding 9873 * further scheduler activity on it and we're being very careful to 9874 * re-start the picking loop. 9875 */ 9876 rq_unpin_lock(this_rq, rf); 9877 9878 if (this_rq->avg_idle < sysctl_sched_migration_cost || 9879 !READ_ONCE(this_rq->rd->overload)) { 9880 9881 rcu_read_lock(); 9882 sd = rcu_dereference_check_sched_domain(this_rq->sd); 9883 if (sd) 9884 update_next_balance(sd, &next_balance); 9885 rcu_read_unlock(); 9886 9887 nohz_newidle_balance(this_rq); 9888 9889 goto out; 9890 } 9891 9892 raw_spin_unlock(&this_rq->lock); 9893 9894 update_blocked_averages(this_cpu); 9895 rcu_read_lock(); 9896 for_each_domain(this_cpu, sd) { 9897 int continue_balancing = 1; 9898 u64 t0, domain_cost; 9899 9900 if (!(sd->flags & SD_LOAD_BALANCE)) 9901 continue; 9902 9903 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { 9904 update_next_balance(sd, &next_balance); 9905 break; 9906 } 9907 9908 if (sd->flags & SD_BALANCE_NEWIDLE) { 9909 t0 = sched_clock_cpu(this_cpu); 9910 9911 pulled_task = load_balance(this_cpu, this_rq, 9912 sd, CPU_NEWLY_IDLE, 9913 &continue_balancing); 9914 9915 domain_cost = sched_clock_cpu(this_cpu) - t0; 9916 if (domain_cost > sd->max_newidle_lb_cost) 9917 sd->max_newidle_lb_cost = domain_cost; 9918 9919 curr_cost += domain_cost; 9920 } 9921 9922 update_next_balance(sd, &next_balance); 9923 9924 /* 9925 * Stop searching for tasks to pull if there are 9926 * now runnable tasks on this rq. 9927 */ 9928 if (pulled_task || this_rq->nr_running > 0) 9929 break; 9930 } 9931 rcu_read_unlock(); 9932 9933 raw_spin_lock(&this_rq->lock); 9934 9935 if (curr_cost > this_rq->max_idle_balance_cost) 9936 this_rq->max_idle_balance_cost = curr_cost; 9937 9938 out: 9939 /* 9940 * While browsing the domains, we released the rq lock, a task could 9941 * have been enqueued in the meantime. Since we're not going idle, 9942 * pretend we pulled a task. 9943 */ 9944 if (this_rq->cfs.h_nr_running && !pulled_task) 9945 pulled_task = 1; 9946 9947 /* Move the next balance forward */ 9948 if (time_after(this_rq->next_balance, next_balance)) 9949 this_rq->next_balance = next_balance; 9950 9951 /* Is there a task of a high priority class? 
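 * rq->nr_running counts tasks of every class while cfs.h_nr_running
 * counts only CFS tasks, so a mismatch means an RT or DL task is
 * queued. Returning -1 makes the caller, pick_next_task_fair()
 * (roughly: if (idle_balance() < 0) return RETRY_TASK;), redo the
 * scheduling-class selection.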
*/ 9952 if (this_rq->nr_running != this_rq->cfs.h_nr_running) 9953 pulled_task = -1; 9954 9955 if (pulled_task) 9956 this_rq->idle_stamp = 0; 9957 9958 rq_repin_lock(this_rq, rf); 9959 9960 return pulled_task; 9961 } 9962 9963 /* 9964 * run_rebalance_domains is triggered when needed from the scheduler tick. 9965 * Also triggered for nohz idle balancing (with nohz_balancing_kick set). 9966 */ 9967 static __latent_entropy void run_rebalance_domains(struct softirq_action *h) 9968 { 9969 struct rq *this_rq = this_rq(); 9970 enum cpu_idle_type idle = this_rq->idle_balance ? 9971 CPU_IDLE : CPU_NOT_IDLE; 9972 9973 /* 9974 * If this CPU has a pending nohz_balance_kick, then do the 9975 * balancing on behalf of the other idle CPUs whose ticks are 9976 * stopped. Do nohz_idle_balance *before* rebalance_domains to 9977 * give the idle CPUs a chance to load balance. Else we may 9978 * load balance only within the local sched_domain hierarchy 9979 * and abort nohz_idle_balance altogether if we pull some load. 9980 */ 9981 if (nohz_idle_balance(this_rq, idle)) 9982 return; 9983 9984 /* normal load balance */ 9985 update_blocked_averages(this_rq->cpu); 9986 rebalance_domains(this_rq, idle); 9987 } 9988 9989 /* 9990 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 9991 */ 9992 void trigger_load_balance(struct rq *rq) 9993 { 9994 /* Don't need to rebalance while attached to NULL domain */ 9995 if (unlikely(on_null_domain(rq))) 9996 return; 9997 9998 if (time_after_eq(jiffies, rq->next_balance)) 9999 raise_softirq(SCHED_SOFTIRQ); 10000 10001 nohz_balancer_kick(rq); 10002 } 10003 10004 static void rq_online_fair(struct rq *rq) 10005 { 10006 update_sysctl(); 10007 10008 update_runtime_enabled(rq); 10009 } 10010 10011 static void rq_offline_fair(struct rq *rq) 10012 { 10013 update_sysctl(); 10014 10015 /* Ensure any throttled groups are reachable by pick_next_task */ 10016 unthrottle_offline_cfs_rqs(rq); 10017 } 10018 10019 #endif /* CONFIG_SMP */ 10020 10021 /* 10022 * scheduler tick hitting a task of our scheduling class. 10023 * 10024 * NOTE: This function can be called remotely by the tick offload that 10025 * goes along full dynticks. Therefore no local assumption can be made 10026 * and everything must be accessed through the @rq and @curr passed in 10027 * parameters. 
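 *
 * That is why everything below goes through @curr and task_rq(curr)
 * rather than assuming this_rq() == @rq.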
10028 */ 10029 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) 10030 { 10031 struct cfs_rq *cfs_rq; 10032 struct sched_entity *se = &curr->se; 10033 10034 for_each_sched_entity(se) { 10035 cfs_rq = cfs_rq_of(se); 10036 entity_tick(cfs_rq, se, queued); 10037 } 10038 10039 if (static_branch_unlikely(&sched_numa_balancing)) 10040 task_tick_numa(rq, curr); 10041 10042 update_misfit_status(curr, rq); 10043 update_overutilized_status(task_rq(curr)); 10044 } 10045 10046 /* 10047 * called on fork with the child task as argument from the parent's context 10048 * - child not yet on the tasklist 10049 * - preemption disabled 10050 */ 10051 static void task_fork_fair(struct task_struct *p) 10052 { 10053 struct cfs_rq *cfs_rq; 10054 struct sched_entity *se = &p->se, *curr; 10055 struct rq *rq = this_rq(); 10056 struct rq_flags rf; 10057 10058 rq_lock(rq, &rf); 10059 update_rq_clock(rq); 10060 10061 cfs_rq = task_cfs_rq(current); 10062 curr = cfs_rq->curr; 10063 if (curr) { 10064 update_curr(cfs_rq); 10065 se->vruntime = curr->vruntime; 10066 } 10067 place_entity(cfs_rq, se, 1); 10068 10069 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { 10070 /* 10071 * Upon rescheduling, sched_class::put_prev_task() will place 10072 * 'current' within the tree based on its new key value. 10073 */ 10074 swap(curr->vruntime, se->vruntime); 10075 resched_curr(rq); 10076 } 10077 10078 se->vruntime -= cfs_rq->min_vruntime; 10079 rq_unlock(rq, &rf); 10080 } 10081 10082 /* 10083 * Priority of the task has changed. Check to see if we preempt 10084 * the current task. 10085 */ 10086 static void 10087 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) 10088 { 10089 if (!task_on_rq_queued(p)) 10090 return; 10091 10092 /* 10093 * Reschedule if we are currently running on this runqueue and 10094 * our priority decreased, or if we are not currently running on 10095 * this runqueue and our priority is higher than the current's 10096 */ 10097 if (rq->curr == p) { 10098 if (p->prio > oldprio) 10099 resched_curr(rq); 10100 } else 10101 check_preempt_curr(rq, p, 0); 10102 } 10103 10104 static inline bool vruntime_normalized(struct task_struct *p) 10105 { 10106 struct sched_entity *se = &p->se; 10107 10108 /* 10109 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases, 10110 * the dequeue_entity(.flags=0) will already have normalized the 10111 * vruntime. 10112 */ 10113 if (p->on_rq) 10114 return true; 10115 10116 /* 10117 * When !on_rq, vruntime of the task has usually NOT been normalized. 10118 * But there are some cases where it has already been normalized: 10119 * 10120 * - A forked child which is waiting for being woken up by 10121 * wake_up_new_task(). 10122 * - A task which has been woken up by try_to_wake_up() and 10123 * waiting for actually being woken up by sched_ttwu_pending(). 
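 *
 * Both cases are caught below: a freshly forked child still has
 * sum_exec_runtime == 0, and an in-flight remote wakeup is in
 * TASK_WAKING with sched_remote_wakeup set.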
10124 */ 10125 if (!se->sum_exec_runtime || 10126 (p->state == TASK_WAKING && p->sched_remote_wakeup)) 10127 return true; 10128 10129 return false; 10130 } 10131 10132 #ifdef CONFIG_FAIR_GROUP_SCHED 10133 /* 10134 * Propagate the changes of the sched_entity across the tg tree to make it 10135 * visible to the root 10136 */ 10137 static void propagate_entity_cfs_rq(struct sched_entity *se) 10138 { 10139 struct cfs_rq *cfs_rq; 10140 10141 /* Start to propagate at parent */ 10142 se = se->parent; 10143 10144 for_each_sched_entity(se) { 10145 cfs_rq = cfs_rq_of(se); 10146 10147 if (cfs_rq_throttled(cfs_rq)) 10148 break; 10149 10150 update_load_avg(cfs_rq, se, UPDATE_TG); 10151 } 10152 } 10153 #else 10154 static void propagate_entity_cfs_rq(struct sched_entity *se) { } 10155 #endif 10156 10157 static void detach_entity_cfs_rq(struct sched_entity *se) 10158 { 10159 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10160 10161 /* Catch up with the cfs_rq and remove our load when we leave */ 10162 update_load_avg(cfs_rq, se, 0); 10163 detach_entity_load_avg(cfs_rq, se); 10164 update_tg_load_avg(cfs_rq, false); 10165 propagate_entity_cfs_rq(se); 10166 } 10167 10168 static void attach_entity_cfs_rq(struct sched_entity *se) 10169 { 10170 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10171 10172 #ifdef CONFIG_FAIR_GROUP_SCHED 10173 /* 10174 * Since the real-depth could have been changed (only FAIR 10175 * class maintain depth value), reset depth properly. 10176 */ 10177 se->depth = se->parent ? se->parent->depth + 1 : 0; 10178 #endif 10179 10180 /* Synchronize entity with its cfs_rq */ 10181 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); 10182 attach_entity_load_avg(cfs_rq, se, 0); 10183 update_tg_load_avg(cfs_rq, false); 10184 propagate_entity_cfs_rq(se); 10185 } 10186 10187 static void detach_task_cfs_rq(struct task_struct *p) 10188 { 10189 struct sched_entity *se = &p->se; 10190 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10191 10192 if (!vruntime_normalized(p)) { 10193 /* 10194 * Fix up our vruntime so that the current sleep doesn't 10195 * cause 'unlimited' sleep bonus. 10196 */ 10197 place_entity(cfs_rq, se, 0); 10198 se->vruntime -= cfs_rq->min_vruntime; 10199 } 10200 10201 detach_entity_cfs_rq(se); 10202 } 10203 10204 static void attach_task_cfs_rq(struct task_struct *p) 10205 { 10206 struct sched_entity *se = &p->se; 10207 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10208 10209 attach_entity_cfs_rq(se); 10210 10211 if (!vruntime_normalized(p)) 10212 se->vruntime += cfs_rq->min_vruntime; 10213 } 10214 10215 static void switched_from_fair(struct rq *rq, struct task_struct *p) 10216 { 10217 detach_task_cfs_rq(p); 10218 } 10219 10220 static void switched_to_fair(struct rq *rq, struct task_struct *p) 10221 { 10222 attach_task_cfs_rq(p); 10223 10224 if (task_on_rq_queued(p)) { 10225 /* 10226 * We were most likely switched from sched_rt, so 10227 * kick off the schedule if running, otherwise just see 10228 * if we can still preempt the current task. 10229 */ 10230 if (rq->curr == p) 10231 resched_curr(rq); 10232 else 10233 check_preempt_curr(rq, p, 0); 10234 } 10235 } 10236 10237 /* Account for a task changing its policy or group. 10238 * 10239 * This routine is mostly called to set cfs_rq->curr field when a task 10240 * migrates between groups/classes. 
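 *
 * set_next_entity() is applied at every level so cfs_rq->curr stays
 * consistent up to the root, and the account_cfs_rq_runtime(cfs_rq, 0)
 * call below makes sure each new cfs_rq has runtime allocated before
 * the task runs on it.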
10241 */ 10242 static void set_curr_task_fair(struct rq *rq) 10243 { 10244 struct sched_entity *se = &rq->curr->se; 10245 10246 for_each_sched_entity(se) { 10247 struct cfs_rq *cfs_rq = cfs_rq_of(se); 10248 10249 set_next_entity(cfs_rq, se); 10250 /* ensure bandwidth has been allocated on our new cfs_rq */ 10251 account_cfs_rq_runtime(cfs_rq, 0); 10252 } 10253 } 10254 10255 void init_cfs_rq(struct cfs_rq *cfs_rq) 10256 { 10257 cfs_rq->tasks_timeline = RB_ROOT_CACHED; 10258 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); 10259 #ifndef CONFIG_64BIT 10260 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; 10261 #endif 10262 #ifdef CONFIG_SMP 10263 raw_spin_lock_init(&cfs_rq->removed.lock); 10264 #endif 10265 } 10266 10267 #ifdef CONFIG_FAIR_GROUP_SCHED 10268 static void task_set_group_fair(struct task_struct *p) 10269 { 10270 struct sched_entity *se = &p->se; 10271 10272 set_task_rq(p, task_cpu(p)); 10273 se->depth = se->parent ? se->parent->depth + 1 : 0; 10274 } 10275 10276 static void task_move_group_fair(struct task_struct *p) 10277 { 10278 detach_task_cfs_rq(p); 10279 set_task_rq(p, task_cpu(p)); 10280 10281 #ifdef CONFIG_SMP 10282 /* Tell se's cfs_rq has been changed -- migrated */ 10283 p->se.avg.last_update_time = 0; 10284 #endif 10285 attach_task_cfs_rq(p); 10286 } 10287 10288 static void task_change_group_fair(struct task_struct *p, int type) 10289 { 10290 switch (type) { 10291 case TASK_SET_GROUP: 10292 task_set_group_fair(p); 10293 break; 10294 10295 case TASK_MOVE_GROUP: 10296 task_move_group_fair(p); 10297 break; 10298 } 10299 } 10300 10301 void free_fair_sched_group(struct task_group *tg) 10302 { 10303 int i; 10304 10305 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); 10306 10307 for_each_possible_cpu(i) { 10308 if (tg->cfs_rq) 10309 kfree(tg->cfs_rq[i]); 10310 if (tg->se) 10311 kfree(tg->se[i]); 10312 } 10313 10314 kfree(tg->cfs_rq); 10315 kfree(tg->se); 10316 } 10317 10318 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 10319 { 10320 struct sched_entity *se; 10321 struct cfs_rq *cfs_rq; 10322 int i; 10323 10324 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); 10325 if (!tg->cfs_rq) 10326 goto err; 10327 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); 10328 if (!tg->se) 10329 goto err; 10330 10331 tg->shares = NICE_0_LOAD; 10332 10333 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); 10334 10335 for_each_possible_cpu(i) { 10336 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), 10337 GFP_KERNEL, cpu_to_node(i)); 10338 if (!cfs_rq) 10339 goto err; 10340 10341 se = kzalloc_node(sizeof(struct sched_entity), 10342 GFP_KERNEL, cpu_to_node(i)); 10343 if (!se) 10344 goto err_free_rq; 10345 10346 init_cfs_rq(cfs_rq); 10347 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); 10348 init_entity_runnable_average(se); 10349 } 10350 10351 return 1; 10352 10353 err_free_rq: 10354 kfree(cfs_rq); 10355 err: 10356 return 0; 10357 } 10358 10359 void online_fair_sched_group(struct task_group *tg) 10360 { 10361 struct sched_entity *se; 10362 struct rq *rq; 10363 int i; 10364 10365 for_each_possible_cpu(i) { 10366 rq = cpu_rq(i); 10367 se = tg->se[i]; 10368 10369 raw_spin_lock_irq(&rq->lock); 10370 update_rq_clock(rq); 10371 attach_entity_cfs_rq(se); 10372 sync_throttle(tg, i); 10373 raw_spin_unlock_irq(&rq->lock); 10374 } 10375 } 10376 10377 void unregister_fair_sched_group(struct task_group *tg) 10378 { 10379 unsigned long flags; 10380 struct rq *rq; 10381 int cpu; 10382 10383 for_each_possible_cpu(cpu) { 10384 if (tg->se[cpu]) 10385 remove_entity_load_avg(tg->se[cpu]); 
10386 10387 /* 10388 * Only empty task groups can be destroyed; so we can speculatively 10389 * check on_list without danger of it being re-added. 10390 */ 10391 if (!tg->cfs_rq[cpu]->on_list) 10392 continue; 10393 10394 rq = cpu_rq(cpu); 10395 10396 raw_spin_lock_irqsave(&rq->lock, flags); 10397 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); 10398 raw_spin_unlock_irqrestore(&rq->lock, flags); 10399 } 10400 } 10401 10402 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 10403 struct sched_entity *se, int cpu, 10404 struct sched_entity *parent) 10405 { 10406 struct rq *rq = cpu_rq(cpu); 10407 10408 cfs_rq->tg = tg; 10409 cfs_rq->rq = rq; 10410 init_cfs_rq_runtime(cfs_rq); 10411 10412 tg->cfs_rq[cpu] = cfs_rq; 10413 tg->se[cpu] = se; 10414 10415 /* se could be NULL for root_task_group */ 10416 if (!se) 10417 return; 10418 10419 if (!parent) { 10420 se->cfs_rq = &rq->cfs; 10421 se->depth = 0; 10422 } else { 10423 se->cfs_rq = parent->my_q; 10424 se->depth = parent->depth + 1; 10425 } 10426 10427 se->my_q = cfs_rq; 10428 /* guarantee group entities always have weight */ 10429 update_load_set(&se->load, NICE_0_LOAD); 10430 se->parent = parent; 10431 } 10432 10433 static DEFINE_MUTEX(shares_mutex); 10434 10435 int sched_group_set_shares(struct task_group *tg, unsigned long shares) 10436 { 10437 int i; 10438 10439 /* 10440 * We can't change the weight of the root cgroup. 10441 */ 10442 if (!tg->se[0]) 10443 return -EINVAL; 10444 10445 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); 10446 10447 mutex_lock(&shares_mutex); 10448 if (tg->shares == shares) 10449 goto done; 10450 10451 tg->shares = shares; 10452 for_each_possible_cpu(i) { 10453 struct rq *rq = cpu_rq(i); 10454 struct sched_entity *se = tg->se[i]; 10455 struct rq_flags rf; 10456 10457 /* Propagate contribution to hierarchy */ 10458 rq_lock_irqsave(rq, &rf); 10459 update_rq_clock(rq); 10460 for_each_sched_entity(se) { 10461 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); 10462 update_cfs_group(se); 10463 } 10464 rq_unlock_irqrestore(rq, &rf); 10465 } 10466 10467 done: 10468 mutex_unlock(&shares_mutex); 10469 return 0; 10470 } 10471 #else /* CONFIG_FAIR_GROUP_SCHED */ 10472 10473 void free_fair_sched_group(struct task_group *tg) { } 10474 10475 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 10476 { 10477 return 1; 10478 } 10479 10480 void online_fair_sched_group(struct task_group *tg) { } 10481 10482 void unregister_fair_sched_group(struct task_group *tg) { } 10483 10484 #endif /* CONFIG_FAIR_GROUP_SCHED */ 10485 10486 10487 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) 10488 { 10489 struct sched_entity *se = &task->se; 10490 unsigned int rr_interval = 0; 10491 10492 /* 10493 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise 10494 * idle runqueue: 10495 */ 10496 if (rq->cfs.load.weight) 10497 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); 10498 10499 return rr_interval; 10500 } 10501 10502 /* 10503 * All the scheduling class methods: 10504 */ 10505 const struct sched_class fair_sched_class = { 10506 .next = &idle_sched_class, 10507 .enqueue_task = enqueue_task_fair, 10508 .dequeue_task = dequeue_task_fair, 10509 .yield_task = yield_task_fair, 10510 .yield_to_task = yield_to_task_fair, 10511 10512 .check_preempt_curr = check_preempt_wakeup, 10513 10514 .pick_next_task = pick_next_task_fair, 10515 .put_prev_task = put_prev_task_fair, 10516 10517 #ifdef CONFIG_SMP 10518 .select_task_rq = select_task_rq_fair, 10519 
.migrate_task_rq = migrate_task_rq_fair, 10520 10521 .rq_online = rq_online_fair, 10522 .rq_offline = rq_offline_fair, 10523 10524 .task_dead = task_dead_fair, 10525 .set_cpus_allowed = set_cpus_allowed_common, 10526 #endif 10527 10528 .set_curr_task = set_curr_task_fair, 10529 .task_tick = task_tick_fair, 10530 .task_fork = task_fork_fair, 10531 10532 .prio_changed = prio_changed_fair, 10533 .switched_from = switched_from_fair, 10534 .switched_to = switched_to_fair, 10535 10536 .get_rr_interval = get_rr_interval_fair, 10537 10538 .update_curr = update_curr_fair, 10539 10540 #ifdef CONFIG_FAIR_GROUP_SCHED 10541 .task_change_group = task_change_group_fair, 10542 #endif 10543 }; 10544 10545 #ifdef CONFIG_SCHED_DEBUG 10546 void print_cfs_stats(struct seq_file *m, int cpu) 10547 { 10548 struct cfs_rq *cfs_rq; 10549 10550 rcu_read_lock(); 10551 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) 10552 print_cfs_rq(m, cpu, cfs_rq); 10553 rcu_read_unlock(); 10554 } 10555 10556 #ifdef CONFIG_NUMA_BALANCING 10557 void show_numa_stats(struct task_struct *p, struct seq_file *m) 10558 { 10559 int node; 10560 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0; 10561 10562 for_each_online_node(node) { 10563 if (p->numa_faults) { 10564 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; 10565 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; 10566 } 10567 if (p->numa_group) { 10568 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)], 10569 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)]; 10570 } 10571 print_numa_stats(m, node, tsf, tpf, gsf, gpf); 10572 } 10573 } 10574 #endif /* CONFIG_NUMA_BALANCING */ 10575 #endif /* CONFIG_SCHED_DEBUG */ 10576 10577 __init void init_sched_fair_class(void) 10578 { 10579 #ifdef CONFIG_SMP 10580 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); 10581 10582 #ifdef CONFIG_NO_HZ_COMMON 10583 nohz.next_balance = jiffies; 10584 nohz.next_blocked = jiffies; 10585 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); 10586 #endif 10587 #endif /* SMP */ 10588 10589 } 10590
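
/*
 * Note: the SCHED_SOFTIRQ registered above is raised from
 * trigger_load_balance() whenever a periodic balance is due, and the
 * same run_rebalance_domains() handler also services the nohz idle
 * balance once nohz_balancer_kick() has IPI'd an idle CPU.
 */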