// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/sched/mm.h>
#include <linux/sched/topology.h>

#include <linux/latencytop.h>
#include <linux/cpumask.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable.
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE   - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG    - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity.
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, the child runs first. If set to 0 (default) then
 * the parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 *
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
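/*
 * Illustrative example of the scaling above: on a system with 8 or more
 * online CPUs and the default SCHED_TUNABLESCALING_LOG, the factor is
 * 1 + ilog2(8) = 4, so the effective tunables become sched_latency = 24ms,
 * sched_min_granularity = 3ms and sched_wakeup_granularity = 4ms.
 */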
#ifdef CONFIG_SMP
/*
 * For asym packing, by default the lower numbered cpu has higher priority.
 */
int __weak arch_asym_cpu_priority(int cpu)
{
        return -cpu;
}
#endif

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

/*
 * The margin used when comparing utilization with CPU capacity:
 * util * margin < capacity * 1024
 *
 * (default: ~20%)
 */
unsigned int capacity_margin = 1280;

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
        lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
        lw->weight -= dec;
        lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
        lw->weight = w;
        lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
        unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
        unsigned int factor;

        switch (sysctl_sched_tunable_scaling) {
        case SCHED_TUNABLESCALING_NONE:
                factor = 1;
                break;
        case SCHED_TUNABLESCALING_LINEAR:
                factor = cpus;
                break;
        case SCHED_TUNABLESCALING_LOG:
        default:
                factor = 1 + ilog2(cpus);
                break;
        }

        return factor;
}

static void update_sysctl(void)
{
        unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
        (sysctl_##name = (factor) * normalized_sysctl_##name)
        SET_SYSCTL(sched_min_granularity);
        SET_SYSCTL(sched_latency);
        SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
        update_sysctl();
}

#define WMULT_CONST     (~0U)
#define WMULT_SHIFT     32

static void __update_inv_weight(struct load_weight *lw)
{
        unsigned long w;

        if (likely(lw->inv_weight))
                return;

        w = scale_load_down(lw->weight);

        if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
                lw->inv_weight = 1;
        else if (unlikely(!w))
                lw->inv_weight = WMULT_CONST;
        else
                lw->inv_weight = WMULT_CONST / w;
}
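/*
 * Illustrative example: for a scaled-down weight of 1024 (the nice-0
 * weight), inv_weight = WMULT_CONST / 1024 ~= 2^22. A later division by
 * the weight can then be carried out as a multiply by inv_weight followed
 * by a right shift of WMULT_SHIFT, avoiding a 64-bit division.
 */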
/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
        u64 fact = scale_load_down(weight);
        int shift = WMULT_SHIFT;

        __update_inv_weight(lw);

        if (unlikely(fact >> 32)) {
                while (fact >> 32) {
                        fact >>= 1;
                        shift--;
                }
        }

        /* hint to use a 32x32->64 mul */
        fact = (u64)(u32)fact * lw->inv_weight;

        while (fact >> 32) {
                fact >>= 1;
                shift--;
        }

        return mul_u64_u32_shr(delta_exec, fact, shift);
}


const struct sched_class fair_sched_class;
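/*
 * Worked example for __calc_delta() (illustrative, with scaled-down
 * weights): a nice-0 entity (weight 1024) on a runqueue of total weight
 * 2048 has inv_weight ~= 2^32 / 2048 = 2^21, so fact = 1024 * 2^21 = 2^31,
 * shift stays at 32, and the result is delta_exec * 2^31 >> 32 =
 * delta_exec / 2 - i.e. the entity is credited half of the wall-time.
 */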
/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
        SCHED_WARN_ON(!entity_is_task(se));
        return container_of(se, struct task_struct, se);
}

/* Walk up the scheduling entity hierarchy */
#define for_each_sched_entity(se) \
        for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (!cfs_rq->on_list) {
                struct rq *rq = rq_of(cfs_rq);
                int cpu = cpu_of(rq);
                /*
                 * Ensure we either appear before our parent (if already
                 * enqueued) or force our parent to appear after us when it is
                 * enqueued. The fact that we always enqueue bottom-up
                 * reduces this to two cases and a special case for the root
                 * cfs_rq. Furthermore, it also means that we will always reset
                 * tmp_alone_branch either when the branch is connected
                 * to a tree or when we reach the beginning of the tree.
                 */
                if (cfs_rq->tg->parent &&
                    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
                        /*
                         * If the parent is already on the list, we add the
                         * child just before. Thanks to the circular linked
                         * property of the list, this means putting the child
                         * at the tail of the list that starts at the parent.
                         */
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
                        /*
                         * The branch is now connected to its tree so we can
                         * reset tmp_alone_branch to the beginning of the
                         * list.
                         */
                        rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
                } else if (!cfs_rq->tg->parent) {
                        /*
                         * A cfs_rq without a parent should be put
                         * at the tail of the list.
                         */
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq->leaf_cfs_rq_list);
                        /*
                         * We have reached the beginning of a tree so we can
                         * reset tmp_alone_branch to the beginning of the list.
                         */
                        rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
                } else {
                        /*
                         * The parent has not already been added so we want to
                         * make sure that it will be put after us.
                         * tmp_alone_branch points to the beginning of the
                         * branch where we will add the parent.
                         */
                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
                                rq->tmp_alone_branch);
                        /*
                         * Update tmp_alone_branch to point to the new
                         * beginning of the branch.
                         */
                        rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
                }

                cfs_rq->on_list = 1;
        }
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->on_list) {
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                cfs_rq->on_list = 0;
        }
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)                      \
        list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,    \
                                 leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return se->cfs_rq;

        return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
        int se_depth, pse_depth;

        /*
         * The preemption test can be made between sibling entities that are
         * in the same cfs_rq, i.e. that have a common parent. Walk up the
         * hierarchy of both tasks until we find their ancestors that are
         * siblings of a common parent.
         */

        /* First walk up until both entities are at the same depth */
        se_depth = (*se)->depth;
        pse_depth = (*pse)->depth;

        while (se_depth > pse_depth) {
                se_depth--;
                *se = parent_entity(*se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = parent_entity(*pse);
        }

        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}

#else   /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
        for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)      \
        for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - max_vruntime);
        if (delta > 0)
                max_vruntime = vruntime;

        return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
                                struct sched_entity *b)
{
        return (s64)(a->vruntime - b->vruntime) < 0;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);

        u64 vruntime = cfs_rq->min_vruntime;

        if (curr) {
                if (curr->on_rq)
                        vruntime = curr->vruntime;
                else
                        curr = NULL;
        }

        if (leftmost) { /* non-empty tree */
                struct sched_entity *se;
                se = rb_entry(leftmost, struct sched_entity, run_node);

                if (!curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        /* ensure we never gain time by being placed backwards. */
        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
        smp_wmb();
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
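/*
 * Illustrative example: vruntime is u64 and may wrap. With
 * max_vruntime = 2^64 - 10 and vruntime = 5 (i.e. 15 units "later"), the
 * unsigned difference is 15, so (s64)(vruntime - max_vruntime) > 0 and
 * the helpers above still pick the logically later value despite the
 * wrap; entity_before() relies on the same trick.
 */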
/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        bool leftmost = true;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (entity_before(se, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color_cached(&se->run_node,
                               &cfs_rq->tasks_timeline, leftmost);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
        struct rb_node *next = rb_next(&se->run_node);

        if (!next)
                return NULL;

        return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        unsigned int factor = get_update_sysctl_factor();

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
        (normalized_sysctl_##name = sysctl_##name / (factor))
        WRT_SYSCTL(sched_min_granularity);
        WRT_SYSCTL(sched_latency);
        WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

        return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

        return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        if (unlikely(nr_running > sched_nr_latency))
                return nr_running * sysctl_sched_min_granularity;
        else
                return sysctl_sched_latency;
}

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;

                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;

                if (unlikely(!se->on_rq)) {
                        lw = cfs_rq->load;

                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = __calc_delta(slice, se->load.weight, load);
        }
        return slice;
}
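/*
 * Worked example (illustrative, unscaled defaults): with 4 runnable
 * nice-0 tasks, nr_running <= sched_nr_latency, so the period is
 * sysctl_sched_latency = 6ms and each task's slice is 6ms * 1/4 = 1.5ms.
 * With 16 tasks the period stretches to 16 * 0.75ms = 12ms, keeping each
 * slice at the 0.75ms minimum granularity.
 */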
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP

#include "sched-pelt.h"

static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);

/*
 * Give a new sched_entity initial runnable values so that it is seen as
 * heavily loaded during its infancy.
 */
void init_entity_runnable_average(struct sched_entity *se)
{
        struct sched_avg *sa = &se->avg;

        sa->last_update_time = 0;
        /*
         * sched_avg's period_contrib should be strictly less than 1024, so
         * we give it 1023 to make sure it is almost a period (1024us), and
         * will definitely be updated (after enqueue).
         */
        sa->period_contrib = 1023;
        /*
         * Tasks are initialized with full load to be seen as heavy tasks
         * until they get a chance to stabilize to their real load level.
         * Group entities are initialized with zero load to reflect the fact
         * that nothing has been attached to the task group yet.
         */
        if (entity_is_task(se))
                sa->load_avg = scale_load_down(se->load.weight);
        sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
        /*
         * At this point, util_avg won't be used in select_task_rq_fair anyway.
         */
        sa->util_avg = 0;
        sa->util_sum = 0;
        /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
static void attach_entity_cfs_rq(struct sched_entity *se);
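/*
 * Illustrative example for init_entity_runnable_average(): a freshly
 * forked nice-0 task starts with load_avg = scale_load_down(NICE_0_LOAD)
 * = 1024 and load_sum = 1024 * LOAD_AVG_MAX, i.e. it initially looks like
 * a fully runnable task until subsequent PELT updates decay the average
 * toward its real behaviour.
 */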
/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the left utilization budget:
 *
 *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task.
 *
 * For example, the simplest series from the beginning would look like:
 *
 *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
void post_init_entity_util_avg(struct sched_entity *se)
{
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        struct sched_avg *sa = &se->avg;
        long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;

        if (cap > 0) {
                if (cfs_rq->avg.util_avg != 0) {
                        sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
                        sa->util_avg /= (cfs_rq->avg.load_avg + 1);

                        if (sa->util_avg > cap)
                                sa->util_avg = cap;
                } else {
                        sa->util_avg = cap;
                }
                sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
        }

        if (entity_is_task(se)) {
                struct task_struct *p = task_of(se);
                if (p->sched_class != &fair_sched_class) {
                        /*
                         * For !fair tasks do:
                         *
                         *      update_cfs_rq_load_avg(now, cfs_rq);
                         *      attach_entity_load_avg(cfs_rq, se);
                         *      switched_from_fair(rq, p);
                         *
                         * such that the next switched_to_fair() has the
                         * expected state.
                         */
                        se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
                        return;
                }
        }

        attach_entity_cfs_rq(se);
}

#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct sched_entity *se)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
{
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_clock_task(rq_of(cfs_rq));
        u64 delta_exec;

        if (unlikely(!curr))
                return;

        delta_exec = now - curr->exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        curr->exec_start = now;

        schedstat_set(curr->statistics.exec_max,
                      max(delta_exec, curr->statistics.exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq->exec_clock, delta_exec);

        curr->vruntime += calc_delta_fair(delta_exec, curr);
        update_min_vruntime(cfs_rq);

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }

        account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
        update_curr(cfs_rq_of(&rq->curr->se));
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 wait_start, prev_wait_start;

        if (!schedstat_enabled())
                return;

        wait_start = rq_clock(rq_of(cfs_rq));
        prev_wait_start = schedstat_val(se->statistics.wait_start);

        if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
            likely(wait_start > prev_wait_start))
                wait_start -= prev_wait_start;

        schedstat_set(se->statistics.wait_start, wait_start);
}
static inline void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct task_struct *p;
        u64 delta;

        if (!schedstat_enabled())
                return;

        delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);

        if (entity_is_task(se)) {
                p = task_of(se);
                if (task_on_rq_migrating(p)) {
                        /*
                         * Preserve a migrating task's wait time so the
                         * wait_start time stamp can be adjusted to
                         * accumulate wait time prior to migration.
                         */
                        schedstat_set(se->statistics.wait_start, delta);
                        return;
                }
                trace_sched_stat_wait(p, delta);
        }

        schedstat_set(se->statistics.wait_max,
                      max(schedstat_val(se->statistics.wait_max), delta));
        schedstat_inc(se->statistics.wait_count);
        schedstat_add(se->statistics.wait_sum, delta);
        schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct task_struct *tsk = NULL;
        u64 sleep_start, block_start;

        if (!schedstat_enabled())
                return;

        sleep_start = schedstat_val(se->statistics.sleep_start);
        block_start = schedstat_val(se->statistics.block_start);

        if (entity_is_task(se))
                tsk = task_of(se);

        if (sleep_start) {
                u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
                        schedstat_set(se->statistics.sleep_max, delta);

                schedstat_set(se->statistics.sleep_start, 0);
                schedstat_add(se->statistics.sum_sleep_runtime, delta);

                if (tsk) {
                        account_scheduler_latency(tsk, delta >> 10, 1);
                        trace_sched_stat_sleep(tsk, delta);
                }
        }
        if (block_start) {
                u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > schedstat_val(se->statistics.block_max)))
                        schedstat_set(se->statistics.block_max, delta);

                schedstat_set(se->statistics.block_start, 0);
                schedstat_add(se->statistics.sum_sleep_runtime, delta);

                if (tsk) {
                        if (tsk->in_iowait) {
                                schedstat_add(se->statistics.iowait_sum, delta);
                                schedstat_inc(se->statistics.iowait_count);
                                trace_sched_stat_iowait(tsk, delta);
                        }

                        trace_sched_stat_blocked(tsk, delta);

                        /*
                         * Blocking time is in units of nanosecs, so shift by
                         * 20 to get a milliseconds-range estimation of the
                         * amount of time that the task spent sleeping:
                         */
                        if (unlikely(prof_on == SLEEP_PROFILING)) {
                                profile_hits(SLEEP_PROFILING,
                                             (void *)get_wchan(tsk),
                                             delta >> 20);
                        }
                        account_scheduler_latency(tsk, delta >> 10, 0);
                }
        }
}
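/*
 * The shifts in update_stats_enqueue_sleeper() above are approximate unit
 * conversions from nanoseconds (illustrative): delta >> 10 is roughly
 * microseconds (2^10 ns ~= 1.02us) for the latency accounting, and
 * delta >> 20 is roughly milliseconds (2^20 ns ~= 1.05ms) for the sleep
 * profiler buckets.
 */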
/*
 * Task is being enqueued - update stats:
 */
static inline void
update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
        if (!schedstat_enabled())
                return;

        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);

        if (flags & ENQUEUE_WAKEUP)
                update_stats_enqueue_sleeper(cfs_rq, se);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
        if (!schedstat_enabled())
                return;

        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);

        if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
                struct task_struct *tsk = task_of(se);

                if (tsk->state & TASK_INTERRUPTIBLE)
                        schedstat_set(se->statistics.sleep_start,
                                      rq_clock(rq_of(cfs_rq)));
                if (tsk->state & TASK_UNINTERRUPTIBLE)
                        schedstat_set(se->statistics.block_start,
                                      rq_clock(rq_of(cfs_rq)));
        }
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

struct numa_group {
        atomic_t refcount;

        spinlock_t lock; /* nr_tasks, tasks */
        int nr_tasks;
        pid_t gid;
        int active_nodes;

        struct rcu_head rcu;
        unsigned long total_faults;
        unsigned long max_faults_cpu;
        /*
         * Faults_cpu is used to decide whether memory should move
         * towards the CPU. As a consequence, these stats are weighted
         * more by CPU use than by memory faults.
         */
        unsigned long *faults_cpu;
        unsigned long faults[0];
};

static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
        unsigned long rss = 0;
        unsigned long nr_scan_pages;

        /*
         * Calculations based on RSS as non-present and empty pages are
         * skipped by the PTE scanner and NUMA hinting faults should be
         * trapped based on resident pages.
         */
        nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
        rss = get_mm_rss(p->mm);
        if (!rss)
                rss = nr_scan_pages;

        rss = round_up(rss, nr_scan_pages);
        return rss / nr_scan_pages;
}
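/*
 * Worked example for task_nr_scan_windows() (illustrative, 4KB pages):
 * with the default 256MB scan size, nr_scan_pages = 256 << (20 - 12) =
 * 65536 pages. A task with a 1GB RSS (262144 pages) therefore needs
 * 262144 / 65536 = 4 scan windows to cover its address space.
 */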
/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
        unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
        unsigned int scan, floor;
        unsigned int windows = 1;

        if (scan_size < MAX_SCAN_WINDOW)
                windows = MAX_SCAN_WINDOW / scan_size;
        floor = 1000 / windows;

        scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
        return max_t(unsigned int, floor, scan);
}

static unsigned int task_scan_start(struct task_struct *p)
{
        unsigned long smin = task_scan_min(p);
        unsigned long period = smin;

        /* Scale the maximum scan period with the amount of shared memory. */
        if (p->numa_group) {
                struct numa_group *ng = p->numa_group;
                unsigned long shared = group_faults_shared(ng);
                unsigned long private = group_faults_priv(ng);

                period *= atomic_read(&ng->refcount);
                period *= shared + 1;
                period /= private + shared + 1;
        }

        return max(smin, period);
}

static unsigned int task_scan_max(struct task_struct *p)
{
        unsigned long smin = task_scan_min(p);
        unsigned long smax;

        /* Watch for min being lower than max due to floor calculations */
        smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);

        /* Scale the maximum scan period with the amount of shared memory. */
        if (p->numa_group) {
                struct numa_group *ng = p->numa_group;
                unsigned long shared = group_faults_shared(ng);
                unsigned long private = group_faults_priv(ng);
                unsigned long period = smax;

                period *= atomic_read(&ng->refcount);
                period *= shared + 1;
                period /= private + shared + 1;

                smax = max(smax, period);
        }

        return max(smin, smax);
}

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
        rq->nr_numa_running += (p->numa_preferred_nid != -1);
        rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
        rq->nr_numa_running -= (p->numa_preferred_nid != -1);
        rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
        return p->numa_group ? p->numa_group->gid : 0;
}
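/*
 * Worked example for task_scan_min() (illustrative): with the default
 * 256MB scan size, windows = 2560 / 256 = 10, so floor = 1000 / 10 =
 * 100ms. A task whose RSS needs 4 scan windows gets scan = 1000 / 4 =
 * 250ms, and task_scan_min() returns max(100, 250) = 250ms.
 */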
/*
 * The averaged statistics, shared & private, memory & cpu,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
        return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
        if (!p->numa_faults)
                return 0;

        return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
                p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
        if (!p->numa_group)
                return 0;

        return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
                p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
        return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
                group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_priv(struct numa_group *ng)
{
        unsigned long faults = 0;
        int node;

        for_each_online_node(node) {
                faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
        }

        return faults;
}

static inline unsigned long group_faults_shared(struct numa_group *ng)
{
        unsigned long faults = 0;
        int node;

        for_each_online_node(node) {
                faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
        }

        return faults;
}

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
        return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}
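/*
 * Illustrative example for task_faults_idx(), assuming NUMA_MEM == 0 and
 * nr_node_ids == 4: the private (priv = 1) NUMA_MEM slot for node 2 is
 * index 2 * (0 * 4 + 2) + 1 = 5 in the faults array, and the shared
 * (priv = 0) slot is index 4.
 */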
/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
                                        int maxdist, bool task)
{
        unsigned long score = 0;
        int node;

        /*
         * All nodes are directly connected, and the same distance
         * from each other. No need for fancy placement algorithms.
         */
        if (sched_numa_topology_type == NUMA_DIRECT)
                return 0;

        /*
         * This code is called for each node, introducing N^2 complexity,
         * which should be ok given the number of nodes rarely exceeds 8.
         */
        for_each_online_node(node) {
                unsigned long faults;
                int dist = node_distance(nid, node);

                /*
                 * The furthest away nodes in the system are not interesting
                 * for placement; nid was already counted.
                 */
                if (dist == sched_max_numa_distance || node == nid)
                        continue;

                /*
                 * On systems with a backplane NUMA topology, compare groups
                 * of nodes, and move tasks towards the group with the most
                 * memory accesses. When comparing two nodes at distance
                 * "hoplimit", only nodes closer by than "hoplimit" are part
                 * of each group. Skip other nodes.
                 */
                if (sched_numa_topology_type == NUMA_BACKPLANE &&
                    dist > maxdist)
                        continue;

                /* Add up the faults from nearby nodes. */
                if (task)
                        faults = task_faults(p, node);
                else
                        faults = group_faults(p, node);

                /*
                 * On systems with a glueless mesh NUMA topology, there are
                 * no fixed "groups of nodes". Instead, nodes that are not
                 * directly connected bounce traffic through intermediate
                 * nodes; a numa_group can occupy any set of nodes.
                 * The further away a node is, the less the faults count.
                 * This seems to result in good task placement.
                 */
                if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
                        faults *= (sched_max_numa_distance - dist);
                        faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
                }

                score += faults;
        }

        return score;
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
                                        int dist)
{
        unsigned long faults, total_faults;

        if (!p->numa_faults)
                return 0;

        total_faults = p->total_numa_faults;

        if (!total_faults)
                return 0;

        faults = task_faults(p, nid);
        faults += score_nearby_nodes(p, nid, dist, true);

        return 1000 * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid,
                                         int dist)
{
        unsigned long faults, total_faults;

        if (!p->numa_group)
                return 0;

        total_faults = p->numa_group->total_faults;

        if (!total_faults)
                return 0;

        faults = group_faults(p, nid);
        faults += score_nearby_nodes(p, nid, dist, false);

        return 1000 * faults / total_faults;
}
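/*
 * Worked example for the NUMA_GLUELESS_MESH scaling in
 * score_nearby_nodes() (illustrative, with typical values LOCAL_DISTANCE
 * = 10 and sched_max_numa_distance = 40): faults on a node at distance 20
 * are scaled by (40 - 20) / (40 - 10) = 2/3, while a node at distance 30
 * contributes only (40 - 30) / (40 - 10) = 1/3 of its faults.
 */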
bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
                                int src_nid, int dst_cpu)
{
        struct numa_group *ng = p->numa_group;
        int dst_nid = cpu_to_node(dst_cpu);
        int last_cpupid, this_cpupid;

        this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);

        /*
         * Multi-stage node selection is used in conjunction with a periodic
         * migration fault to build a temporal task<->page relation. By using
         * a two-stage filter we remove short/unlikely relations.
         *
         * Using P(p) ~ n_p / n_t as per frequentist probability, we can
         * equate a task's usage of a particular page (n_p) per total usage
         * of this page (n_t) (in a given time-span) to a probability.
         *
         * Our periodic faults will sample this probability and getting the
         * same result twice in a row, given these samples are fully
         * independent, is then given by P(n)^2, provided our sample period
         * is sufficiently short compared to the usage pattern.
         *
         * This quadratic squishes small probabilities, making it less likely
         * that we act on an unlikely task<->page relation.
         */
        last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
        if (!cpupid_pid_unset(last_cpupid) &&
            cpupid_to_nid(last_cpupid) != dst_nid)
                return false;

        /* Always allow migrate on private faults */
        if (cpupid_match_pid(p, last_cpupid))
                return true;

        /* A shared fault, but p->numa_group has not been set up yet. */
        if (!ng)
                return true;

        /*
         * Destination node is much more heavily used than the source
         * node? Allow migration.
         */
        if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
            ACTIVE_NODE_FRACTION)
                return true;

        /*
         * Distribute memory according to CPU & memory use on each node,
         * with 3/4 hysteresis to avoid unnecessary memory migrations:
         *
         * faults_cpu(dst)   3   faults_cpu(src)
         * --------------- * - > ---------------
         * faults_mem(dst)   4   faults_mem(src)
         */
        return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
               group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}

static unsigned long weighted_cpuload(struct rq *rq);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long capacity_of(int cpu);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
        unsigned long nr_running;
        unsigned long load;

        /* Total compute capacity of CPUs on a node */
        unsigned long compute_capacity;

        /* Approximate capacity in terms of runnable tasks on a node */
        unsigned long task_capacity;
        int has_free_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
        int smt, cpu, cpus = 0;
        unsigned long capacity;

        memset(ns, 0, sizeof(*ns));
        for_each_cpu(cpu, cpumask_of_node(nid)) {
                struct rq *rq = cpu_rq(cpu);

                ns->nr_running += rq->nr_running;
                ns->load += weighted_cpuload(rq);
                ns->compute_capacity += capacity_of(cpu);

                cpus++;
        }

        /*
         * If we raced with hotplug and there are no CPUs left in our mask
         * the @ns structure is NULL'ed and task_numa_compare() will
         * not find this node attractive.
         *
         * We'll either bail at !has_free_capacity, or we'll detect a huge
         * imbalance and bail there.
         */
        if (!cpus)
                return;

        /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
        smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
        capacity = cpus / smt; /* cores */

        ns->task_capacity = min_t(unsigned, capacity,
                DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
        ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}

struct task_numa_env {
        struct task_struct *p;

        int src_cpu, src_nid;
        int dst_cpu, dst_nid;

        struct numa_stats src_stats, dst_stats;

        int imbalance_pct;
        int dist;

        struct task_struct *best_task;
        long best_imp;
        int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
                             struct task_struct *p, long imp)
{
        if (env->best_task)
                put_task_struct(env->best_task);
        if (p)
                get_task_struct(p);

        env->best_task = p;
        env->best_imp = imp;
        env->best_cpu = env->dst_cpu;
}
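/*
 * Worked example for update_numa_stats() (illustrative, assuming an SMT
 * sibling capacity of 589): a node with 8 SMT CPUs has compute_capacity =
 * 8 * 589 = 4712, so smt = DIV_ROUND_UP(1024 * 8, 4712) = 2 and capacity
 * = 8 / 2 = 4 cores. task_capacity = min(4, DIV_ROUND_CLOSEST(4712,
 * 1024)) = min(4, 5) = 4, and the node has free capacity while fewer than
 * 4 tasks are running.
 */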
static bool load_too_imbalanced(long src_load, long dst_load,
                                struct task_numa_env *env)
{
        long imb, old_imb;
        long orig_src_load, orig_dst_load;
        long src_capacity, dst_capacity;

        /*
         * The load is corrected for the CPU capacity available on each node.
         *
         * src_load        dst_load
         * ------------ vs ------------
         * src_capacity    dst_capacity
         */
        src_capacity = env->src_stats.compute_capacity;
        dst_capacity = env->dst_stats.compute_capacity;

        /* We care about the slope of the imbalance, not the direction. */
        if (dst_load < src_load)
                swap(dst_load, src_load);

        /* Is the difference below the threshold? */
        imb = dst_load * src_capacity * 100 -
              src_load * dst_capacity * env->imbalance_pct;
        if (imb <= 0)
                return false;

        /*
         * The imbalance is above the allowed threshold.
         * Compare it with the old imbalance.
         */
        orig_src_load = env->src_stats.load;
        orig_dst_load = env->dst_stats.load;

        if (orig_dst_load < orig_src_load)
                swap(orig_dst_load, orig_src_load);

        old_imb = orig_dst_load * src_capacity * 100 -
                  orig_src_load * dst_capacity * env->imbalance_pct;

        /* Would this change make things worse? */
        return (imb > old_imb);
}

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task was migrated to the target dst_cpu, taking
 * into account that it might be best if the task running on the dst_cpu
 * were exchanged with the source task.
 */
static void task_numa_compare(struct task_numa_env *env,
                              long taskimp, long groupimp)
{
        struct rq *src_rq = cpu_rq(env->src_cpu);
        struct rq *dst_rq = cpu_rq(env->dst_cpu);
        struct task_struct *cur;
        long src_load, dst_load;
        long load;
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;
        int dist = env->dist;

        rcu_read_lock();
        cur = task_rcu_dereference(&dst_rq->curr);
        if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
                cur = NULL;

        /*
         * Because we have preemption enabled we can get migrated around and
         * end up trying to select ourselves (current == env->p) as a swap
         * candidate.
         */
        if (cur == env->p)
                goto unlock;

        /*
         * "imp" is the fault differential for the source task between the
         * source and destination node. Calculate the total differential for
         * the source task and potential destination task. The more negative
         * the value is, the more remote accesses would be expected to be
         * incurred if the tasks were swapped.
         */
        if (cur) {
                /* Skip this swap candidate if it cannot be moved to the source cpu */
                if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
                        goto unlock;

                /*
                 * If dst and source tasks are in the same NUMA group, or not
                 * in any group then look only at task weights.
                 */
                if (cur->numa_group == env->p->numa_group) {
                        imp = taskimp + task_weight(cur, env->src_nid, dist) -
                              task_weight(cur, env->dst_nid, dist);
                        /*
                         * Add some hysteresis to prevent swapping the
                         * tasks within a group over tiny differences.
                         */
                        if (cur->numa_group)
                                imp -= imp/16;
                } else {
                        /*
                         * Compare the group weights. If a task is all by
                         * itself (not part of a group), use the task weight
                         * instead.
                         */
                        if (cur->numa_group)
                                imp += group_weight(cur, env->src_nid, dist) -
                                       group_weight(cur, env->dst_nid, dist);
                        else
                                imp += task_weight(cur, env->src_nid, dist) -
                                       task_weight(cur, env->dst_nid, dist);
                }
        }

        if (imp <= env->best_imp && moveimp <= env->best_imp)
                goto unlock;

        if (!cur) {
                /* Is there capacity at our destination? */
                if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
                    !env->dst_stats.has_free_capacity)
                        goto unlock;

                goto balance;
        }

        /* Balance doesn't matter much if we're running a task per cpu */
        if (imp > env->best_imp && src_rq->nr_running == 1 &&
            dst_rq->nr_running == 1)
                goto assign;

        /*
         * In the overloaded case, try and keep the load balanced.
         */
balance:
        load = task_h_load(env->p);
        dst_load = env->dst_stats.load + load;
        src_load = env->src_stats.load - load;

        if (moveimp > imp && moveimp > env->best_imp) {
                /*
                 * If the improvement from just moving env->p is better than
                 * swapping tasks around, check if a move is possible. Store
                 * a slightly smaller score than moveimp, so an actually idle
                 * CPU will win.
                 */
                if (!load_too_imbalanced(src_load, dst_load, env)) {
                        imp = moveimp - 1;
                        cur = NULL;
                        goto assign;
                }
        }

        if (imp <= env->best_imp)
                goto unlock;

        if (cur) {
                load = task_h_load(cur);
                dst_load -= load;
                src_load += load;
        }

        if (load_too_imbalanced(src_load, dst_load, env))
                goto unlock;

        /*
         * One idle CPU per node is evaluated for a task numa move.
         * Call select_idle_sibling to maybe find a better one.
         */
        if (!cur) {
                /*
                 * select_idle_sibling() uses a per-cpu cpumask that
                 * can be used from IRQ context.
                 */
                local_irq_disable();
                env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
                                                   env->dst_cpu);
                local_irq_enable();
        }

assign:
        task_numa_assign(env, cur, imp);
unlock:
        rcu_read_unlock();
}

static void task_numa_find_cpu(struct task_numa_env *env,
                               long taskimp, long groupimp)
{
        int cpu;

        for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
                /* Skip this CPU if the source task cannot migrate */
                if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
                        continue;

                env->dst_cpu = cpu;
                task_numa_compare(env, taskimp, groupimp);
        }
}
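/*
 * Worked example for load_too_imbalanced() (illustrative, with equal node
 * capacities, which then cancel out, and imbalance_pct = 112): src_load =
 * 1000 and dst_load = 1100 gives imb = 1100 * 100 - 1000 * 112 = -2000 <=
 * 0, so the move is considered balanced. With dst_load = 1200, imb = 8000
 * > 0 and is then compared against the pre-move imbalance.
 */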
/* Only move tasks to a NUMA node less busy than the current node. */
static bool numa_has_capacity(struct task_numa_env *env)
{
        struct numa_stats *src = &env->src_stats;
        struct numa_stats *dst = &env->dst_stats;

        if (src->has_free_capacity && !dst->has_free_capacity)
                return false;

        /*
         * Only consider a task move if the source has a higher load
         * than the destination, corrected for CPU capacity on each node.
         *
         *      src->load                dst->load
         * --------------------- vs ---------------------
         * src->compute_capacity     dst->compute_capacity
         */
        if (src->load * dst->compute_capacity * env->imbalance_pct >
            dst->load * src->compute_capacity * 100)
                return true;

        return false;
}

static int task_numa_migrate(struct task_struct *p)
{
        struct task_numa_env env = {
                .p = p,

                .src_cpu = task_cpu(p),
                .src_nid = task_node(p),

                .imbalance_pct = 112,

                .best_task = NULL,
                .best_imp = 0,
                .best_cpu = -1,
        };
        struct sched_domain *sd;
        unsigned long taskweight, groupweight;
        int nid, ret, dist;
        long taskimp, groupimp;

        /*
         * Pick the lowest SD_NUMA domain, as that would have the smallest
         * imbalance and would be the first to start moving tasks about.
         *
         * And we want to avoid any moving of tasks about, as that would
         * create random movement of tasks -- countering the NUMA conditions
         * we're trying to satisfy here.
         */
        rcu_read_lock();
        sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
        if (sd)
                env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
        rcu_read_unlock();

        /*
         * Cpusets can break the scheduler domain tree into smaller
         * balance domains, some of which do not cross NUMA boundaries.
         * Tasks that are "trapped" in such domains cannot be migrated
         * elsewhere, so there is no point in (re)trying.
         */
        if (unlikely(!sd)) {
                p->numa_preferred_nid = task_node(p);
                return -EINVAL;
        }

        env.dst_nid = p->numa_preferred_nid;
        dist = env.dist = node_distance(env.src_nid, env.dst_nid);
        taskweight = task_weight(p, env.src_nid, dist);
        groupweight = group_weight(p, env.src_nid, dist);
        update_numa_stats(&env.src_stats, env.src_nid);
        taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
        groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);

        /* Try to find a spot on the preferred nid. */
        if (numa_has_capacity(&env))
                task_numa_find_cpu(&env, taskimp, groupimp);

        /*
         * Look at other nodes in these cases:
         * - there is no space available on the preferred_nid
         * - the task is part of a numa_group that is interleaved across
         *   multiple NUMA nodes; in order to better consolidate the group,
         *   we need to check other locations.
         */
        if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
                for_each_online_node(nid) {
                        if (nid == env.src_nid || nid == p->numa_preferred_nid)
                                continue;

                        dist = node_distance(env.src_nid, env.dst_nid);
                        if (sched_numa_topology_type == NUMA_BACKPLANE &&
                            dist != env.dist) {
                                taskweight = task_weight(p, env.src_nid, dist);
                                groupweight = group_weight(p, env.src_nid, dist);
                        }

                        /* Only consider nodes where both task and groups benefit */
                        taskimp = task_weight(p, nid, dist) - taskweight;
                        groupimp = group_weight(p, nid, dist) - groupweight;
                        if (taskimp < 0 && groupimp < 0)
                                continue;

                        env.dist = dist;
                        env.dst_nid = nid;
                        update_numa_stats(&env.dst_stats, env.dst_nid);
                        if (numa_has_capacity(&env))
                                task_numa_find_cpu(&env, taskimp, groupimp);
                }
        }

        /*
         * If the task is part of a workload that spans multiple NUMA nodes,
         * and is migrating into one of the workload's active nodes, remember
         * this node as the task's preferred numa node, so the workload can
         * settle down.
         * A task that migrated to a second choice node will be better off
         * trying for a better one later. Do not set the preferred node here.
         */
        if (p->numa_group) {
                struct numa_group *ng = p->numa_group;

                if (env.best_cpu == -1)
                        nid = env.src_nid;
                else
                        nid = env.dst_nid;

                if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
                        sched_setnuma(p, env.dst_nid);
        }

        /* No better CPU than the current one was found. */
        if (env.best_cpu == -1)
                return -EAGAIN;

        /*
         * Reset the scan period if the task is being rescheduled on an
         * alternative node to recheck if the task is now properly placed.
         */
        p->numa_scan_period = task_scan_start(p);

        if (env.best_task == NULL) {
                ret = migrate_task_to(p, env.best_cpu);
                if (ret != 0)
                        trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
                return ret;
        }

        ret = migrate_swap(p, env.best_task);
        if (ret != 0)
                trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
        put_task_struct(env.best_task);
        return ret;
}

/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
        unsigned long interval = HZ;

        /* This task has no NUMA fault statistics yet */
        if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
                return;

        /* Periodically retry migrating the task to the preferred node */
        interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
        p->numa_migrate_retry = jiffies + interval;

        /* Success if task is already running on preferred CPU */
        if (task_node(p) == p->numa_preferred_nid)
                return;

        /* Otherwise, try to migrate to a CPU on the preferred node */
        task_numa_migrate(p);
}
/*
 * Find out how many nodes the workload is actively running on. Do this by
 * tracking the nodes from which NUMA hinting faults are triggered. This can
 * be different from the set of nodes where the workload's memory is currently
 * located.
 */
static void numa_group_count_active_nodes(struct numa_group *numa_group)
{
        unsigned long faults, max_faults = 0;
        int nid, active_nodes = 0;

        for_each_online_node(nid) {
                faults = group_faults_cpu(numa_group, nid);
                if (faults > max_faults)
                        max_faults = faults;
        }

        for_each_online_node(nid) {
                faults = group_faults_cpu(numa_group, nid);
                if (faults * ACTIVE_NODE_FRACTION > max_faults)
                        active_nodes++;
        }

        numa_group->max_faults_cpu = max_faults;
        numa_group->active_nodes = active_nodes;
}

/*
 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
 * increments. The more local the fault statistics are, the higher the scan
 * period will be for the next scan window. If the local/(local+remote)
 * ratio is below NUMA_PERIOD_THRESHOLD (where the range of the ratio is
 * 1..NUMA_PERIOD_SLOTS) the scan period will decrease. Aim for 70% local
 * accesses.
 */
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 7
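/*
 * Worked example for numa_group_count_active_nodes() above (illustrative):
 * if the busiest node shows max_faults = 900 CPU faults, a node with 400
 * faults is counted as active (400 * 3 > 900), while a node with 250 is
 * not (250 * 3 = 750 <= 900) and stays out of the group's
 * pseudo-interleaving set.
 */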
1991 */ 1992 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD; 1993 if (!slot) 1994 slot = 1; 1995 diff = slot * period_slot; 1996 } else { 1997 /* 1998 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS, 1999 * yet they are not on the local NUMA node. Speed up 2000 * NUMA scanning to get the memory moved over. 2001 */ 2002 int ratio = max(lr_ratio, ps_ratio); 2003 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; 2004 } 2005 2006 p->numa_scan_period = clamp(p->numa_scan_period + diff, 2007 task_scan_min(p), task_scan_max(p)); 2008 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2009 } 2010 2011 /* 2012 * Get the fraction of time the task has been running since the last 2013 * NUMA placement cycle. The scheduler keeps similar statistics, but 2014 * decays those on a 32ms period, which is orders of magnitude off 2015 * from the dozens-of-seconds NUMA balancing period. Use the scheduler 2016 * stats only if the task is so new there are no NUMA statistics yet. 2017 */ 2018 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) 2019 { 2020 u64 runtime, delta, now; 2021 /* Use the start of this time slice to avoid calculations. */ 2022 now = p->se.exec_start; 2023 runtime = p->se.sum_exec_runtime; 2024 2025 if (p->last_task_numa_placement) { 2026 delta = runtime - p->last_sum_exec_runtime; 2027 *period = now - p->last_task_numa_placement; 2028 } else { 2029 delta = p->se.avg.load_sum / p->se.load.weight; 2030 *period = LOAD_AVG_MAX; 2031 } 2032 2033 p->last_sum_exec_runtime = runtime; 2034 p->last_task_numa_placement = now; 2035 2036 return delta; 2037 } 2038 2039 /* 2040 * Determine the preferred nid for a task in a numa_group. This needs to 2041 * be done in a way that produces consistent results with group_weight, 2042 * otherwise workloads might not converge. 2043 */ 2044 static int preferred_group_nid(struct task_struct *p, int nid) 2045 { 2046 nodemask_t nodes; 2047 int dist; 2048 2049 /* Direct connections between all NUMA nodes. */ 2050 if (sched_numa_topology_type == NUMA_DIRECT) 2051 return nid; 2052 2053 /* 2054 * On a system with glueless mesh NUMA topology, group_weight 2055 * scores nodes according to the number of NUMA hinting faults on 2056 * both the node itself, and on nearby nodes. 2057 */ 2058 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 2059 unsigned long score, max_score = 0; 2060 int node, max_node = nid; 2061 2062 dist = sched_max_numa_distance; 2063 2064 for_each_online_node(node) { 2065 score = group_weight(p, node, dist); 2066 if (score > max_score) { 2067 max_score = score; 2068 max_node = node; 2069 } 2070 } 2071 return max_node; 2072 } 2073 2074 /* 2075 * Finding the preferred nid in a system with NUMA backplane 2076 * interconnect topology is more involved. The goal is to locate 2077 * tasks from numa_groups near each other in the system, and 2078 * untangle workloads from different sides of the system. This requires 2079 * searching down the hierarchy of node groups, recursively searching 2080 * inside the highest scoring group of nodes. The nodemask tricks 2081 * keep the complexity of the search down. 2082 */ 2083 nodes = node_online_map; 2084 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { 2085 unsigned long max_faults = 0; 2086 nodemask_t max_group = NODE_MASK_NONE; 2087 int a, b; 2088 2089 /* Are there nodes at this distance from each other? 
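 * (e.g. if the system's distance table only contains the values
 * {10, 20, 40}, every other dist in this countdown loop is skipped
 * here by find_numa_distance())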
*/ 2090 if (!find_numa_distance(dist)) 2091 continue; 2092 2093 for_each_node_mask(a, nodes) { 2094 unsigned long faults = 0; 2095 nodemask_t this_group; 2096 nodes_clear(this_group); 2097 2098 /* Sum group's NUMA faults; includes a==b case. */ 2099 for_each_node_mask(b, nodes) { 2100 if (node_distance(a, b) < dist) { 2101 faults += group_faults(p, b); 2102 node_set(b, this_group); 2103 node_clear(b, nodes); 2104 } 2105 } 2106 2107 /* Remember the top group. */ 2108 if (faults > max_faults) { 2109 max_faults = faults; 2110 max_group = this_group; 2111 /* 2112 * subtle: at the smallest distance there is 2113 * just one node left in each "group", the 2114 * winner is the preferred nid. 2115 */ 2116 nid = a; 2117 } 2118 } 2119 /* Next round, evaluate the nodes within max_group. */ 2120 if (!max_faults) 2121 break; 2122 nodes = max_group; 2123 } 2124 return nid; 2125 } 2126 2127 static void task_numa_placement(struct task_struct *p) 2128 { 2129 int seq, nid, max_nid = -1, max_group_nid = -1; 2130 unsigned long max_faults = 0, max_group_faults = 0; 2131 unsigned long fault_types[2] = { 0, 0 }; 2132 unsigned long total_faults; 2133 u64 runtime, period; 2134 spinlock_t *group_lock = NULL; 2135 2136 /* 2137 * The p->mm->numa_scan_seq field gets updated without 2138 * exclusive access. Use READ_ONCE() here to ensure 2139 * that the field is read in a single access: 2140 */ 2141 seq = READ_ONCE(p->mm->numa_scan_seq); 2142 if (p->numa_scan_seq == seq) 2143 return; 2144 p->numa_scan_seq = seq; 2145 p->numa_scan_period_max = task_scan_max(p); 2146 2147 total_faults = p->numa_faults_locality[0] + 2148 p->numa_faults_locality[1]; 2149 runtime = numa_get_avg_runtime(p, &period); 2150 2151 /* If the task is part of a group prevent parallel updates to group stats */ 2152 if (p->numa_group) { 2153 group_lock = &p->numa_group->lock; 2154 spin_lock_irq(group_lock); 2155 } 2156 2157 /* Find the node with the highest number of faults */ 2158 for_each_online_node(nid) { 2159 /* Keep track of the offsets in numa_faults array */ 2160 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx; 2161 unsigned long faults = 0, group_faults = 0; 2162 int priv; 2163 2164 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { 2165 long diff, f_diff, f_weight; 2166 2167 mem_idx = task_faults_idx(NUMA_MEM, nid, priv); 2168 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv); 2169 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv); 2170 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv); 2171 2172 /* Decay existing window, copy faults since last scan */ 2173 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; 2174 fault_types[priv] += p->numa_faults[membuf_idx]; 2175 p->numa_faults[membuf_idx] = 0; 2176 2177 /* 2178 * Normalize the faults_from, so all tasks in a group 2179 * count according to CPU use, instead of by the raw 2180 * number of faults. Tasks with little runtime have 2181 * little over-all impact on throughput, and thus their 2182 * faults are less important. 
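 *
 * For instance (illustrative numbers): a task that ran 500ms of a
 * 1000ms placement window gets, in 16.16 fixed point,
 *
 *	f_weight = (500 << 16) / (1000 + 1);	// ~32735, i.e. ~0.5
 *
 * so its buffered CPU faults are credited at roughly half weight,
 * normalized further by total_faults in the code below.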
2183 */ 2184 f_weight = div64_u64(runtime << 16, period + 1); 2185 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / 2186 (total_faults + 1); 2187 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; 2188 p->numa_faults[cpubuf_idx] = 0; 2189 2190 p->numa_faults[mem_idx] += diff; 2191 p->numa_faults[cpu_idx] += f_diff; 2192 faults += p->numa_faults[mem_idx]; 2193 p->total_numa_faults += diff; 2194 if (p->numa_group) { 2195 /* 2196 * safe because we can only change our own group 2197 * 2198 * mem_idx represents the offset for a given 2199 * nid and priv in a specific region because it 2200 * is at the beginning of the numa_faults array. 2201 */ 2202 p->numa_group->faults[mem_idx] += diff; 2203 p->numa_group->faults_cpu[mem_idx] += f_diff; 2204 p->numa_group->total_faults += diff; 2205 group_faults += p->numa_group->faults[mem_idx]; 2206 } 2207 } 2208 2209 if (faults > max_faults) { 2210 max_faults = faults; 2211 max_nid = nid; 2212 } 2213 2214 if (group_faults > max_group_faults) { 2215 max_group_faults = group_faults; 2216 max_group_nid = nid; 2217 } 2218 } 2219 2220 update_task_scan_period(p, fault_types[0], fault_types[1]); 2221 2222 if (p->numa_group) { 2223 numa_group_count_active_nodes(p->numa_group); 2224 spin_unlock_irq(group_lock); 2225 max_nid = preferred_group_nid(p, max_group_nid); 2226 } 2227 2228 if (max_faults) { 2229 /* Set the new preferred node */ 2230 if (max_nid != p->numa_preferred_nid) 2231 sched_setnuma(p, max_nid); 2232 2233 if (task_node(p) != p->numa_preferred_nid) 2234 numa_migrate_preferred(p); 2235 } 2236 } 2237 2238 static inline int get_numa_group(struct numa_group *grp) 2239 { 2240 return atomic_inc_not_zero(&grp->refcount); 2241 } 2242 2243 static inline void put_numa_group(struct numa_group *grp) 2244 { 2245 if (atomic_dec_and_test(&grp->refcount)) 2246 kfree_rcu(grp, rcu); 2247 } 2248 2249 static void task_numa_group(struct task_struct *p, int cpupid, int flags, 2250 int *priv) 2251 { 2252 struct numa_group *grp, *my_grp; 2253 struct task_struct *tsk; 2254 bool join = false; 2255 int cpu = cpupid_to_cpu(cpupid); 2256 int i; 2257 2258 if (unlikely(!p->numa_group)) { 2259 unsigned int size = sizeof(struct numa_group) + 2260 4*nr_node_ids*sizeof(unsigned long); 2261 2262 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 2263 if (!grp) 2264 return; 2265 2266 atomic_set(&grp->refcount, 1); 2267 grp->active_nodes = 1; 2268 grp->max_faults_cpu = 0; 2269 spin_lock_init(&grp->lock); 2270 grp->gid = p->pid; 2271 /* Second half of the array tracks nids where faults happen */ 2272 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES * 2273 nr_node_ids; 2274 2275 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2276 grp->faults[i] = p->numa_faults[i]; 2277 2278 grp->total_faults = p->total_numa_faults; 2279 2280 grp->nr_tasks++; 2281 rcu_assign_pointer(p->numa_group, grp); 2282 } 2283 2284 rcu_read_lock(); 2285 tsk = READ_ONCE(cpu_rq(cpu)->curr); 2286 2287 if (!cpupid_match_pid(tsk, cpupid)) 2288 goto no_join; 2289 2290 grp = rcu_dereference(tsk->numa_group); 2291 if (!grp) 2292 goto no_join; 2293 2294 my_grp = p->numa_group; 2295 if (grp == my_grp) 2296 goto no_join; 2297 2298 /* 2299 * Only join the other group if it's bigger; if we're the bigger group, 2300 * the other task will join us. 2301 */ 2302 if (my_grp->nr_tasks > grp->nr_tasks) 2303 goto no_join; 2304 2305 /* 2306 * Tie-break on the grp address. 2307 */ 2308 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) 2309 goto no_join; 2310 2311 /* Always join threads in the same process.
*/ 2312 if (tsk->mm == current->mm) 2313 join = true; 2314 2315 /* Simple filter to avoid false positives due to PID collisions */ 2316 if (flags & TNF_SHARED) 2317 join = true; 2318 2319 /* Update priv based on whether false sharing was detected */ 2320 *priv = !join; 2321 2322 if (join && !get_numa_group(grp)) 2323 goto no_join; 2324 2325 rcu_read_unlock(); 2326 2327 if (!join) 2328 return; 2329 2330 BUG_ON(irqs_disabled()); 2331 double_lock_irq(&my_grp->lock, &grp->lock); 2332 2333 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { 2334 my_grp->faults[i] -= p->numa_faults[i]; 2335 grp->faults[i] += p->numa_faults[i]; 2336 } 2337 my_grp->total_faults -= p->total_numa_faults; 2338 grp->total_faults += p->total_numa_faults; 2339 2340 my_grp->nr_tasks--; 2341 grp->nr_tasks++; 2342 2343 spin_unlock(&my_grp->lock); 2344 spin_unlock_irq(&grp->lock); 2345 2346 rcu_assign_pointer(p->numa_group, grp); 2347 2348 put_numa_group(my_grp); 2349 return; 2350 2351 no_join: 2352 rcu_read_unlock(); 2353 return; 2354 } 2355 2356 void task_numa_free(struct task_struct *p) 2357 { 2358 struct numa_group *grp = p->numa_group; 2359 void *numa_faults = p->numa_faults; 2360 unsigned long flags; 2361 int i; 2362 2363 if (grp) { 2364 spin_lock_irqsave(&grp->lock, flags); 2365 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2366 grp->faults[i] -= p->numa_faults[i]; 2367 grp->total_faults -= p->total_numa_faults; 2368 2369 grp->nr_tasks--; 2370 spin_unlock_irqrestore(&grp->lock, flags); 2371 RCU_INIT_POINTER(p->numa_group, NULL); 2372 put_numa_group(grp); 2373 } 2374 2375 p->numa_faults = NULL; 2376 kfree(numa_faults); 2377 } 2378 2379 /* 2380 * Got a PROT_NONE fault for a page on @node. 2381 */ 2382 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) 2383 { 2384 struct task_struct *p = current; 2385 bool migrated = flags & TNF_MIGRATED; 2386 int cpu_node = task_node(current); 2387 int local = !!(flags & TNF_FAULT_LOCAL); 2388 struct numa_group *ng; 2389 int priv; 2390 2391 if (!static_branch_likely(&sched_numa_balancing)) 2392 return; 2393 2394 /* for example, ksmd faulting in a user's mm */ 2395 if (!p->mm) 2396 return; 2397 2398 /* Allocate buffer to track faults on a per-node basis */ 2399 if (unlikely(!p->numa_faults)) { 2400 int size = sizeof(*p->numa_faults) * 2401 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; 2402 2403 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); 2404 if (!p->numa_faults) 2405 return; 2406 2407 p->total_numa_faults = 0; 2408 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2409 } 2410 2411 /* 2412 * First accesses are treated as private, otherwise consider accesses 2413 * to be private if the accessing pid has not changed 2414 */ 2415 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { 2416 priv = 1; 2417 } else { 2418 priv = cpupid_match_pid(p, last_cpupid); 2419 if (!priv && !(flags & TNF_NO_GROUP)) 2420 task_numa_group(p, last_cpupid, flags, &priv); 2421 } 2422 2423 /* 2424 * If a workload spans multiple NUMA nodes, a shared fault that 2425 * occurs wholly within the set of nodes that the workload is 2426 * actively using should be counted as local. This allows the 2427 * scan rate to slow down when a workload has settled down. 
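 *
 * E.g. (sketch): for a group with active_nodes == 2 spanning nodes
 * {0, 1}, a shared fault with cpu_node == 0 and mem_node == 1 is
 * counted as local below, while one with mem_node == 3 (outside the
 * active set) stays remote and keeps the scan rate up.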
2428 */ 2429 ng = p->numa_group; 2430 if (!priv && !local && ng && ng->active_nodes > 1 && 2431 numa_is_active_node(cpu_node, ng) && 2432 numa_is_active_node(mem_node, ng)) 2433 local = 1; 2434 2435 task_numa_placement(p); 2436 2437 /* 2438 * Retry task to preferred node migration periodically, in case it 2439 * previously failed, or the scheduler moved us. 2440 */ 2441 if (time_after(jiffies, p->numa_migrate_retry)) 2442 numa_migrate_preferred(p); 2443 2444 if (migrated) 2445 p->numa_pages_migrated += pages; 2446 if (flags & TNF_MIGRATE_FAIL) 2447 p->numa_faults_locality[2] += pages; 2448 2449 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; 2450 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; 2451 p->numa_faults_locality[local] += pages; 2452 } 2453 2454 static void reset_ptenuma_scan(struct task_struct *p) 2455 { 2456 /* 2457 * We only did a read acquisition of the mmap sem, so 2458 * p->mm->numa_scan_seq is written to without exclusive access 2459 * and the update is not guaranteed to be atomic. That's not 2460 * much of an issue though, since this is just used for 2461 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not 2462 * expensive, to avoid any form of compiler optimizations: 2463 */ 2464 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); 2465 p->mm->numa_scan_offset = 0; 2466 } 2467 2468 /* 2469 * The expensive part of numa migration is done from task_work context. 2470 * Triggered from task_tick_numa(). 2471 */ 2472 void task_numa_work(struct callback_head *work) 2473 { 2474 unsigned long migrate, next_scan, now = jiffies; 2475 struct task_struct *p = current; 2476 struct mm_struct *mm = p->mm; 2477 u64 runtime = p->se.sum_exec_runtime; 2478 struct vm_area_struct *vma; 2479 unsigned long start, end; 2480 unsigned long nr_pte_updates = 0; 2481 long pages, virtpages; 2482 2483 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); 2484 2485 work->next = work; /* protect against double add */ 2486 /* 2487 * Who cares about NUMA placement when they're dying. 2488 * 2489 * NOTE: make sure not to dereference p->mm before this check, 2490 * exit_task_work() happens _after_ exit_mm() so we could be called 2491 * without p->mm even though we still had it when we enqueued this 2492 * work. 2493 */ 2494 if (p->flags & PF_EXITING) 2495 return; 2496 2497 if (!mm->numa_next_scan) { 2498 mm->numa_next_scan = now + 2499 msecs_to_jiffies(sysctl_numa_balancing_scan_delay); 2500 } 2501 2502 /* 2503 * Enforce maximal scan/migration frequency.. 2504 */ 2505 migrate = mm->numa_next_scan; 2506 if (time_before(now, migrate)) 2507 return; 2508 2509 if (p->numa_scan_period == 0) { 2510 p->numa_scan_period_max = task_scan_max(p); 2511 p->numa_scan_period = task_scan_start(p); 2512 } 2513 2514 next_scan = now + msecs_to_jiffies(p->numa_scan_period); 2515 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) 2516 return; 2517 2518 /* 2519 * Delay this task enough that another task of this mm will likely win 2520 * the next time around.
2521 */ 2522 p->node_stamp += 2 * TICK_NSEC; 2523 2524 start = mm->numa_scan_offset; 2525 pages = sysctl_numa_balancing_scan_size; 2526 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ 2527 virtpages = pages * 8; /* Scan up to this much virtual space */ 2528 if (!pages) 2529 return; 2530 2531 2532 if (!down_read_trylock(&mm->mmap_sem)) 2533 return; 2534 vma = find_vma(mm, start); 2535 if (!vma) { 2536 reset_ptenuma_scan(p); 2537 start = 0; 2538 vma = mm->mmap; 2539 } 2540 for (; vma; vma = vma->vm_next) { 2541 if (!vma_migratable(vma) || !vma_policy_mof(vma) || 2542 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { 2543 continue; 2544 } 2545 2546 /* 2547 * Shared library pages mapped by multiple processes are not 2548 * migrated as it is expected they are cache replicated. Avoid 2549 * hinting faults in read-only file-backed mappings or the vdso 2550 * as migrating the pages will be of marginal benefit. 2551 */ 2552 if (!vma->vm_mm || 2553 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 2554 continue; 2555 2556 /* 2557 * Skip inaccessible VMAs to avoid any confusion between 2558 * PROT_NONE and NUMA hinting ptes 2559 */ 2560 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) 2561 continue; 2562 2563 do { 2564 start = max(start, vma->vm_start); 2565 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 2566 end = min(end, vma->vm_end); 2567 nr_pte_updates = change_prot_numa(vma, start, end); 2568 2569 /* 2570 * Try to scan sysctl_numa_balancing_scan_size worth of 2571 * hpages that have at least one present PTE that 2572 * is not already pte-numa. If the VMA contains 2573 * areas that are unused or already full of prot_numa 2574 * PTEs, scan up to virtpages, to skip through those 2575 * areas faster. 2576 */ 2577 if (nr_pte_updates) 2578 pages -= (end - start) >> PAGE_SHIFT; 2579 virtpages -= (end - start) >> PAGE_SHIFT; 2580 2581 start = end; 2582 if (pages <= 0 || virtpages <= 0) 2583 goto out; 2584 2585 cond_resched(); 2586 } while (end != vma->vm_end); 2587 } 2588 2589 out: 2590 /* 2591 * It is possible to reach the end of the VMA list but the last few 2592 * VMAs are not guaranteed to be vma_migratable. If they are not, we 2593 * would find the !migratable VMA on the next scan but not reset the 2594 * scanner to the start so check it now. 2595 */ 2596 if (vma) 2597 mm->numa_scan_offset = start; 2598 else 2599 reset_ptenuma_scan(p); 2600 up_read(&mm->mmap_sem); 2601 2602 /* 2603 * Make sure tasks use at least 32x as much time to run other code 2604 * than they used here, to limit NUMA PTE scanning overhead to 3% max. 2605 * Usually update_task_scan_period slows down scanning enough; on an 2606 * overloaded system we need to limit overhead on a per task basis. 2607 */ 2608 if (unlikely(p->se.sum_exec_runtime != runtime)) { 2609 u64 diff = p->se.sum_exec_runtime - runtime; 2610 p->node_stamp += 32 * diff; 2611 } 2612 } 2613 2614 /* 2615 * Drive the periodic memory faults.. 2616 */ 2617 void task_tick_numa(struct rq *rq, struct task_struct *curr) 2618 { 2619 struct callback_head *work = &curr->numa_work; 2620 u64 period, now; 2621 2622 /* 2623 * We don't care about NUMA placement if we don't have memory. 2624 */ 2625 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) 2626 return; 2627 2628 /* 2629 * Using runtime rather than walltime has the dual advantage that 2630 * we (mostly) drive the selection from busy threads and that the 2631 * task needs to have done some actual work before we bother with 2632 * NUMA placement.
2633 */ 2634 now = curr->se.sum_exec_runtime; 2635 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; 2636 2637 if (now > curr->node_stamp + period) { 2638 if (!curr->node_stamp) 2639 curr->numa_scan_period = task_scan_start(curr); 2640 curr->node_stamp += period; 2641 2642 if (!time_before(jiffies, curr->mm->numa_next_scan)) { 2643 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */ 2644 task_work_add(curr, work, true); 2645 } 2646 } 2647 } 2648 2649 #else 2650 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2651 { 2652 } 2653 2654 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) 2655 { 2656 } 2657 2658 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) 2659 { 2660 } 2661 2662 #endif /* CONFIG_NUMA_BALANCING */ 2663 2664 static void 2665 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2666 { 2667 update_load_add(&cfs_rq->load, se->load.weight); 2668 if (!parent_entity(se)) 2669 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); 2670 #ifdef CONFIG_SMP 2671 if (entity_is_task(se)) { 2672 struct rq *rq = rq_of(cfs_rq); 2673 2674 account_numa_enqueue(rq, task_of(se)); 2675 list_add(&se->group_node, &rq->cfs_tasks); 2676 } 2677 #endif 2678 cfs_rq->nr_running++; 2679 } 2680 2681 static void 2682 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2683 { 2684 update_load_sub(&cfs_rq->load, se->load.weight); 2685 if (!parent_entity(se)) 2686 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); 2687 #ifdef CONFIG_SMP 2688 if (entity_is_task(se)) { 2689 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); 2690 list_del_init(&se->group_node); 2691 } 2692 #endif 2693 cfs_rq->nr_running--; 2694 } 2695 2696 #ifdef CONFIG_FAIR_GROUP_SCHED 2697 # ifdef CONFIG_SMP 2698 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) 2699 { 2700 long tg_weight, load, shares; 2701 2702 /* 2703 * This really should be: cfs_rq->avg.load_avg, but instead we use 2704 * cfs_rq->load.weight, which is its upper bound. This helps ramp up 2705 * the shares for small weight interactive tasks. 2706 */ 2707 load = scale_load_down(cfs_rq->load.weight); 2708 2709 tg_weight = atomic_long_read(&tg->load_avg); 2710 2711 /* Ensure tg_weight >= load */ 2712 tg_weight -= cfs_rq->tg_load_avg_contrib; 2713 tg_weight += load; 2714 2715 shares = (tg->shares * load); 2716 if (tg_weight) 2717 shares /= tg_weight; 2718 2719 /* 2720 * MIN_SHARES has to be unscaled here to support per-CPU partitioning 2721 * of a group with small tg->shares value. It is a floor value which is 2722 * assigned as a minimum load.weight to the sched_entity representing 2723 * the group on a CPU. 2724 * 2725 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 2726 * on an 8-core system with 8 tasks each runnable on one CPU shares has 2727 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In 2728 * case no task is runnable on a CPU MIN_SHARES=2 should be returned 2729 * instead of 0. 
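 *
 * A compact userspace sketch of the computation above (illustrative
 * only; it treats scale_load_down() as a no-op, as on 32-bit, and takes
 * the already-adjusted tg_weight as an input):
 *
 *	long calc_shares(long tg_shares, long tg_weight, long load)
 *	{
 *		long shares = tg_shares * load;
 *
 *		if (tg_weight)
 *			shares /= tg_weight;
 *		if (shares < 2)			// MIN_SHARES
 *			shares = 2;
 *		if (shares > tg_shares)
 *			shares = tg_shares;
 *		return shares;			// (15360, 8192, 1024) -> 1920
 *	}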
2730 */ 2731 if (shares < MIN_SHARES) 2732 shares = MIN_SHARES; 2733 if (shares > tg->shares) 2734 shares = tg->shares; 2735 2736 return shares; 2737 } 2738 # else /* CONFIG_SMP */ 2739 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) 2740 { 2741 return tg->shares; 2742 } 2743 # endif /* CONFIG_SMP */ 2744 2745 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, 2746 unsigned long weight) 2747 { 2748 if (se->on_rq) { 2749 /* commit outstanding execution time */ 2750 if (cfs_rq->curr == se) 2751 update_curr(cfs_rq); 2752 account_entity_dequeue(cfs_rq, se); 2753 } 2754 2755 update_load_set(&se->load, weight); 2756 2757 if (se->on_rq) 2758 account_entity_enqueue(cfs_rq, se); 2759 } 2760 2761 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); 2762 2763 static void update_cfs_shares(struct sched_entity *se) 2764 { 2765 struct cfs_rq *cfs_rq = group_cfs_rq(se); 2766 struct task_group *tg; 2767 long shares; 2768 2769 if (!cfs_rq) 2770 return; 2771 2772 if (throttled_hierarchy(cfs_rq)) 2773 return; 2774 2775 tg = cfs_rq->tg; 2776 2777 #ifndef CONFIG_SMP 2778 if (likely(se->load.weight == tg->shares)) 2779 return; 2780 #endif 2781 shares = calc_cfs_shares(cfs_rq, tg); 2782 2783 reweight_entity(cfs_rq_of(se), se, shares); 2784 } 2785 2786 #else /* CONFIG_FAIR_GROUP_SCHED */ 2787 static inline void update_cfs_shares(struct sched_entity *se) 2788 { 2789 } 2790 #endif /* CONFIG_FAIR_GROUP_SCHED */ 2791 2792 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq) 2793 { 2794 struct rq *rq = rq_of(cfs_rq); 2795 2796 if (&rq->cfs == cfs_rq) { 2797 /* 2798 * There are a few boundary cases this might miss but it should 2799 * get called often enough that that should (hopefully) not be 2800 * a real problem -- added to that it only calls on the local 2801 * CPU, so if we enqueue remotely we'll miss an update, but 2802 * the next tick/schedule should update. 2803 * 2804 * It will not get called when we go idle, because the idle 2805 * thread is a different class (!fair), nor will the utilization 2806 * number include things like RT tasks. 2807 * 2808 * As is, the util number is not freq-invariant (we'd have to 2809 * implement arch_scale_freq_capacity() for that). 2810 * 2811 * See cpu_util(). 2812 */ 2813 cpufreq_update_util(rq, 0); 2814 } 2815 } 2816 2817 #ifdef CONFIG_SMP 2818 /* 2819 * Approximate: 2820 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period) 2821 */ 2822 static u64 decay_load(u64 val, u64 n) 2823 { 2824 unsigned int local_n; 2825 2826 if (unlikely(n > LOAD_AVG_PERIOD * 63)) 2827 return 0; 2828 2829 /* after bounds checking we can collapse to 32-bit */ 2830 local_n = n; 2831 2832 /* 2833 * As y^PERIOD = 1/2, we can combine 2834 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD) 2835 * With a look-up table which covers y^n (n<PERIOD) 2836 * 2837 * To achieve constant time decay_load. 
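 *
 * E.g. decaying a contribution over n = 40 periods (a sketch;
 * LOAD_AVG_PERIOD is 32 and runnable_avg_yN_inv[] holds y^n scaled by
 * 2^32, with y = 0.5^(1/32)):
 *
 *	val >>= 40 / 32;	// one halving for the full 32-period chunk
 *	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[40 % 32], 32);
 *				// remaining y^8 ~= 0.841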
2838 */ 2839 if (unlikely(local_n >= LOAD_AVG_PERIOD)) { 2840 val >>= local_n / LOAD_AVG_PERIOD; 2841 local_n %= LOAD_AVG_PERIOD; 2842 } 2843 2844 val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32); 2845 return val; 2846 } 2847 2848 static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3) 2849 { 2850 u32 c1, c2, c3 = d3; /* y^0 == 1 */ 2851 2852 /* 2853 * c1 = d1 y^p 2854 */ 2855 c1 = decay_load((u64)d1, periods); 2856 2857 /* 2858 * p-1 2859 * c2 = 1024 \Sum y^n 2860 * n=1 2861 * 2862 * inf inf 2863 * = 1024 ( \Sum y^n - \Sum y^n - y^0 ) 2864 * n=0 n=p 2865 */ 2866 c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024; 2867 2868 return c1 + c2 + c3; 2869 } 2870 2871 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 2872 2873 /* 2874 * Accumulate the three separate parts of the sum; d1 the remainder 2875 * of the last (incomplete) period, d2 the span of full periods and d3 2876 * the remainder of the (incomplete) current period. 2877 * 2878 * d1 d2 d3 2879 * ^ ^ ^ 2880 * | | | 2881 * |<->|<----------------->|<--->| 2882 * ... |---x---|------| ... |------|-----x (now) 2883 * 2884 * p-1 2885 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0 2886 * n=1 2887 * 2888 * = u y^p + (Step 1) 2889 * 2890 * p-1 2891 * d1 y^p + 1024 \Sum y^n + d3 y^0 (Step 2) 2892 * n=1 2893 */ 2894 static __always_inline u32 2895 accumulate_sum(u64 delta, int cpu, struct sched_avg *sa, 2896 unsigned long weight, int running, struct cfs_rq *cfs_rq) 2897 { 2898 unsigned long scale_freq, scale_cpu; 2899 u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */ 2900 u64 periods; 2901 2902 scale_freq = arch_scale_freq_capacity(NULL, cpu); 2903 scale_cpu = arch_scale_cpu_capacity(NULL, cpu); 2904 2905 delta += sa->period_contrib; 2906 periods = delta / 1024; /* A period is 1024us (~1ms) */ 2907 2908 /* 2909 * Step 1: decay old *_sum if we crossed period boundaries. 2910 */ 2911 if (periods) { 2912 sa->load_sum = decay_load(sa->load_sum, periods); 2913 if (cfs_rq) { 2914 cfs_rq->runnable_load_sum = 2915 decay_load(cfs_rq->runnable_load_sum, periods); 2916 } 2917 sa->util_sum = decay_load((u64)(sa->util_sum), periods); 2918 2919 /* 2920 * Step 2 2921 */ 2922 delta %= 1024; 2923 contrib = __accumulate_pelt_segments(periods, 2924 1024 - sa->period_contrib, delta); 2925 } 2926 sa->period_contrib = delta; 2927 2928 contrib = cap_scale(contrib, scale_freq); 2929 if (weight) { 2930 sa->load_sum += weight * contrib; 2931 if (cfs_rq) 2932 cfs_rq->runnable_load_sum += weight * contrib; 2933 } 2934 if (running) 2935 sa->util_sum += contrib * scale_cpu; 2936 2937 return periods; 2938 } 2939 2940 /* 2941 * We can represent the historical contribution to runnable average as the 2942 * coefficients of a geometric series. To do this we sub-divide our runnable 2943 * history into segments of approximately 1ms (1024us); label the segment that 2944 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g. 2945 * 2946 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ... 2947 * p0 p1 p2 2948 * (now) (~1ms ago) (~2ms ago) 2949 * 2950 * Let u_i denote the fraction of p_i that the entity was runnable. 2951 * 2952 * We then designate the fractions u_i as our co-efficients, yielding the 2953 * following representation of historical load: 2954 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ... 
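 *
 * (With y fixed just below via y^32 = 0.5, a fully-runnable history
 * converges to 1024 * (1 + y + y^2 + ...) = 1024/(1 - y), roughly the
 * LOAD_AVG_MAX ceiling of 47742 (after integer truncation) that the
 * *_sum accumulators saturate at.)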
2955 * 2956 * We choose y based on the width of a reasonable scheduling period, fixing: 2957 * y^32 = 0.5 2958 * 2959 * This means that the contribution to load ~32ms ago (u_32) will be weighted 2960 * approximately half as much as the contribution to load within the last ms 2961 * (u_0). 2962 * 2963 * When a period "rolls over" and we have new u_0`, multiplying the previous 2964 * sum again by y is sufficient to update: 2965 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... ) 2966 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}] 2967 */ 2968 static __always_inline int 2969 ___update_load_avg(u64 now, int cpu, struct sched_avg *sa, 2970 unsigned long weight, int running, struct cfs_rq *cfs_rq) 2971 { 2972 u64 delta; 2973 2974 delta = now - sa->last_update_time; 2975 /* 2976 * This should only happen when time goes backwards, which it 2977 * unfortunately does during sched clock init when we swap over to TSC. 2978 */ 2979 if ((s64)delta < 0) { 2980 sa->last_update_time = now; 2981 return 0; 2982 } 2983 2984 /* 2985 * Use 1024ns as the unit of measurement since it's a reasonable 2986 * approximation of 1us and fast to compute. 2987 */ 2988 delta >>= 10; 2989 if (!delta) 2990 return 0; 2991 2992 sa->last_update_time += delta << 10; 2993 2994 /* 2995 * running is a subset of runnable (weight) so running can't be set if 2996 * runnable is clear. But there are some corner cases where the current 2997 * se has already been dequeued but cfs_rq->curr still points to it. 2998 * This means that weight can be 0 while running is not, both for a 2999 * sched_entity and for a cfs_rq if the latter becomes idle. As an 3000 * example, this happens during idle_balance(), which calls 3001 * update_blocked_averages(). 3002 */ 3003 if (!weight) 3004 running = 0; 3005 3006 /* 3007 * Now we know we crossed measurement unit boundaries. The *_avg 3008 * accrues by two steps: 3009 * 3010 * Step 1: accumulate *_sum since last_update_time. If we haven't 3011 * crossed period boundaries, finish. 3012 */ 3013 if (!accumulate_sum(delta, cpu, sa, weight, running, cfs_rq)) 3014 return 0; 3015 3016 /* 3017 * Step 2: update *_avg. 3018 */ 3019 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX - 1024 + sa->period_contrib); 3020 if (cfs_rq) { 3021 cfs_rq->runnable_load_avg = 3022 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX - 1024 + sa->period_contrib); 3023 } 3024 sa->util_avg = sa->util_sum / (LOAD_AVG_MAX - 1024 + sa->period_contrib); 3025 3026 return 1; 3027 } 3028 3029 static int 3030 __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se) 3031 { 3032 return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL); 3033 } 3034 3035 static int 3036 __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se) 3037 { 3038 return ___update_load_avg(now, cpu, &se->avg, 3039 se->on_rq * scale_load_down(se->load.weight), 3040 cfs_rq->curr == se, NULL); 3041 } 3042 3043 static int 3044 __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq) 3045 { 3046 return ___update_load_avg(now, cpu, &cfs_rq->avg, 3047 scale_load_down(cfs_rq->load.weight), 3048 cfs_rq->curr != NULL, cfs_rq); 3049 } 3050 3051 /* 3052 * Signed add and clamp on underflow. 3053 * 3054 * Explicitly do a load-store to ensure the intermediate value never hits 3055 * memory. This allows lockless observations without ever seeing the negative 3056 * values.
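 *
 * Usage sketch (illustrative): a negative propagated delta clamps at 0
 * instead of wrapping the unsigned average around:
 *
 *	unsigned long avg = 100;
 *	add_positive(&avg, -300);	// avg is now 0, not a huge wrapped value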
3057 */ 3058 #define add_positive(_ptr, _val) do { \ 3059 typeof(_ptr) ptr = (_ptr); \ 3060 typeof(_val) val = (_val); \ 3061 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 3062 \ 3063 res = var + val; \ 3064 \ 3065 if (val < 0 && res > var) \ 3066 res = 0; \ 3067 \ 3068 WRITE_ONCE(*ptr, res); \ 3069 } while (0) 3070 3071 #ifdef CONFIG_FAIR_GROUP_SCHED 3072 /** 3073 * update_tg_load_avg - update the tg's load avg 3074 * @cfs_rq: the cfs_rq whose avg changed 3075 * @force: update regardless of how small the difference 3076 * 3077 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. 3078 * However, because tg->load_avg is a global value there are performance 3079 * considerations. 3080 * 3081 * In order to avoid having to look at the other cfs_rq's, we use a 3082 * differential update where we store the last value we propagated. This in 3083 * turn allows skipping updates if the differential is 'small'. 3084 * 3085 * Updating tg's load_avg is necessary before update_cfs_share(). 3086 */ 3087 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) 3088 { 3089 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; 3090 3091 /* 3092 * No need to update load_avg for root_task_group as it is not used. 3093 */ 3094 if (cfs_rq->tg == &root_task_group) 3095 return; 3096 3097 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { 3098 atomic_long_add(delta, &cfs_rq->tg->load_avg); 3099 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; 3100 } 3101 } 3102 3103 /* 3104 * Called within set_task_rq() right before setting a task's cpu. The 3105 * caller only guarantees p->pi_lock is held; no other assumptions, 3106 * including the state of rq->lock, should be made. 3107 */ 3108 void set_task_rq_fair(struct sched_entity *se, 3109 struct cfs_rq *prev, struct cfs_rq *next) 3110 { 3111 u64 p_last_update_time; 3112 u64 n_last_update_time; 3113 3114 if (!sched_feat(ATTACH_AGE_LOAD)) 3115 return; 3116 3117 /* 3118 * We are supposed to update the task to "current" time, then it's up to 3119 * date and ready to go to the new CPU/cfs_rq. But we have difficulty in 3120 * getting what the current time is, so simply throw away the out-of-date 3121 * time. This results in the wakee task being less decayed, but giving 3122 * the wakee more load does not sound bad.
3123 */ 3124 if (!(se->avg.last_update_time && prev)) 3125 return; 3126 3127 #ifndef CONFIG_64BIT 3128 { 3129 u64 p_last_update_time_copy; 3130 u64 n_last_update_time_copy; 3131 3132 do { 3133 p_last_update_time_copy = prev->load_last_update_time_copy; 3134 n_last_update_time_copy = next->load_last_update_time_copy; 3135 3136 smp_rmb(); 3137 3138 p_last_update_time = prev->avg.last_update_time; 3139 n_last_update_time = next->avg.last_update_time; 3140 3141 } while (p_last_update_time != p_last_update_time_copy || 3142 n_last_update_time != n_last_update_time_copy); 3143 } 3144 #else 3145 p_last_update_time = prev->avg.last_update_time; 3146 n_last_update_time = next->avg.last_update_time; 3147 #endif 3148 __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se); 3149 se->avg.last_update_time = n_last_update_time; 3150 } 3151 3152 /* Take into account change of utilization of a child task group */ 3153 static inline void 3154 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se) 3155 { 3156 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3157 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; 3158 3159 /* Nothing to update */ 3160 if (!delta) 3161 return; 3162 3163 /* Set new sched_entity's utilization */ 3164 se->avg.util_avg = gcfs_rq->avg.util_avg; 3165 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX; 3166 3167 /* Update parent cfs_rq utilization */ 3168 add_positive(&cfs_rq->avg.util_avg, delta); 3169 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX; 3170 } 3171 3172 /* Take into account change of load of a child task group */ 3173 static inline void 3174 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se) 3175 { 3176 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3177 long delta, load = gcfs_rq->avg.load_avg; 3178 3179 /* 3180 * If the load of the group cfs_rq is zero, the load of the 3181 * sched_entity will also be zero, so we can skip the formula 3182 */ 3183 if (load) { 3184 long tg_load; 3185 3186 /* Get tg's load and ensure tg_load > 0 */ 3187 tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1; 3188 3189 /* Ensure tg_load >= load and is updated with the current load */ 3190 tg_load -= gcfs_rq->tg_load_avg_contrib; 3191 tg_load += load; 3192 3193 /* 3194 * We need to compute a correction term in the case that the 3195 * task group is consuming more CPU than a task of equal 3196 * weight. A task with a weight equal to tg->shares will have 3197 * a load less than or equal to scale_load_down(tg->shares). 3198 * Similarly, the sched_entities that represent the task group 3199 * at the parent level can't have a load higher than 3200 * scale_load_down(tg->shares). And the sum of sched_entities' 3201 * load must be <= scale_load_down(tg->shares). 3202 */ 3203 if (tg_load > scale_load_down(gcfs_rq->tg->shares)) { 3204 /* scale gcfs_rq's load into tg's shares */ 3205 load *= scale_load_down(gcfs_rq->tg->shares); 3206 load /= tg_load; 3207 } 3208 } 3209 3210 delta = load - se->avg.load_avg; 3211 3212 /* Nothing to update */ 3213 if (!delta) 3214 return; 3215 3216 /* Set new sched_entity's load */ 3217 se->avg.load_avg = load; 3218 se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX; 3219 3220 /* Update parent cfs_rq load */ 3221 add_positive(&cfs_rq->avg.load_avg, delta); 3222 cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX; 3223 3224 /* 3225 * If the sched_entity is already enqueued, we also have to update the 3226 * runnable load avg.
3227 */ 3228 if (se->on_rq) { 3229 /* Update parent cfs_rq runnable_load_avg */ 3230 add_positive(&cfs_rq->runnable_load_avg, delta); 3231 cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX; 3232 } 3233 } 3234 3235 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) 3236 { 3237 cfs_rq->propagate_avg = 1; 3238 } 3239 3240 static inline int test_and_clear_tg_cfs_propagate(struct sched_entity *se) 3241 { 3242 struct cfs_rq *cfs_rq = group_cfs_rq(se); 3243 3244 if (!cfs_rq->propagate_avg) 3245 return 0; 3246 3247 cfs_rq->propagate_avg = 0; 3248 return 1; 3249 } 3250 3251 /* Update task and its cfs_rq load average */ 3252 static inline int propagate_entity_load_avg(struct sched_entity *se) 3253 { 3254 struct cfs_rq *cfs_rq; 3255 3256 if (entity_is_task(se)) 3257 return 0; 3258 3259 if (!test_and_clear_tg_cfs_propagate(se)) 3260 return 0; 3261 3262 cfs_rq = cfs_rq_of(se); 3263 3264 set_tg_cfs_propagate(cfs_rq); 3265 3266 update_tg_cfs_util(cfs_rq, se); 3267 update_tg_cfs_load(cfs_rq, se); 3268 3269 return 1; 3270 } 3271 3272 /* 3273 * Check if we need to update the load and the utilization of a blocked 3274 * group_entity: 3275 */ 3276 static inline bool skip_blocked_update(struct sched_entity *se) 3277 { 3278 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3279 3280 /* 3281 * If the sched_entity still has non-zero load or utilization, we have 3282 * to decay it: 3283 */ 3284 if (se->avg.load_avg || se->avg.util_avg) 3285 return false; 3286 3287 /* 3288 * If there is a pending propagation, we have to update the load and 3289 * the utilization of the sched_entity: 3290 */ 3291 if (gcfs_rq->propagate_avg) 3292 return false; 3293 3294 /* 3295 * Otherwise, the load and the utilization of the sched_entity are 3296 * already zero and there is no pending propagation, so it would be a 3297 * waste of time to try to decay it: 3298 */ 3299 return true; 3300 } 3301 3302 #else /* CONFIG_FAIR_GROUP_SCHED */ 3303 3304 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} 3305 3306 static inline int propagate_entity_load_avg(struct sched_entity *se) 3307 { 3308 return 0; 3309 } 3310 3311 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {} 3312 3313 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3314 3315 /* 3316 * Unsigned subtract and clamp on underflow. 3317 * 3318 * Explicitly do a load-store to ensure the intermediate value never hits 3319 * memory. This allows lockless observations without ever seeing the negative 3320 * values. 3321 */ 3322 #define sub_positive(_ptr, _val) do { \ 3323 typeof(_ptr) ptr = (_ptr); \ 3324 typeof(*ptr) val = (_val); \ 3325 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 3326 res = var - val; \ 3327 if (res > var) \ 3328 res = 0; \ 3329 WRITE_ONCE(*ptr, res); \ 3330 } while (0) 3331 3332 /** 3333 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages 3334 * @now: current time, as per cfs_rq_clock_task() 3335 * @cfs_rq: cfs_rq to update 3336 * 3337 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) 3338 * avg. The immediate corollary is that all (fair) tasks must be attached, see 3339 * post_init_entity_util_avg(). 3340 * 3341 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example. 3342 * 3343 * Returns true if the load decayed or we removed load. 3344 * 3345 * Since both these conditions indicate a changed cfs_rq->avg.load we should 3346 * call update_tg_load_avg() when this function returns true.
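 *
 * Typical caller pattern, a sketch of what update_load_avg() below does:
 *
 *	if (update_cfs_rq_load_avg(now, cfs_rq))
 *		update_tg_load_avg(cfs_rq, 0);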
3347 */ 3348 static inline int 3349 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 3350 { 3351 struct sched_avg *sa = &cfs_rq->avg; 3352 int decayed, removed_load = 0, removed_util = 0; 3353 3354 if (atomic_long_read(&cfs_rq->removed_load_avg)) { 3355 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); 3356 sub_positive(&sa->load_avg, r); 3357 sub_positive(&sa->load_sum, r * LOAD_AVG_MAX); 3358 removed_load = 1; 3359 set_tg_cfs_propagate(cfs_rq); 3360 } 3361 3362 if (atomic_long_read(&cfs_rq->removed_util_avg)) { 3363 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0); 3364 sub_positive(&sa->util_avg, r); 3365 sub_positive(&sa->util_sum, r * LOAD_AVG_MAX); 3366 removed_util = 1; 3367 set_tg_cfs_propagate(cfs_rq); 3368 } 3369 3370 decayed = __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq); 3371 3372 #ifndef CONFIG_64BIT 3373 smp_wmb(); 3374 cfs_rq->load_last_update_time_copy = sa->last_update_time; 3375 #endif 3376 3377 if (decayed || removed_util) 3378 cfs_rq_util_change(cfs_rq); 3379 3380 return decayed || removed_load; 3381 } 3382 3383 /* 3384 * Optional action to be done while updating the load average 3385 */ 3386 #define UPDATE_TG 0x1 3387 #define SKIP_AGE_LOAD 0x2 3388 3389 /* Update task and its cfs_rq load average */ 3390 static inline void update_load_avg(struct sched_entity *se, int flags) 3391 { 3392 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3393 u64 now = cfs_rq_clock_task(cfs_rq); 3394 struct rq *rq = rq_of(cfs_rq); 3395 int cpu = cpu_of(rq); 3396 int decayed; 3397 3398 /* 3399 * Track task load average for carrying it to new CPU after migrated, and 3400 * track group sched_entity load average for task_h_load calc in migration 3401 */ 3402 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) 3403 __update_load_avg_se(now, cpu, cfs_rq, se); 3404 3405 decayed = update_cfs_rq_load_avg(now, cfs_rq); 3406 decayed |= propagate_entity_load_avg(se); 3407 3408 if (decayed && (flags & UPDATE_TG)) 3409 update_tg_load_avg(cfs_rq, 0); 3410 } 3411 3412 /** 3413 * attach_entity_load_avg - attach this entity to its cfs_rq load avg 3414 * @cfs_rq: cfs_rq to attach to 3415 * @se: sched_entity to attach 3416 * 3417 * Must call update_cfs_rq_load_avg() before this, since we rely on 3418 * cfs_rq->avg.last_update_time being current. 3419 */ 3420 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3421 { 3422 se->avg.last_update_time = cfs_rq->avg.last_update_time; 3423 cfs_rq->avg.load_avg += se->avg.load_avg; 3424 cfs_rq->avg.load_sum += se->avg.load_sum; 3425 cfs_rq->avg.util_avg += se->avg.util_avg; 3426 cfs_rq->avg.util_sum += se->avg.util_sum; 3427 set_tg_cfs_propagate(cfs_rq); 3428 3429 cfs_rq_util_change(cfs_rq); 3430 } 3431 3432 /** 3433 * detach_entity_load_avg - detach this entity from its cfs_rq load avg 3434 * @cfs_rq: cfs_rq to detach from 3435 * @se: sched_entity to detach 3436 * 3437 * Must call update_cfs_rq_load_avg() before this, since we rely on 3438 * cfs_rq->avg.last_update_time being current. 
3439 */ 3440 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3441 { 3442 3443 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); 3444 sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum); 3445 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); 3446 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); 3447 set_tg_cfs_propagate(cfs_rq); 3448 3449 cfs_rq_util_change(cfs_rq); 3450 } 3451 3452 /* Add the load generated by se into cfs_rq's load average */ 3453 static inline void 3454 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3455 { 3456 struct sched_avg *sa = &se->avg; 3457 3458 cfs_rq->runnable_load_avg += sa->load_avg; 3459 cfs_rq->runnable_load_sum += sa->load_sum; 3460 3461 if (!sa->last_update_time) { 3462 attach_entity_load_avg(cfs_rq, se); 3463 update_tg_load_avg(cfs_rq, 0); 3464 } 3465 } 3466 3467 /* Remove the runnable load generated by se from cfs_rq's runnable load average */ 3468 static inline void 3469 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3470 { 3471 cfs_rq->runnable_load_avg = 3472 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0); 3473 cfs_rq->runnable_load_sum = 3474 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0); 3475 } 3476 3477 #ifndef CONFIG_64BIT 3478 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3479 { 3480 u64 last_update_time_copy; 3481 u64 last_update_time; 3482 3483 do { 3484 last_update_time_copy = cfs_rq->load_last_update_time_copy; 3485 smp_rmb(); 3486 last_update_time = cfs_rq->avg.last_update_time; 3487 } while (last_update_time != last_update_time_copy); 3488 3489 return last_update_time; 3490 } 3491 #else 3492 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3493 { 3494 return cfs_rq->avg.last_update_time; 3495 } 3496 #endif 3497 3498 /* 3499 * Synchronize entity load avg of dequeued entity without locking 3500 * the previous rq. 3501 */ 3502 void sync_entity_load_avg(struct sched_entity *se) 3503 { 3504 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3505 u64 last_update_time; 3506 3507 last_update_time = cfs_rq_last_update_time(cfs_rq); 3508 __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se); 3509 } 3510 3511 /* 3512 * The task first catches up with the cfs_rq, and then subtracts 3513 * itself from it (the task must be off the queue now). 3514 */ 3515 void remove_entity_load_avg(struct sched_entity *se) 3516 { 3517 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3518 3519 /* 3520 * tasks cannot exit without having gone through wake_up_new_task() -> 3521 * post_init_entity_util_avg() which will have added things to the 3522 * cfs_rq, so we can remove unconditionally. 3523 * 3524 * Similarly for groups, they will have passed through 3525 * post_init_entity_util_avg() before unregister_sched_fair_group() 3526 * calls this.
3527 */ 3528 3529 sync_entity_load_avg(se); 3530 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg); 3531 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg); 3532 } 3533 3534 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq) 3535 { 3536 return cfs_rq->runnable_load_avg; 3537 } 3538 3539 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) 3540 { 3541 return cfs_rq->avg.load_avg; 3542 } 3543 3544 static int idle_balance(struct rq *this_rq, struct rq_flags *rf); 3545 3546 #else /* CONFIG_SMP */ 3547 3548 static inline int 3549 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 3550 { 3551 return 0; 3552 } 3553 3554 #define UPDATE_TG 0x0 3555 #define SKIP_AGE_LOAD 0x0 3556 3557 static inline void update_load_avg(struct sched_entity *se, int not_used1) 3558 { 3559 cfs_rq_util_change(cfs_rq_of(se)); 3560 } 3561 3562 static inline void 3563 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3564 static inline void 3565 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3566 static inline void remove_entity_load_avg(struct sched_entity *se) {} 3567 3568 static inline void 3569 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3570 static inline void 3571 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3572 3573 static inline int idle_balance(struct rq *rq, struct rq_flags *rf) 3574 { 3575 return 0; 3576 } 3577 3578 #endif /* CONFIG_SMP */ 3579 3580 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) 3581 { 3582 #ifdef CONFIG_SCHED_DEBUG 3583 s64 d = se->vruntime - cfs_rq->min_vruntime; 3584 3585 if (d < 0) 3586 d = -d; 3587 3588 if (d > 3*sysctl_sched_latency) 3589 schedstat_inc(cfs_rq->nr_spread_over); 3590 #endif 3591 } 3592 3593 static void 3594 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) 3595 { 3596 u64 vruntime = cfs_rq->min_vruntime; 3597 3598 /* 3599 * The 'current' period is already promised to the current tasks, 3600 * however the extra weight of the new task will slow them down a 3601 * little, place the new task so that it fits in the slot that 3602 * stays open at the end. 3603 */ 3604 if (initial && sched_feat(START_DEBIT)) 3605 vruntime += sched_vslice(cfs_rq, se); 3606 3607 /* sleeps up to a single latency don't count. */ 3608 if (!initial) { 3609 unsigned long thresh = sysctl_sched_latency; 3610 3611 /* 3612 * Halve their sleep time's effect, to allow 3613 * for a gentler effect of sleepers: 3614 */ 3615 if (sched_feat(GENTLE_FAIR_SLEEPERS)) 3616 thresh >>= 1; 3617 3618 vruntime -= thresh; 3619 } 3620 3621 /* ensure we never gain time by being placed backwards. 
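 * e.g. (sketch): a task that slept for several seconds wakes with a
 * stale vruntime far below min_vruntime and is placed at
 * min_vruntime - thresh, while a short sleeper whose own vruntime is
 * still ahead simply keeps it.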
*/ 3622 se->vruntime = max_vruntime(se->vruntime, vruntime); 3623 } 3624 3625 static void check_enqueue_throttle(struct cfs_rq *cfs_rq); 3626 3627 static inline void check_schedstat_required(void) 3628 { 3629 #ifdef CONFIG_SCHEDSTATS 3630 if (schedstat_enabled()) 3631 return; 3632 3633 /* Force schedstat enabled if a dependent tracepoint is active */ 3634 if (trace_sched_stat_wait_enabled() || 3635 trace_sched_stat_sleep_enabled() || 3636 trace_sched_stat_iowait_enabled() || 3637 trace_sched_stat_blocked_enabled() || 3638 trace_sched_stat_runtime_enabled()) { 3639 printk_deferred_once("Scheduler tracepoints stat_wait, stat_sleep, " 3640 "stat_iowait, stat_blocked and stat_runtime require the " 3641 "kernel parameter schedstats=enable or " 3642 "kernel.sched_schedstats=1\n"); 3643 } 3644 #endif 3645 } 3646 3647 3648 /* 3649 * MIGRATION 3650 * 3651 * dequeue 3652 * update_curr() 3653 * update_min_vruntime() 3654 * vruntime -= min_vruntime 3655 * 3656 * enqueue 3657 * update_curr() 3658 * update_min_vruntime() 3659 * vruntime += min_vruntime 3660 * 3661 * this way the vruntime transition between RQs is done when both 3662 * min_vruntime are up-to-date. 3663 * 3664 * WAKEUP (remote) 3665 * 3666 * ->migrate_task_rq_fair() (p->state == TASK_WAKING) 3667 * vruntime -= min_vruntime 3668 * 3669 * enqueue 3670 * update_curr() 3671 * update_min_vruntime() 3672 * vruntime += min_vruntime 3673 * 3674 * this way we subtract a possibly stale min_vruntime on the originating 3675 * CPU but add an up-to-date min_vruntime on the destination CPU. 3676 */ 3677 3678 static void 3679 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3680 { 3681 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); 3682 bool curr = cfs_rq->curr == se; 3683 3684 /* 3685 * If we're the current task, we must renormalise before calling 3686 * update_curr(). 3687 */ 3688 if (renorm && curr) 3689 se->vruntime += cfs_rq->min_vruntime; 3690 3691 update_curr(cfs_rq); 3692 3693 /* 3694 * Otherwise, renormalise after, such that we're placed at the current 3695 * moment in time, instead of some random moment in the past. Being 3696 * placed in the past could significantly boost this task to the 3697 * fairness detriment of existing tasks. 3698 */ 3699 if (renorm && !curr) 3700 se->vruntime += cfs_rq->min_vruntime; 3701 3702 /* 3703 * When enqueuing a sched_entity, we must: 3704 * - Update loads to have both entity and cfs_rq synced with now.
3705 * - Add its load to cfs_rq->runnable_avg 3706 * - For group_entity, update its weight to reflect the new share of 3707 * its group cfs_rq 3708 * - Add its new weight to cfs_rq->load.weight 3709 */ 3710 update_load_avg(se, UPDATE_TG); 3711 enqueue_entity_load_avg(cfs_rq, se); 3712 update_cfs_shares(se); 3713 account_entity_enqueue(cfs_rq, se); 3714 3715 if (flags & ENQUEUE_WAKEUP) 3716 place_entity(cfs_rq, se, 0); 3717 3718 check_schedstat_required(); 3719 update_stats_enqueue(cfs_rq, se, flags); 3720 check_spread(cfs_rq, se); 3721 if (!curr) 3722 __enqueue_entity(cfs_rq, se); 3723 se->on_rq = 1; 3724 3725 if (cfs_rq->nr_running == 1) { 3726 list_add_leaf_cfs_rq(cfs_rq); 3727 check_enqueue_throttle(cfs_rq); 3728 } 3729 } 3730 3731 static void __clear_buddies_last(struct sched_entity *se) 3732 { 3733 for_each_sched_entity(se) { 3734 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3735 if (cfs_rq->last != se) 3736 break; 3737 3738 cfs_rq->last = NULL; 3739 } 3740 } 3741 3742 static void __clear_buddies_next(struct sched_entity *se) 3743 { 3744 for_each_sched_entity(se) { 3745 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3746 if (cfs_rq->next != se) 3747 break; 3748 3749 cfs_rq->next = NULL; 3750 } 3751 } 3752 3753 static void __clear_buddies_skip(struct sched_entity *se) 3754 { 3755 for_each_sched_entity(se) { 3756 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3757 if (cfs_rq->skip != se) 3758 break; 3759 3760 cfs_rq->skip = NULL; 3761 } 3762 } 3763 3764 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) 3765 { 3766 if (cfs_rq->last == se) 3767 __clear_buddies_last(se); 3768 3769 if (cfs_rq->next == se) 3770 __clear_buddies_next(se); 3771 3772 if (cfs_rq->skip == se) 3773 __clear_buddies_skip(se); 3774 } 3775 3776 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); 3777 3778 static void 3779 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3780 { 3781 /* 3782 * Update run-time statistics of the 'current'. 3783 */ 3784 update_curr(cfs_rq); 3785 3786 /* 3787 * When dequeuing a sched_entity, we must: 3788 * - Update loads to have both entity and cfs_rq synced with now. 3789 * - Subtract its load from the cfs_rq->runnable_avg. 3790 * - Subtract its previous weight from cfs_rq->load.weight. 3791 * - For group entity, update its weight to reflect the new share 3792 * of its group cfs_rq. 3793 */ 3794 update_load_avg(se, UPDATE_TG); 3795 dequeue_entity_load_avg(cfs_rq, se); 3796 3797 update_stats_dequeue(cfs_rq, se, flags); 3798 3799 clear_buddies(cfs_rq, se); 3800 3801 if (se != cfs_rq->curr) 3802 __dequeue_entity(cfs_rq, se); 3803 se->on_rq = 0; 3804 account_entity_dequeue(cfs_rq, se); 3805 3806 /* 3807 * Normalize after update_curr(), which will also have moved 3808 * min_vruntime if @se is the one holding it back. But before doing 3809 * update_min_vruntime() again, which will discount @se's position and 3810 * can move min_vruntime forward still more. 3811 */ 3812 if (!(flags & DEQUEUE_SLEEP)) 3813 se->vruntime -= cfs_rq->min_vruntime; 3814 3815 /* return excess runtime on last dequeue */ 3816 return_cfs_rq_runtime(cfs_rq); 3817 3818 update_cfs_shares(se); 3819 3820 /* 3821 * Now advance min_vruntime if @se was the entity holding it back, 3822 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be 3823 * put back on, and if we advance min_vruntime, we'll be placed back 3824 * further than we started -- ie. we'll be penalized.
3825 */ 3826 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) 3827 update_min_vruntime(cfs_rq); 3828 } 3829 3830 /* 3831 * Preempt the current task with a newly woken task if needed: 3832 */ 3833 static void 3834 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) 3835 { 3836 unsigned long ideal_runtime, delta_exec; 3837 struct sched_entity *se; 3838 s64 delta; 3839 3840 ideal_runtime = sched_slice(cfs_rq, curr); 3841 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; 3842 if (delta_exec > ideal_runtime) { 3843 resched_curr(rq_of(cfs_rq)); 3844 /* 3845 * The current task ran long enough, ensure it doesn't get 3846 * re-elected due to buddy favours. 3847 */ 3848 clear_buddies(cfs_rq, curr); 3849 return; 3850 } 3851 3852 /* 3853 * Ensure that a task that missed wakeup preemption by a 3854 * narrow margin doesn't have to wait for a full slice. 3855 * This also mitigates buddy induced latencies under load. 3856 */ 3857 if (delta_exec < sysctl_sched_min_granularity) 3858 return; 3859 3860 se = __pick_first_entity(cfs_rq); 3861 delta = curr->vruntime - se->vruntime; 3862 3863 if (delta < 0) 3864 return; 3865 3866 if (delta > ideal_runtime) 3867 resched_curr(rq_of(cfs_rq)); 3868 } 3869 3870 static void 3871 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 3872 { 3873 /* 'current' is not kept within the tree. */ 3874 if (se->on_rq) { 3875 /* 3876 * Any task has to be enqueued before it gets to execute on 3877 * a CPU. So account for the time it spent waiting on the 3878 * runqueue. 3879 */ 3880 update_stats_wait_end(cfs_rq, se); 3881 __dequeue_entity(cfs_rq, se); 3882 update_load_avg(se, UPDATE_TG); 3883 } 3884 3885 update_stats_curr_start(cfs_rq, se); 3886 cfs_rq->curr = se; 3887 3888 /* 3889 * Track our maximum slice length, if the CPU's load is at 3890 * least twice that of our own weight (i.e. don't track it 3891 * when there are only lesser-weight tasks around): 3892 */ 3893 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { 3894 schedstat_set(se->statistics.slice_max, 3895 max((u64)schedstat_val(se->statistics.slice_max), 3896 se->sum_exec_runtime - se->prev_sum_exec_runtime)); 3897 } 3898 3899 se->prev_sum_exec_runtime = se->sum_exec_runtime; 3900 } 3901 3902 static int 3903 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); 3904 3905 /* 3906 * Pick the next process, keeping these things in mind, in this order: 3907 * 1) keep things fair between processes/task groups 3908 * 2) pick the "next" process, since someone really wants that to run 3909 * 3) pick the "last" process, for cache locality 3910 * 4) do not run the "skip" process, if something else is available 3911 */ 3912 static struct sched_entity * 3913 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) 3914 { 3915 struct sched_entity *left = __pick_first_entity(cfs_rq); 3916 struct sched_entity *se; 3917 3918 /* 3919 * If curr is set we have to see if it's left of the leftmost entity 3920 * still in the tree, provided there was anything in the tree at all. 3921 */ 3922 if (!left || (curr && entity_before(curr, left))) 3923 left = curr; 3924 3925 se = left; /* ideally we run the leftmost entity */ 3926 3927 /* 3928 * Avoid running the skip buddy, if running something else can 3929 * be done without getting too unfair.
3930 */ 3931 if (cfs_rq->skip == se) { 3932 struct sched_entity *second; 3933 3934 if (se == curr) { 3935 second = __pick_first_entity(cfs_rq); 3936 } else { 3937 second = __pick_next_entity(se); 3938 if (!second || (curr && entity_before(curr, second))) 3939 second = curr; 3940 } 3941 3942 if (second && wakeup_preempt_entity(second, left) < 1) 3943 se = second; 3944 } 3945 3946 /* 3947 * Prefer last buddy, try to return the CPU to a preempted task. 3948 */ 3949 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) 3950 se = cfs_rq->last; 3951 3952 /* 3953 * Someone really wants this to run. If it's not unfair, run it. 3954 */ 3955 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) 3956 se = cfs_rq->next; 3957 3958 clear_buddies(cfs_rq, se); 3959 3960 return se; 3961 } 3962 3963 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); 3964 3965 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) 3966 { 3967 /* 3968 * If still on the runqueue then deactivate_task() 3969 * was not called and update_curr() has to be done: 3970 */ 3971 if (prev->on_rq) 3972 update_curr(cfs_rq); 3973 3974 /* throttle cfs_rqs exceeding runtime */ 3975 check_cfs_rq_runtime(cfs_rq); 3976 3977 check_spread(cfs_rq, prev); 3978 3979 if (prev->on_rq) { 3980 update_stats_wait_start(cfs_rq, prev); 3981 /* Put 'current' back into the tree. */ 3982 __enqueue_entity(cfs_rq, prev); 3983 /* in !on_rq case, update occurred at dequeue */ 3984 update_load_avg(prev, 0); 3985 } 3986 cfs_rq->curr = NULL; 3987 } 3988 3989 static void 3990 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) 3991 { 3992 /* 3993 * Update run-time statistics of the 'current'. 3994 */ 3995 update_curr(cfs_rq); 3996 3997 /* 3998 * Ensure that runnable average is periodically updated. 3999 */ 4000 update_load_avg(curr, UPDATE_TG); 4001 update_cfs_shares(curr); 4002 4003 #ifdef CONFIG_SCHED_HRTICK 4004 /* 4005 * queued ticks are scheduled to match the slice, so don't bother 4006 * validating it and just reschedule. 4007 */ 4008 if (queued) { 4009 resched_curr(rq_of(cfs_rq)); 4010 return; 4011 } 4012 /* 4013 * don't let the period tick interfere with the hrtick preemption 4014 */ 4015 if (!sched_feat(DOUBLE_TICK) && 4016 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) 4017 return; 4018 #endif 4019 4020 if (cfs_rq->nr_running > 1) 4021 check_preempt_tick(cfs_rq, curr); 4022 } 4023 4024 4025 /************************************************** 4026 * CFS bandwidth control machinery 4027 */ 4028 4029 #ifdef CONFIG_CFS_BANDWIDTH 4030 4031 #ifdef HAVE_JUMP_LABEL 4032 static struct static_key __cfs_bandwidth_used; 4033 4034 static inline bool cfs_bandwidth_used(void) 4035 { 4036 return static_key_false(&__cfs_bandwidth_used); 4037 } 4038 4039 void cfs_bandwidth_usage_inc(void) 4040 { 4041 static_key_slow_inc(&__cfs_bandwidth_used); 4042 } 4043 4044 void cfs_bandwidth_usage_dec(void) 4045 { 4046 static_key_slow_dec(&__cfs_bandwidth_used); 4047 } 4048 #else /* HAVE_JUMP_LABEL */ 4049 static bool cfs_bandwidth_used(void) 4050 { 4051 return true; 4052 } 4053 4054 void cfs_bandwidth_usage_inc(void) {} 4055 void cfs_bandwidth_usage_dec(void) {} 4056 #endif /* HAVE_JUMP_LABEL */ 4057 4058 /* 4059 * default period for cfs group bandwidth. 
4060 * default: 0.1s, units: nanoseconds 4061 */ 4062 static inline u64 default_cfs_period(void) 4063 { 4064 return 100000000ULL; 4065 } 4066 4067 static inline u64 sched_cfs_bandwidth_slice(void) 4068 { 4069 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; 4070 } 4071 4072 /* 4073 * Replenish runtime according to assigned quota and update expiration time. 4074 * We use sched_clock_cpu directly instead of rq->clock to avoid adding 4075 * additional synchronization around rq->lock. 4076 * 4077 * requires cfs_b->lock 4078 */ 4079 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) 4080 { 4081 u64 now; 4082 4083 if (cfs_b->quota == RUNTIME_INF) 4084 return; 4085 4086 now = sched_clock_cpu(smp_processor_id()); 4087 cfs_b->runtime = cfs_b->quota; 4088 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); 4089 } 4090 4091 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 4092 { 4093 return &tg->cfs_bandwidth; 4094 } 4095 4096 /* rq->clock_task normalized against any time this cfs_rq has spent throttled */ 4097 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) 4098 { 4099 if (unlikely(cfs_rq->throttle_count)) 4100 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time; 4101 4102 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; 4103 } 4104 4105 /* returns 0 on failure to allocate runtime */ 4106 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4107 { 4108 struct task_group *tg = cfs_rq->tg; 4109 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); 4110 u64 amount = 0, min_amount, expires; 4111 4112 /* note: this is a positive sum as runtime_remaining <= 0 */ 4113 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; 4114 4115 raw_spin_lock(&cfs_b->lock); 4116 if (cfs_b->quota == RUNTIME_INF) 4117 amount = min_amount; 4118 else { 4119 start_cfs_bandwidth(cfs_b); 4120 4121 if (cfs_b->runtime > 0) { 4122 amount = min(cfs_b->runtime, min_amount); 4123 cfs_b->runtime -= amount; 4124 cfs_b->idle = 0; 4125 } 4126 } 4127 expires = cfs_b->runtime_expires; 4128 raw_spin_unlock(&cfs_b->lock); 4129 4130 cfs_rq->runtime_remaining += amount; 4131 /* 4132 * we may have advanced our local expiration to account for allowed 4133 * spread between our sched_clock and the one on which runtime was 4134 * issued. 4135 */ 4136 if ((s64)(expires - cfs_rq->runtime_expires) > 0) 4137 cfs_rq->runtime_expires = expires; 4138 4139 return cfs_rq->runtime_remaining > 0; 4140 } 4141 4142 /* 4143 * Note: This depends on the synchronization provided by sched_clock and the 4144 * fact that rq->clock snapshots this value. 4145 */ 4146 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4147 { 4148 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4149 4150 /* if the deadline is ahead of our clock, nothing to do */ 4151 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0)) 4152 return; 4153 4154 if (cfs_rq->runtime_remaining < 0) 4155 return; 4156 4157 /* 4158 * If the local deadline has passed we have to consider the 4159 * possibility that our sched_clock is 'fast' and the global deadline 4160 * has not truly expired. 4161 * 4162 * Fortunately we can determine whether this is the case by checking 4163 * whether the global deadline has advanced. It is valid to compare 4164 * cfs_b->runtime_expires without any locks since we only care about 4165 * exact equality, so a partial write will still work.
4166 */ 4167 4168 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) { 4169 /* extend local deadline, drift is bounded above by 2 ticks */ 4170 cfs_rq->runtime_expires += TICK_NSEC; 4171 } else { 4172 /* global deadline is ahead, expiration has passed */ 4173 cfs_rq->runtime_remaining = 0; 4174 } 4175 } 4176 4177 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4178 { 4179 /* dock delta_exec before expiring quota (as it could span periods) */ 4180 cfs_rq->runtime_remaining -= delta_exec; 4181 expire_cfs_rq_runtime(cfs_rq); 4182 4183 if (likely(cfs_rq->runtime_remaining > 0)) 4184 return; 4185 4186 /* 4187 * if we're unable to extend our runtime we resched so that the active 4188 * hierarchy can be throttled 4189 */ 4190 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) 4191 resched_curr(rq_of(cfs_rq)); 4192 } 4193 4194 static __always_inline 4195 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4196 { 4197 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 4198 return; 4199 4200 __account_cfs_rq_runtime(cfs_rq, delta_exec); 4201 } 4202 4203 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 4204 { 4205 return cfs_bandwidth_used() && cfs_rq->throttled; 4206 } 4207 4208 /* check whether cfs_rq, or any parent, is throttled */ 4209 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 4210 { 4211 return cfs_bandwidth_used() && cfs_rq->throttle_count; 4212 } 4213 4214 /* 4215 * Ensure that neither the group entity corresponding to src_cpu nor the one 4216 * corresponding to dest_cpu is part of a throttled hierarchy when performing 4217 * group load-balance operations. 4218 */ 4219 static inline int throttled_lb_pair(struct task_group *tg, 4220 int src_cpu, int dest_cpu) 4221 { 4222 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; 4223 4224 src_cfs_rq = tg->cfs_rq[src_cpu]; 4225 dest_cfs_rq = tg->cfs_rq[dest_cpu]; 4226 4227 return throttled_hierarchy(src_cfs_rq) || 4228 throttled_hierarchy(dest_cfs_rq); 4229 } 4230 4231 /* updated child weight may affect parent so we have to do this bottom up */ 4232 static int tg_unthrottle_up(struct task_group *tg, void *data) 4233 { 4234 struct rq *rq = data; 4235 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4236 4237 cfs_rq->throttle_count--; 4238 if (!cfs_rq->throttle_count) { 4239 /* adjust cfs_rq_clock_task() */ 4240 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - 4241 cfs_rq->throttled_clock_task; 4242 } 4243 4244 return 0; 4245 } 4246 4247 static int tg_throttle_down(struct task_group *tg, void *data) 4248 { 4249 struct rq *rq = data; 4250 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4251 4252 /* group is entering throttled state, stop time */ 4253 if (!cfs_rq->throttle_count) 4254 cfs_rq->throttled_clock_task = rq_clock_task(rq); 4255 cfs_rq->throttle_count++; 4256 4257 return 0; 4258 } 4259 4260 static void throttle_cfs_rq(struct cfs_rq *cfs_rq) 4261 { 4262 struct rq *rq = rq_of(cfs_rq); 4263 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4264 struct sched_entity *se; 4265 long task_delta, dequeue = 1; 4266 bool empty; 4267 4268 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; 4269 4270 /* freeze hierarchy runnable averages while throttled */ 4271 rcu_read_lock(); 4272 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); 4273 rcu_read_unlock(); 4274 4275 task_delta = cfs_rq->h_nr_running; 4276 for_each_sched_entity(se) { 4277 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4278 /* throttled entity or throttle-on-deactivate */ 4279 if (!se->on_rq) 4280 break; 4281 4282
if (dequeue) 4283 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); 4284 qcfs_rq->h_nr_running -= task_delta; 4285 4286 if (qcfs_rq->load.weight) 4287 dequeue = 0; 4288 } 4289 4290 if (!se) 4291 sub_nr_running(rq, task_delta); 4292 4293 cfs_rq->throttled = 1; 4294 cfs_rq->throttled_clock = rq_clock(rq); 4295 raw_spin_lock(&cfs_b->lock); 4296 empty = list_empty(&cfs_b->throttled_cfs_rq); 4297 4298 /* 4299 * Add to the _head_ of the list, so that an already-started 4300 * distribute_cfs_runtime will not see us 4301 */ 4302 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); 4303 4304 /* 4305 * If we're the first throttled task, make sure the bandwidth 4306 * timer is running. 4307 */ 4308 if (empty) 4309 start_cfs_bandwidth(cfs_b); 4310 4311 raw_spin_unlock(&cfs_b->lock); 4312 } 4313 4314 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) 4315 { 4316 struct rq *rq = rq_of(cfs_rq); 4317 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4318 struct sched_entity *se; 4319 int enqueue = 1; 4320 long task_delta; 4321 4322 se = cfs_rq->tg->se[cpu_of(rq)]; 4323 4324 cfs_rq->throttled = 0; 4325 4326 update_rq_clock(rq); 4327 4328 raw_spin_lock(&cfs_b->lock); 4329 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; 4330 list_del_rcu(&cfs_rq->throttled_list); 4331 raw_spin_unlock(&cfs_b->lock); 4332 4333 /* update hierarchical throttle state */ 4334 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); 4335 4336 if (!cfs_rq->load.weight) 4337 return; 4338 4339 task_delta = cfs_rq->h_nr_running; 4340 for_each_sched_entity(se) { 4341 if (se->on_rq) 4342 enqueue = 0; 4343 4344 cfs_rq = cfs_rq_of(se); 4345 if (enqueue) 4346 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); 4347 cfs_rq->h_nr_running += task_delta; 4348 4349 if (cfs_rq_throttled(cfs_rq)) 4350 break; 4351 } 4352 4353 if (!se) 4354 add_nr_running(rq, task_delta); 4355 4356 /* determine whether we need to wake up potentially idle cpu */ 4357 if (rq->curr == rq->idle && rq->cfs.nr_running) 4358 resched_curr(rq); 4359 } 4360 4361 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, 4362 u64 remaining, u64 expires) 4363 { 4364 struct cfs_rq *cfs_rq; 4365 u64 runtime; 4366 u64 starting_runtime = remaining; 4367 4368 rcu_read_lock(); 4369 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, 4370 throttled_list) { 4371 struct rq *rq = rq_of(cfs_rq); 4372 struct rq_flags rf; 4373 4374 rq_lock(rq, &rf); 4375 if (!cfs_rq_throttled(cfs_rq)) 4376 goto next; 4377 4378 runtime = -cfs_rq->runtime_remaining + 1; 4379 if (runtime > remaining) 4380 runtime = remaining; 4381 remaining -= runtime; 4382 4383 cfs_rq->runtime_remaining += runtime; 4384 cfs_rq->runtime_expires = expires; 4385 4386 /* we check whether we're throttled above */ 4387 if (cfs_rq->runtime_remaining > 0) 4388 unthrottle_cfs_rq(cfs_rq); 4389 4390 next: 4391 rq_unlock(rq, &rf); 4392 4393 if (!remaining) 4394 break; 4395 } 4396 rcu_read_unlock(); 4397 4398 return starting_runtime - remaining; 4399 } 4400 4401 /* 4402 * Responsible for refilling a task_group's bandwidth and unthrottling its 4403 * cfs_rqs as appropriate. If there has been no activity within the last 4404 * period the timer is deactivated until scheduling resumes; cfs_b->idle is 4405 * used to track this state. 
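 * (assign_cfs_rq_runtime() clears cfs_b->idle whenever it hands out quota,
 * so one full period with no quota requests and nothing throttled is
 * enough for the timer to deactivate itself.)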
4406 */ 4407 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) 4408 { 4409 u64 runtime, runtime_expires; 4410 int throttled; 4411 4412 /* no need to continue the timer with no bandwidth constraint */ 4413 if (cfs_b->quota == RUNTIME_INF) 4414 goto out_deactivate; 4415 4416 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 4417 cfs_b->nr_periods += overrun; 4418 4419 /* 4420 * idle depends on !throttled (for the case of a large deficit), and if 4421 * we're going inactive then everything else can be deferred 4422 */ 4423 if (cfs_b->idle && !throttled) 4424 goto out_deactivate; 4425 4426 __refill_cfs_bandwidth_runtime(cfs_b); 4427 4428 if (!throttled) { 4429 /* mark as potentially idle for the upcoming period */ 4430 cfs_b->idle = 1; 4431 return 0; 4432 } 4433 4434 /* account preceding periods in which throttling occurred */ 4435 cfs_b->nr_throttled += overrun; 4436 4437 runtime_expires = cfs_b->runtime_expires; 4438 4439 /* 4440 * This check is repeated as we are holding onto the new bandwidth while 4441 * we unthrottle. This can potentially race with an unthrottled group 4442 * trying to acquire new bandwidth from the global pool. This can result 4443 * in us over-using our runtime if it is all used during this loop, but 4444 * only by limited amounts in that extreme case. 4445 */ 4446 while (throttled && cfs_b->runtime > 0) { 4447 runtime = cfs_b->runtime; 4448 raw_spin_unlock(&cfs_b->lock); 4449 /* we can't nest cfs_b->lock while distributing bandwidth */ 4450 runtime = distribute_cfs_runtime(cfs_b, runtime, 4451 runtime_expires); 4452 raw_spin_lock(&cfs_b->lock); 4453 4454 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 4455 4456 cfs_b->runtime -= min(runtime, cfs_b->runtime); 4457 } 4458 4459 /* 4460 * While we are ensured activity in the period following an 4461 * unthrottle, this also covers the case in which the new bandwidth is 4462 * insufficient to cover the existing bandwidth deficit. (Forcing the 4463 * timer to remain active while there are any throttled entities.) 4464 */ 4465 cfs_b->idle = 0; 4466 4467 return 0; 4468 4469 out_deactivate: 4470 return 1; 4471 } 4472 4473 /* a cfs_rq won't donate quota below this amount */ 4474 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; 4475 /* minimum remaining period time to redistribute slack quota */ 4476 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; 4477 /* how long we wait to gather additional slack before distributing */ 4478 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; 4479 4480 /* 4481 * Are we near the end of the current quota period? 4482 * 4483 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the 4484 * hrtimer base being cleared by hrtimer_start. In the case of 4485 * migrate_hrtimers, base is never cleared, so we are fine. 4486 */ 4487 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) 4488 { 4489 struct hrtimer *refresh_timer = &cfs_b->period_timer; 4490 u64 remaining; 4491 4492 /* if the call-back is running, a quota refresh is already occurring */ 4493 if (hrtimer_callback_running(refresh_timer)) 4494 return 1; 4495 4496 /* is a quota refresh about to occur?
*/ 4497 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); 4498 if (remaining < min_expire) 4499 return 1; 4500 4501 return 0; 4502 } 4503 4504 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) 4505 { 4506 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; 4507 4508 /* if there's a quota refresh soon don't bother with slack */ 4509 if (runtime_refresh_within(cfs_b, min_left)) 4510 return; 4511 4512 hrtimer_start(&cfs_b->slack_timer, 4513 ns_to_ktime(cfs_bandwidth_slack_period), 4514 HRTIMER_MODE_REL); 4515 } 4516 4517 /* we know any runtime found here is valid as update_curr() precedes return */ 4518 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4519 { 4520 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4521 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; 4522 4523 if (slack_runtime <= 0) 4524 return; 4525 4526 raw_spin_lock(&cfs_b->lock); 4527 if (cfs_b->quota != RUNTIME_INF && 4528 cfs_rq->runtime_expires == cfs_b->runtime_expires) { 4529 cfs_b->runtime += slack_runtime; 4530 4531 /* we are under rq->lock, defer unthrottling using a timer */ 4532 if (cfs_b->runtime > sched_cfs_bandwidth_slice() && 4533 !list_empty(&cfs_b->throttled_cfs_rq)) 4534 start_cfs_slack_bandwidth(cfs_b); 4535 } 4536 raw_spin_unlock(&cfs_b->lock); 4537 4538 /* even if it's not valid for return we don't want to try again */ 4539 cfs_rq->runtime_remaining -= slack_runtime; 4540 } 4541 4542 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4543 { 4544 if (!cfs_bandwidth_used()) 4545 return; 4546 4547 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) 4548 return; 4549 4550 __return_cfs_rq_runtime(cfs_rq); 4551 } 4552 4553 /* 4554 * This is done with a timer (instead of inline with bandwidth return) since 4555 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. 4556 */ 4557 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) 4558 { 4559 u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); 4560 u64 expires; 4561 4562 /* confirm we're still not at a refresh boundary */ 4563 raw_spin_lock(&cfs_b->lock); 4564 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { 4565 raw_spin_unlock(&cfs_b->lock); 4566 return; 4567 } 4568 4569 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) 4570 runtime = cfs_b->runtime; 4571 4572 expires = cfs_b->runtime_expires; 4573 raw_spin_unlock(&cfs_b->lock); 4574 4575 if (!runtime) 4576 return; 4577 4578 runtime = distribute_cfs_runtime(cfs_b, runtime, expires); 4579 4580 raw_spin_lock(&cfs_b->lock); 4581 if (expires == cfs_b->runtime_expires) 4582 cfs_b->runtime -= min(runtime, cfs_b->runtime); 4583 raw_spin_unlock(&cfs_b->lock); 4584 } 4585 4586 /* 4587 * When a group wakes up we want to make sure that its quota is not already 4588 * expired/exceeded, otherwise it may be allowed to steal additional ticks of 4589 * runtime as update_curr() throttling cannot trigger until it's on-rq.
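 * (check_enqueue_throttle() below therefore calls
 * account_cfs_rq_runtime(cfs_rq, 0), which tries to top the local pool up
 * from the global one, and throttles the group only if that fails.)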
4590 */ 4591 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) 4592 { 4593 if (!cfs_bandwidth_used()) 4594 return; 4595 4596 /* an active group must be handled by the update_curr()->put() path */ 4597 if (!cfs_rq->runtime_enabled || cfs_rq->curr) 4598 return; 4599 4600 /* ensure the group is not already throttled */ 4601 if (cfs_rq_throttled(cfs_rq)) 4602 return; 4603 4604 /* update runtime allocation */ 4605 account_cfs_rq_runtime(cfs_rq, 0); 4606 if (cfs_rq->runtime_remaining <= 0) 4607 throttle_cfs_rq(cfs_rq); 4608 } 4609 4610 static void sync_throttle(struct task_group *tg, int cpu) 4611 { 4612 struct cfs_rq *pcfs_rq, *cfs_rq; 4613 4614 if (!cfs_bandwidth_used()) 4615 return; 4616 4617 if (!tg->parent) 4618 return; 4619 4620 cfs_rq = tg->cfs_rq[cpu]; 4621 pcfs_rq = tg->parent->cfs_rq[cpu]; 4622 4623 cfs_rq->throttle_count = pcfs_rq->throttle_count; 4624 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); 4625 } 4626 4627 /* conditionally throttle active cfs_rq's from put_prev_entity() */ 4628 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4629 { 4630 if (!cfs_bandwidth_used()) 4631 return false; 4632 4633 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) 4634 return false; 4635 4636 /* 4637 * it's possible for a throttled entity to be forced into a running 4638 * state (e.g. set_curr_task), in this case we're finished. 4639 */ 4640 if (cfs_rq_throttled(cfs_rq)) 4641 return true; 4642 4643 throttle_cfs_rq(cfs_rq); 4644 return true; 4645 } 4646 4647 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) 4648 { 4649 struct cfs_bandwidth *cfs_b = 4650 container_of(timer, struct cfs_bandwidth, slack_timer); 4651 4652 do_sched_cfs_slack_timer(cfs_b); 4653 4654 return HRTIMER_NORESTART; 4655 } 4656 4657 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) 4658 { 4659 struct cfs_bandwidth *cfs_b = 4660 container_of(timer, struct cfs_bandwidth, period_timer); 4661 int overrun; 4662 int idle = 0; 4663 4664 raw_spin_lock(&cfs_b->lock); 4665 for (;;) { 4666 overrun = hrtimer_forward_now(timer, cfs_b->period); 4667 if (!overrun) 4668 break; 4669 4670 idle = do_sched_cfs_period_timer(cfs_b, overrun); 4671 } 4672 if (idle) 4673 cfs_b->period_active = 0; 4674 raw_spin_unlock(&cfs_b->lock); 4675 4676 return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; 4677 } 4678 4679 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4680 { 4681 raw_spin_lock_init(&cfs_b->lock); 4682 cfs_b->runtime = 0; 4683 cfs_b->quota = RUNTIME_INF; 4684 cfs_b->period = ns_to_ktime(default_cfs_period()); 4685 4686 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); 4687 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 4688 cfs_b->period_timer.function = sched_cfs_period_timer; 4689 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4690 cfs_b->slack_timer.function = sched_cfs_slack_timer; 4691 } 4692 4693 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4694 { 4695 cfs_rq->runtime_enabled = 0; 4696 INIT_LIST_HEAD(&cfs_rq->throttled_list); 4697 } 4698 4699 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4700 { 4701 lockdep_assert_held(&cfs_b->lock); 4702 4703 if (!cfs_b->period_active) { 4704 cfs_b->period_active = 1; 4705 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); 4706 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); 4707 } 4708 } 4709 4710 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4711 { 4712 /* init_cfs_bandwidth() was not called */ 4713 if (!cfs_b->throttled_cfs_rq.next) 4714 return; 4715 4716 hrtimer_cancel(&cfs_b->period_timer); 4717 hrtimer_cancel(&cfs_b->slack_timer); 4718 } 4719 4720 /* 4721 * Both these cpu hotplug callbacks race against unregister_fair_sched_group() 4722 * 4723 * The race is harmless, since modifying bandwidth settings of unhooked group 4724 * bits doesn't do much. 4725 */ 4726 4727 /* cpu online callback */ 4728 static void __maybe_unused update_runtime_enabled(struct rq *rq) 4729 { 4730 struct task_group *tg; 4731 4732 lockdep_assert_held(&rq->lock); 4733 4734 rcu_read_lock(); 4735 list_for_each_entry_rcu(tg, &task_groups, list) { 4736 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 4737 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4738 4739 raw_spin_lock(&cfs_b->lock); 4740 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; 4741 raw_spin_unlock(&cfs_b->lock); 4742 } 4743 rcu_read_unlock(); 4744 } 4745 4746 /* cpu offline callback */ 4747 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) 4748 { 4749 struct task_group *tg; 4750 4751 lockdep_assert_held(&rq->lock); 4752 4753 rcu_read_lock(); 4754 list_for_each_entry_rcu(tg, &task_groups, list) { 4755 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4756 4757 if (!cfs_rq->runtime_enabled) 4758 continue; 4759 4760 /* 4761 * clock_task is not advancing so we just need to make sure 4762 * there's some valid quota amount 4763 */ 4764 cfs_rq->runtime_remaining = 1; 4765 /* 4766 * Offline rq is schedulable till cpu is completely disabled 4767 * in take_cpu_down(), so we prevent new cfs throttling here.
4768 */ 4769 cfs_rq->runtime_enabled = 0; 4770 4771 if (cfs_rq_throttled(cfs_rq)) 4772 unthrottle_cfs_rq(cfs_rq); 4773 } 4774 rcu_read_unlock(); 4775 } 4776 4777 #else /* CONFIG_CFS_BANDWIDTH */ 4778 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) 4779 { 4780 return rq_clock_task(rq_of(cfs_rq)); 4781 } 4782 4783 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} 4784 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } 4785 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 4786 static inline void sync_throttle(struct task_group *tg, int cpu) {} 4787 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 4788 4789 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 4790 { 4791 return 0; 4792 } 4793 4794 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 4795 { 4796 return 0; 4797 } 4798 4799 static inline int throttled_lb_pair(struct task_group *tg, 4800 int src_cpu, int dest_cpu) 4801 { 4802 return 0; 4803 } 4804 4805 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 4806 4807 #ifdef CONFIG_FAIR_GROUP_SCHED 4808 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 4809 #endif 4810 4811 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 4812 { 4813 return NULL; 4814 } 4815 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 4816 static inline void update_runtime_enabled(struct rq *rq) {} 4817 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} 4818 4819 #endif /* CONFIG_CFS_BANDWIDTH */ 4820 4821 /************************************************** 4822 * CFS operations on tasks: 4823 */ 4824 4825 #ifdef CONFIG_SCHED_HRTICK 4826 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) 4827 { 4828 struct sched_entity *se = &p->se; 4829 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4830 4831 SCHED_WARN_ON(task_rq(p) != rq); 4832 4833 if (rq->cfs.h_nr_running > 1) { 4834 u64 slice = sched_slice(cfs_rq, se); 4835 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; 4836 s64 delta = slice - ran; 4837 4838 if (delta < 0) { 4839 if (rq->curr == p) 4840 resched_curr(rq); 4841 return; 4842 } 4843 hrtick_start(rq, delta); 4844 } 4845 } 4846 4847 /* 4848 * called from enqueue/dequeue and updates the hrtick when the 4849 * current task is from our class and nr_running is low enough 4850 * to matter. 4851 */ 4852 static void hrtick_update(struct rq *rq) 4853 { 4854 struct task_struct *curr = rq->curr; 4855 4856 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) 4857 return; 4858 4859 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) 4860 hrtick_start_fair(rq, curr); 4861 } 4862 #else /* !CONFIG_SCHED_HRTICK */ 4863 static inline void 4864 hrtick_start_fair(struct rq *rq, struct task_struct *p) 4865 { 4866 } 4867 4868 static inline void hrtick_update(struct rq *rq) 4869 { 4870 } 4871 #endif 4872 4873 /* 4874 * The enqueue_task method is called before nr_running is 4875 * increased. Here we update the fair scheduling stats and 4876 * then put the task into the rbtree: 4877 */ 4878 static void 4879 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) 4880 { 4881 struct cfs_rq *cfs_rq; 4882 struct sched_entity *se = &p->se; 4883 4884 /* 4885 * If in_iowait is set, the code below may not trigger any cpufreq 4886 * utilization updates, so do it here explicitly with the IOWAIT flag 4887 * passed. 
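 * (The IOWAIT flag lets a cpufreq governor such as schedutil apply its
 * I/O-wait boost, ramping the frequency up for the freshly woken task.)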
4888 */ 4889 if (p->in_iowait) 4890 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); 4891 4892 for_each_sched_entity(se) { 4893 if (se->on_rq) 4894 break; 4895 cfs_rq = cfs_rq_of(se); 4896 enqueue_entity(cfs_rq, se, flags); 4897 4898 /* 4899 * end evaluation on encountering a throttled cfs_rq 4900 * 4901 * note: in the case of encountering a throttled cfs_rq we will 4902 * post the final h_nr_running increment below. 4903 */ 4904 if (cfs_rq_throttled(cfs_rq)) 4905 break; 4906 cfs_rq->h_nr_running++; 4907 4908 flags = ENQUEUE_WAKEUP; 4909 } 4910 4911 for_each_sched_entity(se) { 4912 cfs_rq = cfs_rq_of(se); 4913 cfs_rq->h_nr_running++; 4914 4915 if (cfs_rq_throttled(cfs_rq)) 4916 break; 4917 4918 update_load_avg(se, UPDATE_TG); 4919 update_cfs_shares(se); 4920 } 4921 4922 if (!se) 4923 add_nr_running(rq, 1); 4924 4925 hrtick_update(rq); 4926 } 4927 4928 static void set_next_buddy(struct sched_entity *se); 4929 4930 /* 4931 * The dequeue_task method is called before nr_running is 4932 * decreased. We remove the task from the rbtree and 4933 * update the fair scheduling stats: 4934 */ 4935 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) 4936 { 4937 struct cfs_rq *cfs_rq; 4938 struct sched_entity *se = &p->se; 4939 int task_sleep = flags & DEQUEUE_SLEEP; 4940 4941 for_each_sched_entity(se) { 4942 cfs_rq = cfs_rq_of(se); 4943 dequeue_entity(cfs_rq, se, flags); 4944 4945 /* 4946 * end evaluation on encountering a throttled cfs_rq 4947 * 4948 * note: in the case of encountering a throttled cfs_rq we will 4949 * post the final h_nr_running decrement below. 4950 */ 4951 if (cfs_rq_throttled(cfs_rq)) 4952 break; 4953 cfs_rq->h_nr_running--; 4954 4955 /* Don't dequeue parent if it has other entities besides us */ 4956 if (cfs_rq->load.weight) { 4957 /* Avoid re-evaluating load for this entity: */ 4958 se = parent_entity(se); 4959 /* 4960 * Bias pick_next to pick a task from this cfs_rq, as 4961 * p is sleeping when it is within its sched_slice. 4962 */ 4963 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) 4964 set_next_buddy(se); 4965 break; 4966 } 4967 flags |= DEQUEUE_SLEEP; 4968 } 4969 4970 for_each_sched_entity(se) { 4971 cfs_rq = cfs_rq_of(se); 4972 cfs_rq->h_nr_running--; 4973 4974 if (cfs_rq_throttled(cfs_rq)) 4975 break; 4976 4977 update_load_avg(se, UPDATE_TG); 4978 update_cfs_shares(se); 4979 } 4980 4981 if (!se) 4982 sub_nr_running(rq, 1); 4983 4984 hrtick_update(rq); 4985 } 4986 4987 #ifdef CONFIG_SMP 4988 4989 /* Working cpumask for: load_balance, load_balance_newidle. */ 4990 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); 4991 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask); 4992 4993 #ifdef CONFIG_NO_HZ_COMMON 4994 /* 4995 * per rq 'load' array crap; XXX kill this. 4996 */ 4997 4998 /* 4999 * The exact cpuload calculated at every tick would be: 5000 * 5001 * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load 5002 * 5003 * If a cpu misses updates for n ticks (as it was idle) and update gets 5004 * called on the n+1-th tick when cpu may be busy, then we have: 5005 * 5006 * load_n = (1 - 1/2^i)^n * load_0 5007 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load 5008 * 5009 * decay_load_missed() below does efficient calculation of 5010 * 5011 * load' = (1 - 1/2^i)^n * load 5012 * 5013 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors. 5014 * This allows us to precompute the above in said factors, thereby allowing the 5015 * reduction of an arbitrary n in O(log_2 n) steps.
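 * For example, with idx = 2 (factor 3/4) and n = 5 = 4 + 1, the table
 * below gives load' ~= load * (96/128) * (40/128): two multiply-and-shift
 * steps instead of five multiplications.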
(See also 5016 * fixed_power_int()) 5017 * 5018 * The calculation is approximated on a 128 point scale. 5019 */ 5020 #define DEGRADE_SHIFT 7 5021 5022 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; 5023 static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { 5024 { 0, 0, 0, 0, 0, 0, 0, 0 }, 5025 { 64, 32, 8, 0, 0, 0, 0, 0 }, 5026 { 96, 72, 40, 12, 1, 0, 0, 0 }, 5027 { 112, 98, 75, 43, 15, 1, 0, 0 }, 5028 { 120, 112, 98, 76, 45, 16, 2, 0 } 5029 }; 5030 5031 /* 5032 * Update cpu_load for any missed ticks due to tickless idle. The backlog 5033 * builds up while the CPU is idle, so we just decay the old load without 5034 * adding any new load. 5035 */ 5036 static unsigned long 5037 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) 5038 { 5039 int j = 0; 5040 5041 if (!missed_updates) 5042 return load; 5043 5044 if (missed_updates >= degrade_zero_ticks[idx]) 5045 return 0; 5046 5047 if (idx == 1) 5048 return load >> missed_updates; 5049 5050 while (missed_updates) { 5051 if (missed_updates % 2) 5052 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; 5053 5054 missed_updates >>= 1; 5055 j++; 5056 } 5057 return load; 5058 } 5059 #endif /* CONFIG_NO_HZ_COMMON */ 5060 5061 /** 5062 * __cpu_load_update - update the rq->cpu_load[] statistics 5063 * @this_rq: The rq to update statistics for 5064 * @this_load: The current load 5065 * @pending_updates: The number of missed updates 5066 * 5067 * Update rq->cpu_load[] statistics. This function is usually called every 5068 * scheduler tick (TICK_NSEC). 5069 * 5070 * This function computes a decaying average: 5071 * 5072 * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load 5073 * 5074 * Because of NOHZ it might not get called on every tick, hence the need for 5075 * the @pending_updates argument. 5076 * 5077 * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1 5078 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load 5079 * = A * (A * load[i]_n-2 + B) + B 5080 * = A * (A * (A * load[i]_n-3 + B) + B) + B 5081 * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B 5082 * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B 5083 * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B 5084 * = (1 - 1/2^i)^n * (load[i]_0 - load) + load 5085 * 5086 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as 5087 * any change in load would have resulted in the tick being turned back on. 5088 * 5089 * For regular NOHZ, this reduces to: 5090 * 5091 * load[i]_n = (1 - 1/2^i)^n * load[i]_0 5092 * 5093 * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra 5094 * term.
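 * As a worked example: a CPU idle for n = 3 ticks with i = 1 gives
 * load[1]' = (1/2)^3 * load[1], i.e. the old load decays to an eighth,
 * exactly three applications of the per-tick average with no new load.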
5095 */ 5096 static void cpu_load_update(struct rq *this_rq, unsigned long this_load, 5097 unsigned long pending_updates) 5098 { 5099 unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0]; 5100 int i, scale; 5101 5102 this_rq->nr_load_updates++; 5103 5104 /* Update our load: */ 5105 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ 5106 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { 5107 unsigned long old_load, new_load; 5108 5109 /* scale is effectively 1 << i now, and >> i divides by scale */ 5110 5111 old_load = this_rq->cpu_load[i]; 5112 #ifdef CONFIG_NO_HZ_COMMON 5113 old_load = decay_load_missed(old_load, pending_updates - 1, i); 5114 if (tickless_load) { 5115 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i); 5116 /* 5117 * old_load can never be a negative value because a 5118 * decayed tickless_load cannot be greater than the 5119 * original tickless_load. 5120 */ 5121 old_load += tickless_load; 5122 } 5123 #endif 5124 new_load = this_load; 5125 /* 5126 * Round up the averaging division if load is increasing. This 5127 * prevents us from getting stuck on 9 if the load is 10, for 5128 * example. 5129 */ 5130 if (new_load > old_load) 5131 new_load += scale - 1; 5132 5133 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; 5134 } 5135 5136 sched_avg_update(this_rq); 5137 } 5138 5139 /* Used instead of source_load when we know the type == 0 */ 5140 static unsigned long weighted_cpuload(struct rq *rq) 5141 { 5142 return cfs_rq_runnable_load_avg(&rq->cfs); 5143 } 5144 5145 #ifdef CONFIG_NO_HZ_COMMON 5146 /* 5147 * There is no sane way to deal with nohz on smp when using jiffies because the 5148 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading 5149 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}. 5150 * 5151 * Therefore we need to avoid the delta approach from the regular tick when 5152 * possible since that would seriously skew the load calculation. This is why we 5153 * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on 5154 * jiffies deltas for updates happening while in nohz mode (idle ticks, idle 5155 * loop exit, nohz_idle_balance, nohz full exit...) 5156 * 5157 * This means we might still be one tick off for nohz periods. 5158 */ 5159 5160 static void cpu_load_update_nohz(struct rq *this_rq, 5161 unsigned long curr_jiffies, 5162 unsigned long load) 5163 { 5164 unsigned long pending_updates; 5165 5166 pending_updates = curr_jiffies - this_rq->last_load_update_tick; 5167 if (pending_updates) { 5168 this_rq->last_load_update_tick = curr_jiffies; 5169 /* 5170 * In the regular NOHZ case, we were idle, this means load 0. 5171 * In the NOHZ_FULL case, we were non-idle, we should consider 5172 * its weighted load. 5173 */ 5174 cpu_load_update(this_rq, load, pending_updates); 5175 } 5176 } 5177 5178 /* 5179 * Called from nohz_idle_balance() to update the load ratings before doing the 5180 * idle balance. 5181 */ 5182 static void cpu_load_update_idle(struct rq *this_rq) 5183 { 5184 /* 5185 * bail if there's load or we're actually up-to-date. 5186 */ 5187 if (weighted_cpuload(this_rq)) 5188 return; 5189 5190 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0); 5191 } 5192 5193 /* 5194 * Record CPU load on nohz entry so we know the tickless load to account 5195 * on nohz exit. 
cpu_load[0] will then happen to be updated more frequently 5196 * than the other cpu_load[idx] entries, but that should be fine because 5197 * cpu_load readers shouldn't rely on synchronized cpu_load[*] updates. 5198 */ 5199 void cpu_load_update_nohz_start(void) 5200 { 5201 struct rq *this_rq = this_rq(); 5202 5203 /* 5204 * This is all lockless but should be fine. If weighted_cpuload changes 5205 * concurrently we'll exit nohz. And cpu_load write can race with 5206 * cpu_load_update_idle() but both updaters would be writing the same value. 5207 */ 5208 this_rq->cpu_load[0] = weighted_cpuload(this_rq); 5209 } 5210 5211 /* 5212 * Account the tickless load at the end of a nohz frame. 5213 */ 5214 void cpu_load_update_nohz_stop(void) 5215 { 5216 unsigned long curr_jiffies = READ_ONCE(jiffies); 5217 struct rq *this_rq = this_rq(); 5218 unsigned long load; 5219 struct rq_flags rf; 5220 5221 if (curr_jiffies == this_rq->last_load_update_tick) 5222 return; 5223 5224 load = weighted_cpuload(this_rq); 5225 rq_lock(this_rq, &rf); 5226 update_rq_clock(this_rq); 5227 cpu_load_update_nohz(this_rq, curr_jiffies, load); 5228 rq_unlock(this_rq, &rf); 5229 } 5230 #else /* !CONFIG_NO_HZ_COMMON */ 5231 static inline void cpu_load_update_nohz(struct rq *this_rq, 5232 unsigned long curr_jiffies, 5233 unsigned long load) { } 5234 #endif /* CONFIG_NO_HZ_COMMON */ 5235 5236 static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load) 5237 { 5238 #ifdef CONFIG_NO_HZ_COMMON 5239 /* See the mess around cpu_load_update_nohz(). */ 5240 this_rq->last_load_update_tick = READ_ONCE(jiffies); 5241 #endif 5242 cpu_load_update(this_rq, load, 1); 5243 } 5244 5245 /* 5246 * Called from scheduler_tick() 5247 */ 5248 void cpu_load_update_active(struct rq *this_rq) 5249 { 5250 unsigned long load = weighted_cpuload(this_rq); 5251 5252 if (tick_nohz_tick_stopped()) 5253 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load); 5254 else 5255 cpu_load_update_periodic(this_rq, load); 5256 } 5257 5258 /* 5259 * Return a low guess at the load of a migration-source cpu weighted 5260 * according to the scheduling class and "nice" value. 5261 * 5262 * We want to under-estimate the load of migration sources, to 5263 * balance conservatively. 5264 */ 5265 static unsigned long source_load(int cpu, int type) 5266 { 5267 struct rq *rq = cpu_rq(cpu); 5268 unsigned long total = weighted_cpuload(rq); 5269 5270 if (type == 0 || !sched_feat(LB_BIAS)) 5271 return total; 5272 5273 return min(rq->cpu_load[type-1], total); 5274 } 5275 5276 /* 5277 * Return a high guess at the load of a migration-target cpu weighted 5278 * according to the scheduling class and "nice" value.
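 *
 * We want to over-estimate the load of migration targets, again to
 * balance conservatively.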
5279 */ 5280 static unsigned long target_load(int cpu, int type) 5281 { 5282 struct rq *rq = cpu_rq(cpu); 5283 unsigned long total = weighted_cpuload(rq); 5284 5285 if (type == 0 || !sched_feat(LB_BIAS)) 5286 return total; 5287 5288 return max(rq->cpu_load[type-1], total); 5289 } 5290 5291 static unsigned long capacity_of(int cpu) 5292 { 5293 return cpu_rq(cpu)->cpu_capacity; 5294 } 5295 5296 static unsigned long capacity_orig_of(int cpu) 5297 { 5298 return cpu_rq(cpu)->cpu_capacity_orig; 5299 } 5300 5301 static unsigned long cpu_avg_load_per_task(int cpu) 5302 { 5303 struct rq *rq = cpu_rq(cpu); 5304 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); 5305 unsigned long load_avg = weighted_cpuload(rq); 5306 5307 if (nr_running) 5308 return load_avg / nr_running; 5309 5310 return 0; 5311 } 5312 5313 static void record_wakee(struct task_struct *p) 5314 { 5315 /* 5316 * Only decay a single time; tasks that have less than 1 wakeup per 5317 * jiffy will not have built up many flips. 5318 */ 5319 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { 5320 current->wakee_flips >>= 1; 5321 current->wakee_flip_decay_ts = jiffies; 5322 } 5323 5324 if (current->last_wakee != p) { 5325 current->last_wakee = p; 5326 current->wakee_flips++; 5327 } 5328 } 5329 5330 /* 5331 * Detect M:N waker/wakee relationships via a switching-frequency heuristic. 5332 * 5333 * A waker of many should wake a different task than the one last awakened 5334 * at a frequency roughly N times higher than one of its wakees. 5335 * 5336 * In order to determine whether we should let the load spread vs consolidating 5337 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one 5338 * partner, and a factor of llc_size higher frequency in the other. 5339 * 5340 * With both conditions met, we can be relatively sure that the relationship is 5341 * non-monogamous, with partner count exceeding socket size. 5342 * 5343 * Waker/wakee being client/server, worker/dispatcher, interrupt source or 5344 * whatever is irrelevant: the spread criterion is simply that the apparent 5345 * partner count exceeds the socket size. 5346 */ 5347 static int wake_wide(struct task_struct *p) 5348 { 5349 unsigned int master = current->wakee_flips; 5350 unsigned int slave = p->wakee_flips; 5351 int factor = this_cpu_read(sd_llc_size); 5352 5353 if (master < slave) 5354 swap(master, slave); 5355 if (slave < factor || master < slave * factor) 5356 return 0; 5357 return 1; 5358 } 5359 5360 /* 5361 * The purpose of wake_affine() is to quickly determine on which CPU we can run 5362 * soonest. For the purpose of speed we only consider the waking and previous 5363 * CPU. 5364 * 5365 * wake_affine_idle() - only considers 'now', it checks whether the waking CPU 5366 * is (or will be) idle. 5367 * 5368 * wake_affine_weight() - considers the weight to reflect the average 5369 * scheduling latency of the CPUs. This seems to work 5370 * for the overloaded case.
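 *
 * The 'sync' hint means the waker is about to sleep; wake_affine_weight()
 * therefore discounts the waker's own load from this_cpu before comparing.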
5371 */ 5372 5373 static bool 5374 wake_affine_idle(struct sched_domain *sd, struct task_struct *p, 5375 int this_cpu, int prev_cpu, int sync) 5376 { 5377 if (idle_cpu(this_cpu)) 5378 return true; 5379 5380 if (sync && cpu_rq(this_cpu)->nr_running == 1) 5381 return true; 5382 5383 return false; 5384 } 5385 5386 static bool 5387 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, 5388 int this_cpu, int prev_cpu, int sync) 5389 { 5390 s64 this_eff_load, prev_eff_load; 5391 unsigned long task_load; 5392 5393 this_eff_load = target_load(this_cpu, sd->wake_idx); 5394 prev_eff_load = source_load(prev_cpu, sd->wake_idx); 5395 5396 if (sync) { 5397 unsigned long current_load = task_h_load(current); 5398 5399 if (current_load > this_eff_load) 5400 return true; 5401 5402 this_eff_load -= current_load; 5403 } 5404 5405 task_load = task_h_load(p); 5406 5407 this_eff_load += task_load; 5408 if (sched_feat(WA_BIAS)) 5409 this_eff_load *= 100; 5410 this_eff_load *= capacity_of(prev_cpu); 5411 5412 prev_eff_load -= task_load; 5413 if (sched_feat(WA_BIAS)) 5414 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; 5415 prev_eff_load *= capacity_of(this_cpu); 5416 5417 return this_eff_load <= prev_eff_load; 5418 } 5419 5420 static int wake_affine(struct sched_domain *sd, struct task_struct *p, 5421 int prev_cpu, int sync) 5422 { 5423 int this_cpu = smp_processor_id(); 5424 bool affine = false; 5425 5426 if (sched_feat(WA_IDLE) && !affine) 5427 affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync); 5428 5429 if (sched_feat(WA_WEIGHT) && !affine) 5430 affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); 5431 5432 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); 5433 if (affine) { 5434 schedstat_inc(sd->ttwu_move_affine); 5435 schedstat_inc(p->se.statistics.nr_wakeups_affine); 5436 } 5437 5438 return affine; 5439 } 5440 5441 static inline int task_util(struct task_struct *p); 5442 static int cpu_util_wake(int cpu, struct task_struct *p); 5443 5444 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p) 5445 { 5446 return capacity_orig_of(cpu) - cpu_util_wake(cpu, p); 5447 } 5448 5449 /* 5450 * find_idlest_group finds and returns the least busy CPU group within the 5451 * domain. 5452 */ 5453 static struct sched_group * 5454 find_idlest_group(struct sched_domain *sd, struct task_struct *p, 5455 int this_cpu, int sd_flag) 5456 { 5457 struct sched_group *idlest = NULL, *group = sd->groups; 5458 struct sched_group *most_spare_sg = NULL; 5459 unsigned long min_runnable_load = ULONG_MAX, this_runnable_load = 0; 5460 unsigned long min_avg_load = ULONG_MAX, this_avg_load = 0; 5461 unsigned long most_spare = 0, this_spare = 0; 5462 int load_idx = sd->forkexec_idx; 5463 int imbalance_scale = 100 + (sd->imbalance_pct-100)/2; 5464 unsigned long imbalance = scale_load_down(NICE_0_LOAD) * 5465 (sd->imbalance_pct-100) / 100; 5466 5467 if (sd_flag & SD_BALANCE_WAKE) 5468 load_idx = sd->wake_idx; 5469 5470 do { 5471 unsigned long load, avg_load, runnable_load; 5472 unsigned long spare_cap, max_spare_cap; 5473 int local_group; 5474 int i; 5475 5476 /* Skip over this group if it has no CPUs allowed */ 5477 if (!cpumask_intersects(sched_group_span(group), 5478 &p->cpus_allowed)) 5479 continue; 5480 5481 local_group = cpumask_test_cpu(this_cpu, 5482 sched_group_span(group)); 5483 5484 /* 5485 * Tally up the load of all CPUs in the group and find 5486 * the group containing the CPU with most spare capacity. 
5487 */ 5488 avg_load = 0; 5489 runnable_load = 0; 5490 max_spare_cap = 0; 5491 5492 for_each_cpu(i, sched_group_span(group)) { 5493 /* Bias balancing toward cpus of our domain */ 5494 if (local_group) 5495 load = source_load(i, load_idx); 5496 else 5497 load = target_load(i, load_idx); 5498 5499 runnable_load += load; 5500 5501 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs); 5502 5503 spare_cap = capacity_spare_wake(i, p); 5504 5505 if (spare_cap > max_spare_cap) 5506 max_spare_cap = spare_cap; 5507 } 5508 5509 /* Adjust by relative CPU capacity of the group */ 5510 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / 5511 group->sgc->capacity; 5512 runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) / 5513 group->sgc->capacity; 5514 5515 if (local_group) { 5516 this_runnable_load = runnable_load; 5517 this_avg_load = avg_load; 5518 this_spare = max_spare_cap; 5519 } else { 5520 if (min_runnable_load > (runnable_load + imbalance)) { 5521 /* 5522 * The runnable load is significantly smaller 5523 * so we can pick this new cpu 5524 */ 5525 min_runnable_load = runnable_load; 5526 min_avg_load = avg_load; 5527 idlest = group; 5528 } else if ((runnable_load < (min_runnable_load + imbalance)) && 5529 (100*min_avg_load > imbalance_scale*avg_load)) { 5530 /* 5531 * The runnable loads are close so take the 5532 * blocked load into account through avg_load. 5533 */ 5534 min_avg_load = avg_load; 5535 idlest = group; 5536 } 5537 5538 if (most_spare < max_spare_cap) { 5539 most_spare = max_spare_cap; 5540 most_spare_sg = group; 5541 } 5542 } 5543 } while (group = group->next, group != sd->groups); 5544 5545 /* 5546 * The cross-over point between using spare capacity or least load 5547 * is too conservative for high utilization tasks on partially 5548 * utilized systems if we require spare_capacity > task_util(p), 5549 * so we allow for some task stuffing by using 5550 * spare_capacity > task_util(p)/2. 5551 * 5552 * Spare capacity can't be used for fork because the utilization has 5553 * not been set yet, we must first select a rq to compute the initial 5554 * utilization. 5555 */ 5556 if (sd_flag & SD_BALANCE_FORK) 5557 goto skip_spare; 5558 5559 if (this_spare > task_util(p) / 2 && 5560 imbalance_scale*this_spare > 100*most_spare) 5561 return NULL; 5562 5563 if (most_spare > task_util(p) / 2) 5564 return most_spare_sg; 5565 5566 skip_spare: 5567 if (!idlest) 5568 return NULL; 5569 5570 if (min_runnable_load > (this_runnable_load + imbalance)) 5571 return NULL; 5572 5573 if ((this_runnable_load < (min_runnable_load + imbalance)) && 5574 (100*this_avg_load < imbalance_scale*min_avg_load)) 5575 return NULL; 5576 5577 return idlest; 5578 } 5579 5580 /* 5581 * find_idlest_cpu - find the idlest cpu among the cpus in group. 
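 * Prefers the CPU in the shallowest idle state (smallest exit latency),
 * breaking ties toward the most recently idled CPU; when none are idle we
 * fall back to the least loaded CPU.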
5582 */ 5583 static int 5584 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) 5585 { 5586 unsigned long load, min_load = ULONG_MAX; 5587 unsigned int min_exit_latency = UINT_MAX; 5588 u64 latest_idle_timestamp = 0; 5589 int least_loaded_cpu = this_cpu; 5590 int shallowest_idle_cpu = -1; 5591 int i; 5592 5593 /* Check if we have any choice: */ 5594 if (group->group_weight == 1) 5595 return cpumask_first(sched_group_span(group)); 5596 5597 /* Traverse only the allowed CPUs */ 5598 for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) { 5599 if (idle_cpu(i)) { 5600 struct rq *rq = cpu_rq(i); 5601 struct cpuidle_state *idle = idle_get_state(rq); 5602 if (idle && idle->exit_latency < min_exit_latency) { 5603 /* 5604 * We give priority to a CPU whose idle state 5605 * has the smallest exit latency irrespective 5606 * of any idle timestamp. 5607 */ 5608 min_exit_latency = idle->exit_latency; 5609 latest_idle_timestamp = rq->idle_stamp; 5610 shallowest_idle_cpu = i; 5611 } else if ((!idle || idle->exit_latency == min_exit_latency) && 5612 rq->idle_stamp > latest_idle_timestamp) { 5613 /* 5614 * If equal or no active idle state, then 5615 * the most recently idled CPU might have 5616 * a warmer cache. 5617 */ 5618 latest_idle_timestamp = rq->idle_stamp; 5619 shallowest_idle_cpu = i; 5620 } 5621 } else if (shallowest_idle_cpu == -1) { 5622 load = weighted_cpuload(cpu_rq(i)); 5623 if (load < min_load || (load == min_load && i == this_cpu)) { 5624 min_load = load; 5625 least_loaded_cpu = i; 5626 } 5627 } 5628 } 5629 5630 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; 5631 } 5632 5633 #ifdef CONFIG_SCHED_SMT 5634 5635 static inline void set_idle_cores(int cpu, int val) 5636 { 5637 struct sched_domain_shared *sds; 5638 5639 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5640 if (sds) 5641 WRITE_ONCE(sds->has_idle_cores, val); 5642 } 5643 5644 static inline bool test_idle_cores(int cpu, bool def) 5645 { 5646 struct sched_domain_shared *sds; 5647 5648 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5649 if (sds) 5650 return READ_ONCE(sds->has_idle_cores); 5651 5652 return def; 5653 } 5654 5655 /* 5656 * Scans the local SMT mask to see if the entire core is idle, and records this 5657 * information in sd_llc_shared->has_idle_cores. 5658 * 5659 * Since SMT siblings share all cache levels, inspecting this limited remote 5660 * state should be fairly cheap. 5661 */ 5662 void __update_idle_core(struct rq *rq) 5663 { 5664 int core = cpu_of(rq); 5665 int cpu; 5666 5667 rcu_read_lock(); 5668 if (test_idle_cores(core, true)) 5669 goto unlock; 5670 5671 for_each_cpu(cpu, cpu_smt_mask(core)) { 5672 if (cpu == core) 5673 continue; 5674 5675 if (!idle_cpu(cpu)) 5676 goto unlock; 5677 } 5678 5679 set_idle_cores(core, 1); 5680 unlock: 5681 rcu_read_unlock(); 5682 } 5683 5684 /* 5685 * Scan the entire LLC domain for idle cores; this dynamically switches off if 5686 * there are no idle cores left in the system; tracked through 5687 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. 
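 *
 * has_idle_cores is only a hint: idle CPUs set it lazily and a fruitless
 * scan clears it, so a false positive costs at most one extra scan.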
5688 */ 5689 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) 5690 { 5691 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); 5692 int core, cpu; 5693 5694 if (!static_branch_likely(&sched_smt_present)) 5695 return -1; 5696 5697 if (!test_idle_cores(target, false)) 5698 return -1; 5699 5700 cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); 5701 5702 for_each_cpu_wrap(core, cpus, target) { 5703 bool idle = true; 5704 5705 for_each_cpu(cpu, cpu_smt_mask(core)) { 5706 cpumask_clear_cpu(cpu, cpus); 5707 if (!idle_cpu(cpu)) 5708 idle = false; 5709 } 5710 5711 if (idle) 5712 return core; 5713 } 5714 5715 /* 5716 * Failed to find an idle core; stop looking for one. 5717 */ 5718 set_idle_cores(target, 0); 5719 5720 return -1; 5721 } 5722 5723 /* 5724 * Scan the local SMT mask for idle CPUs. 5725 */ 5726 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 5727 { 5728 int cpu; 5729 5730 if (!static_branch_likely(&sched_smt_present)) 5731 return -1; 5732 5733 for_each_cpu(cpu, cpu_smt_mask(target)) { 5734 if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 5735 continue; 5736 if (idle_cpu(cpu)) 5737 return cpu; 5738 } 5739 5740 return -1; 5741 } 5742 5743 #else /* CONFIG_SCHED_SMT */ 5744 5745 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) 5746 { 5747 return -1; 5748 } 5749 5750 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 5751 { 5752 return -1; 5753 } 5754 5755 #endif /* CONFIG_SCHED_SMT */ 5756 5757 /* 5758 * Scan the LLC domain for idle CPUs; this is dynamically regulated by 5759 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the 5760 * average idle time for this rq (as found in rq->avg_idle). 5761 */ 5762 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) 5763 { 5764 struct sched_domain *this_sd; 5765 u64 avg_cost, avg_idle; 5766 u64 time, cost; 5767 s64 delta; 5768 int cpu, nr = INT_MAX; 5769 5770 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); 5771 if (!this_sd) 5772 return -1; 5773 5774 /* 5775 * Due to large variance we need a large fuzz factor; hackbench in 5776 * particular is sensitive here. 5777 */ 5778 avg_idle = this_rq()->avg_idle / 512; 5779 avg_cost = this_sd->avg_scan_cost + 1; 5780 5781 if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost) 5782 return -1; 5783 5784 if (sched_feat(SIS_PROP)) { 5785 u64 span_avg = sd->span_weight * avg_idle; 5786 if (span_avg > 4*avg_cost) 5787 nr = div_u64(span_avg, avg_cost); 5788 else 5789 nr = 4; 5790 } 5791 5792 time = local_clock(); 5793 5794 for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { 5795 if (!--nr) 5796 return -1; 5797 if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 5798 continue; 5799 if (idle_cpu(cpu)) 5800 break; 5801 } 5802 5803 time = local_clock() - time; 5804 cost = this_sd->avg_scan_cost; 5805 delta = (s64)(time - cost) / 8; 5806 this_sd->avg_scan_cost += delta; 5807 5808 return cpu; 5809 } 5810 5811 /* 5812 * Try and locate an idle core/thread in the LLC cache domain. 5813 */ 5814 static int select_idle_sibling(struct task_struct *p, int prev, int target) 5815 { 5816 struct sched_domain *sd; 5817 int i; 5818 5819 if (idle_cpu(target)) 5820 return target; 5821 5822 /* 5823 * If the previous cpu is cache affine and idle, don't be stupid.
5824 */ 5825 if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev)) 5826 return prev; 5827 5828 sd = rcu_dereference(per_cpu(sd_llc, target)); 5829 if (!sd) 5830 return target; 5831 5832 i = select_idle_core(p, sd, target); 5833 if ((unsigned)i < nr_cpumask_bits) 5834 return i; 5835 5836 i = select_idle_cpu(p, sd, target); 5837 if ((unsigned)i < nr_cpumask_bits) 5838 return i; 5839 5840 i = select_idle_smt(p, sd, target); 5841 if ((unsigned)i < nr_cpumask_bits) 5842 return i; 5843 5844 return target; 5845 } 5846 5847 /* 5848 * cpu_util returns the amount of capacity of a CPU that is used by CFS 5849 * tasks. The unit of the return value must be the one of capacity so we can 5850 * compare the utilization with the capacity of the CPU that is available for 5851 * CFS tasks (i.e. cpu_capacity). 5852 * 5853 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the 5854 * recent utilization of currently non-runnable tasks on a CPU. It represents 5855 * the amount of utilization of a CPU in the range [0..capacity_orig] where 5856 * capacity_orig is the cpu_capacity available at the highest frequency 5857 * (arch_scale_freq_capacity()). 5858 * The utilization of a CPU converges towards a sum equal to or less than the 5859 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is 5860 * the running time on this CPU scaled by capacity_curr. 5861 * 5862 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even 5863 * higher than capacity_orig because of unfortunate rounding in 5864 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until 5865 * the average stabilizes with the new running time. We need to check that the 5866 * utilization stays within the range of [0..capacity_orig] and cap it if 5867 * necessary. Without utilization capping, a group could be seen as overloaded 5868 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of 5869 * available capacity. We allow utilization to overshoot capacity_curr (but not 5870 * capacity_orig) as it is useful for predicting the capacity required after task 5871 * migrations (scheduler-driven DVFS). 5872 */ 5873 static int cpu_util(int cpu) 5874 { 5875 unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg; 5876 unsigned long capacity = capacity_orig_of(cpu); 5877 5878 return (util >= capacity) ? capacity : util; 5879 } 5880 5881 static inline int task_util(struct task_struct *p) 5882 { 5883 return p->se.avg.util_avg; 5884 } 5885 5886 /* 5887 * cpu_util_wake: Compute cpu utilization with any contributions from 5888 * the waking task p removed. 5889 */ 5890 static int cpu_util_wake(int cpu, struct task_struct *p) 5891 { 5892 unsigned long util, capacity; 5893 5894 /* Task has no contribution or is new */ 5895 if (cpu != task_cpu(p) || !p->se.avg.last_update_time) 5896 return cpu_util(cpu); 5897 5898 capacity = capacity_orig_of(cpu); 5899 util = max_t(long, cpu_rq(cpu)->cfs.avg.util_avg - task_util(p), 0); 5900 5901 return (util >= capacity) ? capacity : util; 5902 } 5903 5904 /* 5905 * Disable WAKE_AFFINE in the case where task @p doesn't fit in the 5906 * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu. 5907 * 5908 * In that case WAKE_AFFINE doesn't make sense and we'll let 5909 * BALANCE_WAKE sort things out.
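 *
 * With the default capacity_margin of 1280, the test below trips once
 * task_util(p) exceeds min_cap * 1024 / 1280, i.e. roughly 80% of the
 * smaller CPU's original capacity.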
5910 */ 5911 static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) 5912 { 5913 long min_cap, max_cap; 5914 5915 min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu)); 5916 max_cap = cpu_rq(cpu)->rd->max_cpu_capacity; 5917 5918 /* Minimum capacity is close to max, no need to abort wake_affine */ 5919 if (max_cap - min_cap < max_cap >> 3) 5920 return 0; 5921 5922 /* Bring task utilization in sync with prev_cpu */ 5923 sync_entity_load_avg(&p->se); 5924 5925 return min_cap * 1024 < task_util(p) * capacity_margin; 5926 } 5927 5928 /* 5929 * select_task_rq_fair: Select target runqueue for the waking task in domains 5930 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, 5931 * SD_BALANCE_FORK, or SD_BALANCE_EXEC. 5932 * 5933 * Balances load by selecting the idlest cpu in the idlest group, or under 5934 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set. 5935 * 5936 * Returns the target cpu number. 5937 * 5938 * preempt must be disabled. 5939 */ 5940 static int 5941 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) 5942 { 5943 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; 5944 int cpu = smp_processor_id(); 5945 int new_cpu = prev_cpu; 5946 int want_affine = 0; 5947 int sync = wake_flags & WF_SYNC; 5948 5949 if (sd_flag & SD_BALANCE_WAKE) { 5950 record_wakee(p); 5951 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) 5952 && cpumask_test_cpu(cpu, &p->cpus_allowed); 5953 } 5954 5955 rcu_read_lock(); 5956 for_each_domain(cpu, tmp) { 5957 if (!(tmp->flags & SD_LOAD_BALANCE)) 5958 break; 5959 5960 /* 5961 * If both cpu and prev_cpu are part of this domain, 5962 * cpu is a valid SD_WAKE_AFFINE target. 5963 */ 5964 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && 5965 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { 5966 affine_sd = tmp; 5967 break; 5968 } 5969 5970 if (tmp->flags & sd_flag) 5971 sd = tmp; 5972 else if (!want_affine) 5973 break; 5974 } 5975 5976 if (affine_sd) { 5977 sd = NULL; /* Prefer wake_affine over balance flags */ 5978 if (cpu == prev_cpu) 5979 goto pick_cpu; 5980 5981 if (wake_affine(affine_sd, p, prev_cpu, sync)) 5982 new_cpu = cpu; 5983 } 5984 5985 if (!sd) { 5986 pick_cpu: 5987 if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */ 5988 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); 5989 5990 } else while (sd) { 5991 struct sched_group *group; 5992 int weight; 5993 5994 if (!(sd->flags & sd_flag)) { 5995 sd = sd->child; 5996 continue; 5997 } 5998 5999 group = find_idlest_group(sd, p, cpu, sd_flag); 6000 if (!group) { 6001 sd = sd->child; 6002 continue; 6003 } 6004 6005 new_cpu = find_idlest_cpu(group, p, cpu); 6006 if (new_cpu == -1 || new_cpu == cpu) { 6007 /* Now try balancing at a lower domain level of cpu */ 6008 sd = sd->child; 6009 continue; 6010 } 6011 6012 /* Now try balancing at a lower domain level of new_cpu */ 6013 cpu = new_cpu; 6014 weight = sd->span_weight; 6015 sd = NULL; 6016 for_each_domain(cpu, tmp) { 6017 if (weight <= tmp->span_weight) 6018 break; 6019 if (tmp->flags & sd_flag) 6020 sd = tmp; 6021 } 6022 /* while loop will break here if sd == NULL */ 6023 } 6024 rcu_read_unlock(); 6025 6026 return new_cpu; 6027 } 6028 6029 /* 6030 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and 6031 * cfs_rq_of(p) references at time of call are still valid and identify the 6032 * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held. 
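* (Sketch of the renormalization below, values assumed: a task that went to sleep with vruntime = 1000200ns on a cfs_rq whose min_vruntime is 1000000ns keeps only its 200ns relative lead; enqueue_entity() later adds the new runqueue's min_vruntime back on top.)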
6033 */ 6034 static void migrate_task_rq_fair(struct task_struct *p) 6035 { 6036 /* 6037 * As blocked tasks retain absolute vruntime the migration needs to 6038 * deal with this by subtracting the old and adding the new 6039 * min_vruntime -- the latter is done by enqueue_entity() when placing 6040 * the task on the new runqueue. 6041 */ 6042 if (p->state == TASK_WAKING) { 6043 struct sched_entity *se = &p->se; 6044 struct cfs_rq *cfs_rq = cfs_rq_of(se); 6045 u64 min_vruntime; 6046 6047 #ifndef CONFIG_64BIT 6048 u64 min_vruntime_copy; 6049 6050 do { 6051 min_vruntime_copy = cfs_rq->min_vruntime_copy; 6052 smp_rmb(); 6053 min_vruntime = cfs_rq->min_vruntime; 6054 } while (min_vruntime != min_vruntime_copy); 6055 #else 6056 min_vruntime = cfs_rq->min_vruntime; 6057 #endif 6058 6059 se->vruntime -= min_vruntime; 6060 } 6061 6062 /* 6063 * We are supposed to update the task to "current" time, so that it is up to date 6064 * and ready to go to the new CPU/cfs_rq. But we have difficulty in getting 6065 * what the current time is, so simply throw away the out-of-date time. This 6066 * results in the wakee task being less decayed, but giving the wakee more 6067 * load does not sound bad. 6068 */ 6069 remove_entity_load_avg(&p->se); 6070 6071 /* Tell new CPU we are migrated */ 6072 p->se.avg.last_update_time = 0; 6073 6074 /* We have migrated, no longer consider this task hot */ 6075 p->se.exec_start = 0; 6076 } 6077 6078 static void task_dead_fair(struct task_struct *p) 6079 { 6080 remove_entity_load_avg(&p->se); 6081 } 6082 #endif /* CONFIG_SMP */ 6083 6084 static unsigned long 6085 wakeup_gran(struct sched_entity *curr, struct sched_entity *se) 6086 { 6087 unsigned long gran = sysctl_sched_wakeup_granularity; 6088 6089 /* 6090 * Since it is curr that is running now, convert the gran from real-time 6091 * to virtual-time in its units. 6092 * 6093 * By using 'se' instead of 'curr' we penalize light tasks, so 6094 * they get preempted easier. That is, if 'se' < 'curr' then 6095 * the resulting gran will be larger, therefore penalizing the 6096 * lighter task; if OTOH 'se' > 'curr' then the resulting gran will 6097 * be smaller, again penalizing the lighter task. 6098 * 6099 * This is especially important for buddies when the leftmost 6100 * task is higher priority than the buddy. 6101 */ 6102 return calc_delta_fair(gran, se); 6103 } 6104 6105 /* 6106 * Should 'se' preempt 'curr'.
6107 * 6108 * |s1 6109 * |s2 6110 * |s3 6111 * g 6112 * |<--->|c 6113 * 6114 * w(c, s1) = -1 6115 * w(c, s2) = 0 6116 * w(c, s3) = 1 6117 * 6118 */ 6119 static int 6120 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) 6121 { 6122 s64 gran, vdiff = curr->vruntime - se->vruntime; 6123 6124 if (vdiff <= 0) 6125 return -1; 6126 6127 gran = wakeup_gran(curr, se); 6128 if (vdiff > gran) 6129 return 1; 6130 6131 return 0; 6132 } 6133 6134 static void set_last_buddy(struct sched_entity *se) 6135 { 6136 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) 6137 return; 6138 6139 for_each_sched_entity(se) { 6140 if (SCHED_WARN_ON(!se->on_rq)) 6141 return; 6142 cfs_rq_of(se)->last = se; 6143 } 6144 } 6145 6146 static void set_next_buddy(struct sched_entity *se) 6147 { 6148 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) 6149 return; 6150 6151 for_each_sched_entity(se) { 6152 if (SCHED_WARN_ON(!se->on_rq)) 6153 return; 6154 cfs_rq_of(se)->next = se; 6155 } 6156 } 6157 6158 static void set_skip_buddy(struct sched_entity *se) 6159 { 6160 for_each_sched_entity(se) 6161 cfs_rq_of(se)->skip = se; 6162 } 6163 6164 /* 6165 * Preempt the current task with a newly woken task if needed: 6166 */ 6167 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 6168 { 6169 struct task_struct *curr = rq->curr; 6170 struct sched_entity *se = &curr->se, *pse = &p->se; 6171 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 6172 int scale = cfs_rq->nr_running >= sched_nr_latency; 6173 int next_buddy_marked = 0; 6174 6175 if (unlikely(se == pse)) 6176 return; 6177 6178 /* 6179 * This is possible from callers such as attach_tasks(), in which we 6180 * unconditionally check_preempt_curr() after an enqueue (which may have 6181 * led to a throttle). This both saves work and prevents false 6182 * next-buddy nomination below. 6183 */ 6184 if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) 6185 return; 6186 6187 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { 6188 set_next_buddy(pse); 6189 next_buddy_marked = 1; 6190 } 6191 6192 /* 6193 * We can come here with TIF_NEED_RESCHED already set from new task 6194 * wake up path. 6195 * 6196 * Note: this also catches the edge-case of curr being in a throttled 6197 * group (e.g. via set_curr_task), since update_curr() (in the 6198 * enqueue of curr) will have resulted in resched being set. This 6199 * prevents us from potentially nominating it as a false LAST_BUDDY 6200 * below. 6201 */ 6202 if (test_tsk_need_resched(curr)) 6203 return; 6204 6205 /* Idle tasks are by definition preempted by non-idle tasks. */ 6206 if (unlikely(curr->policy == SCHED_IDLE) && 6207 likely(p->policy != SCHED_IDLE)) 6208 goto preempt; 6209 6210 /* 6211 * Batch and idle tasks do not preempt non-idle tasks (their preemption 6212 * is driven by the tick): 6213 */ 6214 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) 6215 return; 6216 6217 find_matching_se(&se, &pse); 6218 update_curr(cfs_rq_of(se)); 6219 BUG_ON(!pse); 6220 if (wakeup_preempt_entity(se, pse) == 1) { 6221 /* 6222 * Bias pick_next to pick the sched entity that is 6223 * triggering this preemption. 6224 */ 6225 if (!next_buddy_marked) 6226 set_next_buddy(pse); 6227 goto preempt; 6228 } 6229 6230 return; 6231 6232 preempt: 6233 resched_curr(rq); 6234 /* 6235 * Only set the backward buddy when the current task is still 6236 * on the rq.
This can happen when a wakeup gets interleaved 6237 * with schedule on the ->pre_schedule() or idle_balance() 6238 * point, either of which can drop the rq lock. 6239 * 6240 * Also, during early boot the idle thread is in the fair class, 6241 * for obvious reasons it's a bad idea to schedule back to it. 6242 */ 6243 if (unlikely(!se->on_rq || curr == rq->idle)) 6244 return; 6245 6246 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) 6247 set_last_buddy(se); 6248 } 6249 6250 static struct task_struct * 6251 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6252 { 6253 struct cfs_rq *cfs_rq = &rq->cfs; 6254 struct sched_entity *se; 6255 struct task_struct *p; 6256 int new_tasks; 6257 6258 again: 6259 if (!cfs_rq->nr_running) 6260 goto idle; 6261 6262 #ifdef CONFIG_FAIR_GROUP_SCHED 6263 if (prev->sched_class != &fair_sched_class) 6264 goto simple; 6265 6266 /* 6267 * Because of the set_next_buddy() in dequeue_task_fair() it is rather 6268 * likely that the next task is from the same cgroup as the current one. 6269 * 6270 * Therefore attempt to avoid putting and setting the entire cgroup 6271 * hierarchy, only change the part that actually changes. 6272 */ 6273 6274 do { 6275 struct sched_entity *curr = cfs_rq->curr; 6276 6277 /* 6278 * Since we got here without doing put_prev_entity() we also 6279 * have to consider cfs_rq->curr. If it is still a runnable 6280 * entity, update_curr() will update its vruntime, otherwise 6281 * forget we've ever seen it. 6282 */ 6283 if (curr) { 6284 if (curr->on_rq) 6285 update_curr(cfs_rq); 6286 else 6287 curr = NULL; 6288 6289 /* 6290 * This call to check_cfs_rq_runtime() will do the 6291 * throttle and dequeue its entity in the parent(s). 6292 * Therefore the nr_running test will indeed 6293 * be correct. 6294 */ 6295 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { 6296 cfs_rq = &rq->cfs; 6297 6298 if (!cfs_rq->nr_running) 6299 goto idle; 6300 6301 goto simple; 6302 } 6303 } 6304 6305 se = pick_next_entity(cfs_rq, curr); 6306 cfs_rq = group_cfs_rq(se); 6307 } while (cfs_rq); 6308 6309 p = task_of(se); 6310 6311 /* 6312 * Since we haven't yet done put_prev_entity(), if the selected task 6313 * is different from the one we started out with, try to touch the 6314 * least number of cfs_rqs. 6315 */ 6316 if (prev != p) { 6317 struct sched_entity *pse = &prev->se; 6318 6319 while (!(cfs_rq = is_same_group(se, pse))) { 6320 int se_depth = se->depth; 6321 int pse_depth = pse->depth; 6322 6323 if (se_depth <= pse_depth) { 6324 put_prev_entity(cfs_rq_of(pse), pse); 6325 pse = parent_entity(pse); 6326 } 6327 if (se_depth >= pse_depth) { 6328 set_next_entity(cfs_rq_of(se), se); 6329 se = parent_entity(se); 6330 } 6331 } 6332 6333 put_prev_entity(cfs_rq, pse); 6334 set_next_entity(cfs_rq, se); 6335 } 6336 6337 if (hrtick_enabled(rq)) 6338 hrtick_start_fair(rq, p); 6339 6340 return p; 6341 simple: 6342 #endif 6343 6344 put_prev_task(rq, prev); 6345 6346 do { 6347 se = pick_next_entity(cfs_rq, NULL); 6348 set_next_entity(cfs_rq, se); 6349 cfs_rq = group_cfs_rq(se); 6350 } while (cfs_rq); 6351 6352 p = task_of(se); 6353 6354 if (hrtick_enabled(rq)) 6355 hrtick_start_fair(rq, p); 6356 6357 return p; 6358 6359 idle: 6360 new_tasks = idle_balance(rq, rf); 6361 6362 /* 6363 * Because idle_balance() releases (and re-acquires) rq->lock, it is 6364 * possible for any higher priority task to appear. In that case we 6365 * must re-start the pick_next_entity() loop.
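* (Reading of the checks that follow: a negative new_tasks is how idle_balance() signals that a higher-priority class became runnable, hence RETRY_TASK; a positive value means CFS tasks were pulled and the fair pick is retried.)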
6366 */ 6367 if (new_tasks < 0) 6368 return RETRY_TASK; 6369 6370 if (new_tasks > 0) 6371 goto again; 6372 6373 return NULL; 6374 } 6375 6376 /* 6377 * Account for a descheduled task: 6378 */ 6379 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) 6380 { 6381 struct sched_entity *se = &prev->se; 6382 struct cfs_rq *cfs_rq; 6383 6384 for_each_sched_entity(se) { 6385 cfs_rq = cfs_rq_of(se); 6386 put_prev_entity(cfs_rq, se); 6387 } 6388 } 6389 6390 /* 6391 * sched_yield() is very simple 6392 * 6393 * The magic of dealing with the ->skip buddy is in pick_next_entity. 6394 */ 6395 static void yield_task_fair(struct rq *rq) 6396 { 6397 struct task_struct *curr = rq->curr; 6398 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 6399 struct sched_entity *se = &curr->se; 6400 6401 /* 6402 * Are we the only task in the tree? 6403 */ 6404 if (unlikely(rq->nr_running == 1)) 6405 return; 6406 6407 clear_buddies(cfs_rq, se); 6408 6409 if (curr->policy != SCHED_BATCH) { 6410 update_rq_clock(rq); 6411 /* 6412 * Update run-time statistics of the 'current'. 6413 */ 6414 update_curr(cfs_rq); 6415 /* 6416 * Tell update_rq_clock() that we've just updated, 6417 * so we don't do microscopic update in schedule() 6418 * and double the fastpath cost. 6419 */ 6420 rq_clock_skip_update(rq, true); 6421 } 6422 6423 set_skip_buddy(se); 6424 } 6425 6426 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) 6427 { 6428 struct sched_entity *se = &p->se; 6429 6430 /* throttled hierarchies are not runnable */ 6431 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) 6432 return false; 6433 6434 /* Tell the scheduler that we'd really like pse to run next. */ 6435 set_next_buddy(se); 6436 6437 yield_task_fair(rq); 6438 6439 return true; 6440 } 6441 6442 #ifdef CONFIG_SMP 6443 /************************************************** 6444 * Fair scheduling class load-balancing methods. 6445 * 6446 * BASICS 6447 * 6448 * The purpose of load-balancing is to achieve the same basic fairness the 6449 * per-cpu scheduler provides, namely provide a proportional amount of compute 6450 * time to each task. This is expressed in the following equation: 6451 * 6452 * W_i,n/P_i == W_j,n/P_j for all i,j (1) 6453 * 6454 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight 6455 * W_i,0 is defined as: 6456 * 6457 * W_i,0 = \Sum_j w_i,j (2) 6458 * 6459 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight 6460 * is derived from the nice value as per sched_prio_to_weight[]. 6461 * 6462 * The weight average is an exponential decay average of the instantaneous 6463 * weight: 6464 * 6465 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) 6466 * 6467 * C_i is the compute capacity of cpu i; typically it is the 6468 * fraction of 'recent' time available for SCHED_OTHER task execution. But it 6469 * can also include other factors [XXX]. 6470 * 6471 * To achieve this balance we define a measure of imbalance which follows 6472 * directly from (1): 6473 * 6474 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) 6475 * 6476 * We then move tasks around to minimize the imbalance. In the continuous 6477 * function space it is obvious this converges, in the discrete case we get 6478 * a few fun cases generally called infeasible weight scenarios. 6479 * 6480 * [XXX expand on: 6481 * - infeasible weights; 6482 * - local vs global optima in the discrete case.
] 6483 * 6484 * 6485 * SCHED DOMAINS 6486 * 6487 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) 6488 * for all i,j solution, we create a tree of cpus that follows the hardware 6489 * topology where each level pairs two lower groups (or better). This results 6490 * in O(log n) layers. Furthermore we reduce the number of cpus going up the 6491 * tree to only the first of the previous level, and we decrease the frequency 6492 * of load-balance at each level inversely proportional to the number of cpus in 6493 * the groups. 6494 * 6495 * This yields: 6496 * 6497 * \Sum_{i = 0}^{log_2 n} { 1/2^i * n/2^i * 2^i } = O(n) (5) 6498 * 6499 * where, at each level i, 1/2^i is the load-balance frequency, n/2^i the 6500 * number of cpus doing load-balance, and 2^i the size of each group. 6504 * 6505 * Coupled with a limit on how many tasks we can migrate every balance pass, 6506 * this makes (5) the runtime complexity of the balancer. 6507 * 6508 * An important property here is that each CPU is still (indirectly) connected 6509 * to every other cpu in at most O(log n) steps: 6510 * 6511 * The adjacency matrix of the resulting graph is given by: 6512 * 6513 * A_i,j = \Union_{k = 0}^{log_2 n} (i % 2^k == 0) && (i / 2^(k+1) == j / 2^(k+1)) (6) 6516 * 6517 * And you'll find that: 6518 * 6519 * A^(log_2 n)_i,j != 0 for all i,j (7) 6520 * 6521 * Showing there's indeed a path between every cpu in at most O(log n) steps. 6522 * The task movement gives a factor of O(m), giving a convergence complexity 6523 * of: 6524 * 6525 * O(nm log n), n := nr_cpus, m := nr_tasks (8) 6526 * 6527 * 6528 * WORK CONSERVING 6529 * 6530 * In order to avoid CPUs going idle while there's still work to do, new idle 6531 * balancing is more aggressive and has the newly idle cpu iterate up the domain 6532 * tree itself instead of relying on other CPUs to bring it work. 6533 * 6534 * This adds some complexity to both (5) and (8) but it reduces the total idle 6535 * time. 6536 * 6537 * [XXX more?] 6538 * 6539 * 6540 * CGROUPS 6541 * 6542 * Cgroups make a horror show out of (2), instead of a simple sum we get: 6543 * 6544 * W_i,0 = \Sum_j \Prod_k w_k * s_k,i / S_k (9) 6547 * 6548 * Where 6549 * 6550 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) 6551 * 6552 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i. 6553 * 6554 * The big problem is S_k, it's a global sum needed to compute a local (W_i) 6555 * property. 6556 * 6557 * [XXX write more on how we solve this.. _after_ merging pjt's patches that 6558 * rewrite all of this once again.]
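* (Tiny example of (9), numbers assumed: a single cgroup k of weight w_k = 1024 with one nice-0 task on each of two cpus has s_k,0 = s_k,1 = 1024 and S_k = 2048, so each cpu sees W_i,0 = 1024 * 1024/2048 = 512 -- half the group's weight apiece.)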
6559 */ 6560 6561 static unsigned long __read_mostly max_load_balance_interval = HZ/10; 6562 6563 enum fbq_type { regular, remote, all }; 6564 6565 #define LBF_ALL_PINNED 0x01 6566 #define LBF_NEED_BREAK 0x02 6567 #define LBF_DST_PINNED 0x04 6568 #define LBF_SOME_PINNED 0x08 6569 6570 struct lb_env { 6571 struct sched_domain *sd; 6572 6573 struct rq *src_rq; 6574 int src_cpu; 6575 6576 int dst_cpu; 6577 struct rq *dst_rq; 6578 6579 struct cpumask *dst_grpmask; 6580 int new_dst_cpu; 6581 enum cpu_idle_type idle; 6582 long imbalance; 6583 /* The set of CPUs under consideration for load-balancing */ 6584 struct cpumask *cpus; 6585 6586 unsigned int flags; 6587 6588 unsigned int loop; 6589 unsigned int loop_break; 6590 unsigned int loop_max; 6591 6592 enum fbq_type fbq_type; 6593 struct list_head tasks; 6594 }; 6595 6596 /* 6597 * Is this task likely cache-hot: 6598 */ 6599 static int task_hot(struct task_struct *p, struct lb_env *env) 6600 { 6601 s64 delta; 6602 6603 lockdep_assert_held(&env->src_rq->lock); 6604 6605 if (p->sched_class != &fair_sched_class) 6606 return 0; 6607 6608 if (unlikely(p->policy == SCHED_IDLE)) 6609 return 0; 6610 6611 /* 6612 * Buddy candidates are cache hot: 6613 */ 6614 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && 6615 (&p->se == cfs_rq_of(&p->se)->next || 6616 &p->se == cfs_rq_of(&p->se)->last)) 6617 return 1; 6618 6619 if (sysctl_sched_migration_cost == -1) 6620 return 1; 6621 if (sysctl_sched_migration_cost == 0) 6622 return 0; 6623 6624 delta = rq_clock_task(env->src_rq) - p->se.exec_start; 6625 6626 return delta < (s64)sysctl_sched_migration_cost; 6627 } 6628 6629 #ifdef CONFIG_NUMA_BALANCING 6630 /* 6631 * Returns 1, if task migration degrades locality 6632 * Returns 0, if task migration improves locality i.e migration preferred. 6633 * Returns -1, if task migration is not affected by locality. 6634 */ 6635 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) 6636 { 6637 struct numa_group *numa_group = rcu_dereference(p->numa_group); 6638 unsigned long src_faults, dst_faults; 6639 int src_nid, dst_nid; 6640 6641 if (!static_branch_likely(&sched_numa_balancing)) 6642 return -1; 6643 6644 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) 6645 return -1; 6646 6647 src_nid = cpu_to_node(env->src_cpu); 6648 dst_nid = cpu_to_node(env->dst_cpu); 6649 6650 if (src_nid == dst_nid) 6651 return -1; 6652 6653 /* Migrating away from the preferred node is always bad. */ 6654 if (src_nid == p->numa_preferred_nid) { 6655 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) 6656 return 1; 6657 else 6658 return -1; 6659 } 6660 6661 /* Encourage migration to the preferred node. */ 6662 if (dst_nid == p->numa_preferred_nid) 6663 return 0; 6664 6665 /* Leaving a core idle is often worse than degrading locality. */ 6666 if (env->idle != CPU_NOT_IDLE) 6667 return -1; 6668 6669 if (numa_group) { 6670 src_faults = group_faults(p, src_nid); 6671 dst_faults = group_faults(p, dst_nid); 6672 } else { 6673 src_faults = task_faults(p, src_nid); 6674 dst_faults = task_faults(p, dst_nid); 6675 } 6676 6677 return dst_faults < src_faults; 6678 } 6679 6680 #else 6681 static inline int migrate_degrades_locality(struct task_struct *p, 6682 struct lb_env *env) 6683 { 6684 return -1; 6685 } 6686 #endif 6687 6688 /* 6689 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 
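* (On the NUMA check above: with p preferring node 0 and running there, migrate_degrades_locality() returns 1 -- degrades -- while the source rq still runs tasks that would rather be elsewhere (nr_running > nr_preferred_running), and -1 -- indifferent -- once every task there prefers node 0; a move toward the preferred node returns 0.)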
6690 */ 6691 static 6692 int can_migrate_task(struct task_struct *p, struct lb_env *env) 6693 { 6694 int tsk_cache_hot; 6695 6696 lockdep_assert_held(&env->src_rq->lock); 6697 6698 /* 6699 * We do not migrate tasks that are: 6700 * 1) throttled_lb_pair, or 6701 * 2) cannot be migrated to this CPU due to cpus_allowed, or 6702 * 3) running (obviously), or 6703 * 4) are cache-hot on their current CPU. 6704 */ 6705 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) 6706 return 0; 6707 6708 if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { 6709 int cpu; 6710 6711 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); 6712 6713 env->flags |= LBF_SOME_PINNED; 6714 6715 /* 6716 * Remember if this task can be migrated to any other cpu in 6717 * our sched_group. We may want to revisit it if we couldn't 6718 * meet load balance goals by pulling other tasks on src_cpu. 6719 * 6720 * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have 6721 * already computed one in current iteration. 6722 */ 6723 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) 6724 return 0; 6725 6726 /* Prevent re-selecting dst_cpu via env's cpus */ 6727 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { 6728 if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { 6729 env->flags |= LBF_DST_PINNED; 6730 env->new_dst_cpu = cpu; 6731 break; 6732 } 6733 } 6734 6735 return 0; 6736 } 6737 6738 /* Record that we found at least one task that could run on dst_cpu */ 6739 env->flags &= ~LBF_ALL_PINNED; 6740 6741 if (task_running(env->src_rq, p)) { 6742 schedstat_inc(p->se.statistics.nr_failed_migrations_running); 6743 return 0; 6744 } 6745 6746 /* 6747 * Aggressive migration if: 6748 * 1) destination numa is preferred, 6749 * 2) task is cache cold, or 6750 * 3) too many balance attempts have failed. 6751 */ 6752 tsk_cache_hot = migrate_degrades_locality(p, env); 6753 if (tsk_cache_hot == -1) 6754 tsk_cache_hot = task_hot(p, env); 6755 6756 if (tsk_cache_hot <= 0 || 6757 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { 6758 if (tsk_cache_hot == 1) { 6759 schedstat_inc(env->sd->lb_hot_gained[env->idle]); 6760 schedstat_inc(p->se.statistics.nr_forced_migrations); 6761 } 6762 return 1; 6763 } 6764 6765 schedstat_inc(p->se.statistics.nr_failed_migrations_hot); 6766 return 0; 6767 } 6768 6769 /* 6770 * detach_task() -- detach the task for the migration specified in env 6771 */ 6772 static void detach_task(struct task_struct *p, struct lb_env *env) 6773 { 6774 lockdep_assert_held(&env->src_rq->lock); 6775 6776 p->on_rq = TASK_ON_RQ_MIGRATING; 6777 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); 6778 set_task_cpu(p, env->dst_cpu); 6779 } 6780 6781 /* 6782 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as 6783 * part of active balancing operations within "domain". 6784 * 6785 * Returns a task if successful and NULL otherwise. 6786 */ 6787 static struct task_struct *detach_one_task(struct lb_env *env) 6788 { 6789 struct task_struct *p, *n; 6790 6791 lockdep_assert_held(&env->src_rq->lock); 6792 6793 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { 6794 if (!can_migrate_task(p, env)) 6795 continue; 6796 6797 detach_task(p, env); 6798 6799 /* 6800 * Right now, this is only the second place where 6801 * lb_gained[env->idle] is updated (other is detach_tasks) 6802 * so we can safely collect stats here rather than 6803 * inside detach_tasks().
6804 */ 6805 schedstat_inc(env->sd->lb_gained[env->idle]); 6806 return p; 6807 } 6808 return NULL; 6809 } 6810 6811 static const unsigned int sched_nr_migrate_break = 32; 6812 6813 /* 6814 * detach_tasks() -- tries to detach up to imbalance weighted load from 6815 * busiest_rq, as part of a balancing operation within domain "sd". 6816 * 6817 * Returns number of detached tasks if successful and 0 otherwise. 6818 */ 6819 static int detach_tasks(struct lb_env *env) 6820 { 6821 struct list_head *tasks = &env->src_rq->cfs_tasks; 6822 struct task_struct *p; 6823 unsigned long load; 6824 int detached = 0; 6825 6826 lockdep_assert_held(&env->src_rq->lock); 6827 6828 if (env->imbalance <= 0) 6829 return 0; 6830 6831 while (!list_empty(tasks)) { 6832 /* 6833 * We don't want to steal all, otherwise we may be treated likewise, 6834 * which could at worst lead to a livelock crash. 6835 */ 6836 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) 6837 break; 6838 6839 p = list_first_entry(tasks, struct task_struct, se.group_node); 6840 6841 env->loop++; 6842 /* We've more or less seen every task there is, call it quits */ 6843 if (env->loop > env->loop_max) 6844 break; 6845 6846 /* take a breather every nr_migrate tasks */ 6847 if (env->loop > env->loop_break) { 6848 env->loop_break += sched_nr_migrate_break; 6849 env->flags |= LBF_NEED_BREAK; 6850 break; 6851 } 6852 6853 if (!can_migrate_task(p, env)) 6854 goto next; 6855 6856 load = task_h_load(p); 6857 6858 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) 6859 goto next; 6860 6861 if ((load / 2) > env->imbalance) 6862 goto next; 6863 6864 detach_task(p, env); 6865 list_add(&p->se.group_node, &env->tasks); 6866 6867 detached++; 6868 env->imbalance -= load; 6869 6870 #ifdef CONFIG_PREEMPT 6871 /* 6872 * NEWIDLE balancing is a source of latency, so preemptible 6873 * kernels will stop after the first task is detached to minimize 6874 * the critical section. 6875 */ 6876 if (env->idle == CPU_NEWLY_IDLE) 6877 break; 6878 #endif 6879 6880 /* 6881 * We only want to steal up to the prescribed amount of 6882 * weighted load. 6883 */ 6884 if (env->imbalance <= 0) 6885 break; 6886 6887 continue; 6888 next: 6889 list_move_tail(&p->se.group_node, tasks); 6890 } 6891 6892 /* 6893 * Right now, this is one of only two places we collect this stat 6894 * so we can safely collect detach_one_task() stats here rather 6895 * than inside detach_one_task(). 6896 */ 6897 schedstat_add(env->sd->lb_gained[env->idle], detached); 6898 6899 return detached; 6900 } 6901 6902 /* 6903 * attach_task() -- attach the task detached by detach_task() to its new rq. 6904 */ 6905 static void attach_task(struct rq *rq, struct task_struct *p) 6906 { 6907 lockdep_assert_held(&rq->lock); 6908 6909 BUG_ON(task_rq(p) != rq); 6910 activate_task(rq, p, ENQUEUE_NOCLOCK); 6911 p->on_rq = TASK_ON_RQ_QUEUED; 6912 check_preempt_curr(rq, p, 0); 6913 } 6914 6915 /* 6916 * attach_one_task() -- attaches the task returned from detach_one_task() to 6917 * its new rq. 6918 */ 6919 static void attach_one_task(struct rq *rq, struct task_struct *p) 6920 { 6921 struct rq_flags rf; 6922 6923 rq_lock(rq, &rf); 6924 update_rq_clock(rq); 6925 attach_task(rq, p); 6926 rq_unlock(rq, &rf); 6927 } 6928 6929 /* 6930 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their 6931 * new rq. 
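* (Locking sketch, as the asserts suggest: detach ran under src_rq->lock, which the caller is expected to drop before attach_tasks() takes dst_rq->lock here, so the two runqueue locks are not held together on this path.)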
6932 */ 6933 static void attach_tasks(struct lb_env *env) 6934 { 6935 struct list_head *tasks = &env->tasks; 6936 struct task_struct *p; 6937 struct rq_flags rf; 6938 6939 rq_lock(env->dst_rq, &rf); 6940 update_rq_clock(env->dst_rq); 6941 6942 while (!list_empty(tasks)) { 6943 p = list_first_entry(tasks, struct task_struct, se.group_node); 6944 list_del_init(&p->se.group_node); 6945 6946 attach_task(env->dst_rq, p); 6947 } 6948 6949 rq_unlock(env->dst_rq, &rf); 6950 } 6951 6952 #ifdef CONFIG_FAIR_GROUP_SCHED 6953 6954 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 6955 { 6956 if (cfs_rq->load.weight) 6957 return false; 6958 6959 if (cfs_rq->avg.load_sum) 6960 return false; 6961 6962 if (cfs_rq->avg.util_sum) 6963 return false; 6964 6965 if (cfs_rq->runnable_load_sum) 6966 return false; 6967 6968 return true; 6969 } 6970 6971 static void update_blocked_averages(int cpu) 6972 { 6973 struct rq *rq = cpu_rq(cpu); 6974 struct cfs_rq *cfs_rq, *pos; 6975 struct rq_flags rf; 6976 6977 rq_lock_irqsave(rq, &rf); 6978 update_rq_clock(rq); 6979 6980 /* 6981 * Iterates the task_group tree in a bottom-up fashion, see 6982 * list_add_leaf_cfs_rq() for details. 6983 */ 6984 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { 6985 struct sched_entity *se; 6986 6987 /* throttled entities do not contribute to load */ 6988 if (throttled_hierarchy(cfs_rq)) 6989 continue; 6990 6991 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq)) 6992 update_tg_load_avg(cfs_rq, 0); 6993 6994 /* Propagate pending load changes to the parent, if any: */ 6995 se = cfs_rq->tg->se[cpu]; 6996 if (se && !skip_blocked_update(se)) 6997 update_load_avg(se, 0); 6998 6999 /* 7000 * There can be a lot of idle CPU cgroups. Don't let fully 7001 * decayed cfs_rqs linger on the list. 7002 */ 7003 if (cfs_rq_is_decayed(cfs_rq)) 7004 list_del_leaf_cfs_rq(cfs_rq); 7005 } 7006 rq_unlock_irqrestore(rq, &rf); 7007 } 7008 7009 /* 7010 * Compute the hierarchical load factor for cfs_rq and all its ascendants. 7011 * This needs to be done in a top-down fashion because the load of a child 7012 * group is a fraction of its parent's load.
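* (Numeric sketch, values assumed: with a parent h_load of 2048 and a group entity whose load_avg is 512 out of a parent cfs_rq load_avg of 1024, the child cfs_rq gets h_load = 2048 * 512 / (1024 + 1) ~= 1023; task_h_load() then scales a task's own load_avg the same way.)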
7013 */ 7014 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) 7015 { 7016 struct rq *rq = rq_of(cfs_rq); 7017 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; 7018 unsigned long now = jiffies; 7019 unsigned long load; 7020 7021 if (cfs_rq->last_h_load_update == now) 7022 return; 7023 7024 cfs_rq->h_load_next = NULL; 7025 for_each_sched_entity(se) { 7026 cfs_rq = cfs_rq_of(se); 7027 cfs_rq->h_load_next = se; 7028 if (cfs_rq->last_h_load_update == now) 7029 break; 7030 } 7031 7032 if (!se) { 7033 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); 7034 cfs_rq->last_h_load_update = now; 7035 } 7036 7037 while ((se = cfs_rq->h_load_next) != NULL) { 7038 load = cfs_rq->h_load; 7039 load = div64_ul(load * se->avg.load_avg, 7040 cfs_rq_load_avg(cfs_rq) + 1); 7041 cfs_rq = group_cfs_rq(se); 7042 cfs_rq->h_load = load; 7043 cfs_rq->last_h_load_update = now; 7044 } 7045 } 7046 7047 static unsigned long task_h_load(struct task_struct *p) 7048 { 7049 struct cfs_rq *cfs_rq = task_cfs_rq(p); 7050 7051 update_cfs_rq_h_load(cfs_rq); 7052 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, 7053 cfs_rq_load_avg(cfs_rq) + 1); 7054 } 7055 #else 7056 static inline void update_blocked_averages(int cpu) 7057 { 7058 struct rq *rq = cpu_rq(cpu); 7059 struct cfs_rq *cfs_rq = &rq->cfs; 7060 struct rq_flags rf; 7061 7062 rq_lock_irqsave(rq, &rf); 7063 update_rq_clock(rq); 7064 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); 7065 rq_unlock_irqrestore(rq, &rf); 7066 } 7067 7068 static unsigned long task_h_load(struct task_struct *p) 7069 { 7070 return p->se.avg.load_avg; 7071 } 7072 #endif 7073 7074 /********** Helpers for find_busiest_group ************************/ 7075 7076 enum group_type { 7077 group_other = 0, 7078 group_imbalanced, 7079 group_overloaded, 7080 }; 7081 7082 /* 7083 * sg_lb_stats - stats of a sched_group required for load_balancing 7084 */ 7085 struct sg_lb_stats { 7086 unsigned long avg_load; /*Avg load across the CPUs of the group */ 7087 unsigned long group_load; /* Total load over the CPUs of the group */ 7088 unsigned long sum_weighted_load; /* Weighted load of group's tasks */ 7089 unsigned long load_per_task; 7090 unsigned long group_capacity; 7091 unsigned long group_util; /* Total utilization of the group */ 7092 unsigned int sum_nr_running; /* Nr tasks running in the group */ 7093 unsigned int idle_cpus; 7094 unsigned int group_weight; 7095 enum group_type group_type; 7096 int group_no_capacity; 7097 #ifdef CONFIG_NUMA_BALANCING 7098 unsigned int nr_numa_running; 7099 unsigned int nr_preferred_running; 7100 #endif 7101 }; 7102 7103 /* 7104 * sd_lb_stats - Structure to store the statistics of a sched_domain 7105 * during load balancing. 7106 */ 7107 struct sd_lb_stats { 7108 struct sched_group *busiest; /* Busiest group in this sd */ 7109 struct sched_group *local; /* Local group in this sd */ 7110 unsigned long total_running; 7111 unsigned long total_load; /* Total load of all groups in sd */ 7112 unsigned long total_capacity; /* Total capacity of all groups in sd */ 7113 unsigned long avg_load; /* Average load across all groups in sd */ 7114 7115 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ 7116 struct sg_lb_stats local_stat; /* Statistics of the local group */ 7117 }; 7118 7119 static inline void init_sd_lb_stats(struct sd_lb_stats *sds) 7120 { 7121 /* 7122 * Skimp on the clearing to avoid duplicate work. We can avoid clearing 7123 * local_stat because update_sg_lb_stats() does a full clear/assignment. 
7124 * We must however clear busiest_stat::avg_load because 7125 * update_sd_pick_busiest() reads this before assignment. 7126 */ 7127 *sds = (struct sd_lb_stats){ 7128 .busiest = NULL, 7129 .local = NULL, 7130 .total_running = 0UL, 7131 .total_load = 0UL, 7132 .total_capacity = 0UL, 7133 .busiest_stat = { 7134 .avg_load = 0UL, 7135 .sum_nr_running = 0, 7136 .group_type = group_other, 7137 }, 7138 }; 7139 } 7140 7141 /** 7142 * get_sd_load_idx - Obtain the load index for a given sched domain. 7143 * @sd: The sched_domain whose load_idx is to be obtained. 7144 * @idle: The idle status of the CPU for whose sd load_idx is obtained. 7145 * 7146 * Return: The load index. 7147 */ 7148 static inline int get_sd_load_idx(struct sched_domain *sd, 7149 enum cpu_idle_type idle) 7150 { 7151 int load_idx; 7152 7153 switch (idle) { 7154 case CPU_NOT_IDLE: 7155 load_idx = sd->busy_idx; 7156 break; 7157 7158 case CPU_NEWLY_IDLE: 7159 load_idx = sd->newidle_idx; 7160 break; 7161 default: 7162 load_idx = sd->idle_idx; 7163 break; 7164 } 7165 7166 return load_idx; 7167 } 7168 7169 static unsigned long scale_rt_capacity(int cpu) 7170 { 7171 struct rq *rq = cpu_rq(cpu); 7172 u64 total, used, age_stamp, avg; 7173 s64 delta; 7174 7175 /* 7176 * Since we're reading these variables without serialization make sure 7177 * we read them once before doing sanity checks on them. 7178 */ 7179 age_stamp = READ_ONCE(rq->age_stamp); 7180 avg = READ_ONCE(rq->rt_avg); 7181 delta = __rq_clock_broken(rq) - age_stamp; 7182 7183 if (unlikely(delta < 0)) 7184 delta = 0; 7185 7186 total = sched_avg_period() + delta; 7187 7188 used = div_u64(avg, total); 7189 7190 if (likely(used < SCHED_CAPACITY_SCALE)) 7191 return SCHED_CAPACITY_SCALE - used; 7192 7193 return 1; 7194 } 7195 7196 static void update_cpu_capacity(struct sched_domain *sd, int cpu) 7197 { 7198 unsigned long capacity = arch_scale_cpu_capacity(sd, cpu); 7199 struct sched_group *sdg = sd->groups; 7200 7201 cpu_rq(cpu)->cpu_capacity_orig = capacity; 7202 7203 capacity *= scale_rt_capacity(cpu); 7204 capacity >>= SCHED_CAPACITY_SHIFT; 7205 7206 if (!capacity) 7207 capacity = 1; 7208 7209 cpu_rq(cpu)->cpu_capacity = capacity; 7210 sdg->sgc->capacity = capacity; 7211 sdg->sgc->min_capacity = capacity; 7212 } 7213 7214 void update_group_capacity(struct sched_domain *sd, int cpu) 7215 { 7216 struct sched_domain *child = sd->child; 7217 struct sched_group *group, *sdg = sd->groups; 7218 unsigned long capacity, min_capacity; 7219 unsigned long interval; 7220 7221 interval = msecs_to_jiffies(sd->balance_interval); 7222 interval = clamp(interval, 1UL, max_load_balance_interval); 7223 sdg->sgc->next_update = jiffies + interval; 7224 7225 if (!child) { 7226 update_cpu_capacity(sd, cpu); 7227 return; 7228 } 7229 7230 capacity = 0; 7231 min_capacity = ULONG_MAX; 7232 7233 if (child->flags & SD_OVERLAP) { 7234 /* 7235 * SD_OVERLAP domains cannot assume that child groups 7236 * span the current group. 7237 */ 7238 7239 for_each_cpu(cpu, sched_group_span(sdg)) { 7240 struct sched_group_capacity *sgc; 7241 struct rq *rq = cpu_rq(cpu); 7242 7243 /* 7244 * build_sched_domains() -> init_sched_groups_capacity() 7245 * gets here before we've attached the domains to the 7246 * runqueues. 7247 * 7248 * Use capacity_of(), which is set irrespective of domains 7249 * in update_cpu_capacity(). 7250 * 7251 * This avoids capacity from being 0 and 7252 * causing divide-by-zero issues on boot. 
7253 */ 7254 if (unlikely(!rq->sd)) { 7255 capacity += capacity_of(cpu); 7256 } else { 7257 sgc = rq->sd->groups->sgc; 7258 capacity += sgc->capacity; 7259 } 7260 7261 min_capacity = min(capacity, min_capacity); 7262 } 7263 } else { 7264 /* 7265 * !SD_OVERLAP domains can assume that child groups 7266 * span the current group. 7267 */ 7268 7269 group = child->groups; 7270 do { 7271 struct sched_group_capacity *sgc = group->sgc; 7272 7273 capacity += sgc->capacity; 7274 min_capacity = min(sgc->min_capacity, min_capacity); 7275 group = group->next; 7276 } while (group != child->groups); 7277 } 7278 7279 sdg->sgc->capacity = capacity; 7280 sdg->sgc->min_capacity = min_capacity; 7281 } 7282 7283 /* 7284 * Check whether the capacity of the rq has been noticeably reduced by side 7285 * activity. The imbalance_pct is used for the threshold. 7286 * Return true if the capacity is reduced. 7287 */ 7288 static inline int 7289 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) 7290 { 7291 return ((rq->cpu_capacity * sd->imbalance_pct) < 7292 (rq->cpu_capacity_orig * 100)); 7293 } 7294 7295 /* 7296 * Group imbalance indicates (and tries to solve) the problem where balancing 7297 * groups is inadequate due to ->cpus_allowed constraints. 7298 * 7299 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a 7300 * cpumask covering 1 cpu of the first group and 3 cpus of the second group. 7301 * Something like: 7302 * 7303 * { 0 1 2 3 } { 4 5 6 7 } 7304 * * * * * 7305 * 7306 * If we were to balance group-wise we'd place two tasks in the first group and 7307 * two tasks in the second group. Clearly this is undesired as it will overload 7308 * cpu 3 and leave one of the cpus in the second group unused. 7309 * 7310 * The current solution to this issue is detecting the skew in the first group 7311 * by noticing the lower domain failed to reach balance and had difficulty 7312 * moving tasks due to affinity constraints. 7313 * 7314 * When this is so detected, this group becomes a candidate for busiest; see 7315 * update_sd_pick_busiest(). And calculate_imbalance() and 7316 * find_busiest_group() avoid some of the usual balance conditions to allow it 7317 * to create an effective group imbalance. 7318 * 7319 * This is a somewhat tricky proposition since the next run might not find the 7320 * group imbalance and decide the groups need to be balanced again. A most 7321 * subtle and fragile situation. 7322 */ 7323 7324 static inline int sg_imbalanced(struct sched_group *group) 7325 { 7326 return group->sgc->imbalance; 7327 } 7328 7329 /* 7330 * group_has_capacity returns true if the group has spare capacity that could 7331 * be used by some tasks. 7332 * We consider that a group has spare capacity if the number of tasks is 7333 * smaller than the number of CPUs or if the utilization is lower than the 7334 * available capacity for CFS tasks. 7335 * For the latter, we use a threshold to stabilize the state, to take into 7336 * account the variance of the tasks' load and to return true if the available 7337 * capacity is meaningful for the load balancer. 7338 * As an example, an available capacity of 1% can appear but it doesn't bring 7339 * any benefit to load balancing.
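* (Example with assumed numbers: group_capacity = 2048 and imbalance_pct = 125 make the check read 2048 * 100 > group_util * 125, i.e. anything up to group_util = 1638 still counts as spare capacity.)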
7340 */ 7341 static inline bool 7342 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs) 7343 { 7344 if (sgs->sum_nr_running < sgs->group_weight) 7345 return true; 7346 7347 if ((sgs->group_capacity * 100) > 7348 (sgs->group_util * env->sd->imbalance_pct)) 7349 return true; 7350 7351 return false; 7352 } 7353 7354 /* 7355 * group_is_overloaded returns true if the group has more tasks than it can 7356 * handle. 7357 * group_is_overloaded is not equal to !group_has_capacity because a group 7358 * with exactly the right number of tasks has no spare capacity left but is 7359 * not overloaded, so both group_has_capacity and group_is_overloaded return 7360 * false. 7361 */ 7362 static inline bool 7363 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) 7364 { 7365 if (sgs->sum_nr_running <= sgs->group_weight) 7366 return false; 7367 7368 if ((sgs->group_capacity * 100) < 7369 (sgs->group_util * env->sd->imbalance_pct)) 7370 return true; 7371 7372 return false; 7373 } 7374 7375 /* 7376 * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller 7377 * per-CPU capacity than sched_group ref. 7378 */ 7379 static inline bool 7380 group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref) 7381 { 7382 return sg->sgc->min_capacity * capacity_margin < 7383 ref->sgc->min_capacity * 1024; 7384 } 7385 7386 static inline enum 7387 group_type group_classify(struct sched_group *group, 7388 struct sg_lb_stats *sgs) 7389 { 7390 if (sgs->group_no_capacity) 7391 return group_overloaded; 7392 7393 if (sg_imbalanced(group)) 7394 return group_imbalanced; 7395 7396 return group_other; 7397 } 7398 7399 /** 7400 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 7401 * @env: The load balancing environment. 7402 * @group: sched_group whose statistics are to be updated. 7403 * @load_idx: Load index of sched_domain of this_cpu for load calc. 7404 * @local_group: Does group contain this_cpu. 7405 * @sgs: variable to hold the statistics for this group. 7406 * @overload: Indicate more than one runnable task for any CPU.
7407 */ 7408 static inline void update_sg_lb_stats(struct lb_env *env, 7409 struct sched_group *group, int load_idx, 7410 int local_group, struct sg_lb_stats *sgs, 7411 bool *overload) 7412 { 7413 unsigned long load; 7414 int i, nr_running; 7415 7416 memset(sgs, 0, sizeof(*sgs)); 7417 7418 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 7419 struct rq *rq = cpu_rq(i); 7420 7421 /* Bias balancing toward cpus of our domain */ 7422 if (local_group) 7423 load = target_load(i, load_idx); 7424 else 7425 load = source_load(i, load_idx); 7426 7427 sgs->group_load += load; 7428 sgs->group_util += cpu_util(i); 7429 sgs->sum_nr_running += rq->cfs.h_nr_running; 7430 7431 nr_running = rq->nr_running; 7432 if (nr_running > 1) 7433 *overload = true; 7434 7435 #ifdef CONFIG_NUMA_BALANCING 7436 sgs->nr_numa_running += rq->nr_numa_running; 7437 sgs->nr_preferred_running += rq->nr_preferred_running; 7438 #endif 7439 sgs->sum_weighted_load += weighted_cpuload(rq); 7440 /* 7441 * No need to call idle_cpu() if nr_running is not 0 7442 */ 7443 if (!nr_running && idle_cpu(i)) 7444 sgs->idle_cpus++; 7445 } 7446 7447 /* Adjust by relative CPU capacity of the group */ 7448 sgs->group_capacity = group->sgc->capacity; 7449 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity; 7450 7451 if (sgs->sum_nr_running) 7452 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; 7453 7454 sgs->group_weight = group->group_weight; 7455 7456 sgs->group_no_capacity = group_is_overloaded(env, sgs); 7457 sgs->group_type = group_classify(group, sgs); 7458 } 7459 7460 /** 7461 * update_sd_pick_busiest - return 1 on busiest group 7462 * @env: The load balancing environment. 7463 * @sds: sched_domain statistics 7464 * @sg: sched_group candidate to be checked for being the busiest 7465 * @sgs: sched_group statistics 7466 * 7467 * Determine if @sg is a busier group than the previously selected 7468 * busiest group. 7469 * 7470 * Return: %true if @sg is a busier group than the previously selected 7471 * busiest group. %false otherwise. 7472 */ 7473 static bool update_sd_pick_busiest(struct lb_env *env, 7474 struct sd_lb_stats *sds, 7475 struct sched_group *sg, 7476 struct sg_lb_stats *sgs) 7477 { 7478 struct sg_lb_stats *busiest = &sds->busiest_stat; 7479 7480 if (sgs->group_type > busiest->group_type) 7481 return true; 7482 7483 if (sgs->group_type < busiest->group_type) 7484 return false; 7485 7486 if (sgs->avg_load <= busiest->avg_load) 7487 return false; 7488 7489 if (!(env->sd->flags & SD_ASYM_CPUCAPACITY)) 7490 goto asym_packing; 7491 7492 /* 7493 * Candidate sg has no more than one task per CPU and 7494 * has higher per-CPU capacity. Migrating tasks to less 7495 * capable CPUs may harm throughput. Maximize throughput; 7496 * power/energy consequences are not considered. 7497 */ 7498 if (sgs->sum_nr_running <= sgs->group_weight && 7499 group_smaller_cpu_capacity(sds->local, sg)) 7500 return false; 7501 7502 asym_packing: 7503 /* This is the busiest node in its class. */ 7504 if (!(env->sd->flags & SD_ASYM_PACKING)) 7505 return true; 7506 7507 /* No ASYM_PACKING if target cpu is already busy */ 7508 if (env->idle == CPU_NOT_IDLE) 7509 return true; 7510 /* 7511 * ASYM_PACKING needs to move all the work to the highest 7512 * priority CPUs in the group, therefore mark all groups 7513 * of lower priority than ourself as busy.
7514 */ 7515 if (sgs->sum_nr_running && 7516 sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) { 7517 if (!sds->busiest) 7518 return true; 7519 7520 /* Prefer to move from lowest priority cpu's work */ 7521 if (sched_asym_prefer(sds->busiest->asym_prefer_cpu, 7522 sg->asym_prefer_cpu)) 7523 return true; 7524 } 7525 7526 return false; 7527 } 7528 7529 #ifdef CONFIG_NUMA_BALANCING 7530 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 7531 { 7532 if (sgs->sum_nr_running > sgs->nr_numa_running) 7533 return regular; 7534 if (sgs->sum_nr_running > sgs->nr_preferred_running) 7535 return remote; 7536 return all; 7537 } 7538 7539 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 7540 { 7541 if (rq->nr_running > rq->nr_numa_running) 7542 return regular; 7543 if (rq->nr_running > rq->nr_preferred_running) 7544 return remote; 7545 return all; 7546 } 7547 #else 7548 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 7549 { 7550 return all; 7551 } 7552 7553 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 7554 { 7555 return regular; 7556 } 7557 #endif /* CONFIG_NUMA_BALANCING */ 7558 7559 /** 7560 * update_sd_lb_stats - Update sched_domain's statistics for load balancing. 7561 * @env: The load balancing environment. 7562 * @sds: variable to hold the statistics for this sched_domain. 7563 */ 7564 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) 7565 { 7566 struct sched_domain *child = env->sd->child; 7567 struct sched_group *sg = env->sd->groups; 7568 struct sg_lb_stats *local = &sds->local_stat; 7569 struct sg_lb_stats tmp_sgs; 7570 int load_idx, prefer_sibling = 0; 7571 bool overload = false; 7572 7573 if (child && child->flags & SD_PREFER_SIBLING) 7574 prefer_sibling = 1; 7575 7576 load_idx = get_sd_load_idx(env->sd, env->idle); 7577 7578 do { 7579 struct sg_lb_stats *sgs = &tmp_sgs; 7580 int local_group; 7581 7582 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); 7583 if (local_group) { 7584 sds->local = sg; 7585 sgs = local; 7586 7587 if (env->idle != CPU_NEWLY_IDLE || 7588 time_after_eq(jiffies, sg->sgc->next_update)) 7589 update_group_capacity(env->sd, env->dst_cpu); 7590 } 7591 7592 update_sg_lb_stats(env, sg, load_idx, local_group, sgs, 7593 &overload); 7594 7595 if (local_group) 7596 goto next_group; 7597 7598 /* 7599 * In case the child domain prefers tasks go to siblings 7600 * first, lower the sg capacity so that we'll try 7601 * and move all the excess tasks away. We lower the capacity 7602 * of a group only if the local group has the capacity to fit 7603 * these excess tasks. The extra check prevents the case where 7604 * you always pull from the heaviest group when it is already 7605 * under-utilized (possible with a large weight task outweighs 7606 * the tasks on the system). 
7607 */ 7608 if (prefer_sibling && sds->local && 7609 group_has_capacity(env, local) && 7610 (sgs->sum_nr_running > local->sum_nr_running + 1)) { 7611 sgs->group_no_capacity = 1; 7612 sgs->group_type = group_classify(sg, sgs); 7613 } 7614 7615 if (update_sd_pick_busiest(env, sds, sg, sgs)) { 7616 sds->busiest = sg; 7617 sds->busiest_stat = *sgs; 7618 } 7619 7620 next_group: 7621 /* Now, start updating sd_lb_stats */ 7622 sds->total_running += sgs->sum_nr_running; 7623 sds->total_load += sgs->group_load; 7624 sds->total_capacity += sgs->group_capacity; 7625 7626 sg = sg->next; 7627 } while (sg != env->sd->groups); 7628 7629 if (env->sd->flags & SD_NUMA) 7630 env->fbq_type = fbq_classify_group(&sds->busiest_stat); 7631 7632 if (!env->sd->parent) { 7633 /* update overload indicator if we are at root domain */ 7634 if (env->dst_rq->rd->overload != overload) 7635 env->dst_rq->rd->overload = overload; 7636 } 7637 } 7638 7639 /** 7640 * check_asym_packing - Check to see if the group is packed into the 7641 * sched domain. 7642 * 7643 * This is primarily intended to be used at the sibling level. Some 7644 * cores like POWER7 prefer to use lower numbered SMT threads. In the 7645 * case of POWER7, it can move to lower SMT modes only when higher 7646 * threads are idle. When in lower SMT modes, the threads will 7647 * perform better since they share less core resources. Hence when we 7648 * have idle threads, we want them to be the higher ones. 7649 * 7650 * This packing function is run on idle threads. It checks to see if 7651 * the busiest CPU in this domain (core in the P7 case) has a higher 7652 * CPU number than the packing function is being run on. Here we are 7653 * assuming a lower CPU number will be equivalent to a lower SMT thread 7654 * number. 7655 * 7656 * Return: 1 when packing is required and a task should be moved to 7657 * this CPU. The amount of the imbalance is returned in env->imbalance. 7658 * 7659 * @env: The load balancing environment. 7660 * @sds: Statistics of the sched_domain which is to be packed 7661 */ 7662 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) 7663 { 7664 int busiest_cpu; 7665 7666 if (!(env->sd->flags & SD_ASYM_PACKING)) 7667 return 0; 7668 7669 if (env->idle == CPU_NOT_IDLE) 7670 return 0; 7671 7672 if (!sds->busiest) 7673 return 0; 7674 7675 busiest_cpu = sds->busiest->asym_prefer_cpu; 7676 if (sched_asym_prefer(busiest_cpu, env->dst_cpu)) 7677 return 0; 7678 7679 env->imbalance = DIV_ROUND_CLOSEST( 7680 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity, 7681 SCHED_CAPACITY_SCALE); 7682 7683 return 1; 7684 } 7685 7686 /** 7687 * fix_small_imbalance - Calculate the minor imbalance that exists 7688 * amongst the groups of a sched_domain, during 7689 * load balancing. 7690 * @env: The load balancing environment. 7691 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
7692 */ 7693 static inline 7694 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 7695 { 7696 unsigned long tmp, capa_now = 0, capa_move = 0; 7697 unsigned int imbn = 2; 7698 unsigned long scaled_busy_load_per_task; 7699 struct sg_lb_stats *local, *busiest; 7700 7701 local = &sds->local_stat; 7702 busiest = &sds->busiest_stat; 7703 7704 if (!local->sum_nr_running) 7705 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu); 7706 else if (busiest->load_per_task > local->load_per_task) 7707 imbn = 1; 7708 7709 scaled_busy_load_per_task = 7710 (busiest->load_per_task * SCHED_CAPACITY_SCALE) / 7711 busiest->group_capacity; 7712 7713 if (busiest->avg_load + scaled_busy_load_per_task >= 7714 local->avg_load + (scaled_busy_load_per_task * imbn)) { 7715 env->imbalance = busiest->load_per_task; 7716 return; 7717 } 7718 7719 /* 7720 * OK, we don't have enough imbalance to justify moving tasks, 7721 * however we may be able to increase total CPU capacity used by 7722 * moving them. 7723 */ 7724 7725 capa_now += busiest->group_capacity * 7726 min(busiest->load_per_task, busiest->avg_load); 7727 capa_now += local->group_capacity * 7728 min(local->load_per_task, local->avg_load); 7729 capa_now /= SCHED_CAPACITY_SCALE; 7730 7731 /* Amount of load we'd subtract */ 7732 if (busiest->avg_load > scaled_busy_load_per_task) { 7733 capa_move += busiest->group_capacity * 7734 min(busiest->load_per_task, 7735 busiest->avg_load - scaled_busy_load_per_task); 7736 } 7737 7738 /* Amount of load we'd add */ 7739 if (busiest->avg_load * busiest->group_capacity < 7740 busiest->load_per_task * SCHED_CAPACITY_SCALE) { 7741 tmp = (busiest->avg_load * busiest->group_capacity) / 7742 local->group_capacity; 7743 } else { 7744 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / 7745 local->group_capacity; 7746 } 7747 capa_move += local->group_capacity * 7748 min(local->load_per_task, local->avg_load + tmp); 7749 capa_move /= SCHED_CAPACITY_SCALE; 7750 7751 /* Move if we gain throughput */ 7752 if (capa_move > capa_now) 7753 env->imbalance = busiest->load_per_task; 7754 } 7755 7756 /** 7757 * calculate_imbalance - Calculate the amount of imbalance present within the 7758 * groups of a given sched_domain during load balance. 7759 * @env: load balance environment 7760 * @sds: statistics of the sched_domain whose imbalance is to be calculated. 7761 */ 7762 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 7763 { 7764 unsigned long max_pull, load_above_capacity = ~0UL; 7765 struct sg_lb_stats *local, *busiest; 7766 7767 local = &sds->local_stat; 7768 busiest = &sds->busiest_stat; 7769 7770 if (busiest->group_type == group_imbalanced) { 7771 /* 7772 * In the group_imb case we cannot rely on group-wide averages 7773 * to ensure cpu-load equilibrium, look at wider averages. XXX 7774 */ 7775 busiest->load_per_task = 7776 min(busiest->load_per_task, sds->avg_load); 7777 } 7778 7779 /* 7780 * Avg load of busiest sg can be less and avg load of local sg can 7781 * be greater than avg load across all sgs of sd because avg load 7782 * factors in sg capacity and sgs with smaller group_type are 7783 * skipped when updating the busiest sg: 7784 */ 7785 if (busiest->avg_load <= sds->avg_load || 7786 local->avg_load >= sds->avg_load) { 7787 env->imbalance = 0; 7788 return fix_small_imbalance(env, sds); 7789 } 7790 7791 /* 7792 * If there aren't any idle cpus, avoid creating some. 
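* (Worked example for the clamp below, numbers assumed: 6 runnable tasks on a group of capacity 4096 give load_above_capacity = 6 * 1024 = 6144; the excess 6144 - 4096 = 2048, scaled by NICE_0_LOAD (1024) and divided by the capacity, becomes 2048 * 1024 / 4096 = 512 worth of load to pull away.)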
7793 */ 7794 if (busiest->group_type == group_overloaded && 7795 local->group_type == group_overloaded) { 7796 load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE; 7797 if (load_above_capacity > busiest->group_capacity) { 7798 load_above_capacity -= busiest->group_capacity; 7799 load_above_capacity *= scale_load_down(NICE_0_LOAD); 7800 load_above_capacity /= busiest->group_capacity; 7801 } else 7802 load_above_capacity = ~0UL; 7803 } 7804 7805 /* 7806 * We're trying to get all the cpus to the average_load, so we don't 7807 * want to push ourselves above the average load, nor do we wish to 7808 * reduce the max loaded cpu below the average load. At the same time, 7809 * we also don't want to reduce the group load below the group 7810 * capacity. Thus we look for the minimum possible imbalance. 7811 */ 7812 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity); 7813 7814 /* How much load to actually move to equalise the imbalance */ 7815 env->imbalance = min( 7816 max_pull * busiest->group_capacity, 7817 (sds->avg_load - local->avg_load) * local->group_capacity 7818 ) / SCHED_CAPACITY_SCALE; 7819 7820 /* 7821 * If *imbalance is less than the average load per runnable task, 7822 * there is no guarantee that any tasks will be moved, so we'll have 7823 * a think about bumping its value to force at least one task to be 7824 * moved. 7825 */ 7826 if (env->imbalance < busiest->load_per_task) 7827 return fix_small_imbalance(env, sds); 7828 } 7829 7830 /******* find_busiest_group() helpers end here *********************/ 7831 7832 /** 7833 * find_busiest_group - Returns the busiest group within the sched_domain 7834 * if there is an imbalance. 7835 * 7836 * Also calculates the amount of weighted load which should be moved 7837 * to restore balance. 7838 * 7839 * @env: The load balancing environment. 7840 * 7841 * Return: The busiest group if an imbalance exists. 7842 */ 7843 static struct sched_group *find_busiest_group(struct lb_env *env) 7844 { 7845 struct sg_lb_stats *local, *busiest; 7846 struct sd_lb_stats sds; 7847 7848 init_sd_lb_stats(&sds); 7849 7850 /* 7851 * Compute the various statistics relevant for load balancing at 7852 * this level. 7853 */ 7854 update_sd_lb_stats(env, &sds); 7855 local = &sds.local_stat; 7856 busiest = &sds.busiest_stat; 7857 7858 /* ASYM feature bypasses nice load balance check */ 7859 if (check_asym_packing(env, &sds)) 7860 return sds.busiest; 7861 7862 /* There is no busy sibling group to pull tasks from */ 7863 if (!sds.busiest || busiest->sum_nr_running == 0) 7864 goto out_balanced; 7865 7866 /* XXX broken for overlapping NUMA groups */ 7867 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load) 7868 / sds.total_capacity; 7869 7870 /* 7871 * If the busiest group is imbalanced the below checks don't 7872 * work because they assume all things are equal, which typically 7873 * isn't true due to cpus_allowed constraints and the like. 7874 */ 7875 if (busiest->group_type == group_imbalanced) 7876 goto force_balance; 7877 7878 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ 7879 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) && 7880 busiest->group_no_capacity) 7881 goto force_balance; 7882 7883 /* 7884 * If the local group is busier than the selected busiest group 7885 * don't try and pull any tasks. 7886 */ 7887 if (local->avg_load >= busiest->avg_load) 7888 goto out_balanced; 7889 7890 /* 7891 * Don't pull any tasks if this group is already above the domain 7892 * average load.
7893 */ 7894 if (local->avg_load >= sds.avg_load) 7895 goto out_balanced; 7896 7897 if (env->idle == CPU_IDLE) { 7898 /* 7899 * This cpu is idle. If the busiest group is not overloaded 7900 * and there is no imbalance between this and the busiest group 7901 * wrt idle cpus, it is balanced. The imbalance becomes 7902 * significant if the diff is greater than 1; otherwise we 7903 * might end up just moving the imbalance to another group. 7904 */ 7905 if ((busiest->group_type != group_overloaded) && 7906 (local->idle_cpus <= (busiest->idle_cpus + 1))) 7907 goto out_balanced; 7908 } else { 7909 /* 7910 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use 7911 * imbalance_pct to be conservative. 7912 */ 7913 if (100 * busiest->avg_load <= 7914 env->sd->imbalance_pct * local->avg_load) 7915 goto out_balanced; 7916 } 7917 7918 force_balance: 7919 /* Looks like there is an imbalance. Compute it */ 7920 calculate_imbalance(env, &sds); 7921 return sds.busiest; 7922 7923 out_balanced: 7924 env->imbalance = 0; 7925 return NULL; 7926 } 7927 7928 /* 7929 * find_busiest_queue - find the busiest runqueue among the cpus in the group. 7930 */ 7931 static struct rq *find_busiest_queue(struct lb_env *env, 7932 struct sched_group *group) 7933 { 7934 struct rq *busiest = NULL, *rq; 7935 unsigned long busiest_load = 0, busiest_capacity = 1; 7936 int i; 7937 7938 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 7939 unsigned long capacity, wl; 7940 enum fbq_type rt; 7941 7942 rq = cpu_rq(i); 7943 rt = fbq_classify_rq(rq); 7944 7945 /* 7946 * We classify groups/runqueues into three groups: 7947 * - regular: there are !numa tasks 7948 * - remote: there are numa tasks that run on the 'wrong' node 7949 * - all: there is no distinction 7950 * 7951 * In order to avoid migrating ideally placed numa tasks, 7952 * ignore those when there are better options. 7953 * 7954 * If we ignore the actual busiest queue to migrate another 7955 * task, the next balance pass can still reduce the busiest 7956 * queue by moving tasks around inside the node. 7957 * 7958 * If we cannot move enough load due to this classification, 7959 * the next pass will adjust the group classification and 7960 * allow migration of more tasks. 7961 * 7962 * Both cases only affect the total convergence complexity. 7963 */ 7964 if (rt > env->fbq_type) 7965 continue; 7966 7967 capacity = capacity_of(i); 7968 7969 wl = weighted_cpuload(rq); 7970 7971 /* 7972 * When comparing with the imbalance, use weighted_cpuload(), 7973 * which is not scaled with the cpu capacity. 7974 */ 7975 7976 if (rq->nr_running == 1 && wl > env->imbalance && 7977 !check_cpu_capacity(rq, env->sd)) 7978 continue; 7979 7980 /* 7981 * For the load comparisons with the other CPUs, consider 7982 * the weighted_cpuload() scaled with the cpu capacity, so 7983 * that the load can be moved away from the cpu that is 7984 * potentially running at a lower capacity. 7985 * 7986 * Thus we're looking for max(wl_i / capacity_i); crosswise 7987 * multiplication to rid ourselves of the division works out 7988 * to: wl_i * capacity_j > wl_j * capacity_i, where j is 7989 * our previous maximum. 7990 */ 7991 if (wl * busiest_capacity > busiest_load * capacity) { 7992 busiest_load = wl; 7993 busiest_capacity = capacity; 7994 busiest = rq; 7995 } 7996 } 7997 7998 return busiest; 7999 } 8000 8001 /* 8002 * Max backoff if we encounter pinned tasks. Pretty arbitrary value; anything 8003 * works so long as it is large enough.
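 * (A guarded sketch of the division-free scan above follows; arrays and
 * numbers are the editor's, not kernel data.)
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long wl[4]  = {  800, 600,  900, 300 };	/* weighted load */
	unsigned long cap[4] = { 1024, 512, 1024, 512 };	/* cpu capacity  */
	unsigned long busiest_load = 0, busiest_capacity = 1;
	int i, busiest = -1;

	for (i = 0; i < 4; i++) {
		/* same as: wl[i]/cap[i] > busiest_load/busiest_capacity */
		if (wl[i] * busiest_capacity > busiest_load * cap[i]) {
			busiest_load = wl[i];
			busiest_capacity = cap[i];
			busiest = i;
		}
	}
	/* cpu 1 wins: 600/512 is a higher ratio than 800/1024 or 900/1024 */
	printf("busiest cpu: %d\n", busiest);
	return 0;
}
#endif
/*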
8004 */ 8005 #define MAX_PINNED_INTERVAL 512 8006 8007 static int need_active_balance(struct lb_env *env) 8008 { 8009 struct sched_domain *sd = env->sd; 8010 8011 if (env->idle == CPU_NEWLY_IDLE) { 8012 8013 /* 8014 * ASYM_PACKING needs to force migrate tasks from busy but 8015 * lower priority CPUs in order to pack all tasks in the 8016 * highest priority CPUs. 8017 */ 8018 if ((sd->flags & SD_ASYM_PACKING) && 8019 sched_asym_prefer(env->dst_cpu, env->src_cpu)) 8020 return 1; 8021 } 8022 8023 /* 8024 * The dst_cpu is idle and the src_cpu has only 1 CFS task. 8025 * It's worth migrating the task if the src_cpu's capacity is reduced 8026 * because of other sched_class tasks or IRQs, while more capacity 8027 * stays available on dst_cpu. 8028 */ 8029 if ((env->idle != CPU_NOT_IDLE) && 8030 (env->src_rq->cfs.h_nr_running == 1)) { 8031 if ((check_cpu_capacity(env->src_rq, sd)) && 8032 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) 8033 return 1; 8034 } 8035 8036 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); 8037 } 8038 8039 static int active_load_balance_cpu_stop(void *data); 8040 8041 static int should_we_balance(struct lb_env *env) 8042 { 8043 struct sched_group *sg = env->sd->groups; 8044 int cpu, balance_cpu = -1; 8045 8046 /* 8047 * Ensure the balancing environment is consistent; can happen 8048 * when the softirq triggers 'during' hotplug. 8049 */ 8050 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) 8051 return 0; 8052 8053 /* 8054 * In the newly idle case, we will allow all the CPUs 8055 * to do the newly idle load balance. 8056 */ 8057 if (env->idle == CPU_NEWLY_IDLE) 8058 return 1; 8059 8060 /* Try to find the first idle cpu */ 8061 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { 8062 if (!idle_cpu(cpu)) 8063 continue; 8064 8065 balance_cpu = cpu; 8066 break; 8067 } 8068 8069 if (balance_cpu == -1) 8070 balance_cpu = group_balance_cpu(sg); 8071 8072 /* 8073 * The first idle CPU, or the first CPU (busiest) in this sched group, 8074 * is eligible for doing load balancing at this and above domains. 8075 */ 8076 return balance_cpu == env->dst_cpu; 8077 } 8078 8079 /* 8080 * Check this_cpu to ensure it is balanced within domain. Attempt to move 8081 * tasks if there is an imbalance.
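 * (First, a guarded sketch of the reduced-capacity test in
 * need_active_balance() above; the numbers, including the example
 * imbalance_pct of 117, are the editor's.)
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long src_cap = 700;		/* squeezed by RT/IRQ pressure */
	unsigned long dst_cap = 1024;		/* fully available */
	unsigned int imbalance_pct = 117;	/* ~17% required headroom */

	if (src_cap * imbalance_pct < dst_cap * 100)
		printf("worth an active migration (%lu < %lu)\n",
		       src_cap * imbalance_pct, dst_cap * 100UL);
	return 0;
}
#endif
/*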
8082 */ 8083 static int load_balance(int this_cpu, struct rq *this_rq, 8084 struct sched_domain *sd, enum cpu_idle_type idle, 8085 int *continue_balancing) 8086 { 8087 int ld_moved, cur_ld_moved, active_balance = 0; 8088 struct sched_domain *sd_parent = sd->parent; 8089 struct sched_group *group; 8090 struct rq *busiest; 8091 struct rq_flags rf; 8092 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); 8093 8094 struct lb_env env = { 8095 .sd = sd, 8096 .dst_cpu = this_cpu, 8097 .dst_rq = this_rq, 8098 .dst_grpmask = sched_group_span(sd->groups), 8099 .idle = idle, 8100 .loop_break = sched_nr_migrate_break, 8101 .cpus = cpus, 8102 .fbq_type = all, 8103 .tasks = LIST_HEAD_INIT(env.tasks), 8104 }; 8105 8106 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); 8107 8108 schedstat_inc(sd->lb_count[idle]); 8109 8110 redo: 8111 if (!should_we_balance(&env)) { 8112 *continue_balancing = 0; 8113 goto out_balanced; 8114 } 8115 8116 group = find_busiest_group(&env); 8117 if (!group) { 8118 schedstat_inc(sd->lb_nobusyg[idle]); 8119 goto out_balanced; 8120 } 8121 8122 busiest = find_busiest_queue(&env, group); 8123 if (!busiest) { 8124 schedstat_inc(sd->lb_nobusyq[idle]); 8125 goto out_balanced; 8126 } 8127 8128 BUG_ON(busiest == env.dst_rq); 8129 8130 schedstat_add(sd->lb_imbalance[idle], env.imbalance); 8131 8132 env.src_cpu = busiest->cpu; 8133 env.src_rq = busiest; 8134 8135 ld_moved = 0; 8136 if (busiest->nr_running > 1) { 8137 /* 8138 * Attempt to move tasks. If find_busiest_group has found 8139 * an imbalance but busiest->nr_running <= 1, the group is 8140 * still unbalanced. ld_moved simply stays zero, so it is 8141 * correctly treated as an imbalance. 8142 */ 8143 env.flags |= LBF_ALL_PINNED; 8144 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); 8145 8146 more_balance: 8147 rq_lock_irqsave(busiest, &rf); 8148 update_rq_clock(busiest); 8149 8150 /* 8151 * cur_ld_moved - load moved in current iteration 8152 * ld_moved - cumulative load moved across iterations 8153 */ 8154 cur_ld_moved = detach_tasks(&env); 8155 8156 /* 8157 * We've detached some tasks from busiest_rq. Every 8158 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely 8159 * unlock busiest->lock and be sure 8160 * that nobody can manipulate the tasks in parallel. 8161 * See the task_rq_lock() family for the details. 8162 */ 8163 8164 rq_unlock(busiest, &rf); 8165 8166 if (cur_ld_moved) { 8167 attach_tasks(&env); 8168 ld_moved += cur_ld_moved; 8169 } 8170 8171 local_irq_restore(rf.flags); 8172 8173 if (env.flags & LBF_NEED_BREAK) { 8174 env.flags &= ~LBF_NEED_BREAK; 8175 goto more_balance; 8176 } 8177 8178 /* 8179 * Revisit (affine) tasks on src_cpu that couldn't be moved to 8180 * us and move them to an alternate dst_cpu in our sched_group 8181 * where they can run. The upper limit on how many times we 8182 * iterate on the same src_cpu depends on the number of CPUs in our 8183 * sched_group. 8184 * 8185 * This changes load balance semantics a bit on who can move 8186 * load to a given_cpu. In addition to the given_cpu itself 8187 * (or an ilb_cpu acting on its behalf where given_cpu is 8188 * nohz-idle), we now have balance_cpu in a position to move 8189 * load to given_cpu. In rare situations, this may cause 8190 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding 8191 * _independently_ and at the _same_ time to move some load to 8192 * given_cpu) causing excess load to be moved to given_cpu.
This, however, should not happen often in practice, and 8194 * subsequent load balance cycles should correct the 8195 * excess load moved. 8196 */ 8197 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { 8198 8199 /* Prevent re-selecting dst_cpu via env's cpus */ 8200 cpumask_clear_cpu(env.dst_cpu, env.cpus); 8201 8202 env.dst_rq = cpu_rq(env.new_dst_cpu); 8203 env.dst_cpu = env.new_dst_cpu; 8204 env.flags &= ~LBF_DST_PINNED; 8205 env.loop = 0; 8206 env.loop_break = sched_nr_migrate_break; 8207 8208 /* 8209 * Go back to "more_balance" rather than "redo" since we 8210 * need to continue with the same src_cpu. 8211 */ 8212 goto more_balance; 8213 } 8214 8215 /* 8216 * We failed to reach balance because of affinity. 8217 */ 8218 if (sd_parent) { 8219 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 8220 8221 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) 8222 *group_imbalance = 1; 8223 } 8224 8225 /* All tasks on this runqueue were pinned by CPU affinity */ 8226 if (unlikely(env.flags & LBF_ALL_PINNED)) { 8227 cpumask_clear_cpu(cpu_of(busiest), cpus); 8228 /* 8229 * Attempting to continue load balancing at the current 8230 * sched_domain level only makes sense if there are 8231 * active CPUs remaining as possible busiest CPUs to 8232 * pull load from which are not contained within the 8233 * destination group that is receiving any migrated 8234 * load. 8235 */ 8236 if (!cpumask_subset(cpus, env.dst_grpmask)) { 8237 env.loop = 0; 8238 env.loop_break = sched_nr_migrate_break; 8239 goto redo; 8240 } 8241 goto out_all_pinned; 8242 } 8243 } 8244 8245 if (!ld_moved) { 8246 schedstat_inc(sd->lb_failed[idle]); 8247 /* 8248 * Increment the failure counter only on periodic balance. 8249 * We do not want newidle balance, which can be very 8250 * frequent, to pollute the failure counter, causing 8251 * excessive cache_hot migrations and active balances. 8252 */ 8253 if (idle != CPU_NEWLY_IDLE) 8254 sd->nr_balance_failed++; 8255 8256 if (need_active_balance(&env)) { 8257 unsigned long flags; 8258 8259 raw_spin_lock_irqsave(&busiest->lock, flags); 8260 8261 /* Don't kick the active_load_balance_cpu_stop 8262 * if the curr task on the busiest cpu can't be 8263 * moved to this_cpu. 8264 */ 8265 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { 8266 raw_spin_unlock_irqrestore(&busiest->lock, 8267 flags); 8268 env.flags |= LBF_ALL_PINNED; 8269 goto out_one_pinned; 8270 } 8271 8272 /* 8273 * ->active_balance synchronizes accesses to 8274 * ->active_balance_work. Once set, it's cleared 8275 * only after active load balance is finished. 8276 */ 8277 if (!busiest->active_balance) { 8278 busiest->active_balance = 1; 8279 busiest->push_cpu = this_cpu; 8280 active_balance = 1; 8281 } 8282 raw_spin_unlock_irqrestore(&busiest->lock, flags); 8283 8284 if (active_balance) { 8285 stop_one_cpu_nowait(cpu_of(busiest), 8286 active_load_balance_cpu_stop, busiest, 8287 &busiest->active_balance_work); 8288 } 8289 8290 /* We've kicked active balancing, force task migration. */ 8291 sd->nr_balance_failed = sd->cache_nice_tries+1; 8292 } 8293 } else 8294 sd->nr_balance_failed = 0; 8295 8296 if (likely(!active_balance)) { 8297 /* We were unbalanced, so reset the balancing interval */ 8298 sd->balance_interval = sd->min_interval; 8299 } else { 8300 /* 8301 * If we've begun active balancing, start to back off. This 8302 * case may not be covered by the all_pinned logic if there 8303 * is only 1 task on the busy runqueue (because we don't call 8304 * detach_tasks).
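 * (A guarded sketch of this exponential backoff follows; the intervals
 * are made-up milliseconds, not the domain's real min/max values.)
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long interval = 8, max_interval = 128;	/* ms, made up */
	int attempt;

	/* prints 8, 16, 32, 64, 128 and then stays clamped at 128 */
	for (attempt = 0; attempt < 8; attempt++) {
		printf("attempt %d: interval %lums\n", attempt, interval);
		if (interval < max_interval)
			interval *= 2;
	}
	return 0;
}
#endif
/*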
8305 */ 8306 if (sd->balance_interval < sd->max_interval) 8307 sd->balance_interval *= 2; 8308 } 8309 8310 goto out; 8311 8312 out_balanced: 8313 /* 8314 * We reach balance although we may have faced some affinity 8315 * constraints. Clear the imbalance flag if it was set. 8316 */ 8317 if (sd_parent) { 8318 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 8319 8320 if (*group_imbalance) 8321 *group_imbalance = 0; 8322 } 8323 8324 out_all_pinned: 8325 /* 8326 * We reach balance because all tasks are pinned at this level, so 8327 * we can't migrate them. Leave the imbalance flag set so the parent 8328 * level can try to migrate them. 8329 */ 8330 schedstat_inc(sd->lb_balanced[idle]); 8331 8332 sd->nr_balance_failed = 0; 8333 8334 out_one_pinned: 8335 /* tune up the balancing interval */ 8336 if (((env.flags & LBF_ALL_PINNED) && 8337 sd->balance_interval < MAX_PINNED_INTERVAL) || 8338 (sd->balance_interval < sd->max_interval)) 8339 sd->balance_interval *= 2; 8340 8341 ld_moved = 0; 8342 out: 8343 return ld_moved; 8344 } 8345 8346 static inline unsigned long 8347 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) 8348 { 8349 unsigned long interval = sd->balance_interval; 8350 8351 if (cpu_busy) 8352 interval *= sd->busy_factor; 8353 8354 /* scale ms to jiffies */ 8355 interval = msecs_to_jiffies(interval); 8356 interval = clamp(interval, 1UL, max_load_balance_interval); 8357 8358 return interval; 8359 } 8360 8361 static inline void 8362 update_next_balance(struct sched_domain *sd, unsigned long *next_balance) 8363 { 8364 unsigned long interval, next; 8365 8366 /* used by idle balance, so cpu_busy = 0 */ 8367 interval = get_sd_balance_interval(sd, 0); 8368 next = sd->last_balance + interval; 8369 8370 if (time_after(*next_balance, next)) 8371 *next_balance = next; 8372 } 8373 8374 /* 8375 * idle_balance is called by schedule() if this_cpu is about to become 8376 * idle. Attempts to pull tasks from other CPUs. 8377 */ 8378 static int idle_balance(struct rq *this_rq, struct rq_flags *rf) 8379 { 8380 unsigned long next_balance = jiffies + HZ; 8381 int this_cpu = this_rq->cpu; 8382 struct sched_domain *sd; 8383 int pulled_task = 0; 8384 u64 curr_cost = 0; 8385 8386 /* 8387 * We must set idle_stamp _before_ calling idle_balance(), such that we 8388 * measure the duration of idle_balance() as idle time. 8389 */ 8390 this_rq->idle_stamp = rq_clock(this_rq); 8391 8392 /* 8393 * Do not pull tasks towards !active CPUs... 8394 */ 8395 if (!cpu_active(this_cpu)) 8396 return 0; 8397 8398 /* 8399 * This is OK because current is on_cpu, which avoids it being picked 8400 * for load-balance; preemption/IRQs are still disabled, avoiding 8401 * further scheduler activity on it, and we're being very careful to 8402 * re-start the picking loop.
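 * (Below, guarded out: the editor's userspace rendering of
 * get_sd_balance_interval() above; DEMO_HZ, the busy_factor of 32 and the
 * clamp ceiling are made-up stand-ins for msecs_to_jiffies() and
 * max_load_balance_interval.)
 */
#if 0
#include <stdio.h>

#define DEMO_HZ 250UL

static unsigned long demo_interval(unsigned long interval_ms, int cpu_busy,
				   unsigned long busy_factor,
				   unsigned long max_jiffies)
{
	unsigned long j;

	if (cpu_busy)
		interval_ms *= busy_factor;

	j = interval_ms * DEMO_HZ / 1000;	/* crude msecs_to_jiffies() */
	if (j < 1)
		j = 1;
	if (j > max_jiffies)
		j = max_jiffies;
	return j;
}

int main(void)
{
	printf("idle: %lu jiffies\n", demo_interval(8, 0, 32, 100)); /* 2  */
	printf("busy: %lu jiffies\n", demo_interval(8, 1, 32, 100)); /* 64 */
	return 0;
}
#endif
/*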
8403 */ 8404 rq_unpin_lock(this_rq, rf); 8405 8406 if (this_rq->avg_idle < sysctl_sched_migration_cost || 8407 !this_rq->rd->overload) { 8408 rcu_read_lock(); 8409 sd = rcu_dereference_check_sched_domain(this_rq->sd); 8410 if (sd) 8411 update_next_balance(sd, &next_balance); 8412 rcu_read_unlock(); 8413 8414 goto out; 8415 } 8416 8417 raw_spin_unlock(&this_rq->lock); 8418 8419 update_blocked_averages(this_cpu); 8420 rcu_read_lock(); 8421 for_each_domain(this_cpu, sd) { 8422 int continue_balancing = 1; 8423 u64 t0, domain_cost; 8424 8425 if (!(sd->flags & SD_LOAD_BALANCE)) 8426 continue; 8427 8428 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { 8429 update_next_balance(sd, &next_balance); 8430 break; 8431 } 8432 8433 if (sd->flags & SD_BALANCE_NEWIDLE) { 8434 t0 = sched_clock_cpu(this_cpu); 8435 8436 pulled_task = load_balance(this_cpu, this_rq, 8437 sd, CPU_NEWLY_IDLE, 8438 &continue_balancing); 8439 8440 domain_cost = sched_clock_cpu(this_cpu) - t0; 8441 if (domain_cost > sd->max_newidle_lb_cost) 8442 sd->max_newidle_lb_cost = domain_cost; 8443 8444 curr_cost += domain_cost; 8445 } 8446 8447 update_next_balance(sd, &next_balance); 8448 8449 /* 8450 * Stop searching for tasks to pull if there are 8451 * now runnable tasks on this rq. 8452 */ 8453 if (pulled_task || this_rq->nr_running > 0) 8454 break; 8455 } 8456 rcu_read_unlock(); 8457 8458 raw_spin_lock(&this_rq->lock); 8459 8460 if (curr_cost > this_rq->max_idle_balance_cost) 8461 this_rq->max_idle_balance_cost = curr_cost; 8462 8463 /* 8464 * While browsing the domains we released the rq lock; a task could 8465 * have been enqueued in the meantime. Since we're not going idle, 8466 * pretend we pulled a task. 8467 */ 8468 if (this_rq->cfs.h_nr_running && !pulled_task) 8469 pulled_task = 1; 8470 8471 out: 8472 /* Move the next balance forward */ 8473 if (time_after(this_rq->next_balance, next_balance)) 8474 this_rq->next_balance = next_balance; 8475 8476 /* Is there a task of a higher priority class? */ 8477 if (this_rq->nr_running != this_rq->cfs.h_nr_running) 8478 pulled_task = -1; 8479 8480 if (pulled_task) 8481 this_rq->idle_stamp = 0; 8482 8483 rq_repin_lock(this_rq, rf); 8484 8485 return pulled_task; 8486 } 8487 8488 /* 8489 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes 8490 * running tasks off the busiest CPU onto idle CPUs. It requires at 8491 * least 1 task to be running on each physical CPU where possible, and 8492 * avoids physical / logical imbalances. 8493 */ 8494 static int active_load_balance_cpu_stop(void *data) 8495 { 8496 struct rq *busiest_rq = data; 8497 int busiest_cpu = cpu_of(busiest_rq); 8498 int target_cpu = busiest_rq->push_cpu; 8499 struct rq *target_rq = cpu_rq(target_cpu); 8500 struct sched_domain *sd; 8501 struct task_struct *p = NULL; 8502 struct rq_flags rf; 8503 8504 rq_lock_irq(busiest_rq, &rf); 8505 /* 8506 * Between queueing the stop-work and running it there is a hole in 8507 * which CPUs can become inactive. We should not move tasks from or to 8508 * inactive CPUs. 8509 */ 8510 if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) 8511 goto out_unlock; 8512 8513 /* make sure the requested cpu hasn't gone down in the meantime */ 8514 if (unlikely(busiest_cpu != smp_processor_id() || 8515 !busiest_rq->active_balance)) 8516 goto out_unlock; 8517 8518 /* Is there any task to move? */ 8519 if (busiest_rq->nr_running <= 1) 8520 goto out_unlock; 8521 8522 /* 8523 * This condition is "impossible"; if it occurs 8524 we need to fix it.
Originally reported by 8525 * Bjorn Helgaas on a 128-cpu setup. 8526 */ 8527 BUG_ON(busiest_rq == target_rq); 8528 8529 /* Search for an sd spanning us and the target CPU. */ 8530 rcu_read_lock(); 8531 for_each_domain(target_cpu, sd) { 8532 if ((sd->flags & SD_LOAD_BALANCE) && 8533 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) 8534 break; 8535 } 8536 8537 if (likely(sd)) { 8538 struct lb_env env = { 8539 .sd = sd, 8540 .dst_cpu = target_cpu, 8541 .dst_rq = target_rq, 8542 .src_cpu = busiest_rq->cpu, 8543 .src_rq = busiest_rq, 8544 .idle = CPU_IDLE, 8545 /* 8546 * can_migrate_task() doesn't need to compute new_dst_cpu 8547 * for active balancing. Since we have CPU_IDLE but no 8548 * @dst_grpmask, we make that test go away by lying 8549 * about DST_PINNED. 8550 */ 8551 .flags = LBF_DST_PINNED, 8552 }; 8553 8554 schedstat_inc(sd->alb_count); 8555 update_rq_clock(busiest_rq); 8556 8557 p = detach_one_task(&env); 8558 if (p) { 8559 schedstat_inc(sd->alb_pushed); 8560 /* Active balancing done, reset the failure counter. */ 8561 sd->nr_balance_failed = 0; 8562 } else { 8563 schedstat_inc(sd->alb_failed); 8564 } 8565 } 8566 rcu_read_unlock(); 8567 out_unlock: 8568 busiest_rq->active_balance = 0; 8569 rq_unlock(busiest_rq, &rf); 8570 8571 if (p) 8572 attach_one_task(target_rq, p); 8573 8574 local_irq_enable(); 8575 8576 return 0; 8577 } 8578 8579 static inline int on_null_domain(struct rq *rq) 8580 { 8581 return unlikely(!rcu_dereference_sched(rq->sd)); 8582 } 8583 8584 #ifdef CONFIG_NO_HZ_COMMON 8585 /* 8586 * idle load balancing details 8587 * - When one of the busy CPUs notices that idle rebalancing may be 8588 * needed, it kicks the idle load balancer, which then does idle 8589 * load balancing for all the idle CPUs. 8590 */ 8591 static struct { 8592 cpumask_var_t idle_cpus_mask; 8593 atomic_t nr_cpus; 8594 unsigned long next_balance; /* in jiffy units */ 8595 } nohz ____cacheline_aligned; 8596 8597 static inline int find_new_ilb(void) 8598 { 8599 int ilb = cpumask_first(nohz.idle_cpus_mask); 8600 8601 if (ilb < nr_cpu_ids && idle_cpu(ilb)) 8602 return ilb; 8603 8604 return nr_cpu_ids; 8605 } 8606 8607 /* 8608 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the 8609 * nohz_load_balancer CPU (if there is one); otherwise we fall back to any 8610 * idle CPU (if there is one). 8611 */ 8612 static void nohz_balancer_kick(void) 8613 { 8614 int ilb_cpu; 8615 8616 nohz.next_balance++; 8617 8618 ilb_cpu = find_new_ilb(); 8619 8620 if (ilb_cpu >= nr_cpu_ids) 8621 return; 8622 8623 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu))) 8624 return; 8625 /* 8626 * Use smp_send_reschedule() instead of resched_cpu(). 8627 * This way we generate a sched IPI on the target cpu, which 8628 * is idle. And the softirq performing nohz idle load balance 8629 * will be run before returning from the IPI. 8630 */ 8631 smp_send_reschedule(ilb_cpu); 8632 return; 8633 } 8634 8635 void nohz_balance_exit_idle(unsigned int cpu) 8636 { 8637 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { 8638 /* 8639 * Completely isolated CPUs don't ever set NOHZ_TICK_STOPPED, so we must test.
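 * (A guarded sketch of the find_new_ilb() selection above follows; the
 * bitmask stands in for nohz.idle_cpus_mask and the idle_cpu() re-check
 * is omitted.)
 */
#if 0
#include <stdio.h>

#define DEMO_NR_CPUS 8

static int demo_find_new_ilb(unsigned int idle_mask)
{
	int cpu;

	for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		if (idle_mask & (1u << cpu))
			return cpu;		/* first idle CPU */
	return DEMO_NR_CPUS;			/* none: caller bails out */
}

int main(void)
{
	printf("ilb cpu: %d\n", demo_find_new_ilb(0x0c));	/* -> 2 */
	printf("ilb cpu: %d\n", demo_find_new_ilb(0x00));	/* -> 8 */
	return 0;
}
#endif
/*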
8640 */ 8641 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) { 8642 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); 8643 atomic_dec(&nohz.nr_cpus); 8644 } 8645 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 8646 } 8647 } 8648 8649 static inline void set_cpu_sd_state_busy(void) 8650 { 8651 struct sched_domain *sd; 8652 int cpu = smp_processor_id(); 8653 8654 rcu_read_lock(); 8655 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 8656 8657 if (!sd || !sd->nohz_idle) 8658 goto unlock; 8659 sd->nohz_idle = 0; 8660 8661 atomic_inc(&sd->shared->nr_busy_cpus); 8662 unlock: 8663 rcu_read_unlock(); 8664 } 8665 8666 void set_cpu_sd_state_idle(void) 8667 { 8668 struct sched_domain *sd; 8669 int cpu = smp_processor_id(); 8670 8671 rcu_read_lock(); 8672 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 8673 8674 if (!sd || sd->nohz_idle) 8675 goto unlock; 8676 sd->nohz_idle = 1; 8677 8678 atomic_dec(&sd->shared->nr_busy_cpus); 8679 unlock: 8680 rcu_read_unlock(); 8681 } 8682 8683 /* 8684 * This routine will record that the cpu is going idle with tick stopped. 8685 * This info will be used in performing idle load balancing in the future. 8686 */ 8687 void nohz_balance_enter_idle(int cpu) 8688 { 8689 /* 8690 * If this cpu is going down, then nothing needs to be done. 8691 */ 8692 if (!cpu_active(cpu)) 8693 return; 8694 8695 /* Spare idle load balancing on CPUs that don't want to be disturbed: */ 8696 if (!is_housekeeping_cpu(cpu)) 8697 return; 8698 8699 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) 8700 return; 8701 8702 /* 8703 * If we're a completely isolated CPU, we don't play. 8704 */ 8705 if (on_null_domain(cpu_rq(cpu))) 8706 return; 8707 8708 cpumask_set_cpu(cpu, nohz.idle_cpus_mask); 8709 atomic_inc(&nohz.nr_cpus); 8710 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 8711 } 8712 #endif 8713 8714 static DEFINE_SPINLOCK(balancing); 8715 8716 /* 8717 * Scale the max load_balance interval with the number of CPUs in the system. 8718 * This trades load-balance latency on larger machines for less cross talk. 8719 */ 8720 void update_max_interval(void) 8721 { 8722 max_load_balance_interval = HZ*num_online_cpus()/10; 8723 } 8724 8725 /* 8726 * It checks each scheduling domain to see if it is due to be balanced, 8727 * and initiates a balancing operation if so. 8728 * 8729 * Balancing parameters are set up in init_sched_domains. 8730 */ 8731 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) 8732 { 8733 int continue_balancing = 1; 8734 int cpu = rq->cpu; 8735 unsigned long interval; 8736 struct sched_domain *sd; 8737 /* Earliest time when we have to do rebalance again */ 8738 unsigned long next_balance = jiffies + 60*HZ; 8739 int update_next_balance = 0; 8740 int need_serialize, need_decay = 0; 8741 u64 max_cost = 0; 8742 8743 update_blocked_averages(cpu); 8744 8745 rcu_read_lock(); 8746 for_each_domain(cpu, sd) { 8747 /* 8748 * Decay the newidle max times here because this is a regular 8749 * visit to all the domains. Decay ~1% per second. 8750 */ 8751 if (time_after(jiffies, sd->next_decay_max_lb_cost)) { 8752 sd->max_newidle_lb_cost = 8753 (sd->max_newidle_lb_cost * 253) / 256; 8754 sd->next_decay_max_lb_cost = jiffies + HZ; 8755 need_decay = 1; 8756 } 8757 max_cost += sd->max_newidle_lb_cost; 8758 8759 if (!(sd->flags & SD_LOAD_BALANCE)) 8760 continue; 8761 8762 /* 8763 * Stop the load balance at this level. There is another 8764 * CPU in our sched group which is doing load balancing more 8765 * actively. 
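 * (Below, guarded out: the editor's arithmetic check of the ~1% per second
 * decay applied to max_newidle_lb_cost above; the starting cost is made up.)
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long cost = 1000000;	/* ns, made up */
	int sec;

	for (sec = 1; sec <= 60; sec++)
		cost = cost * 253 / 256;	/* loses ~1.2% per step */

	/* roughly halves in a minute: ~493000 */
	printf("after 60s: %llu ns\n", cost);
	return 0;
}
#endif
/*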
8766 */ 8767 if (!continue_balancing) { 8768 if (need_decay) 8769 continue; 8770 break; 8771 } 8772 8773 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); 8774 8775 need_serialize = sd->flags & SD_SERIALIZE; 8776 if (need_serialize) { 8777 if (!spin_trylock(&balancing)) 8778 goto out; 8779 } 8780 8781 if (time_after_eq(jiffies, sd->last_balance + interval)) { 8782 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { 8783 /* 8784 * The LBF_DST_PINNED logic could have changed 8785 * env->dst_cpu, so we can't know our idle 8786 * state even if we migrated tasks. Update it. 8787 */ 8788 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; 8789 } 8790 sd->last_balance = jiffies; 8791 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); 8792 } 8793 if (need_serialize) 8794 spin_unlock(&balancing); 8795 out: 8796 if (time_after(next_balance, sd->last_balance + interval)) { 8797 next_balance = sd->last_balance + interval; 8798 update_next_balance = 1; 8799 } 8800 } 8801 if (need_decay) { 8802 /* 8803 * Ensure the rq-wide value also decays but keep it at a 8804 * reasonable floor to avoid funnies with rq->avg_idle. 8805 */ 8806 rq->max_idle_balance_cost = 8807 max((u64)sysctl_sched_migration_cost, max_cost); 8808 } 8809 rcu_read_unlock(); 8810 8811 /* 8812 * next_balance will be updated only when there is a need. 8813 * When the cpu is attached to a null domain, for example, it will not 8814 * be updated. 8815 */ 8816 if (likely(update_next_balance)) { 8817 rq->next_balance = next_balance; 8818 8819 #ifdef CONFIG_NO_HZ_COMMON 8820 /* 8821 * If this CPU has been elected to perform the nohz idle 8822 * balance, the other idle CPUs have already rebalanced with 8823 * nohz_idle_balance() and nohz.next_balance has been 8824 * updated accordingly. This CPU is now running the idle load 8825 * balance for itself, so we need to update 8826 * nohz.next_balance accordingly. 8827 */ 8828 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) 8829 nohz.next_balance = rq->next_balance; 8830 #endif 8831 } 8832 } 8833 8834 #ifdef CONFIG_NO_HZ_COMMON 8835 /* 8836 * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the 8837 * rebalancing for all the cpus whose scheduler ticks are stopped. 8838 */ 8839 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) 8840 { 8841 int this_cpu = this_rq->cpu; 8842 struct rq *rq; 8843 int balance_cpu; 8844 /* Earliest time when we have to do rebalance again */ 8845 unsigned long next_balance = jiffies + 60*HZ; 8846 int update_next_balance = 0; 8847 8848 if (idle != CPU_IDLE || 8849 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu))) 8850 goto end; 8851 8852 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { 8853 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) 8854 continue; 8855 8856 /* 8857 * If this cpu gets work to do, stop the load balancing 8858 * work being done for other cpus. The next load 8859 * balancing owner will pick it up. 8860 */ 8861 if (need_resched()) 8862 break; 8863 8864 rq = cpu_rq(balance_cpu); 8865 8866 /* 8867 * If the time for the next balance is due, 8868 * do the balance.
8869 */ 8870 if (time_after_eq(jiffies, rq->next_balance)) { 8871 struct rq_flags rf; 8872 8873 rq_lock_irq(rq, &rf); 8874 update_rq_clock(rq); 8875 cpu_load_update_idle(rq); 8876 rq_unlock_irq(rq, &rf); 8877 8878 rebalance_domains(rq, CPU_IDLE); 8879 } 8880 8881 if (time_after(next_balance, rq->next_balance)) { 8882 next_balance = rq->next_balance; 8883 update_next_balance = 1; 8884 } 8885 } 8886 8887 /* 8888 * next_balance will be updated only when there is a need. 8889 * When the CPU is attached to a null domain, for example, it will not 8890 * be updated. 8891 */ 8892 if (likely(update_next_balance)) 8893 nohz.next_balance = next_balance; 8894 end: 8895 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); 8896 } 8897 8898 /* 8899 * Current heuristic for kicking the idle load balancer in the presence 8900 * of an idle cpu in the system. 8901 * - This rq has more than one task. 8902 * - This rq has at least one CFS task and the capacity of the CPU is 8903 * significantly reduced because of RT tasks or IRQs. 8904 * - At the parent of the LLC scheduler domain level, this cpu's scheduler 8905 * group has multiple busy CPUs. 8906 * - For SD_ASYM_PACKING, if the lower numbered CPUs in the scheduler 8907 * domain span are idle. 8908 */ 8909 static inline bool nohz_kick_needed(struct rq *rq) 8910 { 8911 unsigned long now = jiffies; 8912 struct sched_domain_shared *sds; 8913 struct sched_domain *sd; 8914 int nr_busy, i, cpu = rq->cpu; 8915 bool kick = false; 8916 8917 if (unlikely(rq->idle_balance)) 8918 return false; 8919 8920 /* 8921 * We may recently have been in ticked or tickless idle mode. At the first 8922 * busy tick after returning from idle, we will update the busy stats. 8923 */ 8924 set_cpu_sd_state_busy(); 8925 nohz_balance_exit_idle(cpu); 8926 8927 /* 8928 * None are in tickless mode and hence no need for NOHZ idle load 8929 * balancing. 8930 */ 8931 if (likely(!atomic_read(&nohz.nr_cpus))) 8932 return false; 8933 8934 if (time_before(now, nohz.next_balance)) 8935 return false; 8936 8937 if (rq->nr_running >= 2) 8938 return true; 8939 8940 rcu_read_lock(); 8941 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 8942 if (sds) { 8943 /* 8944 * XXX: write a coherent comment on why we do this. 8945 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com 8946 */ 8947 nr_busy = atomic_read(&sds->nr_busy_cpus); 8948 if (nr_busy > 1) { 8949 kick = true; 8950 goto unlock; 8951 } 8952 8953 } 8954 8955 sd = rcu_dereference(rq->sd); 8956 if (sd) { 8957 if ((rq->cfs.h_nr_running >= 1) && 8958 check_cpu_capacity(rq, sd)) { 8959 kick = true; 8960 goto unlock; 8961 } 8962 } 8963 8964 sd = rcu_dereference(per_cpu(sd_asym, cpu)); 8965 if (sd) { 8966 for_each_cpu(i, sched_domain_span(sd)) { 8967 if (i == cpu || 8968 !cpumask_test_cpu(i, nohz.idle_cpus_mask)) 8969 continue; 8970 8971 if (sched_asym_prefer(i, cpu)) { 8972 kick = true; 8973 goto unlock; 8974 } 8975 } 8976 } 8977 unlock: 8978 rcu_read_unlock(); 8979 return kick; 8980 } 8981 #else 8982 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { } 8983 #endif 8984 8985 /* 8986 * run_rebalance_domains is triggered when needed from the scheduler tick. 8987 * Also triggered for nohz idle balancing (with nohz_balancing_kick set). 8988 */ 8989 static __latent_entropy void run_rebalance_domains(struct softirq_action *h) 8990 { 8991 struct rq *this_rq = this_rq(); 8992 enum cpu_idle_type idle = this_rq->idle_balance ?
8993 CPU_IDLE : CPU_NOT_IDLE; 8994 8995 /* 8996 * If this cpu has a pending nohz_balance_kick, then do the 8997 * balancing on behalf of the other idle cpus whose ticks are 8998 * stopped. Do nohz_idle_balance *before* rebalance_domains to 8999 * give the idle cpus a chance to load balance. Else we may 9000 * load balance only within the local sched_domain hierarchy 9001 * and abort nohz_idle_balance altogether if we pull some load. 9002 */ 9003 nohz_idle_balance(this_rq, idle); 9004 rebalance_domains(this_rq, idle); 9005 } 9006 9007 /* 9008 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 9009 */ 9010 void trigger_load_balance(struct rq *rq) 9011 { 9012 /* Don't need to rebalance while attached to NULL domain */ 9013 if (unlikely(on_null_domain(rq))) 9014 return; 9015 9016 if (time_after_eq(jiffies, rq->next_balance)) 9017 raise_softirq(SCHED_SOFTIRQ); 9018 #ifdef CONFIG_NO_HZ_COMMON 9019 if (nohz_kick_needed(rq)) 9020 nohz_balancer_kick(); 9021 #endif 9022 } 9023 9024 static void rq_online_fair(struct rq *rq) 9025 { 9026 update_sysctl(); 9027 9028 update_runtime_enabled(rq); 9029 } 9030 9031 static void rq_offline_fair(struct rq *rq) 9032 { 9033 update_sysctl(); 9034 9035 /* Ensure any throttled groups are reachable by pick_next_task */ 9036 unthrottle_offline_cfs_rqs(rq); 9037 } 9038 9039 #endif /* CONFIG_SMP */ 9040 9041 /* 9042 * scheduler tick hitting a task of our scheduling class: 9043 */ 9044 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) 9045 { 9046 struct cfs_rq *cfs_rq; 9047 struct sched_entity *se = &curr->se; 9048 9049 for_each_sched_entity(se) { 9050 cfs_rq = cfs_rq_of(se); 9051 entity_tick(cfs_rq, se, queued); 9052 } 9053 9054 if (static_branch_unlikely(&sched_numa_balancing)) 9055 task_tick_numa(rq, curr); 9056 } 9057 9058 /* 9059 * called on fork with the child task as argument from the parent's context 9060 * - child not yet on the tasklist 9061 * - preemption disabled 9062 */ 9063 static void task_fork_fair(struct task_struct *p) 9064 { 9065 struct cfs_rq *cfs_rq; 9066 struct sched_entity *se = &p->se, *curr; 9067 struct rq *rq = this_rq(); 9068 struct rq_flags rf; 9069 9070 rq_lock(rq, &rf); 9071 update_rq_clock(rq); 9072 9073 cfs_rq = task_cfs_rq(current); 9074 curr = cfs_rq->curr; 9075 if (curr) { 9076 update_curr(cfs_rq); 9077 se->vruntime = curr->vruntime; 9078 } 9079 place_entity(cfs_rq, se, 1); 9080 9081 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { 9082 /* 9083 * Upon rescheduling, sched_class::put_prev_task() will place 9084 * 'current' within the tree based on its new key value. 9085 */ 9086 swap(curr->vruntime, se->vruntime); 9087 resched_curr(rq); 9088 } 9089 9090 se->vruntime -= cfs_rq->min_vruntime; 9091 rq_unlock(rq, &rf); 9092 } 9093 9094 /* 9095 * Priority of the task has changed. Check to see if we preempt 9096 * the current task. 
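 * (First, a guarded sketch of the fork-time vruntime handling in
 * task_fork_fair() above; the numbers and the stand-in for place_entity()
 * are the editor's.)
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long min_vruntime = 1000000;
	unsigned long long parent = 1004000, child;
	int child_runs_first = 1;		/* sysctl stand-in */

	child = parent + 3000;			/* place_entity() stand-in */

	if (child_runs_first && child > parent) {
		/* swap, so the child sorts leftmost and runs first */
		unsigned long long t = parent;
		parent = child;
		child = t;
	}

	/* stored relative to min_vruntime until enqueued on its final CPU */
	child -= min_vruntime;
	printf("stored child vruntime: +%llu\n", child);	/* +4000 */
	return 0;
}
#endif
/*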
9097 */ 9098 static void 9099 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) 9100 { 9101 if (!task_on_rq_queued(p)) 9102 return; 9103 9104 /* 9105 * Reschedule if we are currently running on this runqueue and 9106 * our priority decreased, or if we are not currently running on 9107 * this runqueue and our priority is higher than the current task's. 9108 */ 9109 if (rq->curr == p) { 9110 if (p->prio > oldprio) 9111 resched_curr(rq); 9112 } else 9113 check_preempt_curr(rq, p, 0); 9114 } 9115 9116 static inline bool vruntime_normalized(struct task_struct *p) 9117 { 9118 struct sched_entity *se = &p->se; 9119 9120 /* 9121 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases, 9122 * the dequeue_entity(.flags=0) will already have normalized the 9123 * vruntime. 9124 */ 9125 if (p->on_rq) 9126 return true; 9127 9128 /* 9129 * When !on_rq, the vruntime of the task has usually NOT been normalized. 9130 * But there are some cases where it has already been normalized: 9131 * 9132 * - A forked child which is waiting to be woken up by 9133 * wake_up_new_task(). 9134 * - A task which has been woken up by try_to_wake_up() and is 9135 * waiting to actually be woken up by sched_ttwu_pending(). 9136 */ 9137 if (!se->sum_exec_runtime || p->state == TASK_WAKING) 9138 return true; 9139 9140 return false; 9141 } 9142 9143 #ifdef CONFIG_FAIR_GROUP_SCHED 9144 /* 9145 * Propagate the changes of the sched_entity across the tg tree to make them 9146 * visible to the root. 9147 */ 9148 static void propagate_entity_cfs_rq(struct sched_entity *se) 9149 { 9150 struct cfs_rq *cfs_rq; 9151 9152 /* Start to propagate at the parent */ 9153 se = se->parent; 9154 9155 for_each_sched_entity(se) { 9156 cfs_rq = cfs_rq_of(se); 9157 9158 if (cfs_rq_throttled(cfs_rq)) 9159 break; 9160 9161 update_load_avg(se, UPDATE_TG); 9162 } 9163 } 9164 #else 9165 static void propagate_entity_cfs_rq(struct sched_entity *se) { } 9166 #endif 9167 9168 static void detach_entity_cfs_rq(struct sched_entity *se) 9169 { 9170 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9171 9172 /* Catch up with the cfs_rq and remove our load when we leave */ 9173 update_load_avg(se, 0); 9174 detach_entity_load_avg(cfs_rq, se); 9175 update_tg_load_avg(cfs_rq, false); 9176 propagate_entity_cfs_rq(se); 9177 } 9178 9179 static void attach_entity_cfs_rq(struct sched_entity *se) 9180 { 9181 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9182 9183 #ifdef CONFIG_FAIR_GROUP_SCHED 9184 /* 9185 * Since the real depth could have been changed (only the FAIR 9186 * class maintains a depth value), reset the depth properly. 9187 */ 9188 se->depth = se->parent ? se->parent->depth + 1 : 0; 9189 #endif 9190 9191 /* Synchronize the entity with its cfs_rq */ 9192 update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); 9193 attach_entity_load_avg(cfs_rq, se); 9194 update_tg_load_avg(cfs_rq, false); 9195 propagate_entity_cfs_rq(se); 9196 } 9197 9198 static void detach_task_cfs_rq(struct task_struct *p) 9199 { 9200 struct sched_entity *se = &p->se; 9201 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9202 9203 if (!vruntime_normalized(p)) { 9204 /* 9205 * Fix up our vruntime so that the current sleep doesn't 9206 * cause an 'unlimited' sleep bonus.
9207 */ 9208 place_entity(cfs_rq, se, 0); 9209 se->vruntime -= cfs_rq->min_vruntime; 9210 } 9211 9212 detach_entity_cfs_rq(se); 9213 } 9214 9215 static void attach_task_cfs_rq(struct task_struct *p) 9216 { 9217 struct sched_entity *se = &p->se; 9218 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9219 9220 attach_entity_cfs_rq(se); 9221 9222 if (!vruntime_normalized(p)) 9223 se->vruntime += cfs_rq->min_vruntime; 9224 } 9225 9226 static void switched_from_fair(struct rq *rq, struct task_struct *p) 9227 { 9228 detach_task_cfs_rq(p); 9229 } 9230 9231 static void switched_to_fair(struct rq *rq, struct task_struct *p) 9232 { 9233 attach_task_cfs_rq(p); 9234 9235 if (task_on_rq_queued(p)) { 9236 /* 9237 * We were most likely switched from sched_rt, so 9238 * kick off the schedule if running, otherwise just see 9239 * if we can still preempt the current task. 9240 */ 9241 if (rq->curr == p) 9242 resched_curr(rq); 9243 else 9244 check_preempt_curr(rq, p, 0); 9245 } 9246 } 9247 9248 /* Account for a task changing its policy or group. 9249 * 9250 * This routine is mostly called to set cfs_rq->curr field when a task 9251 * migrates between groups/classes. 9252 */ 9253 static void set_curr_task_fair(struct rq *rq) 9254 { 9255 struct sched_entity *se = &rq->curr->se; 9256 9257 for_each_sched_entity(se) { 9258 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9259 9260 set_next_entity(cfs_rq, se); 9261 /* ensure bandwidth has been allocated on our new cfs_rq */ 9262 account_cfs_rq_runtime(cfs_rq, 0); 9263 } 9264 } 9265 9266 void init_cfs_rq(struct cfs_rq *cfs_rq) 9267 { 9268 cfs_rq->tasks_timeline = RB_ROOT_CACHED; 9269 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); 9270 #ifndef CONFIG_64BIT 9271 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; 9272 #endif 9273 #ifdef CONFIG_SMP 9274 #ifdef CONFIG_FAIR_GROUP_SCHED 9275 cfs_rq->propagate_avg = 0; 9276 #endif 9277 atomic_long_set(&cfs_rq->removed_load_avg, 0); 9278 atomic_long_set(&cfs_rq->removed_util_avg, 0); 9279 #endif 9280 } 9281 9282 #ifdef CONFIG_FAIR_GROUP_SCHED 9283 static void task_set_group_fair(struct task_struct *p) 9284 { 9285 struct sched_entity *se = &p->se; 9286 9287 set_task_rq(p, task_cpu(p)); 9288 se->depth = se->parent ? 
se->parent->depth + 1 : 0; 9289 } 9290 9291 static void task_move_group_fair(struct task_struct *p) 9292 { 9293 detach_task_cfs_rq(p); 9294 set_task_rq(p, task_cpu(p)); 9295 9296 #ifdef CONFIG_SMP 9297 /* Tell se's cfs_rq has been changed -- migrated */ 9298 p->se.avg.last_update_time = 0; 9299 #endif 9300 attach_task_cfs_rq(p); 9301 } 9302 9303 static void task_change_group_fair(struct task_struct *p, int type) 9304 { 9305 switch (type) { 9306 case TASK_SET_GROUP: 9307 task_set_group_fair(p); 9308 break; 9309 9310 case TASK_MOVE_GROUP: 9311 task_move_group_fair(p); 9312 break; 9313 } 9314 } 9315 9316 void free_fair_sched_group(struct task_group *tg) 9317 { 9318 int i; 9319 9320 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); 9321 9322 for_each_possible_cpu(i) { 9323 if (tg->cfs_rq) 9324 kfree(tg->cfs_rq[i]); 9325 if (tg->se) 9326 kfree(tg->se[i]); 9327 } 9328 9329 kfree(tg->cfs_rq); 9330 kfree(tg->se); 9331 } 9332 9333 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 9334 { 9335 struct sched_entity *se; 9336 struct cfs_rq *cfs_rq; 9337 int i; 9338 9339 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); 9340 if (!tg->cfs_rq) 9341 goto err; 9342 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); 9343 if (!tg->se) 9344 goto err; 9345 9346 tg->shares = NICE_0_LOAD; 9347 9348 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); 9349 9350 for_each_possible_cpu(i) { 9351 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), 9352 GFP_KERNEL, cpu_to_node(i)); 9353 if (!cfs_rq) 9354 goto err; 9355 9356 se = kzalloc_node(sizeof(struct sched_entity), 9357 GFP_KERNEL, cpu_to_node(i)); 9358 if (!se) 9359 goto err_free_rq; 9360 9361 init_cfs_rq(cfs_rq); 9362 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); 9363 init_entity_runnable_average(se); 9364 } 9365 9366 return 1; 9367 9368 err_free_rq: 9369 kfree(cfs_rq); 9370 err: 9371 return 0; 9372 } 9373 9374 void online_fair_sched_group(struct task_group *tg) 9375 { 9376 struct sched_entity *se; 9377 struct rq *rq; 9378 int i; 9379 9380 for_each_possible_cpu(i) { 9381 rq = cpu_rq(i); 9382 se = tg->se[i]; 9383 9384 raw_spin_lock_irq(&rq->lock); 9385 update_rq_clock(rq); 9386 attach_entity_cfs_rq(se); 9387 sync_throttle(tg, i); 9388 raw_spin_unlock_irq(&rq->lock); 9389 } 9390 } 9391 9392 void unregister_fair_sched_group(struct task_group *tg) 9393 { 9394 unsigned long flags; 9395 struct rq *rq; 9396 int cpu; 9397 9398 for_each_possible_cpu(cpu) { 9399 if (tg->se[cpu]) 9400 remove_entity_load_avg(tg->se[cpu]); 9401 9402 /* 9403 * Only empty task groups can be destroyed; so we can speculatively 9404 * check on_list without danger of it being re-added. 
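 * (Below, guarded out: an editor's userspace analog of the allocation in
 * alloc_fair_sched_group() above; note that sizeof(cfs_rq) and sizeof(se)
 * there are the sizes of the local *pointer* variables, so the arrays
 * hold one pointer per possible CPU, with the structs allocated per CPU
 * afterwards.)
 */
#if 0
#include <stdlib.h>

struct demo_rq { int dummy; };

static struct demo_rq **alloc_rq_ptrs(int nr_cpus)
{
	struct demo_rq **rqs = calloc(nr_cpus, sizeof(*rqs)); /* ptr array */
	int i;

	if (!rqs)
		return NULL;
	for (i = 0; i < nr_cpus; i++) {
		rqs[i] = calloc(1, sizeof(**rqs));	/* one struct per CPU */
		if (!rqs[i])
			goto err;
	}
	return rqs;
err:
	while (i--)
		free(rqs[i]);
	free(rqs);
	return NULL;
}
#endif
/*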
9405 */ 9406 if (!tg->cfs_rq[cpu]->on_list) 9407 continue; 9408 9409 rq = cpu_rq(cpu); 9410 9411 raw_spin_lock_irqsave(&rq->lock, flags); 9412 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); 9413 raw_spin_unlock_irqrestore(&rq->lock, flags); 9414 } 9415 } 9416 9417 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 9418 struct sched_entity *se, int cpu, 9419 struct sched_entity *parent) 9420 { 9421 struct rq *rq = cpu_rq(cpu); 9422 9423 cfs_rq->tg = tg; 9424 cfs_rq->rq = rq; 9425 init_cfs_rq_runtime(cfs_rq); 9426 9427 tg->cfs_rq[cpu] = cfs_rq; 9428 tg->se[cpu] = se; 9429 9430 /* se could be NULL for root_task_group */ 9431 if (!se) 9432 return; 9433 9434 if (!parent) { 9435 se->cfs_rq = &rq->cfs; 9436 se->depth = 0; 9437 } else { 9438 se->cfs_rq = parent->my_q; 9439 se->depth = parent->depth + 1; 9440 } 9441 9442 se->my_q = cfs_rq; 9443 /* guarantee group entities always have weight */ 9444 update_load_set(&se->load, NICE_0_LOAD); 9445 se->parent = parent; 9446 } 9447 9448 static DEFINE_MUTEX(shares_mutex); 9449 9450 int sched_group_set_shares(struct task_group *tg, unsigned long shares) 9451 { 9452 int i; 9453 9454 /* 9455 * We can't change the weight of the root cgroup. 9456 */ 9457 if (!tg->se[0]) 9458 return -EINVAL; 9459 9460 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); 9461 9462 mutex_lock(&shares_mutex); 9463 if (tg->shares == shares) 9464 goto done; 9465 9466 tg->shares = shares; 9467 for_each_possible_cpu(i) { 9468 struct rq *rq = cpu_rq(i); 9469 struct sched_entity *se = tg->se[i]; 9470 struct rq_flags rf; 9471 9472 /* Propagate contribution to hierarchy */ 9473 rq_lock_irqsave(rq, &rf); 9474 update_rq_clock(rq); 9475 for_each_sched_entity(se) { 9476 update_load_avg(se, UPDATE_TG); 9477 update_cfs_shares(se); 9478 } 9479 rq_unlock_irqrestore(rq, &rf); 9480 } 9481 9482 done: 9483 mutex_unlock(&shares_mutex); 9484 return 0; 9485 } 9486 #else /* CONFIG_FAIR_GROUP_SCHED */ 9487 9488 void free_fair_sched_group(struct task_group *tg) { } 9489 9490 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 9491 { 9492 return 1; 9493 } 9494 9495 void online_fair_sched_group(struct task_group *tg) { } 9496 9497 void unregister_fair_sched_group(struct task_group *tg) { } 9498 9499 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9500 9501 9502 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) 9503 { 9504 struct sched_entity *se = &task->se; 9505 unsigned int rr_interval = 0; 9506 9507 /* 9508 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise 9509 * idle runqueue: 9510 */ 9511 if (rq->cfs.load.weight) 9512 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); 9513 9514 return rr_interval; 9515 } 9516 9517 /* 9518 * All the scheduling class methods: 9519 */ 9520 const struct sched_class fair_sched_class = { 9521 .next = &idle_sched_class, 9522 .enqueue_task = enqueue_task_fair, 9523 .dequeue_task = dequeue_task_fair, 9524 .yield_task = yield_task_fair, 9525 .yield_to_task = yield_to_task_fair, 9526 9527 .check_preempt_curr = check_preempt_wakeup, 9528 9529 .pick_next_task = pick_next_task_fair, 9530 .put_prev_task = put_prev_task_fair, 9531 9532 #ifdef CONFIG_SMP 9533 .select_task_rq = select_task_rq_fair, 9534 .migrate_task_rq = migrate_task_rq_fair, 9535 9536 .rq_online = rq_online_fair, 9537 .rq_offline = rq_offline_fair, 9538 9539 .task_dead = task_dead_fair, 9540 .set_cpus_allowed = set_cpus_allowed_common, 9541 #endif 9542 9543 .set_curr_task = set_curr_task_fair, 9544 .task_tick = 
task_tick_fair, 9545 .task_fork = task_fork_fair, 9546 9547 .prio_changed = prio_changed_fair, 9548 .switched_from = switched_from_fair, 9549 .switched_to = switched_to_fair, 9550 9551 .get_rr_interval = get_rr_interval_fair, 9552 9553 .update_curr = update_curr_fair, 9554 9555 #ifdef CONFIG_FAIR_GROUP_SCHED 9556 .task_change_group = task_change_group_fair, 9557 #endif 9558 }; 9559 9560 #ifdef CONFIG_SCHED_DEBUG 9561 void print_cfs_stats(struct seq_file *m, int cpu) 9562 { 9563 struct cfs_rq *cfs_rq, *pos; 9564 9565 rcu_read_lock(); 9566 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) 9567 print_cfs_rq(m, cpu, cfs_rq); 9568 rcu_read_unlock(); 9569 } 9570 9571 #ifdef CONFIG_NUMA_BALANCING 9572 void show_numa_stats(struct task_struct *p, struct seq_file *m) 9573 { 9574 int node; 9575 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0; 9576 9577 for_each_online_node(node) { 9578 if (p->numa_faults) { 9579 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; 9580 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; 9581 } 9582 if (p->numa_group) { 9583 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)]; 9584 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)]; 9585 } 9586 print_numa_stats(m, node, tsf, tpf, gsf, gpf); 9587 } 9588 } 9589 #endif /* CONFIG_NUMA_BALANCING */ 9590 #endif /* CONFIG_SCHED_DEBUG */ 9591 9592 __init void init_sched_fair_class(void) 9593 { 9594 #ifdef CONFIG_SMP 9595 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); 9596 9597 #ifdef CONFIG_NO_HZ_COMMON 9598 nohz.next_balance = jiffies; 9599 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); 9600 #endif 9601 #endif /* SMP */ 9602 9603 } 9604
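/*
 * (Editor's usage sketch, guarded out of the build: the slice computed by
 * get_rr_interval_fair() above is what a SCHED_OTHER task sees through
 * sched_rr_get_interval(2); compile standalone to inspect it.)
 */
#if 0
#include <stdio.h>
#include <sched.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)	/* 0 == calling thread */
		printf("slice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif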