/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/sched/mm.h>
#include <linux/sched/topology.h>

#include <linux/latencytop.h>
#include <linux/cpumask.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 *
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
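/*
 * Worked example of the scaling above (illustrative, assuming an 8-CPU
 * machine and the default SCHED_TUNABLESCALING_LOG): the factor computed
 * by get_update_sysctl_factor() below is 1 + ilog2(8) = 4, so
 * update_sysctl() sets sysctl_sched_latency to 24ms,
 * sysctl_sched_min_granularity to 3ms and sysctl_sched_wakeup_granularity
 * to 4ms, while sched_nr_latency remains 24ms / 3ms = 8.
 */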
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

#ifdef CONFIG_SMP
/*
 * For asym packing, by default the lower numbered cpu has higher priority.
 */
int __weak arch_asym_cpu_priority(int cpu)
{
	return -cpu;
}
#endif

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

/*
 * The margin used when comparing utilization with CPU capacity:
 * util * margin < capacity * 1024
 *
 * (default: ~20%)
 */
unsigned int capacity_margin = 1280;

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
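/*
 * Worked example (illustrative): for a nice-0 entity, weight is NICE_0_LOAD,
 * i.e. 1024 after scale_load_down(). On a runqueue whose total weight is
 * 3072, inv_weight = 0xffffffff / 3072 = 1398101 and
 * fact = 1024 * 1398101 = 1431655424, which still fits in 32 bits, so the
 * shift stays at 32 and the result is roughly delta_exec / 3 - the entity
 * is entitled to about a third of the elapsed time.
 */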
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	int shift = WMULT_SHIFT;

	__update_inv_weight(lw);

	if (unlikely(fact >> 32)) {
		while (fact >> 32) {
			fact >>= 1;
			shift--;
		}
	}

	/* hint to use a 32x32->64 mul */
	fact = (u64)(u32)fact * lw->inv_weight;

	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
	SCHED_WARN_ON(!entity_is_task(se));
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		struct rq *rq = rq_of(cfs_rq);
		int cpu = cpu_of(rq);
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases and a special case for the root
		 * cfs_rq. Furthermore, it also means that we will always reset
		 * tmp_alone_branch either when the branch is connected
		 * to a tree or when we reach the beginning of the tree.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
			/*
			 * If the parent is already on the list, we add the
			 * child just before. Thanks to the circular linked
			 * property of the list, this means putting the child
			 * at the tail of the list that starts at the parent.
			 */
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
			/*
			 * The branch is now connected to its tree so we can
			 * reset tmp_alone_branch to the beginning of the
			 * list.
			 */
			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		} else if (!cfs_rq->tg->parent) {
			/*
			 * A cfs_rq without a parent should be put
			 * at the tail of the list.
			 */
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq->leaf_cfs_rq_list);
			/*
			 * We have reached the beginning of a tree so we can
			 * reset tmp_alone_branch to the beginning of the
			 * list.
			 */
			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		} else {
			/*
			 * The parent has not already been added so we want to
			 * make sure that it will be put after us.
			 * tmp_alone_branch points to the beginning of the
			 * branch where we will add the parent.
			 */
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				rq->tmp_alone_branch);
			/*
			 * Update tmp_alone_branch to point to the new
			 * beginning of the branch.
			 */
			rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
		}

		cfs_rq->on_list = 1;
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
				 leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return se->cfs_rq;

	return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can be made between sibling entities that are
	 * in the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find their ancestors that are
	 * siblings of a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = (*se)->depth;
	pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */
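/*
 * Note on the helpers below (illustrative): vruntime comparisons are done
 * on the signed difference, so they stay correct if the u64 values wrap.
 * For example, with max_vruntime == ULLONG_MAX - 10 and a new vruntime of
 * 5 (i.e. it has wrapped), the delta 5 - (ULLONG_MAX - 10) == 16 is
 * positive, so 5 is correctly treated as the larger value.
 */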
static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;

	u64 vruntime = cfs_rq->min_vruntime;

	if (curr) {
		if (curr->on_rq)
			vruntime = curr->vruntime;
		else
			curr = NULL;
	}

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	unsigned int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}
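/*
 * Worked example (illustrative, using the unscaled defaults of 6ms latency,
 * 0.75ms minimum granularity and sched_nr_latency == 8): with up to 8
 * runnable tasks the period is 6ms; with 16 runnable tasks it is stretched
 * to 16 * 0.75ms = 12ms. sched_slice() below then hands each entity a share
 * of that period proportional to its weight, so two runnable nice-0 tasks
 * would get 3ms each.
 */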
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = __calc_delta(slice, se->load.weight, load);
	}
	return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP

#include "sched-pelt.h"

static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);

/* Give a new sched_entity initial runnable values so it is seen as heavily loaded at first */
void init_entity_runnable_average(struct sched_entity *se)
{
	struct sched_avg *sa = &se->avg;

	sa->last_update_time = 0;
	/*
	 * sched_avg's period_contrib should be strictly less than 1024, so
	 * we give it 1023 to make sure it is almost a period (1024us), and
	 * will definitely be updated (after enqueue).
	 */
	sa->period_contrib = 1023;
	/*
	 * Tasks are initialized with full load to be seen as heavy tasks until
	 * they get a chance to stabilize to their real load level.
	 * Group entities are initialized with zero load to reflect the fact that
	 * nothing has been attached to the task group yet.
	 */
	if (entity_is_task(se))
		sa->load_avg = scale_load_down(se->load.weight);
	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
	/*
	 * At this point, util_avg won't be used in select_task_rq_fair anyway
	 */
	sa->util_avg = 0;
	sa->util_sum = 0;
	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
static void attach_entity_cfs_rq(struct sched_entity *se);

/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the left utilization budget:
 *
 *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task.
 *
 * For example, the simplest series from the beginning would be:
 *
 *   task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
void post_init_entity_util_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct sched_avg *sa = &se->avg;
	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;

	if (cap > 0) {
		if (cfs_rq->avg.util_avg != 0) {
			sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
			sa->util_avg /= (cfs_rq->avg.load_avg + 1);

			if (sa->util_avg > cap)
				sa->util_avg = cap;
		} else {
			sa->util_avg = cap;
		}
		sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
	}

	if (entity_is_task(se)) {
		struct task_struct *p = task_of(se);
		if (p->sched_class != &fair_sched_class) {
			/*
			 * For !fair tasks do:
			 *
			update_cfs_rq_load_avg(now, cfs_rq, false);
			attach_entity_load_avg(cfs_rq, se);
			switched_from_fair(rq, p);
			 *
			 * such that the next switched_to_fair() has the
			 * expected state.
			 */
			se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
			return;
		}
	}

	attach_entity_cfs_rq(se);
}

#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct sched_entity *se)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
{
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	schedstat_set(curr->statistics.exec_max,
		      max(delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq->exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
	update_curr(cfs_rq_of(&rq->curr->se));
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 wait_start, prev_wait_start;

	if (!schedstat_enabled())
		return;

	wait_start = rq_clock(rq_of(cfs_rq));
	prev_wait_start = schedstat_val(se->statistics.wait_start);

	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
	    likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	schedstat_set(se->statistics.wait_start, wait_start);
}

static inline void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *p;
	u64 delta;

	if (!schedstat_enabled())
		return;

	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);

	if (entity_is_task(se)) {
		p = task_of(se);
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve migrating task's wait time so wait_start
			 * time stamp can be adjusted to accumulate wait time
			 * prior to migration.
			 */
			schedstat_set(se->statistics.wait_start, delta);
			return;
		}
		trace_sched_stat_wait(p, delta);
	}

	schedstat_set(se->statistics.wait_max,
		      max(schedstat_val(se->statistics.wait_max), delta));
	schedstat_inc(se->statistics.wait_count);
	schedstat_add(se->statistics.wait_sum, delta);
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *tsk = NULL;
	u64 sleep_start, block_start;

	if (!schedstat_enabled())
		return;

	sleep_start = schedstat_val(se->statistics.sleep_start);
	block_start = schedstat_val(se->statistics.block_start);

	if (entity_is_task(se))
		tsk = task_of(se);

	if (sleep_start) {
		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
			schedstat_set(se->statistics.sleep_max, delta);

		schedstat_set(se->statistics.sleep_start, 0);
		schedstat_add(se->statistics.sum_sleep_runtime, delta);

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (block_start) {
		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
			schedstat_set(se->statistics.block_max, delta);

		schedstat_set(se->statistics.block_start, 0);
		schedstat_add(se->statistics.sum_sleep_runtime, delta);

		if (tsk) {
			if (tsk->in_iowait) {
				schedstat_add(se->statistics.iowait_sum, delta);
				schedstat_inc(se->statistics.iowait_count);
				trace_sched_stat_iowait(tsk, delta);
			}

			trace_sched_stat_blocked(tsk, delta);

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
}

/*
 * Task is being enqueued - update stats:
 */
static inline void
update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper(cfs_rq, se);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{

	if (!schedstat_enabled())
		return;

	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);

	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
		struct task_struct *tsk = task_of(se);

		if (tsk->state & TASK_INTERRUPTIBLE)
			schedstat_set(se->statistics.sleep_start,
				      rq_clock(rq_of(cfs_rq)));
		if (tsk->state & TASK_UNINTERRUPTIBLE)
			schedstat_set(se->statistics.block_start,
				      rq_clock(rq_of(cfs_rq)));
	}
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}
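/*
 * Worked example (illustrative, assuming 4KiB pages and the default 256MB
 * scan size): nr_scan_pages = 256 << (20 - 12) = 65536 pages. A task with
 * 1GiB resident (262144 pages) is therefore divided into
 * 262144 / 65536 = 4 scan windows, so a full scan of its address space
 * takes 4 scan periods.
 */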
/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned int smin = task_scan_min(p);
	unsigned int smax;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
	return max(smin, smax);
}

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != -1);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

struct numa_group {
	atomic_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;
	int active_nodes;

	struct rcu_head rcu;
	unsigned long total_faults;
	unsigned long max_faults_cpu;
	/*
	 * Faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
	 */
	unsigned long *faults_cpu;
	unsigned long faults[0];
};

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
	return p->numa_group ? p->numa_group->gid : 0;
}

/*
 * The averaged statistics, shared & private, memory & cpu,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
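/*
 * Layout sketch (illustrative, assuming a 2-node system and NUMA_MEM being
 * the first stat): each (stat, node) pair takes NR_NUMA_HINT_FAULT_TYPES
 * consecutive slots, so the averaged memory stats occupy indices 0-3
 * (node0/node1, shared and private), the averaged CPU stats indices 4-7,
 * and the corresponding "buf" counters fill the second half of the
 * NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids == 16 slots.
 */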
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	if (!p->numa_group)
		return 0;

	return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
}

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
					int maxdist, bool task)
{
	unsigned long score = 0;
	int node;

	/*
	 * All nodes are directly connected, and the same distance
	 * from each other. No need for fancy placement algorithms.
	 */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return 0;

	/*
	 * This code is called for each node, introducing N^2 complexity,
	 * which should be ok given the number of nodes rarely exceeds 8.
	 */
	for_each_online_node(node) {
		unsigned long faults;
		int dist = node_distance(nid, node);

		/*
		 * The furthest away nodes in the system are not interesting
		 * for placement; nid was already counted.
		 */
		if (dist == sched_max_numa_distance || node == nid)
			continue;

		/*
		 * On systems with a backplane NUMA topology, compare groups
		 * of nodes, and move tasks towards the group with the most
		 * memory accesses. When comparing two nodes at distance
		 * "hoplimit", only nodes closer by than "hoplimit" are part
		 * of each group. Skip other nodes.
		 */
		if (sched_numa_topology_type == NUMA_BACKPLANE &&
					dist > maxdist)
			continue;

		/* Add up the faults from nearby nodes. */
		if (task)
			faults = task_faults(p, node);
		else
			faults = group_faults(p, node);

		/*
		 * On systems with a glueless mesh NUMA topology, there are
		 * no fixed "groups of nodes". Instead, nodes that are not
		 * directly connected bounce traffic through intermediate
		 * nodes; a numa_group can occupy any set of nodes.
		 * The further away a node is, the less the faults count.
		 * This seems to result in good task placement.
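		 * For instance (illustrative numbers): if
		 * sched_max_numa_distance is 40 and LOCAL_DISTANCE is 10,
		 * faults on a node at distance 20 are scaled by
		 * (40 - 20) / (40 - 10) = 2/3 before being added to the score.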
		 */
		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
			faults *= (sched_max_numa_distance - dist);
			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
		}

		score += faults;
	}

	return score;
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
					int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	faults = task_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, true);

	return 1000 * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid,
					 int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_group)
		return 0;

	total_faults = p->numa_group->total_faults;

	if (!total_faults)
		return 0;

	faults = group_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, false);

	return 1000 * faults / total_faults;
}

bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
				int src_nid, int dst_cpu)
{
	struct numa_group *ng = p->numa_group;
	int dst_nid = cpu_to_node(dst_cpu);
	int last_cpupid, this_cpupid;

	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(n)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadratic squishes small probabilities, making it less likely
	 * we act on an unlikely task<->page relation.
	 */
	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
	if (!cpupid_pid_unset(last_cpupid) &&
				cpupid_to_nid(last_cpupid) != dst_nid)
		return false;

	/* Always allow migrate on private faults */
	if (cpupid_match_pid(p, last_cpupid))
		return true;

	/* A shared fault, but p->numa_group has not been set up yet. */
	if (!ng)
		return true;

	/*
	 * Destination node is much more heavily used than the source
	 * node? Allow migration.
	 */
	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
					ACTIVE_NODE_FRACTION)
		return true;

	/*
	 * Distribute memory according to CPU & memory use on each node,
	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
	 *
	 * faults_cpu(dst)   3   faults_cpu(src)
	 * --------------- * - > ---------------
	 * faults_mem(dst)   4   faults_mem(src)
	 */
	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
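/*
 * Worked example for the 3/4 hysteresis above (illustrative numbers): if
 * the destination node has 300 CPU faults against 200 memory faults
 * (ratio 1.5) and the source node has 200 CPU faults against 100 memory
 * faults (ratio 2.0), then 300 * 100 * 3 = 90000 is not greater than
 * 200 * 200 * 4 = 160000, so the page stays put; the destination ratio
 * would have to exceed 4/3 of the source ratio before migration wins.
 */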

static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long capacity_of(int cpu);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long nr_running;
	unsigned long load;

	/* Total compute capacity of CPUs on a node */
	unsigned long compute_capacity;

	/* Approximate capacity in terms of runnable tasks on a node */
	unsigned long task_capacity;
	int has_free_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
	int smt, cpu, cpus = 0;
	unsigned long capacity;

	memset(ns, 0, sizeof(*ns));
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->nr_running += rq->nr_running;
		ns->load += weighted_cpuload(cpu);
		ns->compute_capacity += capacity_of(cpu);

		cpus++;
	}

	/*
	 * If we raced with hotplug and there are no CPUs left in our mask
	 * the @ns structure is NULL'ed and task_numa_compare() will
	 * not find this node attractive.
	 *
	 * We'll either bail at !has_free_capacity, or we'll detect a huge
	 * imbalance and bail there.
	 */
	if (!cpus)
		return;

	/* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
	smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
	capacity = cpus / smt; /* cores */

	ns->task_capacity = min_t(unsigned, capacity,
		DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
	ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}

struct task_numa_env {
	struct task_struct *p;

	int src_cpu, src_nid;
	int dst_cpu, dst_nid;

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct;
	int dist;

	struct task_struct *best_task;
	long best_imp;
	int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}

static bool load_too_imbalanced(long src_load, long dst_load,
				struct task_numa_env *env)
{
	long imb, old_imb;
	long orig_src_load, orig_dst_load;
	long src_capacity, dst_capacity;

	/*
	 * The load is corrected for the CPU capacity available on each node.
	 *
	 * src_load        dst_load
	 * ------------ vs ---------
	 * src_capacity    dst_capacity
	 */
	src_capacity = env->src_stats.compute_capacity;
	dst_capacity = env->dst_stats.compute_capacity;

	/* We care about the slope of the imbalance, not the direction. */
	if (dst_load < src_load)
		swap(dst_load, src_load);

	/* Is the difference below the threshold? */
	imb = dst_load * src_capacity * 100 -
	      src_load * dst_capacity * env->imbalance_pct;
	if (imb <= 0)
		return false;

	/*
	 * The imbalance is above the allowed threshold.
	 * Compare it with the old imbalance.
	 */
	orig_src_load = env->src_stats.load;
	orig_dst_load = env->dst_stats.load;

	if (orig_dst_load < orig_src_load)
		swap(orig_dst_load, orig_src_load);

	old_imb = orig_dst_load * src_capacity * 100 -
		  orig_src_load * dst_capacity * env->imbalance_pct;

	/* Would this change make things worse? */
	return (imb > old_imb);
}
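/*
 * Worked example (illustrative, equal node capacities factored out, and the
 * default imbalance_pct of 112 used by task_numa_migrate()): with the higher
 * load at 60 and the lower at 50, imb = 60 * 100 - 50 * 112 = 400 > 0, so
 * the proposed placement is above the allowed imbalance; it is then accepted
 * only if this imbalance is no worse than the pre-move imbalance computed
 * from the original node loads.
 */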

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task was migrated to the target dst_cpu, taking
 * into account that it might be best if the task running on the dst_cpu is
 * exchanged with the source task.
 */
static void task_numa_compare(struct task_numa_env *env,
			      long taskimp, long groupimp)
{
	struct rq *src_rq = cpu_rq(env->src_cpu);
	struct rq *dst_rq = cpu_rq(env->dst_cpu);
	struct task_struct *cur;
	long src_load, dst_load;
	long load;
	long imp = env->p->numa_group ? groupimp : taskimp;
	long moveimp = imp;
	int dist = env->dist;

	rcu_read_lock();
	cur = task_rcu_dereference(&dst_rq->curr);
	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
		cur = NULL;

	/*
	 * Because we have preemption enabled we can get migrated around and
	 * end up trying to select ourselves (current == env->p) as a swap
	 * candidate.
	 */
	if (cur == env->p)
		goto unlock;

	/*
	 * "imp" is the fault differential for the source task between the
	 * source and destination node. Calculate the total differential for
	 * the source task and potential destination task. The more negative
	 * the value is, the more remote accesses would be expected to
	 * be incurred if the tasks were swapped.
	 */
	if (cur) {
		/* Skip this swap candidate if it cannot move to the source cpu */
		if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
			goto unlock;

		/*
		 * If dst and source tasks are in the same NUMA group, or not
		 * in any group then look only at task weights.
		 */
		if (cur->numa_group == env->p->numa_group) {
			imp = taskimp + task_weight(cur, env->src_nid, dist) -
			      task_weight(cur, env->dst_nid, dist);
			/*
			 * Add some hysteresis to prevent swapping the
			 * tasks within a group over tiny differences.
			 */
			if (cur->numa_group)
				imp -= imp/16;
		} else {
			/*
			 * Compare the group weights. If a task is all by
			 * itself (not part of a group), use the task weight
			 * instead.
			 */
			if (cur->numa_group)
				imp += group_weight(cur, env->src_nid, dist) -
				       group_weight(cur, env->dst_nid, dist);
			else
				imp += task_weight(cur, env->src_nid, dist) -
				       task_weight(cur, env->dst_nid, dist);
		}
	}

	if (imp <= env->best_imp && moveimp <= env->best_imp)
		goto unlock;

	if (!cur) {
		/* Is there capacity at our destination? */
		if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
		    !env->dst_stats.has_free_capacity)
			goto unlock;

		goto balance;
	}

	/* Balance doesn't matter much if we're running a task per cpu */
	if (imp > env->best_imp && src_rq->nr_running == 1 &&
			dst_rq->nr_running == 1)
		goto assign;

	/*
	 * In the overloaded case, try and keep the load balanced.
	 */
balance:
	load = task_h_load(env->p);
	dst_load = env->dst_stats.load + load;
	src_load = env->src_stats.load - load;

	if (moveimp > imp && moveimp > env->best_imp) {
		/*
		 * If the improvement from just moving env->p is better than
		 * swapping tasks around, check if a move is possible.
		 * Store a slightly smaller score than moveimp, so an
		 * actually idle CPU will win.
		 */
		if (!load_too_imbalanced(src_load, dst_load, env)) {
			imp = moveimp - 1;
			cur = NULL;
			goto assign;
		}
	}

	if (imp <= env->best_imp)
		goto unlock;

	if (cur) {
		load = task_h_load(cur);
		dst_load -= load;
		src_load += load;
	}

	if (load_too_imbalanced(src_load, dst_load, env))
		goto unlock;

	/*
	 * One idle CPU per node is evaluated for a task numa move.
	 * Call select_idle_sibling to maybe find a better one.
	 */
	if (!cur) {
		/*
		 * select_idle_sibling() uses a per-cpu cpumask that
		 * can also be used from IRQ context, so disable IRQs
		 * around the call.
		 */
		local_irq_disable();
		env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
						   env->dst_cpu);
		local_irq_enable();
	}

assign:
	task_numa_assign(env, cur, imp);
unlock:
	rcu_read_unlock();
}

static void task_numa_find_cpu(struct task_numa_env *env,
				long taskimp, long groupimp)
{
	int cpu;

	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
		/* Skip this CPU if the source task cannot migrate */
		if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
			continue;

		env->dst_cpu = cpu;
		task_numa_compare(env, taskimp, groupimp);
	}
}

/* Only move tasks to a NUMA node less busy than the current node. */
static bool numa_has_capacity(struct task_numa_env *env)
{
	struct numa_stats *src = &env->src_stats;
	struct numa_stats *dst = &env->dst_stats;

	if (src->has_free_capacity && !dst->has_free_capacity)
		return false;

	/*
	 * Only consider a task move if the source has a higher load
	 * than the destination, corrected for CPU capacity on each node.
	 *
	 *      src->load                dst->load
	 * --------------------- vs ---------------------
	 * src->compute_capacity     dst->compute_capacity
	 */
	if (src->load * dst->compute_capacity * env->imbalance_pct >

	    dst->load * src->compute_capacity * 100)
		return true;

	return false;
}
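/*
 * Worked example (illustrative, equal compute capacities and
 * imbalance_pct == 112): with src->load == 100 and dst->load == 80 the
 * check reads 100 * 112 > 80 * 100, so a move to the less busy node is
 * considered; with src->load == 50 and dst->load == 100 it reads
 * 50 * 112 > 100 * 100, which is false, and the node is skipped.
 */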

static int task_numa_migrate(struct task_struct *p)
{
	struct task_numa_env env = {
		.p = p,

		.src_cpu = task_cpu(p),
		.src_nid = task_node(p),

		.imbalance_pct = 112,

		.best_task = NULL,
		.best_imp = 0,
		.best_cpu = -1,
	};
	struct sched_domain *sd;
	unsigned long taskweight, groupweight;
	int nid, ret, dist;
	long taskimp, groupimp;

	/*
	 * Pick the lowest SD_NUMA domain, as that would have the smallest
	 * imbalance and would be the first to start moving tasks about.
	 *
	 * And we want to avoid any moving of tasks about, as that would create
	 * random movement of tasks -- counter to the numa conditions we're
	 * trying to satisfy here.
	 */
	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
	if (sd)
		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
	rcu_read_unlock();

	/*
	 * Cpusets can break the scheduler domain tree into smaller
	 * balance domains, some of which do not cross NUMA boundaries.
	 * Tasks that are "trapped" in such domains cannot be migrated
	 * elsewhere, so there is no point in (re)trying.
	 */
	if (unlikely(!sd)) {
		p->numa_preferred_nid = task_node(p);
		return -EINVAL;
	}

	env.dst_nid = p->numa_preferred_nid;
	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
	taskweight = task_weight(p, env.src_nid, dist);
	groupweight = group_weight(p, env.src_nid, dist);
	update_numa_stats(&env.src_stats, env.src_nid);
	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
	update_numa_stats(&env.dst_stats, env.dst_nid);

	/* Try to find a spot on the preferred nid. */
	if (numa_has_capacity(&env))
		task_numa_find_cpu(&env, taskimp, groupimp);

	/*
	 * Look at other nodes in these cases:
	 * - there is no space available on the preferred_nid
	 * - the task is part of a numa_group that is interleaved across
	 *   multiple NUMA nodes; in order to better consolidate the group,
	 *   we need to check other locations.
	 */
	if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
		for_each_online_node(nid) {
			if (nid == env.src_nid || nid == p->numa_preferred_nid)
				continue;

			dist = node_distance(env.src_nid, env.dst_nid);
			if (sched_numa_topology_type == NUMA_BACKPLANE &&
						dist != env.dist) {
				taskweight = task_weight(p, env.src_nid, dist);
				groupweight = group_weight(p, env.src_nid, dist);
			}

			/* Only consider nodes where both task and groups benefit */
			taskimp = task_weight(p, nid, dist) - taskweight;
			groupimp = group_weight(p, nid, dist) - groupweight;
			if (taskimp < 0 && groupimp < 0)
				continue;

			env.dist = dist;
			env.dst_nid = nid;
			update_numa_stats(&env.dst_stats, env.dst_nid);
			if (numa_has_capacity(&env))
				task_numa_find_cpu(&env, taskimp, groupimp);
		}
	}

	/*
	 * If the task is part of a workload that spans multiple NUMA nodes,
	 * and is migrating into one of the workload's active nodes, remember
	 * this node as the task's preferred numa node, so the workload can
	 * settle down.
	 * A task that migrated to a second choice node will be better off
	 * trying for a better one later. Do not set the preferred node here.
	 */
	if (p->numa_group) {
		struct numa_group *ng = p->numa_group;

		if (env.best_cpu == -1)
			nid = env.src_nid;
		else
			nid = env.dst_nid;

		if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
			sched_setnuma(p, env.dst_nid);
	}

	/* No better CPU than the current one was found. */
	if (env.best_cpu == -1)
		return -EAGAIN;

	/*
	 * Reset the scan period if the task is being rescheduled on an
	 * alternative node to recheck if the task is now properly placed.
	 */
	p->numa_scan_period = task_scan_min(p);

	if (env.best_task == NULL) {
		ret = migrate_task_to(p, env.best_cpu);
		if (ret != 0)
			trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
		return ret;
	}

	ret = migrate_swap(p, env.best_task);
	if (ret != 0)
		trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
	put_task_struct(env.best_task);
	return ret;
}

/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
	unsigned long interval = HZ;

	/* This task has no NUMA fault statistics yet */
	if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
		return;

	/* Periodically retry migrating the task to the preferred node */
	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
	p->numa_migrate_retry = jiffies + interval;

	/* Success if task is already running on preferred CPU */
	if (task_node(p) == p->numa_preferred_nid)
		return;

	/* Otherwise, try migrate to a CPU on the preferred node */
	task_numa_migrate(p);
}

/*
 * Find out how many nodes the workload is actively running on. Do this by
 * tracking the nodes from which NUMA hinting faults are triggered. This can
 * be different from the set of nodes where the workload's memory is currently
 * located.
 */
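/*
 * Worked example (illustrative): if the group's busiest node has recorded
 * 900 CPU-side faults, then with ACTIVE_NODE_FRACTION == 3 every node with
 * more than 900 / 3 = 300 such faults is counted as an active node of the
 * workload, and max_faults_cpu is set to 900 for later comparisons.
 */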
static void numa_group_count_active_nodes(struct numa_group *numa_group)
{
	unsigned long faults, max_faults = 0;
	int nid, active_nodes = 0;

	for_each_online_node(nid) {
		faults = group_faults_cpu(numa_group, nid);
		if (faults > max_faults)
			max_faults = faults;
	}

	for_each_online_node(nid) {
		faults = group_faults_cpu(numa_group, nid);
		if (faults * ACTIVE_NODE_FRACTION > max_faults)
			active_nodes++;
	}

	numa_group->max_faults_cpu = max_faults;
	numa_group->active_nodes = active_nodes;
}

/*
 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
 * increments. The more local the fault statistics are, the higher the scan
 * period will be for the next scan window. If local/(local+remote) ratio is
 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
 * the scan period will decrease. Aim for 70% local accesses.
 */
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 7

/*
 * Increase the scan period (slow down scanning) if the majority of
 * our memory is already on our local node, or if the majority of
 * the page accesses are shared with other processes.
 * Otherwise, decrease the scan period.
 */
static void update_task_scan_period(struct task_struct *p,
			unsigned long shared, unsigned long private)
{
	unsigned int period_slot;
	int ratio;
	int diff;

	unsigned long remote = p->numa_faults_locality[0];
	unsigned long local = p->numa_faults_locality[1];

	/*
	 * If there were no recorded hinting faults then either the task is
	 * completely idle or all activity is in areas that are not of interest
	 * to automatic numa balancing. Related to that, if there were failed
	 * migrations then it implies we are migrating too quickly or the local
	 * node is overloaded. In either case, scan slower.
	 */
	if (local + shared == 0 || p->numa_faults_locality[2]) {
		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period << 1);

		p->mm->numa_next_scan = jiffies +
			msecs_to_jiffies(p->numa_scan_period);

		return;
	}

	/*
	 * Prepare to scale scan period relative to the current period.
	 *	 == NUMA_PERIOD_THRESHOLD scan period stays the same
	 *	 <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
	 */
	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
	ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
	if (ratio >= NUMA_PERIOD_THRESHOLD) {
		int slot = ratio - NUMA_PERIOD_THRESHOLD;
		if (!slot)
			slot = 1;
		diff = slot * period_slot;
	} else {
		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;

		/*
		 * Scale scan rate increases based on sharing. There is an
		 * inverse relationship between the degree of sharing and
		 * the adjustment made to the scanning period. Broadly
		 * speaking the intent is that there is little point
		 * scanning faster if shared accesses dominate as it may
		 * simply bounce migrations uselessly.
		 */
		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
		diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
	}

	p->numa_scan_period = clamp(p->numa_scan_period + diff,
			task_scan_min(p), task_scan_max(p));
	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
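/*
 * Worked example (illustrative): with a 1000ms scan period, period_slot is
 * 100ms. If 900 of 1000 recorded faults were local, ratio = 9, which is two
 * slots above NUMA_PERIOD_THRESHOLD, so the period grows by 200ms. If only
 * 300 were local, ratio = 3 and the raw adjustment is -400ms, which is then
 * scaled down by the private/(private + shared + 1) fraction before being
 * applied and clamped between task_scan_min() and task_scan_max().
 */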
Broadly 1938 * speaking the intent is that there is little point 1939 * scanning faster if shared accesses dominate as it may 1940 * simply bounce migrations uselessly 1941 */ 1942 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1)); 1943 diff = (diff * ratio) / NUMA_PERIOD_SLOTS; 1944 } 1945 1946 p->numa_scan_period = clamp(p->numa_scan_period + diff, 1947 task_scan_min(p), task_scan_max(p)); 1948 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 1949 } 1950 1951 /* 1952 * Get the fraction of time the task has been running since the last 1953 * NUMA placement cycle. The scheduler keeps similar statistics, but 1954 * decays those on a 32ms period, which is orders of magnitude off 1955 * from the dozens-of-seconds NUMA balancing period. Use the scheduler 1956 * stats only if the task is so new there are no NUMA statistics yet. 1957 */ 1958 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) 1959 { 1960 u64 runtime, delta, now; 1961 /* Use the start of this time slice to avoid calculations. */ 1962 now = p->se.exec_start; 1963 runtime = p->se.sum_exec_runtime; 1964 1965 if (p->last_task_numa_placement) { 1966 delta = runtime - p->last_sum_exec_runtime; 1967 *period = now - p->last_task_numa_placement; 1968 } else { 1969 delta = p->se.avg.load_sum / p->se.load.weight; 1970 *period = LOAD_AVG_MAX; 1971 } 1972 1973 p->last_sum_exec_runtime = runtime; 1974 p->last_task_numa_placement = now; 1975 1976 return delta; 1977 } 1978 1979 /* 1980 * Determine the preferred nid for a task in a numa_group. This needs to 1981 * be done in a way that produces consistent results with group_weight, 1982 * otherwise workloads might not converge. 1983 */ 1984 static int preferred_group_nid(struct task_struct *p, int nid) 1985 { 1986 nodemask_t nodes; 1987 int dist; 1988 1989 /* Direct connections between all NUMA nodes. */ 1990 if (sched_numa_topology_type == NUMA_DIRECT) 1991 return nid; 1992 1993 /* 1994 * On a system with glueless mesh NUMA topology, group_weight 1995 * scores nodes according to the number of NUMA hinting faults on 1996 * both the node itself, and on nearby nodes. 1997 */ 1998 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) { 1999 unsigned long score, max_score = 0; 2000 int node, max_node = nid; 2001 2002 dist = sched_max_numa_distance; 2003 2004 for_each_online_node(node) { 2005 score = group_weight(p, node, dist); 2006 if (score > max_score) { 2007 max_score = score; 2008 max_node = node; 2009 } 2010 } 2011 return max_node; 2012 } 2013 2014 /* 2015 * Finding the preferred nid in a system with NUMA backplane 2016 * interconnect topology is more involved. The goal is to locate 2017 * tasks from numa_groups near each other in the system, and 2018 * untangle workloads from different sides of the system. This requires 2019 * searching down the hierarchy of node groups, recursively searching 2020 * inside the highest scoring group of nodes. The nodemask tricks 2021 * keep the complexity of the search down. 2022 */ 2023 nodes = node_online_map; 2024 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) { 2025 unsigned long max_faults = 0; 2026 nodemask_t max_group = NODE_MASK_NONE; 2027 int a, b; 2028 2029 /* Are there nodes at this distance from each other? */ 2030 if (!find_numa_distance(dist)) 2031 continue; 2032 2033 for_each_node_mask(a, nodes) { 2034 unsigned long faults = 0; 2035 nodemask_t this_group; 2036 nodes_clear(this_group); 2037 2038 /* Sum group's NUMA faults; includes a==b case. 
*/ 2039 for_each_node_mask(b, nodes) { 2040 if (node_distance(a, b) < dist) { 2041 faults += group_faults(p, b); 2042 node_set(b, this_group); 2043 node_clear(b, nodes); 2044 } 2045 } 2046 2047 /* Remember the top group. */ 2048 if (faults > max_faults) { 2049 max_faults = faults; 2050 max_group = this_group; 2051 /* 2052 * subtle: at the smallest distance there is 2053 * just one node left in each "group", the 2054 * winner is the preferred nid. 2055 */ 2056 nid = a; 2057 } 2058 } 2059 /* Next round, evaluate the nodes within max_group. */ 2060 if (!max_faults) 2061 break; 2062 nodes = max_group; 2063 } 2064 return nid; 2065 } 2066 2067 static void task_numa_placement(struct task_struct *p) 2068 { 2069 int seq, nid, max_nid = -1, max_group_nid = -1; 2070 unsigned long max_faults = 0, max_group_faults = 0; 2071 unsigned long fault_types[2] = { 0, 0 }; 2072 unsigned long total_faults; 2073 u64 runtime, period; 2074 spinlock_t *group_lock = NULL; 2075 2076 /* 2077 * The p->mm->numa_scan_seq field gets updated without 2078 * exclusive access. Use READ_ONCE() here to ensure 2079 * that the field is read in a single access: 2080 */ 2081 seq = READ_ONCE(p->mm->numa_scan_seq); 2082 if (p->numa_scan_seq == seq) 2083 return; 2084 p->numa_scan_seq = seq; 2085 p->numa_scan_period_max = task_scan_max(p); 2086 2087 total_faults = p->numa_faults_locality[0] + 2088 p->numa_faults_locality[1]; 2089 runtime = numa_get_avg_runtime(p, &period); 2090 2091 /* If the task is part of a group prevent parallel updates to group stats */ 2092 if (p->numa_group) { 2093 group_lock = &p->numa_group->lock; 2094 spin_lock_irq(group_lock); 2095 } 2096 2097 /* Find the node with the highest number of faults */ 2098 for_each_online_node(nid) { 2099 /* Keep track of the offsets in numa_faults array */ 2100 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx; 2101 unsigned long faults = 0, group_faults = 0; 2102 int priv; 2103 2104 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) { 2105 long diff, f_diff, f_weight; 2106 2107 mem_idx = task_faults_idx(NUMA_MEM, nid, priv); 2108 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv); 2109 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv); 2110 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv); 2111 2112 /* Decay existing window, copy faults since last scan */ 2113 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; 2114 fault_types[priv] += p->numa_faults[membuf_idx]; 2115 p->numa_faults[membuf_idx] = 0; 2116 2117 /* 2118 * Normalize the faults_from, so all tasks in a group 2119 * count according to CPU use, instead of by the raw 2120 * number of faults. Tasks with little runtime have 2121 * little over-all impact on throughput, and thus their 2122 * faults are less important. 2123 */ 2124 f_weight = div64_u64(runtime << 16, period + 1); 2125 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / 2126 (total_faults + 1); 2127 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; 2128 p->numa_faults[cpubuf_idx] = 0; 2129 2130 p->numa_faults[mem_idx] += diff; 2131 p->numa_faults[cpu_idx] += f_diff; 2132 faults += p->numa_faults[mem_idx]; 2133 p->total_numa_faults += diff; 2134 if (p->numa_group) { 2135 /* 2136 * safe because we can only change our own group 2137 * 2138 * mem_idx represents the offset for a given 2139 * nid and priv in a specific region because it 2140 * is at the beginning of the numa_faults array. 
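 *
 * As a sketch of that layout (assuming the task_faults_idx() encoding
 * defined earlier in this file): numa_faults[] is split into one block per
 * statistic, NUMA_MEM first, each block holding NR_NUMA_HINT_FAULT_TYPES
 * counters per node. Because the NUMA_MEM block starts at offset 0, the
 * same mem_idx can index the numa_group's smaller faults[] and
 * faults_cpu[] arrays directly.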
2141 */ 2142 p->numa_group->faults[mem_idx] += diff; 2143 p->numa_group->faults_cpu[mem_idx] += f_diff; 2144 p->numa_group->total_faults += diff; 2145 group_faults += p->numa_group->faults[mem_idx]; 2146 } 2147 } 2148 2149 if (faults > max_faults) { 2150 max_faults = faults; 2151 max_nid = nid; 2152 } 2153 2154 if (group_faults > max_group_faults) { 2155 max_group_faults = group_faults; 2156 max_group_nid = nid; 2157 } 2158 } 2159 2160 update_task_scan_period(p, fault_types[0], fault_types[1]); 2161 2162 if (p->numa_group) { 2163 numa_group_count_active_nodes(p->numa_group); 2164 spin_unlock_irq(group_lock); 2165 max_nid = preferred_group_nid(p, max_group_nid); 2166 } 2167 2168 if (max_faults) { 2169 /* Set the new preferred node */ 2170 if (max_nid != p->numa_preferred_nid) 2171 sched_setnuma(p, max_nid); 2172 2173 if (task_node(p) != p->numa_preferred_nid) 2174 numa_migrate_preferred(p); 2175 } 2176 } 2177 2178 static inline int get_numa_group(struct numa_group *grp) 2179 { 2180 return atomic_inc_not_zero(&grp->refcount); 2181 } 2182 2183 static inline void put_numa_group(struct numa_group *grp) 2184 { 2185 if (atomic_dec_and_test(&grp->refcount)) 2186 kfree_rcu(grp, rcu); 2187 } 2188 2189 static void task_numa_group(struct task_struct *p, int cpupid, int flags, 2190 int *priv) 2191 { 2192 struct numa_group *grp, *my_grp; 2193 struct task_struct *tsk; 2194 bool join = false; 2195 int cpu = cpupid_to_cpu(cpupid); 2196 int i; 2197 2198 if (unlikely(!p->numa_group)) { 2199 unsigned int size = sizeof(struct numa_group) + 2200 4*nr_node_ids*sizeof(unsigned long); 2201 2202 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 2203 if (!grp) 2204 return; 2205 2206 atomic_set(&grp->refcount, 1); 2207 grp->active_nodes = 1; 2208 grp->max_faults_cpu = 0; 2209 spin_lock_init(&grp->lock); 2210 grp->gid = p->pid; 2211 /* Second half of the array tracks nids where faults happen */ 2212 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES * 2213 nr_node_ids; 2214 2215 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2216 grp->faults[i] = p->numa_faults[i]; 2217 2218 grp->total_faults = p->total_numa_faults; 2219 2220 grp->nr_tasks++; 2221 rcu_assign_pointer(p->numa_group, grp); 2222 } 2223 2224 rcu_read_lock(); 2225 tsk = READ_ONCE(cpu_rq(cpu)->curr); 2226 2227 if (!cpupid_match_pid(tsk, cpupid)) 2228 goto no_join; 2229 2230 grp = rcu_dereference(tsk->numa_group); 2231 if (!grp) 2232 goto no_join; 2233 2234 my_grp = p->numa_group; 2235 if (grp == my_grp) 2236 goto no_join; 2237 2238 /* 2239 * Only join the other group if its bigger; if we're the bigger group, 2240 * the other task will join us. 2241 */ 2242 if (my_grp->nr_tasks > grp->nr_tasks) 2243 goto no_join; 2244 2245 /* 2246 * Tie-break on the grp address. 2247 */ 2248 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp) 2249 goto no_join; 2250 2251 /* Always join threads in the same process. 
*/ 2252 if (tsk->mm == current->mm) 2253 join = true; 2254 2255 /* Simple filter to avoid false positives due to PID collisions */ 2256 if (flags & TNF_SHARED) 2257 join = true; 2258 2259 /* Update priv based on whether false sharing was detected */ 2260 *priv = !join; 2261 2262 if (join && !get_numa_group(grp)) 2263 goto no_join; 2264 2265 rcu_read_unlock(); 2266 2267 if (!join) 2268 return; 2269 2270 BUG_ON(irqs_disabled()); 2271 double_lock_irq(&my_grp->lock, &grp->lock); 2272 2273 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { 2274 my_grp->faults[i] -= p->numa_faults[i]; 2275 grp->faults[i] += p->numa_faults[i]; 2276 } 2277 my_grp->total_faults -= p->total_numa_faults; 2278 grp->total_faults += p->total_numa_faults; 2279 2280 my_grp->nr_tasks--; 2281 grp->nr_tasks++; 2282 2283 spin_unlock(&my_grp->lock); 2284 spin_unlock_irq(&grp->lock); 2285 2286 rcu_assign_pointer(p->numa_group, grp); 2287 2288 put_numa_group(my_grp); 2289 return; 2290 2291 no_join: 2292 rcu_read_unlock(); 2293 return; 2294 } 2295 2296 void task_numa_free(struct task_struct *p) 2297 { 2298 struct numa_group *grp = p->numa_group; 2299 void *numa_faults = p->numa_faults; 2300 unsigned long flags; 2301 int i; 2302 2303 if (grp) { 2304 spin_lock_irqsave(&grp->lock, flags); 2305 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) 2306 grp->faults[i] -= p->numa_faults[i]; 2307 grp->total_faults -= p->total_numa_faults; 2308 2309 grp->nr_tasks--; 2310 spin_unlock_irqrestore(&grp->lock, flags); 2311 RCU_INIT_POINTER(p->numa_group, NULL); 2312 put_numa_group(grp); 2313 } 2314 2315 p->numa_faults = NULL; 2316 kfree(numa_faults); 2317 } 2318 2319 /* 2320 * Got a PROT_NONE fault for a page on @node. 2321 */ 2322 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) 2323 { 2324 struct task_struct *p = current; 2325 bool migrated = flags & TNF_MIGRATED; 2326 int cpu_node = task_node(current); 2327 int local = !!(flags & TNF_FAULT_LOCAL); 2328 struct numa_group *ng; 2329 int priv; 2330 2331 if (!static_branch_likely(&sched_numa_balancing)) 2332 return; 2333 2334 /* for example, ksmd faulting in a user's mm */ 2335 if (!p->mm) 2336 return; 2337 2338 /* Allocate buffer to track faults on a per-node basis */ 2339 if (unlikely(!p->numa_faults)) { 2340 int size = sizeof(*p->numa_faults) * 2341 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; 2342 2343 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); 2344 if (!p->numa_faults) 2345 return; 2346 2347 p->total_numa_faults = 0; 2348 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); 2349 } 2350 2351 /* 2352 * First accesses are treated as private, otherwise consider accesses 2353 * to be private if the accessing pid has not changed 2354 */ 2355 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { 2356 priv = 1; 2357 } else { 2358 priv = cpupid_match_pid(p, last_cpupid); 2359 if (!priv && !(flags & TNF_NO_GROUP)) 2360 task_numa_group(p, last_cpupid, flags, &priv); 2361 } 2362 2363 /* 2364 * If a workload spans multiple NUMA nodes, a shared fault that 2365 * occurs wholly within the set of nodes that the workload is 2366 * actively using should be counted as local. This allows the 2367 * scan rate to slow down when a workload has settled down. 
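 *
 * For example (illustrative): a group with active_nodes == 2 spanning
 * nodes 0 and 1 that takes a shared fault on memory resident on node 1
 * while running on node 0 is still counted as local below, even though
 * the CPU node and memory node differ.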
2368 */
2369 ng = p->numa_group;
2370 if (!priv && !local && ng && ng->active_nodes > 1 &&
2371 numa_is_active_node(cpu_node, ng) &&
2372 numa_is_active_node(mem_node, ng))
2373 local = 1;
2374
2375 task_numa_placement(p);
2376
2377 /*
2378 * Periodically retry migrating the task to its preferred node, in case
2379 * it previously failed, or the scheduler moved us.
2380 */
2381 if (time_after(jiffies, p->numa_migrate_retry))
2382 numa_migrate_preferred(p);
2383
2384 if (migrated)
2385 p->numa_pages_migrated += pages;
2386 if (flags & TNF_MIGRATE_FAIL)
2387 p->numa_faults_locality[2] += pages;
2388
2389 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2390 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2391 p->numa_faults_locality[local] += pages;
2392 }
2393
2394 static void reset_ptenuma_scan(struct task_struct *p)
2395 {
2396 /*
2397 * We only did a read acquisition of the mmap sem, so
2398 * p->mm->numa_scan_seq is written to without exclusive access
2399 * and the update is not guaranteed to be atomic. That's not
2400 * much of an issue though, since this is just used for
2401 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2402 * expensive, to avoid any form of compiler optimizations:
2403 */
2404 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2405 p->mm->numa_scan_offset = 0;
2406 }
2407
2408 /*
2409 * The expensive part of numa migration is done from task_work context.
2410 * Triggered from task_tick_numa().
2411 */
2412 void task_numa_work(struct callback_head *work)
2413 {
2414 unsigned long migrate, next_scan, now = jiffies;
2415 struct task_struct *p = current;
2416 struct mm_struct *mm = p->mm;
2417 u64 runtime = p->se.sum_exec_runtime;
2418 struct vm_area_struct *vma;
2419 unsigned long start, end;
2420 unsigned long nr_pte_updates = 0;
2421 long pages, virtpages;
2422
2423 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2424
2425 work->next = work; /* protect against double add */
2426 /*
2427 * Who cares about NUMA placement when they're dying.
2428 *
2429 * NOTE: make sure not to dereference p->mm before this check,
2430 * exit_task_work() happens _after_ exit_mm() so we could be called
2431 * without p->mm even though we still had it when we enqueued this
2432 * work.
2433 */
2434 if (p->flags & PF_EXITING)
2435 return;
2436
2437 if (!mm->numa_next_scan) {
2438 mm->numa_next_scan = now +
2439 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2440 }
2441
2442 /*
2443 * Enforce maximal scan/migration frequency..
2444 */
2445 migrate = mm->numa_next_scan;
2446 if (time_before(now, migrate))
2447 return;
2448
2449 if (p->numa_scan_period == 0) {
2450 p->numa_scan_period_max = task_scan_max(p);
2451 p->numa_scan_period = task_scan_min(p);
2452 }
2453
2454 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2455 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2456 return;
2457
2458 /*
2459 * Delay this task enough that another task of this mm will likely win
2460 * the next time around.
2461 */ 2462 p->node_stamp += 2 * TICK_NSEC; 2463 2464 start = mm->numa_scan_offset; 2465 pages = sysctl_numa_balancing_scan_size; 2466 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ 2467 virtpages = pages * 8; /* Scan up to this much virtual space */ 2468 if (!pages) 2469 return; 2470 2471 2472 if (!down_read_trylock(&mm->mmap_sem)) 2473 return; 2474 vma = find_vma(mm, start); 2475 if (!vma) { 2476 reset_ptenuma_scan(p); 2477 start = 0; 2478 vma = mm->mmap; 2479 } 2480 for (; vma; vma = vma->vm_next) { 2481 if (!vma_migratable(vma) || !vma_policy_mof(vma) || 2482 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { 2483 continue; 2484 } 2485 2486 /* 2487 * Shared library pages mapped by multiple processes are not 2488 * migrated as it is expected they are cache replicated. Avoid 2489 * hinting faults in read-only file-backed mappings or the vdso 2490 * as migrating the pages will be of marginal benefit. 2491 */ 2492 if (!vma->vm_mm || 2493 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) 2494 continue; 2495 2496 /* 2497 * Skip inaccessible VMAs to avoid any confusion between 2498 * PROT_NONE and NUMA hinting ptes 2499 */ 2500 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) 2501 continue; 2502 2503 do { 2504 start = max(start, vma->vm_start); 2505 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); 2506 end = min(end, vma->vm_end); 2507 nr_pte_updates = change_prot_numa(vma, start, end); 2508 2509 /* 2510 * Try to scan sysctl_numa_balancing_size worth of 2511 * hpages that have at least one present PTE that 2512 * is not already pte-numa. If the VMA contains 2513 * areas that are unused or already full of prot_numa 2514 * PTEs, scan up to virtpages, to skip through those 2515 * areas faster. 2516 */ 2517 if (nr_pte_updates) 2518 pages -= (end - start) >> PAGE_SHIFT; 2519 virtpages -= (end - start) >> PAGE_SHIFT; 2520 2521 start = end; 2522 if (pages <= 0 || virtpages <= 0) 2523 goto out; 2524 2525 cond_resched(); 2526 } while (end != vma->vm_end); 2527 } 2528 2529 out: 2530 /* 2531 * It is possible to reach the end of the VMA list but the last few 2532 * VMAs are not guaranteed to the vma_migratable. If they are not, we 2533 * would find the !migratable VMA on the next scan but not reset the 2534 * scanner to the start so check it now. 2535 */ 2536 if (vma) 2537 mm->numa_scan_offset = start; 2538 else 2539 reset_ptenuma_scan(p); 2540 up_read(&mm->mmap_sem); 2541 2542 /* 2543 * Make sure tasks use at least 32x as much time to run other code 2544 * than they used here, to limit NUMA PTE scanning overhead to 3% max. 2545 * Usually update_task_scan_period slows down scanning enough; on an 2546 * overloaded system we need to limit overhead on a per task basis. 2547 */ 2548 if (unlikely(p->se.sum_exec_runtime != runtime)) { 2549 u64 diff = p->se.sum_exec_runtime - runtime; 2550 p->node_stamp += 32 * diff; 2551 } 2552 } 2553 2554 /* 2555 * Drive the periodic memory faults.. 2556 */ 2557 void task_tick_numa(struct rq *rq, struct task_struct *curr) 2558 { 2559 struct callback_head *work = &curr->numa_work; 2560 u64 period, now; 2561 2562 /* 2563 * We don't care about NUMA placement if we don't have memory. 2564 */ 2565 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) 2566 return; 2567 2568 /* 2569 * Using runtime rather than walltime has the dual advantage that 2570 * we (mostly) drive the selection from busy threads and that the 2571 * task needs to have done some actual work before we bother with 2572 * NUMA placement. 
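 *
 * As a rough example (illustrative numbers): with numa_scan_period at
 * 1000ms, the task has to accumulate a further full second of CPU time
 * beyond node_stamp before another task_numa_work() is queued, no matter
 * how much wall-clock time has passed in between.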
2573 */ 2574 now = curr->se.sum_exec_runtime; 2575 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; 2576 2577 if (now > curr->node_stamp + period) { 2578 if (!curr->node_stamp) 2579 curr->numa_scan_period = task_scan_min(curr); 2580 curr->node_stamp += period; 2581 2582 if (!time_before(jiffies, curr->mm->numa_next_scan)) { 2583 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */ 2584 task_work_add(curr, work, true); 2585 } 2586 } 2587 } 2588 2589 /* 2590 * Can a task be moved from prev_cpu to this_cpu without causing a load 2591 * imbalance that would trigger the load balancer? 2592 */ 2593 static inline bool numa_wake_affine(struct sched_domain *sd, 2594 struct task_struct *p, int this_cpu, 2595 int prev_cpu, int sync) 2596 { 2597 struct numa_stats prev_load, this_load; 2598 s64 this_eff_load, prev_eff_load; 2599 2600 update_numa_stats(&prev_load, cpu_to_node(prev_cpu)); 2601 update_numa_stats(&this_load, cpu_to_node(this_cpu)); 2602 2603 /* 2604 * If sync wakeup then subtract the (maximum possible) 2605 * effect of the currently running task from the load 2606 * of the current CPU: 2607 */ 2608 if (sync) { 2609 unsigned long current_load = task_h_load(current); 2610 2611 if (this_load.load > current_load) 2612 this_load.load -= current_load; 2613 else 2614 this_load.load = 0; 2615 } 2616 2617 /* 2618 * In low-load situations, where this_cpu's node is idle due to the 2619 * sync cause above having dropped this_load.load to 0, move the task. 2620 * Moving to an idle socket will not create a bad imbalance. 2621 * 2622 * Otherwise check if the nodes are near enough in load to allow this 2623 * task to be woken on this_cpu's node. 2624 */ 2625 if (this_load.load > 0) { 2626 unsigned long task_load = task_h_load(p); 2627 2628 this_eff_load = 100; 2629 this_eff_load *= prev_load.compute_capacity; 2630 2631 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; 2632 prev_eff_load *= this_load.compute_capacity; 2633 2634 this_eff_load *= this_load.load + task_load; 2635 prev_eff_load *= prev_load.load - task_load; 2636 2637 return this_eff_load <= prev_eff_load; 2638 } 2639 2640 return true; 2641 } 2642 #else 2643 static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2644 { 2645 } 2646 2647 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) 2648 { 2649 } 2650 2651 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) 2652 { 2653 } 2654 2655 #ifdef CONFIG_SMP 2656 static inline bool numa_wake_affine(struct sched_domain *sd, 2657 struct task_struct *p, int this_cpu, 2658 int prev_cpu, int sync) 2659 { 2660 return true; 2661 } 2662 #endif /* !SMP */ 2663 #endif /* CONFIG_NUMA_BALANCING */ 2664 2665 static void 2666 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2667 { 2668 update_load_add(&cfs_rq->load, se->load.weight); 2669 if (!parent_entity(se)) 2670 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); 2671 #ifdef CONFIG_SMP 2672 if (entity_is_task(se)) { 2673 struct rq *rq = rq_of(cfs_rq); 2674 2675 account_numa_enqueue(rq, task_of(se)); 2676 list_add(&se->group_node, &rq->cfs_tasks); 2677 } 2678 #endif 2679 cfs_rq->nr_running++; 2680 } 2681 2682 static void 2683 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) 2684 { 2685 update_load_sub(&cfs_rq->load, se->load.weight); 2686 if (!parent_entity(se)) 2687 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); 2688 #ifdef CONFIG_SMP 2689 if (entity_is_task(se)) { 2690 account_numa_dequeue(rq_of(cfs_rq), 
task_of(se)); 2691 list_del_init(&se->group_node); 2692 } 2693 #endif 2694 cfs_rq->nr_running--; 2695 } 2696 2697 #ifdef CONFIG_FAIR_GROUP_SCHED 2698 # ifdef CONFIG_SMP 2699 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) 2700 { 2701 long tg_weight, load, shares; 2702 2703 /* 2704 * This really should be: cfs_rq->avg.load_avg, but instead we use 2705 * cfs_rq->load.weight, which is its upper bound. This helps ramp up 2706 * the shares for small weight interactive tasks. 2707 */ 2708 load = scale_load_down(cfs_rq->load.weight); 2709 2710 tg_weight = atomic_long_read(&tg->load_avg); 2711 2712 /* Ensure tg_weight >= load */ 2713 tg_weight -= cfs_rq->tg_load_avg_contrib; 2714 tg_weight += load; 2715 2716 shares = (tg->shares * load); 2717 if (tg_weight) 2718 shares /= tg_weight; 2719 2720 /* 2721 * MIN_SHARES has to be unscaled here to support per-CPU partitioning 2722 * of a group with small tg->shares value. It is a floor value which is 2723 * assigned as a minimum load.weight to the sched_entity representing 2724 * the group on a CPU. 2725 * 2726 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 2727 * on an 8-core system with 8 tasks each runnable on one CPU shares has 2728 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In 2729 * case no task is runnable on a CPU MIN_SHARES=2 should be returned 2730 * instead of 0. 2731 */ 2732 if (shares < MIN_SHARES) 2733 shares = MIN_SHARES; 2734 if (shares > tg->shares) 2735 shares = tg->shares; 2736 2737 return shares; 2738 } 2739 # else /* CONFIG_SMP */ 2740 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) 2741 { 2742 return tg->shares; 2743 } 2744 # endif /* CONFIG_SMP */ 2745 2746 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, 2747 unsigned long weight) 2748 { 2749 if (se->on_rq) { 2750 /* commit outstanding execution time */ 2751 if (cfs_rq->curr == se) 2752 update_curr(cfs_rq); 2753 account_entity_dequeue(cfs_rq, se); 2754 } 2755 2756 update_load_set(&se->load, weight); 2757 2758 if (se->on_rq) 2759 account_entity_enqueue(cfs_rq, se); 2760 } 2761 2762 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); 2763 2764 static void update_cfs_shares(struct sched_entity *se) 2765 { 2766 struct cfs_rq *cfs_rq = group_cfs_rq(se); 2767 struct task_group *tg; 2768 long shares; 2769 2770 if (!cfs_rq) 2771 return; 2772 2773 if (throttled_hierarchy(cfs_rq)) 2774 return; 2775 2776 tg = cfs_rq->tg; 2777 2778 #ifndef CONFIG_SMP 2779 if (likely(se->load.weight == tg->shares)) 2780 return; 2781 #endif 2782 shares = calc_cfs_shares(cfs_rq, tg); 2783 2784 reweight_entity(cfs_rq_of(se), se, shares); 2785 } 2786 2787 #else /* CONFIG_FAIR_GROUP_SCHED */ 2788 static inline void update_cfs_shares(struct sched_entity *se) 2789 { 2790 } 2791 #endif /* CONFIG_FAIR_GROUP_SCHED */ 2792 2793 #ifdef CONFIG_SMP 2794 /* 2795 * Approximate: 2796 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period) 2797 */ 2798 static u64 decay_load(u64 val, u64 n) 2799 { 2800 unsigned int local_n; 2801 2802 if (unlikely(n > LOAD_AVG_PERIOD * 63)) 2803 return 0; 2804 2805 /* after bounds checking we can collapse to 32-bit */ 2806 local_n = n; 2807 2808 /* 2809 * As y^PERIOD = 1/2, we can combine 2810 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD) 2811 * With a look-up table which covers y^n (n<PERIOD) 2812 * 2813 * To achieve constant time decay_load. 
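 *
 * Rough worked example (assuming LOAD_AVG_PERIOD is 32, as elsewhere in
 * the scheduler): for n = 100, val is first shifted right by 100 / 32 = 3
 * (three halvings) and then multiplied by the table entry for
 * y^(100 % 32) = y^4 -- constant time regardless of n.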
2814 */ 2815 if (unlikely(local_n >= LOAD_AVG_PERIOD)) { 2816 val >>= local_n / LOAD_AVG_PERIOD; 2817 local_n %= LOAD_AVG_PERIOD; 2818 } 2819 2820 val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32); 2821 return val; 2822 } 2823 2824 static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3) 2825 { 2826 u32 c1, c2, c3 = d3; /* y^0 == 1 */ 2827 2828 /* 2829 * c1 = d1 y^p 2830 */ 2831 c1 = decay_load((u64)d1, periods); 2832 2833 /* 2834 * p-1 2835 * c2 = 1024 \Sum y^n 2836 * n=1 2837 * 2838 * inf inf 2839 * = 1024 ( \Sum y^n - \Sum y^n - y^0 ) 2840 * n=0 n=p 2841 */ 2842 c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024; 2843 2844 return c1 + c2 + c3; 2845 } 2846 2847 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 2848 2849 /* 2850 * Accumulate the three separate parts of the sum; d1 the remainder 2851 * of the last (incomplete) period, d2 the span of full periods and d3 2852 * the remainder of the (incomplete) current period. 2853 * 2854 * d1 d2 d3 2855 * ^ ^ ^ 2856 * | | | 2857 * |<->|<----------------->|<--->| 2858 * ... |---x---|------| ... |------|-----x (now) 2859 * 2860 * p-1 2861 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0 2862 * n=1 2863 * 2864 * = u y^p + (Step 1) 2865 * 2866 * p-1 2867 * d1 y^p + 1024 \Sum y^n + d3 y^0 (Step 2) 2868 * n=1 2869 */ 2870 static __always_inline u32 2871 accumulate_sum(u64 delta, int cpu, struct sched_avg *sa, 2872 unsigned long weight, int running, struct cfs_rq *cfs_rq) 2873 { 2874 unsigned long scale_freq, scale_cpu; 2875 u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */ 2876 u64 periods; 2877 2878 scale_freq = arch_scale_freq_capacity(NULL, cpu); 2879 scale_cpu = arch_scale_cpu_capacity(NULL, cpu); 2880 2881 delta += sa->period_contrib; 2882 periods = delta / 1024; /* A period is 1024us (~1ms) */ 2883 2884 /* 2885 * Step 1: decay old *_sum if we crossed period boundaries. 2886 */ 2887 if (periods) { 2888 sa->load_sum = decay_load(sa->load_sum, periods); 2889 if (cfs_rq) { 2890 cfs_rq->runnable_load_sum = 2891 decay_load(cfs_rq->runnable_load_sum, periods); 2892 } 2893 sa->util_sum = decay_load((u64)(sa->util_sum), periods); 2894 2895 /* 2896 * Step 2 2897 */ 2898 delta %= 1024; 2899 contrib = __accumulate_pelt_segments(periods, 2900 1024 - sa->period_contrib, delta); 2901 } 2902 sa->period_contrib = delta; 2903 2904 contrib = cap_scale(contrib, scale_freq); 2905 if (weight) { 2906 sa->load_sum += weight * contrib; 2907 if (cfs_rq) 2908 cfs_rq->runnable_load_sum += weight * contrib; 2909 } 2910 if (running) 2911 sa->util_sum += contrib * scale_cpu; 2912 2913 return periods; 2914 } 2915 2916 /* 2917 * We can represent the historical contribution to runnable average as the 2918 * coefficients of a geometric series. To do this we sub-divide our runnable 2919 * history into segments of approximately 1ms (1024us); label the segment that 2920 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g. 2921 * 2922 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ... 2923 * p0 p1 p2 2924 * (now) (~1ms ago) (~2ms ago) 2925 * 2926 * Let u_i denote the fraction of p_i that the entity was runnable. 2927 * 2928 * We then designate the fractions u_i as our co-efficients, yielding the 2929 * following representation of historical load: 2930 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ... 
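 * For instance (made-up history): an entity runnable for all of p_0 and
 * p_1 but idle during p_2 contributes u_0 + u_1*y + 0*y^2 = 1 + y, i.e.
 * just under two fully-busy periods' worth of weight.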
2931 * 2932 * We choose y based on the with of a reasonably scheduling period, fixing: 2933 * y^32 = 0.5 2934 * 2935 * This means that the contribution to load ~32ms ago (u_32) will be weighted 2936 * approximately half as much as the contribution to load within the last ms 2937 * (u_0). 2938 * 2939 * When a period "rolls over" and we have new u_0`, multiplying the previous 2940 * sum again by y is sufficient to update: 2941 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... ) 2942 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}] 2943 */ 2944 static __always_inline int 2945 ___update_load_avg(u64 now, int cpu, struct sched_avg *sa, 2946 unsigned long weight, int running, struct cfs_rq *cfs_rq) 2947 { 2948 u64 delta; 2949 2950 delta = now - sa->last_update_time; 2951 /* 2952 * This should only happen when time goes backwards, which it 2953 * unfortunately does during sched clock init when we swap over to TSC. 2954 */ 2955 if ((s64)delta < 0) { 2956 sa->last_update_time = now; 2957 return 0; 2958 } 2959 2960 /* 2961 * Use 1024ns as the unit of measurement since it's a reasonable 2962 * approximation of 1us and fast to compute. 2963 */ 2964 delta >>= 10; 2965 if (!delta) 2966 return 0; 2967 2968 sa->last_update_time += delta << 10; 2969 2970 /* 2971 * Now we know we crossed measurement unit boundaries. The *_avg 2972 * accrues by two steps: 2973 * 2974 * Step 1: accumulate *_sum since last_update_time. If we haven't 2975 * crossed period boundaries, finish. 2976 */ 2977 if (!accumulate_sum(delta, cpu, sa, weight, running, cfs_rq)) 2978 return 0; 2979 2980 /* 2981 * Step 2: update *_avg. 2982 */ 2983 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX - 1024 + sa->period_contrib); 2984 if (cfs_rq) { 2985 cfs_rq->runnable_load_avg = 2986 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX - 1024 + sa->period_contrib); 2987 } 2988 sa->util_avg = sa->util_sum / (LOAD_AVG_MAX - 1024 + sa->period_contrib); 2989 2990 return 1; 2991 } 2992 2993 static int 2994 __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se) 2995 { 2996 return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL); 2997 } 2998 2999 static int 3000 __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se) 3001 { 3002 return ___update_load_avg(now, cpu, &se->avg, 3003 se->on_rq * scale_load_down(se->load.weight), 3004 cfs_rq->curr == se, NULL); 3005 } 3006 3007 static int 3008 __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq) 3009 { 3010 return ___update_load_avg(now, cpu, &cfs_rq->avg, 3011 scale_load_down(cfs_rq->load.weight), 3012 cfs_rq->curr != NULL, cfs_rq); 3013 } 3014 3015 /* 3016 * Signed add and clamp on underflow. 3017 * 3018 * Explicitly do a load-store to ensure the intermediate value never hits 3019 * memory. This allows lockless observations without ever seeing the negative 3020 * values. 3021 */ 3022 #define add_positive(_ptr, _val) do { \ 3023 typeof(_ptr) ptr = (_ptr); \ 3024 typeof(_val) val = (_val); \ 3025 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 3026 \ 3027 res = var + val; \ 3028 \ 3029 if (val < 0 && res > var) \ 3030 res = 0; \ 3031 \ 3032 WRITE_ONCE(*ptr, res); \ 3033 } while (0) 3034 3035 #ifdef CONFIG_FAIR_GROUP_SCHED 3036 /** 3037 * update_tg_load_avg - update the tg's load avg 3038 * @cfs_rq: the cfs_rq whose avg changed 3039 * @force: update regardless of how small the difference 3040 * 3041 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. 
3042 * However, because tg->load_avg is a global value there are performance 3043 * considerations. 3044 * 3045 * In order to avoid having to look at the other cfs_rq's, we use a 3046 * differential update where we store the last value we propagated. This in 3047 * turn allows skipping updates if the differential is 'small'. 3048 * 3049 * Updating tg's load_avg is necessary before update_cfs_share(). 3050 */ 3051 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) 3052 { 3053 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; 3054 3055 /* 3056 * No need to update load_avg for root_task_group as it is not used. 3057 */ 3058 if (cfs_rq->tg == &root_task_group) 3059 return; 3060 3061 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { 3062 atomic_long_add(delta, &cfs_rq->tg->load_avg); 3063 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; 3064 } 3065 } 3066 3067 /* 3068 * Called within set_task_rq() right before setting a task's cpu. The 3069 * caller only guarantees p->pi_lock is held; no other assumptions, 3070 * including the state of rq->lock, should be made. 3071 */ 3072 void set_task_rq_fair(struct sched_entity *se, 3073 struct cfs_rq *prev, struct cfs_rq *next) 3074 { 3075 u64 p_last_update_time; 3076 u64 n_last_update_time; 3077 3078 if (!sched_feat(ATTACH_AGE_LOAD)) 3079 return; 3080 3081 /* 3082 * We are supposed to update the task to "current" time, then its up to 3083 * date and ready to go to new CPU/cfs_rq. But we have difficulty in 3084 * getting what current time is, so simply throw away the out-of-date 3085 * time. This will result in the wakee task is less decayed, but giving 3086 * the wakee more load sounds not bad. 3087 */ 3088 if (!(se->avg.last_update_time && prev)) 3089 return; 3090 3091 #ifndef CONFIG_64BIT 3092 { 3093 u64 p_last_update_time_copy; 3094 u64 n_last_update_time_copy; 3095 3096 do { 3097 p_last_update_time_copy = prev->load_last_update_time_copy; 3098 n_last_update_time_copy = next->load_last_update_time_copy; 3099 3100 smp_rmb(); 3101 3102 p_last_update_time = prev->avg.last_update_time; 3103 n_last_update_time = next->avg.last_update_time; 3104 3105 } while (p_last_update_time != p_last_update_time_copy || 3106 n_last_update_time != n_last_update_time_copy); 3107 } 3108 #else 3109 p_last_update_time = prev->avg.last_update_time; 3110 n_last_update_time = next->avg.last_update_time; 3111 #endif 3112 __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se); 3113 se->avg.last_update_time = n_last_update_time; 3114 } 3115 3116 /* Take into account change of utilization of a child task group */ 3117 static inline void 3118 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se) 3119 { 3120 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3121 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; 3122 3123 /* Nothing to update */ 3124 if (!delta) 3125 return; 3126 3127 /* Set new sched_entity's utilization */ 3128 se->avg.util_avg = gcfs_rq->avg.util_avg; 3129 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX; 3130 3131 /* Update parent cfs_rq utilization */ 3132 add_positive(&cfs_rq->avg.util_avg, delta); 3133 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX; 3134 } 3135 3136 /* Take into account change of load of a child task group */ 3137 static inline void 3138 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se) 3139 { 3140 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3141 long delta, load = gcfs_rq->avg.load_avg; 3142 3143 /* 3144 * If the load of group cfs_rq 
is null, the load of the 3145 * sched_entity will also be null so we can skip the formula 3146 */ 3147 if (load) { 3148 long tg_load; 3149 3150 /* Get tg's load and ensure tg_load > 0 */ 3151 tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1; 3152 3153 /* Ensure tg_load >= load and updated with current load*/ 3154 tg_load -= gcfs_rq->tg_load_avg_contrib; 3155 tg_load += load; 3156 3157 /* 3158 * We need to compute a correction term in the case that the 3159 * task group is consuming more CPU than a task of equal 3160 * weight. A task with a weight equals to tg->shares will have 3161 * a load less or equal to scale_load_down(tg->shares). 3162 * Similarly, the sched_entities that represent the task group 3163 * at parent level, can't have a load higher than 3164 * scale_load_down(tg->shares). And the Sum of sched_entities' 3165 * load must be <= scale_load_down(tg->shares). 3166 */ 3167 if (tg_load > scale_load_down(gcfs_rq->tg->shares)) { 3168 /* scale gcfs_rq's load into tg's shares*/ 3169 load *= scale_load_down(gcfs_rq->tg->shares); 3170 load /= tg_load; 3171 } 3172 } 3173 3174 delta = load - se->avg.load_avg; 3175 3176 /* Nothing to update */ 3177 if (!delta) 3178 return; 3179 3180 /* Set new sched_entity's load */ 3181 se->avg.load_avg = load; 3182 se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX; 3183 3184 /* Update parent cfs_rq load */ 3185 add_positive(&cfs_rq->avg.load_avg, delta); 3186 cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX; 3187 3188 /* 3189 * If the sched_entity is already enqueued, we also have to update the 3190 * runnable load avg. 3191 */ 3192 if (se->on_rq) { 3193 /* Update parent cfs_rq runnable_load_avg */ 3194 add_positive(&cfs_rq->runnable_load_avg, delta); 3195 cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX; 3196 } 3197 } 3198 3199 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) 3200 { 3201 cfs_rq->propagate_avg = 1; 3202 } 3203 3204 static inline int test_and_clear_tg_cfs_propagate(struct sched_entity *se) 3205 { 3206 struct cfs_rq *cfs_rq = group_cfs_rq(se); 3207 3208 if (!cfs_rq->propagate_avg) 3209 return 0; 3210 3211 cfs_rq->propagate_avg = 0; 3212 return 1; 3213 } 3214 3215 /* Update task and its cfs_rq load average */ 3216 static inline int propagate_entity_load_avg(struct sched_entity *se) 3217 { 3218 struct cfs_rq *cfs_rq; 3219 3220 if (entity_is_task(se)) 3221 return 0; 3222 3223 if (!test_and_clear_tg_cfs_propagate(se)) 3224 return 0; 3225 3226 cfs_rq = cfs_rq_of(se); 3227 3228 set_tg_cfs_propagate(cfs_rq); 3229 3230 update_tg_cfs_util(cfs_rq, se); 3231 update_tg_cfs_load(cfs_rq, se); 3232 3233 return 1; 3234 } 3235 3236 /* 3237 * Check if we need to update the load and the utilization of a blocked 3238 * group_entity: 3239 */ 3240 static inline bool skip_blocked_update(struct sched_entity *se) 3241 { 3242 struct cfs_rq *gcfs_rq = group_cfs_rq(se); 3243 3244 /* 3245 * If sched_entity still have not zero load or utilization, we have to 3246 * decay it: 3247 */ 3248 if (se->avg.load_avg || se->avg.util_avg) 3249 return false; 3250 3251 /* 3252 * If there is a pending propagation, we have to update the load and 3253 * the utilization of the sched_entity: 3254 */ 3255 if (gcfs_rq->propagate_avg) 3256 return false; 3257 3258 /* 3259 * Otherwise, the load and the utilization of the sched_entity is 3260 * already zero and there is no pending propagation, so it will be a 3261 * waste of time to try to decay it: 3262 */ 3263 return true; 3264 } 3265 3266 #else /* CONFIG_FAIR_GROUP_SCHED */ 3267 3268 
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} 3269 3270 static inline int propagate_entity_load_avg(struct sched_entity *se) 3271 { 3272 return 0; 3273 } 3274 3275 static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {} 3276 3277 #endif /* CONFIG_FAIR_GROUP_SCHED */ 3278 3279 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq) 3280 { 3281 if (&this_rq()->cfs == cfs_rq) { 3282 /* 3283 * There are a few boundary cases this might miss but it should 3284 * get called often enough that that should (hopefully) not be 3285 * a real problem -- added to that it only calls on the local 3286 * CPU, so if we enqueue remotely we'll miss an update, but 3287 * the next tick/schedule should update. 3288 * 3289 * It will not get called when we go idle, because the idle 3290 * thread is a different class (!fair), nor will the utilization 3291 * number include things like RT tasks. 3292 * 3293 * As is, the util number is not freq-invariant (we'd have to 3294 * implement arch_scale_freq_capacity() for that). 3295 * 3296 * See cpu_util(). 3297 */ 3298 cpufreq_update_util(rq_of(cfs_rq), 0); 3299 } 3300 } 3301 3302 /* 3303 * Unsigned subtract and clamp on underflow. 3304 * 3305 * Explicitly do a load-store to ensure the intermediate value never hits 3306 * memory. This allows lockless observations without ever seeing the negative 3307 * values. 3308 */ 3309 #define sub_positive(_ptr, _val) do { \ 3310 typeof(_ptr) ptr = (_ptr); \ 3311 typeof(*ptr) val = (_val); \ 3312 typeof(*ptr) res, var = READ_ONCE(*ptr); \ 3313 res = var - val; \ 3314 if (res > var) \ 3315 res = 0; \ 3316 WRITE_ONCE(*ptr, res); \ 3317 } while (0) 3318 3319 /** 3320 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages 3321 * @now: current time, as per cfs_rq_clock_task() 3322 * @cfs_rq: cfs_rq to update 3323 * @update_freq: should we call cfs_rq_util_change() or will the call do so 3324 * 3325 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) 3326 * avg. The immediate corollary is that all (fair) tasks must be attached, see 3327 * post_init_entity_util_avg(). 3328 * 3329 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example. 3330 * 3331 * Returns true if the load decayed or we removed load. 3332 * 3333 * Since both these conditions indicate a changed cfs_rq->avg.load we should 3334 * call update_tg_load_avg() when this function returns true. 
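 *
 * A typical caller therefore looks roughly like this (see update_load_avg()
 * below for the real thing, which also folds in propagation):
 *
 *   decayed = update_cfs_rq_load_avg(now, cfs_rq, true);
 *   if (decayed)
 *           update_tg_load_avg(cfs_rq, 0);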
3335 */ 3336 static inline int 3337 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) 3338 { 3339 struct sched_avg *sa = &cfs_rq->avg; 3340 int decayed, removed_load = 0, removed_util = 0; 3341 3342 if (atomic_long_read(&cfs_rq->removed_load_avg)) { 3343 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); 3344 sub_positive(&sa->load_avg, r); 3345 sub_positive(&sa->load_sum, r * LOAD_AVG_MAX); 3346 removed_load = 1; 3347 set_tg_cfs_propagate(cfs_rq); 3348 } 3349 3350 if (atomic_long_read(&cfs_rq->removed_util_avg)) { 3351 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0); 3352 sub_positive(&sa->util_avg, r); 3353 sub_positive(&sa->util_sum, r * LOAD_AVG_MAX); 3354 removed_util = 1; 3355 set_tg_cfs_propagate(cfs_rq); 3356 } 3357 3358 decayed = __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq); 3359 3360 #ifndef CONFIG_64BIT 3361 smp_wmb(); 3362 cfs_rq->load_last_update_time_copy = sa->last_update_time; 3363 #endif 3364 3365 if (update_freq && (decayed || removed_util)) 3366 cfs_rq_util_change(cfs_rq); 3367 3368 return decayed || removed_load; 3369 } 3370 3371 /* 3372 * Optional action to be done while updating the load average 3373 */ 3374 #define UPDATE_TG 0x1 3375 #define SKIP_AGE_LOAD 0x2 3376 3377 /* Update task and its cfs_rq load average */ 3378 static inline void update_load_avg(struct sched_entity *se, int flags) 3379 { 3380 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3381 u64 now = cfs_rq_clock_task(cfs_rq); 3382 struct rq *rq = rq_of(cfs_rq); 3383 int cpu = cpu_of(rq); 3384 int decayed; 3385 3386 /* 3387 * Track task load average for carrying it to new CPU after migrated, and 3388 * track group sched_entity load average for task_h_load calc in migration 3389 */ 3390 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) 3391 __update_load_avg_se(now, cpu, cfs_rq, se); 3392 3393 decayed = update_cfs_rq_load_avg(now, cfs_rq, true); 3394 decayed |= propagate_entity_load_avg(se); 3395 3396 if (decayed && (flags & UPDATE_TG)) 3397 update_tg_load_avg(cfs_rq, 0); 3398 } 3399 3400 /** 3401 * attach_entity_load_avg - attach this entity to its cfs_rq load avg 3402 * @cfs_rq: cfs_rq to attach to 3403 * @se: sched_entity to attach 3404 * 3405 * Must call update_cfs_rq_load_avg() before this, since we rely on 3406 * cfs_rq->avg.last_update_time being current. 3407 */ 3408 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3409 { 3410 se->avg.last_update_time = cfs_rq->avg.last_update_time; 3411 cfs_rq->avg.load_avg += se->avg.load_avg; 3412 cfs_rq->avg.load_sum += se->avg.load_sum; 3413 cfs_rq->avg.util_avg += se->avg.util_avg; 3414 cfs_rq->avg.util_sum += se->avg.util_sum; 3415 set_tg_cfs_propagate(cfs_rq); 3416 3417 cfs_rq_util_change(cfs_rq); 3418 } 3419 3420 /** 3421 * detach_entity_load_avg - detach this entity from its cfs_rq load avg 3422 * @cfs_rq: cfs_rq to detach from 3423 * @se: sched_entity to detach 3424 * 3425 * Must call update_cfs_rq_load_avg() before this, since we rely on 3426 * cfs_rq->avg.last_update_time being current. 
3427 */ 3428 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3429 { 3430 3431 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); 3432 sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum); 3433 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); 3434 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); 3435 set_tg_cfs_propagate(cfs_rq); 3436 3437 cfs_rq_util_change(cfs_rq); 3438 } 3439 3440 /* Add the load generated by se into cfs_rq's load average */ 3441 static inline void 3442 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3443 { 3444 struct sched_avg *sa = &se->avg; 3445 3446 cfs_rq->runnable_load_avg += sa->load_avg; 3447 cfs_rq->runnable_load_sum += sa->load_sum; 3448 3449 if (!sa->last_update_time) { 3450 attach_entity_load_avg(cfs_rq, se); 3451 update_tg_load_avg(cfs_rq, 0); 3452 } 3453 } 3454 3455 /* Remove the runnable load generated by se from cfs_rq's runnable load average */ 3456 static inline void 3457 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) 3458 { 3459 cfs_rq->runnable_load_avg = 3460 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0); 3461 cfs_rq->runnable_load_sum = 3462 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0); 3463 } 3464 3465 #ifndef CONFIG_64BIT 3466 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3467 { 3468 u64 last_update_time_copy; 3469 u64 last_update_time; 3470 3471 do { 3472 last_update_time_copy = cfs_rq->load_last_update_time_copy; 3473 smp_rmb(); 3474 last_update_time = cfs_rq->avg.last_update_time; 3475 } while (last_update_time != last_update_time_copy); 3476 3477 return last_update_time; 3478 } 3479 #else 3480 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) 3481 { 3482 return cfs_rq->avg.last_update_time; 3483 } 3484 #endif 3485 3486 /* 3487 * Synchronize entity load avg of dequeued entity without locking 3488 * the previous rq. 3489 */ 3490 void sync_entity_load_avg(struct sched_entity *se) 3491 { 3492 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3493 u64 last_update_time; 3494 3495 last_update_time = cfs_rq_last_update_time(cfs_rq); 3496 __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se); 3497 } 3498 3499 /* 3500 * Task first catches up with cfs_rq, and then subtract 3501 * itself from the cfs_rq (task must be off the queue now). 3502 */ 3503 void remove_entity_load_avg(struct sched_entity *se) 3504 { 3505 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3506 3507 /* 3508 * tasks cannot exit without having gone through wake_up_new_task() -> 3509 * post_init_entity_util_avg() which will have added things to the 3510 * cfs_rq, so we can remove unconditionally. 3511 * 3512 * Similarly for groups, they will have passed through 3513 * post_init_entity_util_avg() before unregister_sched_fair_group() 3514 * calls this. 
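 *
 * The removed load/util is only accumulated into the removed_* atomics
 * below; a later update_cfs_rq_load_avg() on that CPU folds it back into
 * the cfs_rq averages, which is what lets this run without holding the
 * previous rq's lock.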
3515 */ 3516 3517 sync_entity_load_avg(se); 3518 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg); 3519 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg); 3520 } 3521 3522 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq) 3523 { 3524 return cfs_rq->runnable_load_avg; 3525 } 3526 3527 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) 3528 { 3529 return cfs_rq->avg.load_avg; 3530 } 3531 3532 static int idle_balance(struct rq *this_rq, struct rq_flags *rf); 3533 3534 #else /* CONFIG_SMP */ 3535 3536 static inline int 3537 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) 3538 { 3539 return 0; 3540 } 3541 3542 #define UPDATE_TG 0x0 3543 #define SKIP_AGE_LOAD 0x0 3544 3545 static inline void update_load_avg(struct sched_entity *se, int not_used1) 3546 { 3547 cpufreq_update_util(rq_of(cfs_rq_of(se)), 0); 3548 } 3549 3550 static inline void 3551 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3552 static inline void 3553 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3554 static inline void remove_entity_load_avg(struct sched_entity *se) {} 3555 3556 static inline void 3557 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3558 static inline void 3559 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} 3560 3561 static inline int idle_balance(struct rq *rq, struct rq_flags *rf) 3562 { 3563 return 0; 3564 } 3565 3566 #endif /* CONFIG_SMP */ 3567 3568 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) 3569 { 3570 #ifdef CONFIG_SCHED_DEBUG 3571 s64 d = se->vruntime - cfs_rq->min_vruntime; 3572 3573 if (d < 0) 3574 d = -d; 3575 3576 if (d > 3*sysctl_sched_latency) 3577 schedstat_inc(cfs_rq->nr_spread_over); 3578 #endif 3579 } 3580 3581 static void 3582 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) 3583 { 3584 u64 vruntime = cfs_rq->min_vruntime; 3585 3586 /* 3587 * The 'current' period is already promised to the current tasks, 3588 * however the extra weight of the new task will slow them down a 3589 * little, place the new task so that it fits in the slot that 3590 * stays open at the end. 3591 */ 3592 if (initial && sched_feat(START_DEBIT)) 3593 vruntime += sched_vslice(cfs_rq, se); 3594 3595 /* sleeps up to a single latency don't count. */ 3596 if (!initial) { 3597 unsigned long thresh = sysctl_sched_latency; 3598 3599 /* 3600 * Halve their sleep time's effect, to allow 3601 * for a gentler effect of sleepers: 3602 */ 3603 if (sched_feat(GENTLE_FAIR_SLEEPERS)) 3604 thresh >>= 1; 3605 3606 vruntime -= thresh; 3607 } 3608 3609 /* ensure we never gain time by being placed backwards. 
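 * (e.g. a task that slept only briefly keeps its old, larger vruntime
 * rather than being pulled back to min_vruntime - thresh, so short sleeps
 * cannot be used to bank extra runtime).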
*/ 3610 se->vruntime = max_vruntime(se->vruntime, vruntime); 3611 } 3612 3613 static void check_enqueue_throttle(struct cfs_rq *cfs_rq); 3614 3615 static inline void check_schedstat_required(void) 3616 { 3617 #ifdef CONFIG_SCHEDSTATS 3618 if (schedstat_enabled()) 3619 return; 3620 3621 /* Force schedstat enabled if a dependent tracepoint is active */ 3622 if (trace_sched_stat_wait_enabled() || 3623 trace_sched_stat_sleep_enabled() || 3624 trace_sched_stat_iowait_enabled() || 3625 trace_sched_stat_blocked_enabled() || 3626 trace_sched_stat_runtime_enabled()) { 3627 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " 3628 "stat_blocked and stat_runtime require the " 3629 "kernel parameter schedstats=enable or " 3630 "kernel.sched_schedstats=1\n"); 3631 } 3632 #endif 3633 } 3634 3635 3636 /* 3637 * MIGRATION 3638 * 3639 * dequeue 3640 * update_curr() 3641 * update_min_vruntime() 3642 * vruntime -= min_vruntime 3643 * 3644 * enqueue 3645 * update_curr() 3646 * update_min_vruntime() 3647 * vruntime += min_vruntime 3648 * 3649 * this way the vruntime transition between RQs is done when both 3650 * min_vruntime are up-to-date. 3651 * 3652 * WAKEUP (remote) 3653 * 3654 * ->migrate_task_rq_fair() (p->state == TASK_WAKING) 3655 * vruntime -= min_vruntime 3656 * 3657 * enqueue 3658 * update_curr() 3659 * update_min_vruntime() 3660 * vruntime += min_vruntime 3661 * 3662 * this way we don't have the most up-to-date min_vruntime on the originating 3663 * CPU and an up-to-date min_vruntime on the destination CPU. 3664 */ 3665 3666 static void 3667 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3668 { 3669 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); 3670 bool curr = cfs_rq->curr == se; 3671 3672 /* 3673 * If we're the current task, we must renormalise before calling 3674 * update_curr(). 3675 */ 3676 if (renorm && curr) 3677 se->vruntime += cfs_rq->min_vruntime; 3678 3679 update_curr(cfs_rq); 3680 3681 /* 3682 * Otherwise, renormalise after, such that we're placed at the current 3683 * moment in time, instead of some random moment in the past. Being 3684 * placed in the past could significantly boost this task to the 3685 * fairness detriment of existing tasks. 3686 */ 3687 if (renorm && !curr) 3688 se->vruntime += cfs_rq->min_vruntime; 3689 3690 /* 3691 * When enqueuing a sched_entity, we must: 3692 * - Update loads to have both entity and cfs_rq synced with now. 
3693 * - Add its load to cfs_rq->runnable_avg 3694 * - For group_entity, update its weight to reflect the new share of 3695 * its group cfs_rq 3696 * - Add its new weight to cfs_rq->load.weight 3697 */ 3698 update_load_avg(se, UPDATE_TG); 3699 enqueue_entity_load_avg(cfs_rq, se); 3700 update_cfs_shares(se); 3701 account_entity_enqueue(cfs_rq, se); 3702 3703 if (flags & ENQUEUE_WAKEUP) 3704 place_entity(cfs_rq, se, 0); 3705 3706 check_schedstat_required(); 3707 update_stats_enqueue(cfs_rq, se, flags); 3708 check_spread(cfs_rq, se); 3709 if (!curr) 3710 __enqueue_entity(cfs_rq, se); 3711 se->on_rq = 1; 3712 3713 if (cfs_rq->nr_running == 1) { 3714 list_add_leaf_cfs_rq(cfs_rq); 3715 check_enqueue_throttle(cfs_rq); 3716 } 3717 } 3718 3719 static void __clear_buddies_last(struct sched_entity *se) 3720 { 3721 for_each_sched_entity(se) { 3722 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3723 if (cfs_rq->last != se) 3724 break; 3725 3726 cfs_rq->last = NULL; 3727 } 3728 } 3729 3730 static void __clear_buddies_next(struct sched_entity *se) 3731 { 3732 for_each_sched_entity(se) { 3733 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3734 if (cfs_rq->next != se) 3735 break; 3736 3737 cfs_rq->next = NULL; 3738 } 3739 } 3740 3741 static void __clear_buddies_skip(struct sched_entity *se) 3742 { 3743 for_each_sched_entity(se) { 3744 struct cfs_rq *cfs_rq = cfs_rq_of(se); 3745 if (cfs_rq->skip != se) 3746 break; 3747 3748 cfs_rq->skip = NULL; 3749 } 3750 } 3751 3752 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) 3753 { 3754 if (cfs_rq->last == se) 3755 __clear_buddies_last(se); 3756 3757 if (cfs_rq->next == se) 3758 __clear_buddies_next(se); 3759 3760 if (cfs_rq->skip == se) 3761 __clear_buddies_skip(se); 3762 } 3763 3764 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); 3765 3766 static void 3767 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 3768 { 3769 /* 3770 * Update run-time statistics of the 'current'. 3771 */ 3772 update_curr(cfs_rq); 3773 3774 /* 3775 * When dequeuing a sched_entity, we must: 3776 * - Update loads to have both entity and cfs_rq synced with now. 3777 * - Substract its load from the cfs_rq->runnable_avg. 3778 * - Substract its previous weight from cfs_rq->load.weight. 3779 * - For group entity, update its weight to reflect the new share 3780 * of its group cfs_rq. 3781 */ 3782 update_load_avg(se, UPDATE_TG); 3783 dequeue_entity_load_avg(cfs_rq, se); 3784 3785 update_stats_dequeue(cfs_rq, se, flags); 3786 3787 clear_buddies(cfs_rq, se); 3788 3789 if (se != cfs_rq->curr) 3790 __dequeue_entity(cfs_rq, se); 3791 se->on_rq = 0; 3792 account_entity_dequeue(cfs_rq, se); 3793 3794 /* 3795 * Normalize after update_curr(); which will also have moved 3796 * min_vruntime if @se is the one holding it back. But before doing 3797 * update_min_vruntime() again, which will discount @se's position and 3798 * can move min_vruntime forward still more. 3799 */ 3800 if (!(flags & DEQUEUE_SLEEP)) 3801 se->vruntime -= cfs_rq->min_vruntime; 3802 3803 /* return excess runtime on last dequeue */ 3804 return_cfs_rq_runtime(cfs_rq); 3805 3806 update_cfs_shares(se); 3807 3808 /* 3809 * Now advance min_vruntime if @se was the entity holding it back, 3810 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be 3811 * put back on, and if we advance min_vruntime, we'll be placed back 3812 * further than we started -- ie. we'll be penalized. 
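 *
 * (DEQUEUE_SAVE without DEQUEUE_MOVE is, for example, the transient
 * dequeue done when a task's parameters are changed in place, such as
 * set_user_nice(), which puts the task straight back onto this runqueue.)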
3813 */ 3814 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) 3815 update_min_vruntime(cfs_rq); 3816 } 3817 3818 /* 3819 * Preempt the current task with a newly woken task if needed: 3820 */ 3821 static void 3822 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) 3823 { 3824 unsigned long ideal_runtime, delta_exec; 3825 struct sched_entity *se; 3826 s64 delta; 3827 3828 ideal_runtime = sched_slice(cfs_rq, curr); 3829 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; 3830 if (delta_exec > ideal_runtime) { 3831 resched_curr(rq_of(cfs_rq)); 3832 /* 3833 * The current task ran long enough, ensure it doesn't get 3834 * re-elected due to buddy favours. 3835 */ 3836 clear_buddies(cfs_rq, curr); 3837 return; 3838 } 3839 3840 /* 3841 * Ensure that a task that missed wakeup preemption by a 3842 * narrow margin doesn't have to wait for a full slice. 3843 * This also mitigates buddy induced latencies under load. 3844 */ 3845 if (delta_exec < sysctl_sched_min_granularity) 3846 return; 3847 3848 se = __pick_first_entity(cfs_rq); 3849 delta = curr->vruntime - se->vruntime; 3850 3851 if (delta < 0) 3852 return; 3853 3854 if (delta > ideal_runtime) 3855 resched_curr(rq_of(cfs_rq)); 3856 } 3857 3858 static void 3859 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) 3860 { 3861 /* 'current' is not kept within the tree. */ 3862 if (se->on_rq) { 3863 /* 3864 * Any task has to be enqueued before it get to execute on 3865 * a CPU. So account for the time it spent waiting on the 3866 * runqueue. 3867 */ 3868 update_stats_wait_end(cfs_rq, se); 3869 __dequeue_entity(cfs_rq, se); 3870 update_load_avg(se, UPDATE_TG); 3871 } 3872 3873 update_stats_curr_start(cfs_rq, se); 3874 cfs_rq->curr = se; 3875 3876 /* 3877 * Track our maximum slice length, if the CPU's load is at 3878 * least twice that of our own weight (i.e. dont track it 3879 * when there are only lesser-weight tasks around): 3880 */ 3881 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { 3882 schedstat_set(se->statistics.slice_max, 3883 max((u64)schedstat_val(se->statistics.slice_max), 3884 se->sum_exec_runtime - se->prev_sum_exec_runtime)); 3885 } 3886 3887 se->prev_sum_exec_runtime = se->sum_exec_runtime; 3888 } 3889 3890 static int 3891 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); 3892 3893 /* 3894 * Pick the next process, keeping these things in mind, in this order: 3895 * 1) keep things fair between processes/task groups 3896 * 2) pick the "next" process, since someone really wants that to run 3897 * 3) pick the "last" process, for cache locality 3898 * 4) do not run the "skip" process, if something else is available 3899 */ 3900 static struct sched_entity * 3901 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) 3902 { 3903 struct sched_entity *left = __pick_first_entity(cfs_rq); 3904 struct sched_entity *se; 3905 3906 /* 3907 * If curr is set we have to see if its left of the leftmost entity 3908 * still in the tree, provided there was anything in the tree at all. 3909 */ 3910 if (!left || (curr && entity_before(curr, left))) 3911 left = curr; 3912 3913 se = left; /* ideally we run the leftmost entity */ 3914 3915 /* 3916 * Avoid running the skip buddy, if running something else can 3917 * be done without getting too unfair. 
3918 */ 3919 if (cfs_rq->skip == se) { 3920 struct sched_entity *second; 3921 3922 if (se == curr) { 3923 second = __pick_first_entity(cfs_rq); 3924 } else { 3925 second = __pick_next_entity(se); 3926 if (!second || (curr && entity_before(curr, second))) 3927 second = curr; 3928 } 3929 3930 if (second && wakeup_preempt_entity(second, left) < 1) 3931 se = second; 3932 } 3933 3934 /* 3935 * Prefer last buddy, try to return the CPU to a preempted task. 3936 */ 3937 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) 3938 se = cfs_rq->last; 3939 3940 /* 3941 * Someone really wants this to run. If it's not unfair, run it. 3942 */ 3943 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) 3944 se = cfs_rq->next; 3945 3946 clear_buddies(cfs_rq, se); 3947 3948 return se; 3949 } 3950 3951 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); 3952 3953 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) 3954 { 3955 /* 3956 * If still on the runqueue then deactivate_task() 3957 * was not called and update_curr() has to be done: 3958 */ 3959 if (prev->on_rq) 3960 update_curr(cfs_rq); 3961 3962 /* throttle cfs_rqs exceeding runtime */ 3963 check_cfs_rq_runtime(cfs_rq); 3964 3965 check_spread(cfs_rq, prev); 3966 3967 if (prev->on_rq) { 3968 update_stats_wait_start(cfs_rq, prev); 3969 /* Put 'current' back into the tree. */ 3970 __enqueue_entity(cfs_rq, prev); 3971 /* in !on_rq case, update occurred at dequeue */ 3972 update_load_avg(prev, 0); 3973 } 3974 cfs_rq->curr = NULL; 3975 } 3976 3977 static void 3978 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) 3979 { 3980 /* 3981 * Update run-time statistics of the 'current'. 3982 */ 3983 update_curr(cfs_rq); 3984 3985 /* 3986 * Ensure that runnable average is periodically updated. 3987 */ 3988 update_load_avg(curr, UPDATE_TG); 3989 update_cfs_shares(curr); 3990 3991 #ifdef CONFIG_SCHED_HRTICK 3992 /* 3993 * queued ticks are scheduled to match the slice, so don't bother 3994 * validating it and just reschedule. 3995 */ 3996 if (queued) { 3997 resched_curr(rq_of(cfs_rq)); 3998 return; 3999 } 4000 /* 4001 * don't let the period tick interfere with the hrtick preemption 4002 */ 4003 if (!sched_feat(DOUBLE_TICK) && 4004 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) 4005 return; 4006 #endif 4007 4008 if (cfs_rq->nr_running > 1) 4009 check_preempt_tick(cfs_rq, curr); 4010 } 4011 4012 4013 /************************************************** 4014 * CFS bandwidth control machinery 4015 */ 4016 4017 #ifdef CONFIG_CFS_BANDWIDTH 4018 4019 #ifdef HAVE_JUMP_LABEL 4020 static struct static_key __cfs_bandwidth_used; 4021 4022 static inline bool cfs_bandwidth_used(void) 4023 { 4024 return static_key_false(&__cfs_bandwidth_used); 4025 } 4026 4027 void cfs_bandwidth_usage_inc(void) 4028 { 4029 static_key_slow_inc(&__cfs_bandwidth_used); 4030 } 4031 4032 void cfs_bandwidth_usage_dec(void) 4033 { 4034 static_key_slow_dec(&__cfs_bandwidth_used); 4035 } 4036 #else /* HAVE_JUMP_LABEL */ 4037 static bool cfs_bandwidth_used(void) 4038 { 4039 return true; 4040 } 4041 4042 void cfs_bandwidth_usage_inc(void) {} 4043 void cfs_bandwidth_usage_dec(void) {} 4044 #endif /* HAVE_JUMP_LABEL */ 4045 4046 /* 4047 * default period for cfs group bandwidth. 
4048 * default: 0.1s, units: nanoseconds 4049 */ 4050 static inline u64 default_cfs_period(void) 4051 { 4052 return 100000000ULL; 4053 } 4054 4055 static inline u64 sched_cfs_bandwidth_slice(void) 4056 { 4057 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; 4058 } 4059 4060 /* 4061 * Replenish runtime according to assigned quota and update expiration time. 4062 * We use sched_clock_cpu directly instead of rq->clock to avoid adding 4063 * additional synchronization around rq->lock. 4064 * 4065 * requires cfs_b->lock 4066 */ 4067 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) 4068 { 4069 u64 now; 4070 4071 if (cfs_b->quota == RUNTIME_INF) 4072 return; 4073 4074 now = sched_clock_cpu(smp_processor_id()); 4075 cfs_b->runtime = cfs_b->quota; 4076 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); 4077 } 4078 4079 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 4080 { 4081 return &tg->cfs_bandwidth; 4082 } 4083 4084 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */ 4085 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) 4086 { 4087 if (unlikely(cfs_rq->throttle_count)) 4088 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time; 4089 4090 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; 4091 } 4092 4093 /* returns 0 on failure to allocate runtime */ 4094 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4095 { 4096 struct task_group *tg = cfs_rq->tg; 4097 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); 4098 u64 amount = 0, min_amount, expires; 4099 4100 /* note: this is a positive sum as runtime_remaining <= 0 */ 4101 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; 4102 4103 raw_spin_lock(&cfs_b->lock); 4104 if (cfs_b->quota == RUNTIME_INF) 4105 amount = min_amount; 4106 else { 4107 start_cfs_bandwidth(cfs_b); 4108 4109 if (cfs_b->runtime > 0) { 4110 amount = min(cfs_b->runtime, min_amount); 4111 cfs_b->runtime -= amount; 4112 cfs_b->idle = 0; 4113 } 4114 } 4115 expires = cfs_b->runtime_expires; 4116 raw_spin_unlock(&cfs_b->lock); 4117 4118 cfs_rq->runtime_remaining += amount; 4119 /* 4120 * we may have advanced our local expiration to account for allowed 4121 * spread between our sched_clock and the one on which runtime was 4122 * issued. 4123 */ 4124 if ((s64)(expires - cfs_rq->runtime_expires) > 0) 4125 cfs_rq->runtime_expires = expires; 4126 4127 return cfs_rq->runtime_remaining > 0; 4128 } 4129 4130 /* 4131 * Note: This depends on the synchronization provided by sched_clock and the 4132 * fact that rq->clock snapshots this value. 4133 */ 4134 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4135 { 4136 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4137 4138 /* if the deadline is ahead of our clock, nothing to do */ 4139 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0)) 4140 return; 4141 4142 if (cfs_rq->runtime_remaining < 0) 4143 return; 4144 4145 /* 4146 * If the local deadline has passed we have to consider the 4147 * possibility that our sched_clock is 'fast' and the global deadline 4148 * has not truly expired. 4149 * 4150 * Fortunately we can check determine whether this the case by checking 4151 * whether the global deadline has advanced. It is valid to compare 4152 * cfs_b->runtime_expires without any locks since we only care about 4153 * exact equality, so a partial write will still work. 
4154 */ 4155 4156 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) { 4157 /* extend local deadline, drift is bounded above by 2 ticks */ 4158 cfs_rq->runtime_expires += TICK_NSEC; 4159 } else { 4160 /* global deadline is ahead, expiration has passed */ 4161 cfs_rq->runtime_remaining = 0; 4162 } 4163 } 4164 4165 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4166 { 4167 /* dock delta_exec before expiring quota (as it could span periods) */ 4168 cfs_rq->runtime_remaining -= delta_exec; 4169 expire_cfs_rq_runtime(cfs_rq); 4170 4171 if (likely(cfs_rq->runtime_remaining > 0)) 4172 return; 4173 4174 /* 4175 * if we're unable to extend our runtime we resched so that the active 4176 * hierarchy can be throttled 4177 */ 4178 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) 4179 resched_curr(rq_of(cfs_rq)); 4180 } 4181 4182 static __always_inline 4183 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) 4184 { 4185 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 4186 return; 4187 4188 __account_cfs_rq_runtime(cfs_rq, delta_exec); 4189 } 4190 4191 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 4192 { 4193 return cfs_bandwidth_used() && cfs_rq->throttled; 4194 } 4195 4196 /* check whether cfs_rq, or any parent, is throttled */ 4197 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 4198 { 4199 return cfs_bandwidth_used() && cfs_rq->throttle_count; 4200 } 4201 4202 /* 4203 * Ensure that neither of the group entities corresponding to src_cpu or 4204 * dest_cpu are members of a throttled hierarchy when performing group 4205 * load-balance operations. 4206 */ 4207 static inline int throttled_lb_pair(struct task_group *tg, 4208 int src_cpu, int dest_cpu) 4209 { 4210 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; 4211 4212 src_cfs_rq = tg->cfs_rq[src_cpu]; 4213 dest_cfs_rq = tg->cfs_rq[dest_cpu]; 4214 4215 return throttled_hierarchy(src_cfs_rq) || 4216 throttled_hierarchy(dest_cfs_rq); 4217 } 4218 4219 /* updated child weight may affect parent so we have to do this bottom up */ 4220 static int tg_unthrottle_up(struct task_group *tg, void *data) 4221 { 4222 struct rq *rq = data; 4223 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4224 4225 cfs_rq->throttle_count--; 4226 if (!cfs_rq->throttle_count) { 4227 /* adjust cfs_rq_clock_task() */ 4228 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - 4229 cfs_rq->throttled_clock_task; 4230 } 4231 4232 return 0; 4233 } 4234 4235 static int tg_throttle_down(struct task_group *tg, void *data) 4236 { 4237 struct rq *rq = data; 4238 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4239 4240 /* group is entering throttled state, stop time */ 4241 if (!cfs_rq->throttle_count) 4242 cfs_rq->throttled_clock_task = rq_clock_task(rq); 4243 cfs_rq->throttle_count++; 4244 4245 return 0; 4246 } 4247 4248 static void throttle_cfs_rq(struct cfs_rq *cfs_rq) 4249 { 4250 struct rq *rq = rq_of(cfs_rq); 4251 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4252 struct sched_entity *se; 4253 long task_delta, dequeue = 1; 4254 bool empty; 4255 4256 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; 4257 4258 /* freeze hierarchy runnable averages while throttled */ 4259 rcu_read_lock(); 4260 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); 4261 rcu_read_unlock(); 4262 4263 task_delta = cfs_rq->h_nr_running; 4264 for_each_sched_entity(se) { 4265 struct cfs_rq *qcfs_rq = cfs_rq_of(se); 4266 /* throttled entity or throttle-on-deactivate */ 4267 if (!se->on_rq) 4268 break; 4269 4270 
if (dequeue) 4271 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); 4272 qcfs_rq->h_nr_running -= task_delta; 4273 4274 if (qcfs_rq->load.weight) 4275 dequeue = 0; 4276 } 4277 4278 if (!se) 4279 sub_nr_running(rq, task_delta); 4280 4281 cfs_rq->throttled = 1; 4282 cfs_rq->throttled_clock = rq_clock(rq); 4283 raw_spin_lock(&cfs_b->lock); 4284 empty = list_empty(&cfs_b->throttled_cfs_rq); 4285 4286 /* 4287 * Add to the _head_ of the list, so that an already-started 4288 * distribute_cfs_runtime will not see us 4289 */ 4290 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); 4291 4292 /* 4293 * If we're the first throttled task, make sure the bandwidth 4294 * timer is running. 4295 */ 4296 if (empty) 4297 start_cfs_bandwidth(cfs_b); 4298 4299 raw_spin_unlock(&cfs_b->lock); 4300 } 4301 4302 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) 4303 { 4304 struct rq *rq = rq_of(cfs_rq); 4305 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4306 struct sched_entity *se; 4307 int enqueue = 1; 4308 long task_delta; 4309 4310 se = cfs_rq->tg->se[cpu_of(rq)]; 4311 4312 cfs_rq->throttled = 0; 4313 4314 update_rq_clock(rq); 4315 4316 raw_spin_lock(&cfs_b->lock); 4317 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; 4318 list_del_rcu(&cfs_rq->throttled_list); 4319 raw_spin_unlock(&cfs_b->lock); 4320 4321 /* update hierarchical throttle state */ 4322 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); 4323 4324 if (!cfs_rq->load.weight) 4325 return; 4326 4327 task_delta = cfs_rq->h_nr_running; 4328 for_each_sched_entity(se) { 4329 if (se->on_rq) 4330 enqueue = 0; 4331 4332 cfs_rq = cfs_rq_of(se); 4333 if (enqueue) 4334 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); 4335 cfs_rq->h_nr_running += task_delta; 4336 4337 if (cfs_rq_throttled(cfs_rq)) 4338 break; 4339 } 4340 4341 if (!se) 4342 add_nr_running(rq, task_delta); 4343 4344 /* determine whether we need to wake up potentially idle cpu */ 4345 if (rq->curr == rq->idle && rq->cfs.nr_running) 4346 resched_curr(rq); 4347 } 4348 4349 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, 4350 u64 remaining, u64 expires) 4351 { 4352 struct cfs_rq *cfs_rq; 4353 u64 runtime; 4354 u64 starting_runtime = remaining; 4355 4356 rcu_read_lock(); 4357 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, 4358 throttled_list) { 4359 struct rq *rq = rq_of(cfs_rq); 4360 struct rq_flags rf; 4361 4362 rq_lock(rq, &rf); 4363 if (!cfs_rq_throttled(cfs_rq)) 4364 goto next; 4365 4366 runtime = -cfs_rq->runtime_remaining + 1; 4367 if (runtime > remaining) 4368 runtime = remaining; 4369 remaining -= runtime; 4370 4371 cfs_rq->runtime_remaining += runtime; 4372 cfs_rq->runtime_expires = expires; 4373 4374 /* we check whether we're throttled above */ 4375 if (cfs_rq->runtime_remaining > 0) 4376 unthrottle_cfs_rq(cfs_rq); 4377 4378 next: 4379 rq_unlock(rq, &rf); 4380 4381 if (!remaining) 4382 break; 4383 } 4384 rcu_read_unlock(); 4385 4386 return starting_runtime - remaining; 4387 } 4388 4389 /* 4390 * Responsible for refilling a task_group's bandwidth and unthrottling its 4391 * cfs_rqs as appropriate. If there has been no activity within the last 4392 * period the timer is deactivated until scheduling resumes; cfs_b->idle is 4393 * used to track this state. 
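 *
 * For example (illustrative numbers): a group given a 25ms quota per
 * 100ms period (e.g. cpu.cfs_quota_us = 25000 with the default
 * cpu.cfs_period_us = 100000 in the cgroup cpu controller) can consume
 * at most 25ms of CPU time per period in total; each expiry of the
 * period timer refills cfs_b->runtime to the quota and
 * distribute_cfs_runtime() hands part of it back to throttled cfs_rqs,
 * while running cfs_rqs keep pulling it in
 * sched_cfs_bandwidth_slice()-sized chunks via assign_cfs_rq_runtime().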
4394 */ 4395 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) 4396 { 4397 u64 runtime, runtime_expires; 4398 int throttled; 4399 4400 /* no need to continue the timer with no bandwidth constraint */ 4401 if (cfs_b->quota == RUNTIME_INF) 4402 goto out_deactivate; 4403 4404 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 4405 cfs_b->nr_periods += overrun; 4406 4407 /* 4408 * idle depends on !throttled (for the case of a large deficit), and if 4409 * we're going inactive then everything else can be deferred 4410 */ 4411 if (cfs_b->idle && !throttled) 4412 goto out_deactivate; 4413 4414 __refill_cfs_bandwidth_runtime(cfs_b); 4415 4416 if (!throttled) { 4417 /* mark as potentially idle for the upcoming period */ 4418 cfs_b->idle = 1; 4419 return 0; 4420 } 4421 4422 /* account preceding periods in which throttling occurred */ 4423 cfs_b->nr_throttled += overrun; 4424 4425 runtime_expires = cfs_b->runtime_expires; 4426 4427 /* 4428 * This check is repeated as we are holding onto the new bandwidth while 4429 * we unthrottle. This can potentially race with an unthrottled group 4430 * trying to acquire new bandwidth from the global pool. This can result 4431 * in us over-using our runtime if it is all used during this loop, but 4432 * only by limited amounts in that extreme case. 4433 */ 4434 while (throttled && cfs_b->runtime > 0) { 4435 runtime = cfs_b->runtime; 4436 raw_spin_unlock(&cfs_b->lock); 4437 /* we can't nest cfs_b->lock while distributing bandwidth */ 4438 runtime = distribute_cfs_runtime(cfs_b, runtime, 4439 runtime_expires); 4440 raw_spin_lock(&cfs_b->lock); 4441 4442 throttled = !list_empty(&cfs_b->throttled_cfs_rq); 4443 4444 cfs_b->runtime -= min(runtime, cfs_b->runtime); 4445 } 4446 4447 /* 4448 * While we are ensured activity in the period following an 4449 * unthrottle, this also covers the case in which the new bandwidth is 4450 * insufficient to cover the existing bandwidth deficit. (Forcing the 4451 * timer to remain active while there are any throttled entities.) 4452 */ 4453 cfs_b->idle = 0; 4454 4455 return 0; 4456 4457 out_deactivate: 4458 return 1; 4459 } 4460 4461 /* a cfs_rq won't donate quota below this amount */ 4462 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; 4463 /* minimum remaining period time to redistribute slack quota */ 4464 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; 4465 /* how long we wait to gather additional slack before distributing */ 4466 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; 4467 4468 /* 4469 * Are we near the end of the current quota period? 4470 * 4471 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the 4472 * hrtimer base being cleared by hrtimer_start. In the case of 4473 * migrate_hrtimers, base is never cleared, so we are fine. 4474 */ 4475 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) 4476 { 4477 struct hrtimer *refresh_timer = &cfs_b->period_timer; 4478 u64 remaining; 4479 4480 /* if the call-back is running a quota refresh is already occurring */ 4481 if (hrtimer_callback_running(refresh_timer)) 4482 return 1; 4483 4484 /* is a quota refresh about to occur? 
*/ 4485 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); 4486 if (remaining < min_expire) 4487 return 1; 4488 4489 return 0; 4490 } 4491 4492 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) 4493 { 4494 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; 4495 4496 /* if there's a quota refresh soon don't bother with slack */ 4497 if (runtime_refresh_within(cfs_b, min_left)) 4498 return; 4499 4500 hrtimer_start(&cfs_b->slack_timer, 4501 ns_to_ktime(cfs_bandwidth_slack_period), 4502 HRTIMER_MODE_REL); 4503 } 4504 4505 /* we know any runtime found here is valid as update_curr() precedes return */ 4506 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4507 { 4508 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); 4509 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; 4510 4511 if (slack_runtime <= 0) 4512 return; 4513 4514 raw_spin_lock(&cfs_b->lock); 4515 if (cfs_b->quota != RUNTIME_INF && 4516 cfs_rq->runtime_expires == cfs_b->runtime_expires) { 4517 cfs_b->runtime += slack_runtime; 4518 4519 /* we are under rq->lock, defer unthrottling using a timer */ 4520 if (cfs_b->runtime > sched_cfs_bandwidth_slice() && 4521 !list_empty(&cfs_b->throttled_cfs_rq)) 4522 start_cfs_slack_bandwidth(cfs_b); 4523 } 4524 raw_spin_unlock(&cfs_b->lock); 4525 4526 /* even if it's not valid for return we don't want to try again */ 4527 cfs_rq->runtime_remaining -= slack_runtime; 4528 } 4529 4530 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4531 { 4532 if (!cfs_bandwidth_used()) 4533 return; 4534 4535 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) 4536 return; 4537 4538 __return_cfs_rq_runtime(cfs_rq); 4539 } 4540 4541 /* 4542 * This is done with a timer (instead of inline with bandwidth return) since 4543 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. 4544 */ 4545 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) 4546 { 4547 u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); 4548 u64 expires; 4549 4550 /* confirm we're still not at a refresh boundary */ 4551 raw_spin_lock(&cfs_b->lock); 4552 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { 4553 raw_spin_unlock(&cfs_b->lock); 4554 return; 4555 } 4556 4557 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) 4558 runtime = cfs_b->runtime; 4559 4560 expires = cfs_b->runtime_expires; 4561 raw_spin_unlock(&cfs_b->lock); 4562 4563 if (!runtime) 4564 return; 4565 4566 runtime = distribute_cfs_runtime(cfs_b, runtime, expires); 4567 4568 raw_spin_lock(&cfs_b->lock); 4569 if (expires == cfs_b->runtime_expires) 4570 cfs_b->runtime -= min(runtime, cfs_b->runtime); 4571 raw_spin_unlock(&cfs_b->lock); 4572 } 4573 4574 /* 4575 * When a group wakes up we want to make sure that its quota is not already 4576 * expired/exceeded, otherwise it may be allowed to steal additional ticks of 4577 * runtime as update_curr() throttling can not not trigger until it's on-rq. 
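 *
 * Concretely: a cfs_rq that ran out of runtime while it was dequeued
 * (and so was never throttled) would otherwise run on stale quota from
 * enqueue until the next update_curr(); the zero-delta
 * account_cfs_rq_runtime() call below forces the expire/assign path so
 * such a group is throttled right at enqueue instead.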
4578 */ 4579 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) 4580 { 4581 if (!cfs_bandwidth_used()) 4582 return; 4583 4584 /* an active group must be handled by the update_curr()->put() path */ 4585 if (!cfs_rq->runtime_enabled || cfs_rq->curr) 4586 return; 4587 4588 /* ensure the group is not already throttled */ 4589 if (cfs_rq_throttled(cfs_rq)) 4590 return; 4591 4592 /* update runtime allocation */ 4593 account_cfs_rq_runtime(cfs_rq, 0); 4594 if (cfs_rq->runtime_remaining <= 0) 4595 throttle_cfs_rq(cfs_rq); 4596 } 4597 4598 static void sync_throttle(struct task_group *tg, int cpu) 4599 { 4600 struct cfs_rq *pcfs_rq, *cfs_rq; 4601 4602 if (!cfs_bandwidth_used()) 4603 return; 4604 4605 if (!tg->parent) 4606 return; 4607 4608 cfs_rq = tg->cfs_rq[cpu]; 4609 pcfs_rq = tg->parent->cfs_rq[cpu]; 4610 4611 cfs_rq->throttle_count = pcfs_rq->throttle_count; 4612 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); 4613 } 4614 4615 /* conditionally throttle active cfs_rq's from put_prev_entity() */ 4616 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4617 { 4618 if (!cfs_bandwidth_used()) 4619 return false; 4620 4621 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) 4622 return false; 4623 4624 /* 4625 * it's possible for a throttled entity to be forced into a running 4626 * state (e.g. set_curr_task), in this case we're finished. 4627 */ 4628 if (cfs_rq_throttled(cfs_rq)) 4629 return true; 4630 4631 throttle_cfs_rq(cfs_rq); 4632 return true; 4633 } 4634 4635 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) 4636 { 4637 struct cfs_bandwidth *cfs_b = 4638 container_of(timer, struct cfs_bandwidth, slack_timer); 4639 4640 do_sched_cfs_slack_timer(cfs_b); 4641 4642 return HRTIMER_NORESTART; 4643 } 4644 4645 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) 4646 { 4647 struct cfs_bandwidth *cfs_b = 4648 container_of(timer, struct cfs_bandwidth, period_timer); 4649 int overrun; 4650 int idle = 0; 4651 4652 raw_spin_lock(&cfs_b->lock); 4653 for (;;) { 4654 overrun = hrtimer_forward_now(timer, cfs_b->period); 4655 if (!overrun) 4656 break; 4657 4658 idle = do_sched_cfs_period_timer(cfs_b, overrun); 4659 } 4660 if (idle) 4661 cfs_b->period_active = 0; 4662 raw_spin_unlock(&cfs_b->lock); 4663 4664 return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; 4665 } 4666 4667 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4668 { 4669 raw_spin_lock_init(&cfs_b->lock); 4670 cfs_b->runtime = 0; 4671 cfs_b->quota = RUNTIME_INF; 4672 cfs_b->period = ns_to_ktime(default_cfs_period()); 4673 4674 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); 4675 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); 4676 cfs_b->period_timer.function = sched_cfs_period_timer; 4677 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4678 cfs_b->slack_timer.function = sched_cfs_slack_timer; 4679 } 4680 4681 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) 4682 { 4683 cfs_rq->runtime_enabled = 0; 4684 INIT_LIST_HEAD(&cfs_rq->throttled_list); 4685 } 4686 4687 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4688 { 4689 lockdep_assert_held(&cfs_b->lock); 4690 4691 if (!cfs_b->period_active) { 4692 cfs_b->period_active = 1; 4693 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); 4694 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); 4695 } 4696 } 4697 4698 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4699 { 4700 /* init_cfs_bandwidth() was not called */ 4701 if (!cfs_b->throttled_cfs_rq.next) 4702 return; 4703 4704 hrtimer_cancel(&cfs_b->period_timer); 4705 hrtimer_cancel(&cfs_b->slack_timer); 4706 } 4707 4708 /* 4709 * Both these cpu hotplug callbacks race against unregister_fair_sched_group() 4710 * 4711 * The race is harmless, since modifying bandwidth settings of unhooked group 4712 * bits doesn't do much. 4713 */ 4714 4715 /* cpu online calback */ 4716 static void __maybe_unused update_runtime_enabled(struct rq *rq) 4717 { 4718 struct task_group *tg; 4719 4720 lockdep_assert_held(&rq->lock); 4721 4722 rcu_read_lock(); 4723 list_for_each_entry_rcu(tg, &task_groups, list) { 4724 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 4725 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4726 4727 raw_spin_lock(&cfs_b->lock); 4728 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; 4729 raw_spin_unlock(&cfs_b->lock); 4730 } 4731 rcu_read_unlock(); 4732 } 4733 4734 /* cpu offline callback */ 4735 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) 4736 { 4737 struct task_group *tg; 4738 4739 lockdep_assert_held(&rq->lock); 4740 4741 rcu_read_lock(); 4742 list_for_each_entry_rcu(tg, &task_groups, list) { 4743 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; 4744 4745 if (!cfs_rq->runtime_enabled) 4746 continue; 4747 4748 /* 4749 * clock_task is not advancing so we just need to make sure 4750 * there's some valid quota amount 4751 */ 4752 cfs_rq->runtime_remaining = 1; 4753 /* 4754 * Offline rq is schedulable till cpu is completely disabled 4755 * in take_cpu_down(), so we prevent new cfs throttling here. 
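 *
 * With runtime_enabled cleared, account_cfs_rq_runtime() becomes a
 * no-op for this cfs_rq and the positive runtime_remaining set above
 * can no longer be consumed away; any group that is already throttled
 * is released just below so its tasks can be migrated off the dying
 * CPU.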
4756 */ 4757 cfs_rq->runtime_enabled = 0; 4758 4759 if (cfs_rq_throttled(cfs_rq)) 4760 unthrottle_cfs_rq(cfs_rq); 4761 } 4762 rcu_read_unlock(); 4763 } 4764 4765 #else /* CONFIG_CFS_BANDWIDTH */ 4766 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) 4767 { 4768 return rq_clock_task(rq_of(cfs_rq)); 4769 } 4770 4771 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} 4772 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } 4773 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 4774 static inline void sync_throttle(struct task_group *tg, int cpu) {} 4775 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 4776 4777 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 4778 { 4779 return 0; 4780 } 4781 4782 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) 4783 { 4784 return 0; 4785 } 4786 4787 static inline int throttled_lb_pair(struct task_group *tg, 4788 int src_cpu, int dest_cpu) 4789 { 4790 return 0; 4791 } 4792 4793 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 4794 4795 #ifdef CONFIG_FAIR_GROUP_SCHED 4796 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 4797 #endif 4798 4799 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) 4800 { 4801 return NULL; 4802 } 4803 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} 4804 static inline void update_runtime_enabled(struct rq *rq) {} 4805 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} 4806 4807 #endif /* CONFIG_CFS_BANDWIDTH */ 4808 4809 /************************************************** 4810 * CFS operations on tasks: 4811 */ 4812 4813 #ifdef CONFIG_SCHED_HRTICK 4814 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) 4815 { 4816 struct sched_entity *se = &p->se; 4817 struct cfs_rq *cfs_rq = cfs_rq_of(se); 4818 4819 SCHED_WARN_ON(task_rq(p) != rq); 4820 4821 if (rq->cfs.h_nr_running > 1) { 4822 u64 slice = sched_slice(cfs_rq, se); 4823 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; 4824 s64 delta = slice - ran; 4825 4826 if (delta < 0) { 4827 if (rq->curr == p) 4828 resched_curr(rq); 4829 return; 4830 } 4831 hrtick_start(rq, delta); 4832 } 4833 } 4834 4835 /* 4836 * called from enqueue/dequeue and updates the hrtick when the 4837 * current task is from our class and nr_running is low enough 4838 * to matter. 4839 */ 4840 static void hrtick_update(struct rq *rq) 4841 { 4842 struct task_struct *curr = rq->curr; 4843 4844 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) 4845 return; 4846 4847 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) 4848 hrtick_start_fair(rq, curr); 4849 } 4850 #else /* !CONFIG_SCHED_HRTICK */ 4851 static inline void 4852 hrtick_start_fair(struct rq *rq, struct task_struct *p) 4853 { 4854 } 4855 4856 static inline void hrtick_update(struct rq *rq) 4857 { 4858 } 4859 #endif 4860 4861 /* 4862 * The enqueue_task method is called before nr_running is 4863 * increased. Here we update the fair scheduling stats and 4864 * then put the task into the rbtree: 4865 */ 4866 static void 4867 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) 4868 { 4869 struct cfs_rq *cfs_rq; 4870 struct sched_entity *se = &p->se; 4871 4872 /* 4873 * If in_iowait is set, the code below may not trigger any cpufreq 4874 * utilization updates, so do it here explicitly with the IOWAIT flag 4875 * passed. 
4876 */ 4877 if (p->in_iowait) 4878 cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT); 4879 4880 for_each_sched_entity(se) { 4881 if (se->on_rq) 4882 break; 4883 cfs_rq = cfs_rq_of(se); 4884 enqueue_entity(cfs_rq, se, flags); 4885 4886 /* 4887 * end evaluation on encountering a throttled cfs_rq 4888 * 4889 * note: in the case of encountering a throttled cfs_rq we will 4890 * post the final h_nr_running increment below. 4891 */ 4892 if (cfs_rq_throttled(cfs_rq)) 4893 break; 4894 cfs_rq->h_nr_running++; 4895 4896 flags = ENQUEUE_WAKEUP; 4897 } 4898 4899 for_each_sched_entity(se) { 4900 cfs_rq = cfs_rq_of(se); 4901 cfs_rq->h_nr_running++; 4902 4903 if (cfs_rq_throttled(cfs_rq)) 4904 break; 4905 4906 update_load_avg(se, UPDATE_TG); 4907 update_cfs_shares(se); 4908 } 4909 4910 if (!se) 4911 add_nr_running(rq, 1); 4912 4913 hrtick_update(rq); 4914 } 4915 4916 static void set_next_buddy(struct sched_entity *se); 4917 4918 /* 4919 * The dequeue_task method is called before nr_running is 4920 * decreased. We remove the task from the rbtree and 4921 * update the fair scheduling stats: 4922 */ 4923 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) 4924 { 4925 struct cfs_rq *cfs_rq; 4926 struct sched_entity *se = &p->se; 4927 int task_sleep = flags & DEQUEUE_SLEEP; 4928 4929 for_each_sched_entity(se) { 4930 cfs_rq = cfs_rq_of(se); 4931 dequeue_entity(cfs_rq, se, flags); 4932 4933 /* 4934 * end evaluation on encountering a throttled cfs_rq 4935 * 4936 * note: in the case of encountering a throttled cfs_rq we will 4937 * post the final h_nr_running decrement below. 4938 */ 4939 if (cfs_rq_throttled(cfs_rq)) 4940 break; 4941 cfs_rq->h_nr_running--; 4942 4943 /* Don't dequeue parent if it has other entities besides us */ 4944 if (cfs_rq->load.weight) { 4945 /* Avoid re-evaluating load for this entity: */ 4946 se = parent_entity(se); 4947 /* 4948 * Bias pick_next to pick a task from this cfs_rq, as 4949 * p is sleeping when it is within its sched_slice. 4950 */ 4951 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) 4952 set_next_buddy(se); 4953 break; 4954 } 4955 flags |= DEQUEUE_SLEEP; 4956 } 4957 4958 for_each_sched_entity(se) { 4959 cfs_rq = cfs_rq_of(se); 4960 cfs_rq->h_nr_running--; 4961 4962 if (cfs_rq_throttled(cfs_rq)) 4963 break; 4964 4965 update_load_avg(se, UPDATE_TG); 4966 update_cfs_shares(se); 4967 } 4968 4969 if (!se) 4970 sub_nr_running(rq, 1); 4971 4972 hrtick_update(rq); 4973 } 4974 4975 #ifdef CONFIG_SMP 4976 4977 /* Working cpumask for: load_balance, load_balance_newidle. */ 4978 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); 4979 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask); 4980 4981 #ifdef CONFIG_NO_HZ_COMMON 4982 /* 4983 * per rq 'load' arrray crap; XXX kill this. 4984 */ 4985 4986 /* 4987 * The exact cpuload calculated at every tick would be: 4988 * 4989 * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load 4990 * 4991 * If a cpu misses updates for n ticks (as it was idle) and update gets 4992 * called on the n+1-th tick when cpu may be busy, then we have: 4993 * 4994 * load_n = (1 - 1/2^i)^n * load_0 4995 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load 4996 * 4997 * decay_load_missed() below does efficient calculation of 4998 * 4999 * load' = (1 - 1/2^i)^n * load 5000 * 5001 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors. 5002 * This allows us to precompute the above in said factors, thereby allowing the 5003 * reduction of an arbitrary n in O(log_2 n) steps. 
(See also 5004 * fixed_power_int()) 5005 * 5006 * The calculation is approximated on a 128 point scale. 5007 */ 5008 #define DEGRADE_SHIFT 7 5009 5010 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; 5011 static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { 5012 { 0, 0, 0, 0, 0, 0, 0, 0 }, 5013 { 64, 32, 8, 0, 0, 0, 0, 0 }, 5014 { 96, 72, 40, 12, 1, 0, 0, 0 }, 5015 { 112, 98, 75, 43, 15, 1, 0, 0 }, 5016 { 120, 112, 98, 76, 45, 16, 2, 0 } 5017 }; 5018 5019 /* 5020 * Update cpu_load for any missed ticks, due to tickless idle. The backlog 5021 * would be when CPU is idle and so we just decay the old load without 5022 * adding any new load. 5023 */ 5024 static unsigned long 5025 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) 5026 { 5027 int j = 0; 5028 5029 if (!missed_updates) 5030 return load; 5031 5032 if (missed_updates >= degrade_zero_ticks[idx]) 5033 return 0; 5034 5035 if (idx == 1) 5036 return load >> missed_updates; 5037 5038 while (missed_updates) { 5039 if (missed_updates % 2) 5040 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; 5041 5042 missed_updates >>= 1; 5043 j++; 5044 } 5045 return load; 5046 } 5047 #endif /* CONFIG_NO_HZ_COMMON */ 5048 5049 /** 5050 * __cpu_load_update - update the rq->cpu_load[] statistics 5051 * @this_rq: The rq to update statistics for 5052 * @this_load: The current load 5053 * @pending_updates: The number of missed updates 5054 * 5055 * Update rq->cpu_load[] statistics. This function is usually called every 5056 * scheduler tick (TICK_NSEC). 5057 * 5058 * This function computes a decaying average: 5059 * 5060 * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load 5061 * 5062 * Because of NOHZ it might not get called on every tick which gives need for 5063 * the @pending_updates argument. 5064 * 5065 * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1 5066 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load 5067 * = A * (A * load[i]_n-2 + B) + B 5068 * = A * (A * (A * load[i]_n-3 + B) + B) + B 5069 * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B 5070 * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B 5071 * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B 5072 * = (1 - 1/2^i)^n * (load[i]_0 - load) + load 5073 * 5074 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as 5075 * any change in load would have resulted in the tick being turned back on. 5076 * 5077 * For regular NOHZ, this reduces to: 5078 * 5079 * load[i]_n = (1 - 1/2^i)^n * load[i]_0 5080 * 5081 * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra 5082 * term. 
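 *
 * Worked example: for i = 2 the per-tick factor is (1 - 1/4) = 3/4,
 * i.e. 96 on the 128 point scale; two missed ticks give (3/4)^2 = 9/16
 * ~= 72/128 and four give (3/4)^4 ~= 40/128 -- precisely the 96, 72, 40
 * entries of the degrade_factor[2] row used by decay_load_missed()
 * above, which stores these power-of-two factors so any n can be
 * composed from its binary digits.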
5083 */ 5084 static void cpu_load_update(struct rq *this_rq, unsigned long this_load, 5085 unsigned long pending_updates) 5086 { 5087 unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0]; 5088 int i, scale; 5089 5090 this_rq->nr_load_updates++; 5091 5092 /* Update our load: */ 5093 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ 5094 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { 5095 unsigned long old_load, new_load; 5096 5097 /* scale is effectively 1 << i now, and >> i divides by scale */ 5098 5099 old_load = this_rq->cpu_load[i]; 5100 #ifdef CONFIG_NO_HZ_COMMON 5101 old_load = decay_load_missed(old_load, pending_updates - 1, i); 5102 if (tickless_load) { 5103 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i); 5104 /* 5105 * old_load can never be a negative value because a 5106 * decayed tickless_load cannot be greater than the 5107 * original tickless_load. 5108 */ 5109 old_load += tickless_load; 5110 } 5111 #endif 5112 new_load = this_load; 5113 /* 5114 * Round up the averaging division if load is increasing. This 5115 * prevents us from getting stuck on 9 if the load is 10, for 5116 * example. 5117 */ 5118 if (new_load > old_load) 5119 new_load += scale - 1; 5120 5121 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; 5122 } 5123 5124 sched_avg_update(this_rq); 5125 } 5126 5127 /* Used instead of source_load when we know the type == 0 */ 5128 static unsigned long weighted_cpuload(const int cpu) 5129 { 5130 return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs); 5131 } 5132 5133 #ifdef CONFIG_NO_HZ_COMMON 5134 /* 5135 * There is no sane way to deal with nohz on smp when using jiffies because the 5136 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading 5137 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}. 5138 * 5139 * Therefore we need to avoid the delta approach from the regular tick when 5140 * possible since that would seriously skew the load calculation. This is why we 5141 * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on 5142 * jiffies deltas for updates happening while in nohz mode (idle ticks, idle 5143 * loop exit, nohz_idle_balance, nohz full exit...) 5144 * 5145 * This means we might still be one tick off for nohz periods. 5146 */ 5147 5148 static void cpu_load_update_nohz(struct rq *this_rq, 5149 unsigned long curr_jiffies, 5150 unsigned long load) 5151 { 5152 unsigned long pending_updates; 5153 5154 pending_updates = curr_jiffies - this_rq->last_load_update_tick; 5155 if (pending_updates) { 5156 this_rq->last_load_update_tick = curr_jiffies; 5157 /* 5158 * In the regular NOHZ case, we were idle, this means load 0. 5159 * In the NOHZ_FULL case, we were non-idle, we should consider 5160 * its weighted load. 5161 */ 5162 cpu_load_update(this_rq, load, pending_updates); 5163 } 5164 } 5165 5166 /* 5167 * Called from nohz_idle_balance() to update the load ratings before doing the 5168 * idle balance. 5169 */ 5170 static void cpu_load_update_idle(struct rq *this_rq) 5171 { 5172 /* 5173 * bail if there's load or we're actually up-to-date. 5174 */ 5175 if (weighted_cpuload(cpu_of(this_rq))) 5176 return; 5177 5178 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0); 5179 } 5180 5181 /* 5182 * Record CPU load on nohz entry so we know the tickless load to account 5183 * on nohz exit. 
cpu_load[0] happens then to be updated more frequently 5184 * than other cpu_load[idx] but it should be fine as cpu_load readers 5185 * shouldn't rely into synchronized cpu_load[*] updates. 5186 */ 5187 void cpu_load_update_nohz_start(void) 5188 { 5189 struct rq *this_rq = this_rq(); 5190 5191 /* 5192 * This is all lockless but should be fine. If weighted_cpuload changes 5193 * concurrently we'll exit nohz. And cpu_load write can race with 5194 * cpu_load_update_idle() but both updater would be writing the same. 5195 */ 5196 this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq)); 5197 } 5198 5199 /* 5200 * Account the tickless load in the end of a nohz frame. 5201 */ 5202 void cpu_load_update_nohz_stop(void) 5203 { 5204 unsigned long curr_jiffies = READ_ONCE(jiffies); 5205 struct rq *this_rq = this_rq(); 5206 unsigned long load; 5207 struct rq_flags rf; 5208 5209 if (curr_jiffies == this_rq->last_load_update_tick) 5210 return; 5211 5212 load = weighted_cpuload(cpu_of(this_rq)); 5213 rq_lock(this_rq, &rf); 5214 update_rq_clock(this_rq); 5215 cpu_load_update_nohz(this_rq, curr_jiffies, load); 5216 rq_unlock(this_rq, &rf); 5217 } 5218 #else /* !CONFIG_NO_HZ_COMMON */ 5219 static inline void cpu_load_update_nohz(struct rq *this_rq, 5220 unsigned long curr_jiffies, 5221 unsigned long load) { } 5222 #endif /* CONFIG_NO_HZ_COMMON */ 5223 5224 static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load) 5225 { 5226 #ifdef CONFIG_NO_HZ_COMMON 5227 /* See the mess around cpu_load_update_nohz(). */ 5228 this_rq->last_load_update_tick = READ_ONCE(jiffies); 5229 #endif 5230 cpu_load_update(this_rq, load, 1); 5231 } 5232 5233 /* 5234 * Called from scheduler_tick() 5235 */ 5236 void cpu_load_update_active(struct rq *this_rq) 5237 { 5238 unsigned long load = weighted_cpuload(cpu_of(this_rq)); 5239 5240 if (tick_nohz_tick_stopped()) 5241 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load); 5242 else 5243 cpu_load_update_periodic(this_rq, load); 5244 } 5245 5246 /* 5247 * Return a low guess at the load of a migration-source cpu weighted 5248 * according to the scheduling class and "nice" value. 5249 * 5250 * We want to under-estimate the load of migration sources, to 5251 * balance conservatively. 5252 */ 5253 static unsigned long source_load(int cpu, int type) 5254 { 5255 struct rq *rq = cpu_rq(cpu); 5256 unsigned long total = weighted_cpuload(cpu); 5257 5258 if (type == 0 || !sched_feat(LB_BIAS)) 5259 return total; 5260 5261 return min(rq->cpu_load[type-1], total); 5262 } 5263 5264 /* 5265 * Return a high guess at the load of a migration-target cpu weighted 5266 * according to the scheduling class and "nice" value. 
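 *
 * For example (illustrative numbers): with weighted_cpuload() == 900
 * and cpu_load[type-1] == 1200, source_load() above reports 900 (the
 * minimum) while target_load() reports 1200 (the maximum), so the same
 * marginal imbalance looks smaller at the source and larger at the
 * target and we err on the side of not migrating.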
5267 */ 5268 static unsigned long target_load(int cpu, int type) 5269 { 5270 struct rq *rq = cpu_rq(cpu); 5271 unsigned long total = weighted_cpuload(cpu); 5272 5273 if (type == 0 || !sched_feat(LB_BIAS)) 5274 return total; 5275 5276 return max(rq->cpu_load[type-1], total); 5277 } 5278 5279 static unsigned long capacity_of(int cpu) 5280 { 5281 return cpu_rq(cpu)->cpu_capacity; 5282 } 5283 5284 static unsigned long capacity_orig_of(int cpu) 5285 { 5286 return cpu_rq(cpu)->cpu_capacity_orig; 5287 } 5288 5289 static unsigned long cpu_avg_load_per_task(int cpu) 5290 { 5291 struct rq *rq = cpu_rq(cpu); 5292 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); 5293 unsigned long load_avg = weighted_cpuload(cpu); 5294 5295 if (nr_running) 5296 return load_avg / nr_running; 5297 5298 return 0; 5299 } 5300 5301 static void record_wakee(struct task_struct *p) 5302 { 5303 /* 5304 * Only decay a single time; tasks that have less then 1 wakeup per 5305 * jiffy will not have built up many flips. 5306 */ 5307 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { 5308 current->wakee_flips >>= 1; 5309 current->wakee_flip_decay_ts = jiffies; 5310 } 5311 5312 if (current->last_wakee != p) { 5313 current->last_wakee = p; 5314 current->wakee_flips++; 5315 } 5316 } 5317 5318 /* 5319 * Detect M:N waker/wakee relationships via a switching-frequency heuristic. 5320 * 5321 * A waker of many should wake a different task than the one last awakened 5322 * at a frequency roughly N times higher than one of its wakees. 5323 * 5324 * In order to determine whether we should let the load spread vs consolidating 5325 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one 5326 * partner, and a factor of lls_size higher frequency in the other. 5327 * 5328 * With both conditions met, we can be relatively sure that the relationship is 5329 * non-monogamous, with partner count exceeding socket size. 5330 * 5331 * Waker/wakee being client/server, worker/dispatcher, interrupt source or 5332 * whatever is irrelevant, spread criteria is apparent partner count exceeds 5333 * socket size. 5334 */ 5335 static int wake_wide(struct task_struct *p) 5336 { 5337 unsigned int master = current->wakee_flips; 5338 unsigned int slave = p->wakee_flips; 5339 int factor = this_cpu_read(sd_llc_size); 5340 5341 if (master < slave) 5342 swap(master, slave); 5343 if (slave < factor || master < slave * factor) 5344 return 0; 5345 return 1; 5346 } 5347 5348 static int wake_affine(struct sched_domain *sd, struct task_struct *p, 5349 int prev_cpu, int sync) 5350 { 5351 int this_cpu = smp_processor_id(); 5352 bool affine = false; 5353 5354 /* 5355 * Common case: CPUs are in the same socket, and select_idle_sibling() 5356 * will do its thing regardless of what we return: 5357 */ 5358 if (cpus_share_cache(prev_cpu, this_cpu)) 5359 affine = true; 5360 else 5361 affine = numa_wake_affine(sd, p, this_cpu, prev_cpu, sync); 5362 5363 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); 5364 if (affine) { 5365 schedstat_inc(sd->ttwu_move_affine); 5366 schedstat_inc(p->se.statistics.nr_wakeups_affine); 5367 } 5368 5369 return affine; 5370 } 5371 5372 static inline int task_util(struct task_struct *p); 5373 static int cpu_util_wake(int cpu, struct task_struct *p); 5374 5375 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p) 5376 { 5377 return capacity_orig_of(cpu) - cpu_util_wake(cpu, p); 5378 } 5379 5380 /* 5381 * find_idlest_group finds and returns the least busy CPU group within the 5382 * domain. 
5383 */ 5384 static struct sched_group * 5385 find_idlest_group(struct sched_domain *sd, struct task_struct *p, 5386 int this_cpu, int sd_flag) 5387 { 5388 struct sched_group *idlest = NULL, *group = sd->groups; 5389 struct sched_group *most_spare_sg = NULL; 5390 unsigned long min_runnable_load = ULONG_MAX, this_runnable_load = 0; 5391 unsigned long min_avg_load = ULONG_MAX, this_avg_load = 0; 5392 unsigned long most_spare = 0, this_spare = 0; 5393 int load_idx = sd->forkexec_idx; 5394 int imbalance_scale = 100 + (sd->imbalance_pct-100)/2; 5395 unsigned long imbalance = scale_load_down(NICE_0_LOAD) * 5396 (sd->imbalance_pct-100) / 100; 5397 5398 if (sd_flag & SD_BALANCE_WAKE) 5399 load_idx = sd->wake_idx; 5400 5401 do { 5402 unsigned long load, avg_load, runnable_load; 5403 unsigned long spare_cap, max_spare_cap; 5404 int local_group; 5405 int i; 5406 5407 /* Skip over this group if it has no CPUs allowed */ 5408 if (!cpumask_intersects(sched_group_span(group), 5409 &p->cpus_allowed)) 5410 continue; 5411 5412 local_group = cpumask_test_cpu(this_cpu, 5413 sched_group_span(group)); 5414 5415 /* 5416 * Tally up the load of all CPUs in the group and find 5417 * the group containing the CPU with most spare capacity. 5418 */ 5419 avg_load = 0; 5420 runnable_load = 0; 5421 max_spare_cap = 0; 5422 5423 for_each_cpu(i, sched_group_span(group)) { 5424 /* Bias balancing toward cpus of our domain */ 5425 if (local_group) 5426 load = source_load(i, load_idx); 5427 else 5428 load = target_load(i, load_idx); 5429 5430 runnable_load += load; 5431 5432 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs); 5433 5434 spare_cap = capacity_spare_wake(i, p); 5435 5436 if (spare_cap > max_spare_cap) 5437 max_spare_cap = spare_cap; 5438 } 5439 5440 /* Adjust by relative CPU capacity of the group */ 5441 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / 5442 group->sgc->capacity; 5443 runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) / 5444 group->sgc->capacity; 5445 5446 if (local_group) { 5447 this_runnable_load = runnable_load; 5448 this_avg_load = avg_load; 5449 this_spare = max_spare_cap; 5450 } else { 5451 if (min_runnable_load > (runnable_load + imbalance)) { 5452 /* 5453 * The runnable load is significantly smaller 5454 * so we can pick this new cpu 5455 */ 5456 min_runnable_load = runnable_load; 5457 min_avg_load = avg_load; 5458 idlest = group; 5459 } else if ((runnable_load < (min_runnable_load + imbalance)) && 5460 (100*min_avg_load > imbalance_scale*avg_load)) { 5461 /* 5462 * The runnable loads are close so take the 5463 * blocked load into account through avg_load. 5464 */ 5465 min_avg_load = avg_load; 5466 idlest = group; 5467 } 5468 5469 if (most_spare < max_spare_cap) { 5470 most_spare = max_spare_cap; 5471 most_spare_sg = group; 5472 } 5473 } 5474 } while (group = group->next, group != sd->groups); 5475 5476 /* 5477 * The cross-over point between using spare capacity or least load 5478 * is too conservative for high utilization tasks on partially 5479 * utilized systems if we require spare_capacity > task_util(p), 5480 * so we allow for some task stuffing by using 5481 * spare_capacity > task_util(p)/2. 5482 * 5483 * Spare capacity can't be used for fork because the utilization has 5484 * not been set yet, we must first select a rq to compute the initial 5485 * utilization. 
5486 */ 5487 if (sd_flag & SD_BALANCE_FORK) 5488 goto skip_spare; 5489 5490 if (this_spare > task_util(p) / 2 && 5491 imbalance_scale*this_spare > 100*most_spare) 5492 return NULL; 5493 5494 if (most_spare > task_util(p) / 2) 5495 return most_spare_sg; 5496 5497 skip_spare: 5498 if (!idlest) 5499 return NULL; 5500 5501 if (min_runnable_load > (this_runnable_load + imbalance)) 5502 return NULL; 5503 5504 if ((this_runnable_load < (min_runnable_load + imbalance)) && 5505 (100*this_avg_load < imbalance_scale*min_avg_load)) 5506 return NULL; 5507 5508 return idlest; 5509 } 5510 5511 /* 5512 * find_idlest_cpu - find the idlest cpu among the cpus in group. 5513 */ 5514 static int 5515 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) 5516 { 5517 unsigned long load, min_load = ULONG_MAX; 5518 unsigned int min_exit_latency = UINT_MAX; 5519 u64 latest_idle_timestamp = 0; 5520 int least_loaded_cpu = this_cpu; 5521 int shallowest_idle_cpu = -1; 5522 int i; 5523 5524 /* Check if we have any choice: */ 5525 if (group->group_weight == 1) 5526 return cpumask_first(sched_group_span(group)); 5527 5528 /* Traverse only the allowed CPUs */ 5529 for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) { 5530 if (idle_cpu(i)) { 5531 struct rq *rq = cpu_rq(i); 5532 struct cpuidle_state *idle = idle_get_state(rq); 5533 if (idle && idle->exit_latency < min_exit_latency) { 5534 /* 5535 * We give priority to a CPU whose idle state 5536 * has the smallest exit latency irrespective 5537 * of any idle timestamp. 5538 */ 5539 min_exit_latency = idle->exit_latency; 5540 latest_idle_timestamp = rq->idle_stamp; 5541 shallowest_idle_cpu = i; 5542 } else if ((!idle || idle->exit_latency == min_exit_latency) && 5543 rq->idle_stamp > latest_idle_timestamp) { 5544 /* 5545 * If equal or no active idle state, then 5546 * the most recently idled CPU might have 5547 * a warmer cache. 5548 */ 5549 latest_idle_timestamp = rq->idle_stamp; 5550 shallowest_idle_cpu = i; 5551 } 5552 } else if (shallowest_idle_cpu == -1) { 5553 load = weighted_cpuload(i); 5554 if (load < min_load || (load == min_load && i == this_cpu)) { 5555 min_load = load; 5556 least_loaded_cpu = i; 5557 } 5558 } 5559 } 5560 5561 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; 5562 } 5563 5564 #ifdef CONFIG_SCHED_SMT 5565 5566 static inline void set_idle_cores(int cpu, int val) 5567 { 5568 struct sched_domain_shared *sds; 5569 5570 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5571 if (sds) 5572 WRITE_ONCE(sds->has_idle_cores, val); 5573 } 5574 5575 static inline bool test_idle_cores(int cpu, bool def) 5576 { 5577 struct sched_domain_shared *sds; 5578 5579 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5580 if (sds) 5581 return READ_ONCE(sds->has_idle_cores); 5582 5583 return def; 5584 } 5585 5586 /* 5587 * Scans the local SMT mask to see if the entire core is idle, and records this 5588 * information in sd_llc_shared->has_idle_cores. 5589 * 5590 * Since SMT siblings share all cache levels, inspecting this limited remote 5591 * state should be fairly cheap. 
5592 */ 5593 void __update_idle_core(struct rq *rq) 5594 { 5595 int core = cpu_of(rq); 5596 int cpu; 5597 5598 rcu_read_lock(); 5599 if (test_idle_cores(core, true)) 5600 goto unlock; 5601 5602 for_each_cpu(cpu, cpu_smt_mask(core)) { 5603 if (cpu == core) 5604 continue; 5605 5606 if (!idle_cpu(cpu)) 5607 goto unlock; 5608 } 5609 5610 set_idle_cores(core, 1); 5611 unlock: 5612 rcu_read_unlock(); 5613 } 5614 5615 /* 5616 * Scan the entire LLC domain for idle cores; this dynamically switches off if 5617 * there are no idle cores left in the system; tracked through 5618 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. 5619 */ 5620 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) 5621 { 5622 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); 5623 int core, cpu; 5624 5625 if (!static_branch_likely(&sched_smt_present)) 5626 return -1; 5627 5628 if (!test_idle_cores(target, false)) 5629 return -1; 5630 5631 cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); 5632 5633 for_each_cpu_wrap(core, cpus, target) { 5634 bool idle = true; 5635 5636 for_each_cpu(cpu, cpu_smt_mask(core)) { 5637 cpumask_clear_cpu(cpu, cpus); 5638 if (!idle_cpu(cpu)) 5639 idle = false; 5640 } 5641 5642 if (idle) 5643 return core; 5644 } 5645 5646 /* 5647 * Failed to find an idle core; stop looking for one. 5648 */ 5649 set_idle_cores(target, 0); 5650 5651 return -1; 5652 } 5653 5654 /* 5655 * Scan the local SMT mask for idle CPUs. 5656 */ 5657 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 5658 { 5659 int cpu; 5660 5661 if (!static_branch_likely(&sched_smt_present)) 5662 return -1; 5663 5664 for_each_cpu(cpu, cpu_smt_mask(target)) { 5665 if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 5666 continue; 5667 if (idle_cpu(cpu)) 5668 return cpu; 5669 } 5670 5671 return -1; 5672 } 5673 5674 #else /* CONFIG_SCHED_SMT */ 5675 5676 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) 5677 { 5678 return -1; 5679 } 5680 5681 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) 5682 { 5683 return -1; 5684 } 5685 5686 #endif /* CONFIG_SCHED_SMT */ 5687 5688 /* 5689 * Scan the LLC domain for idle CPUs; this is dynamically regulated by 5690 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the 5691 * average idle time for this rq (as found in rq->avg_idle). 5692 */ 5693 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) 5694 { 5695 struct sched_domain *this_sd; 5696 u64 avg_cost, avg_idle; 5697 u64 time, cost; 5698 s64 delta; 5699 int cpu, nr = INT_MAX; 5700 5701 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); 5702 if (!this_sd) 5703 return -1; 5704 5705 /* 5706 * Due to large variance we need a large fuzz factor; hackbench in 5707 * particularly is sensitive here. 
5708 */ 5709 avg_idle = this_rq()->avg_idle / 512; 5710 avg_cost = this_sd->avg_scan_cost + 1; 5711 5712 if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost) 5713 return -1; 5714 5715 if (sched_feat(SIS_PROP)) { 5716 u64 span_avg = sd->span_weight * avg_idle; 5717 if (span_avg > 4*avg_cost) 5718 nr = div_u64(span_avg, avg_cost); 5719 else 5720 nr = 4; 5721 } 5722 5723 time = local_clock(); 5724 5725 for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { 5726 if (!--nr) 5727 return -1; 5728 if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) 5729 continue; 5730 if (idle_cpu(cpu)) 5731 break; 5732 } 5733 5734 time = local_clock() - time; 5735 cost = this_sd->avg_scan_cost; 5736 delta = (s64)(time - cost) / 8; 5737 this_sd->avg_scan_cost += delta; 5738 5739 return cpu; 5740 } 5741 5742 /* 5743 * Try and locate an idle core/thread in the LLC cache domain. 5744 */ 5745 static int select_idle_sibling(struct task_struct *p, int prev, int target) 5746 { 5747 struct sched_domain *sd; 5748 int i; 5749 5750 if (idle_cpu(target)) 5751 return target; 5752 5753 /* 5754 * If the previous cpu is cache affine and idle, don't be stupid. 5755 */ 5756 if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev)) 5757 return prev; 5758 5759 sd = rcu_dereference(per_cpu(sd_llc, target)); 5760 if (!sd) 5761 return target; 5762 5763 i = select_idle_core(p, sd, target); 5764 if ((unsigned)i < nr_cpumask_bits) 5765 return i; 5766 5767 i = select_idle_cpu(p, sd, target); 5768 if ((unsigned)i < nr_cpumask_bits) 5769 return i; 5770 5771 i = select_idle_smt(p, sd, target); 5772 if ((unsigned)i < nr_cpumask_bits) 5773 return i; 5774 5775 return target; 5776 } 5777 5778 /* 5779 * cpu_util returns the amount of capacity of a CPU that is used by CFS 5780 * tasks. The unit of the return value must be the one of capacity so we can 5781 * compare the utilization with the capacity of the CPU that is available for 5782 * CFS task (ie cpu_capacity). 5783 * 5784 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the 5785 * recent utilization of currently non-runnable tasks on a CPU. It represents 5786 * the amount of utilization of a CPU in the range [0..capacity_orig] where 5787 * capacity_orig is the cpu_capacity available at the highest frequency 5788 * (arch_scale_freq_capacity()). 5789 * The utilization of a CPU converges towards a sum equal to or less than the 5790 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is 5791 * the running time on this CPU scaled by capacity_curr. 5792 * 5793 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even 5794 * higher than capacity_orig because of unfortunate rounding in 5795 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until 5796 * the average stabilizes with the new running time. We need to check that the 5797 * utilization stays within the range of [0..capacity_orig] and cap it if 5798 * necessary. Without utilization capping, a group could be seen as overloaded 5799 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of 5800 * available capacity. We allow utilization to overshoot capacity_curr (but not 5801 * capacity_orig) as it useful for predicting the capacity required after task 5802 * migrations (scheduler-driven DVFS). 5803 */ 5804 static int cpu_util(int cpu) 5805 { 5806 unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg; 5807 unsigned long capacity = capacity_orig_of(cpu); 5808 5809 return (util >= capacity) ? 
capacity : util; 5810 } 5811 5812 static inline int task_util(struct task_struct *p) 5813 { 5814 return p->se.avg.util_avg; 5815 } 5816 5817 /* 5818 * cpu_util_wake: Compute cpu utilization with any contributions from 5819 * the waking task p removed. 5820 */ 5821 static int cpu_util_wake(int cpu, struct task_struct *p) 5822 { 5823 unsigned long util, capacity; 5824 5825 /* Task has no contribution or is new */ 5826 if (cpu != task_cpu(p) || !p->se.avg.last_update_time) 5827 return cpu_util(cpu); 5828 5829 capacity = capacity_orig_of(cpu); 5830 util = max_t(long, cpu_rq(cpu)->cfs.avg.util_avg - task_util(p), 0); 5831 5832 return (util >= capacity) ? capacity : util; 5833 } 5834 5835 /* 5836 * Disable WAKE_AFFINE in the case where task @p doesn't fit in the 5837 * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu. 5838 * 5839 * In that case WAKE_AFFINE doesn't make sense and we'll let 5840 * BALANCE_WAKE sort things out. 5841 */ 5842 static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) 5843 { 5844 long min_cap, max_cap; 5845 5846 min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu)); 5847 max_cap = cpu_rq(cpu)->rd->max_cpu_capacity; 5848 5849 /* Minimum capacity is close to max, no need to abort wake_affine */ 5850 if (max_cap - min_cap < max_cap >> 3) 5851 return 0; 5852 5853 /* Bring task utilization in sync with prev_cpu */ 5854 sync_entity_load_avg(&p->se); 5855 5856 return min_cap * 1024 < task_util(p) * capacity_margin; 5857 } 5858 5859 /* 5860 * select_task_rq_fair: Select target runqueue for the waking task in domains 5861 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, 5862 * SD_BALANCE_FORK, or SD_BALANCE_EXEC. 5863 * 5864 * Balances load by selecting the idlest cpu in the idlest group, or under 5865 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set. 5866 * 5867 * Returns the target cpu number. 5868 * 5869 * preempt must be disabled. 5870 */ 5871 static int 5872 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) 5873 { 5874 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; 5875 int cpu = smp_processor_id(); 5876 int new_cpu = prev_cpu; 5877 int want_affine = 0; 5878 int sync = wake_flags & WF_SYNC; 5879 5880 if (sd_flag & SD_BALANCE_WAKE) { 5881 record_wakee(p); 5882 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) 5883 && cpumask_test_cpu(cpu, &p->cpus_allowed); 5884 } 5885 5886 rcu_read_lock(); 5887 for_each_domain(cpu, tmp) { 5888 if (!(tmp->flags & SD_LOAD_BALANCE)) 5889 break; 5890 5891 /* 5892 * If both cpu and prev_cpu are part of this domain, 5893 * cpu is a valid SD_WAKE_AFFINE target. 5894 */ 5895 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && 5896 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { 5897 affine_sd = tmp; 5898 break; 5899 } 5900 5901 if (tmp->flags & sd_flag) 5902 sd = tmp; 5903 else if (!want_affine) 5904 break; 5905 } 5906 5907 if (affine_sd) { 5908 sd = NULL; /* Prefer wake_affine over balance flags */ 5909 if (cpu == prev_cpu) 5910 goto pick_cpu; 5911 5912 if (wake_affine(affine_sd, p, prev_cpu, sync)) 5913 new_cpu = cpu; 5914 } 5915 5916 if (!sd) { 5917 pick_cpu: 5918 if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? 
*/ 5919 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); 5920 5921 } else while (sd) { 5922 struct sched_group *group; 5923 int weight; 5924 5925 if (!(sd->flags & sd_flag)) { 5926 sd = sd->child; 5927 continue; 5928 } 5929 5930 group = find_idlest_group(sd, p, cpu, sd_flag); 5931 if (!group) { 5932 sd = sd->child; 5933 continue; 5934 } 5935 5936 new_cpu = find_idlest_cpu(group, p, cpu); 5937 if (new_cpu == -1 || new_cpu == cpu) { 5938 /* Now try balancing at a lower domain level of cpu */ 5939 sd = sd->child; 5940 continue; 5941 } 5942 5943 /* Now try balancing at a lower domain level of new_cpu */ 5944 cpu = new_cpu; 5945 weight = sd->span_weight; 5946 sd = NULL; 5947 for_each_domain(cpu, tmp) { 5948 if (weight <= tmp->span_weight) 5949 break; 5950 if (tmp->flags & sd_flag) 5951 sd = tmp; 5952 } 5953 /* while loop will break here if sd == NULL */ 5954 } 5955 rcu_read_unlock(); 5956 5957 return new_cpu; 5958 } 5959 5960 /* 5961 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and 5962 * cfs_rq_of(p) references at time of call are still valid and identify the 5963 * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held. 5964 */ 5965 static void migrate_task_rq_fair(struct task_struct *p) 5966 { 5967 /* 5968 * As blocked tasks retain absolute vruntime the migration needs to 5969 * deal with this by subtracting the old and adding the new 5970 * min_vruntime -- the latter is done by enqueue_entity() when placing 5971 * the task on the new runqueue. 5972 */ 5973 if (p->state == TASK_WAKING) { 5974 struct sched_entity *se = &p->se; 5975 struct cfs_rq *cfs_rq = cfs_rq_of(se); 5976 u64 min_vruntime; 5977 5978 #ifndef CONFIG_64BIT 5979 u64 min_vruntime_copy; 5980 5981 do { 5982 min_vruntime_copy = cfs_rq->min_vruntime_copy; 5983 smp_rmb(); 5984 min_vruntime = cfs_rq->min_vruntime; 5985 } while (min_vruntime != min_vruntime_copy); 5986 #else 5987 min_vruntime = cfs_rq->min_vruntime; 5988 #endif 5989 5990 se->vruntime -= min_vruntime; 5991 } 5992 5993 /* 5994 * We are supposed to update the task to "current" time, then its up to date 5995 * and ready to go to new CPU/cfs_rq. But we have difficulty in getting 5996 * what current time is, so simply throw away the out-of-date time. This 5997 * will result in the wakee task is less decayed, but giving the wakee more 5998 * load sounds not bad. 5999 */ 6000 remove_entity_load_avg(&p->se); 6001 6002 /* Tell new CPU we are migrated */ 6003 p->se.avg.last_update_time = 0; 6004 6005 /* We have migrated, no longer consider this task hot */ 6006 p->se.exec_start = 0; 6007 } 6008 6009 static void task_dead_fair(struct task_struct *p) 6010 { 6011 remove_entity_load_avg(&p->se); 6012 } 6013 #endif /* CONFIG_SMP */ 6014 6015 static unsigned long 6016 wakeup_gran(struct sched_entity *curr, struct sched_entity *se) 6017 { 6018 unsigned long gran = sysctl_sched_wakeup_granularity; 6019 6020 /* 6021 * Since its curr running now, convert the gran from real-time 6022 * to virtual-time in his units. 6023 * 6024 * By using 'se' instead of 'curr' we penalize light tasks, so 6025 * they get preempted easier. That is, if 'se' < 'curr' then 6026 * the resulting gran will be larger, therefore penalizing the 6027 * lighter, if otoh 'se' > 'curr' then the resulting gran will 6028 * be smaller, again penalizing the lighter task. 6029 * 6030 * This is especially important for buddies when the leftmost 6031 * task is higher priority than the buddy. 
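	 *
	 * A worked example with made-up but representative numbers: with
	 * sysctl_sched_wakeup_granularity at 1ms, a nice-0 wakee
	 * (weight 1024) gets a virtual granularity of 1ms * 1024/1024 = 1ms,
	 * while a nice+5 wakee (weight 335) gets 1ms * 1024/335 ~= 3ms.
	 * The lighter wakee therefore needs a ~3ms vruntime lead over curr
	 * before wakeup_preempt_entity() lets it preempt, instead of ~1ms.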
6032 */ 6033 return calc_delta_fair(gran, se); 6034 } 6035 6036 /* 6037 * Should 'se' preempt 'curr'. 6038 * 6039 * |s1 6040 * |s2 6041 * |s3 6042 * g 6043 * |<--->|c 6044 * 6045 * w(c, s1) = -1 6046 * w(c, s2) = 0 6047 * w(c, s3) = 1 6048 * 6049 */ 6050 static int 6051 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) 6052 { 6053 s64 gran, vdiff = curr->vruntime - se->vruntime; 6054 6055 if (vdiff <= 0) 6056 return -1; 6057 6058 gran = wakeup_gran(curr, se); 6059 if (vdiff > gran) 6060 return 1; 6061 6062 return 0; 6063 } 6064 6065 static void set_last_buddy(struct sched_entity *se) 6066 { 6067 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) 6068 return; 6069 6070 for_each_sched_entity(se) { 6071 if (SCHED_WARN_ON(!se->on_rq)) 6072 return; 6073 cfs_rq_of(se)->last = se; 6074 } 6075 } 6076 6077 static void set_next_buddy(struct sched_entity *se) 6078 { 6079 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) 6080 return; 6081 6082 for_each_sched_entity(se) { 6083 if (SCHED_WARN_ON(!se->on_rq)) 6084 return; 6085 cfs_rq_of(se)->next = se; 6086 } 6087 } 6088 6089 static void set_skip_buddy(struct sched_entity *se) 6090 { 6091 for_each_sched_entity(se) 6092 cfs_rq_of(se)->skip = se; 6093 } 6094 6095 /* 6096 * Preempt the current task with a newly woken task if needed: 6097 */ 6098 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) 6099 { 6100 struct task_struct *curr = rq->curr; 6101 struct sched_entity *se = &curr->se, *pse = &p->se; 6102 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 6103 int scale = cfs_rq->nr_running >= sched_nr_latency; 6104 int next_buddy_marked = 0; 6105 6106 if (unlikely(se == pse)) 6107 return; 6108 6109 /* 6110 * This is possible from callers such as attach_tasks(), in which we 6111 * unconditionally check_prempt_curr() after an enqueue (which may have 6112 * lead to a throttle). This both saves work and prevents false 6113 * next-buddy nomination below. 6114 */ 6115 if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) 6116 return; 6117 6118 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { 6119 set_next_buddy(pse); 6120 next_buddy_marked = 1; 6121 } 6122 6123 /* 6124 * We can come here with TIF_NEED_RESCHED already set from new task 6125 * wake up path. 6126 * 6127 * Note: this also catches the edge-case of curr being in a throttled 6128 * group (e.g. via set_curr_task), since update_curr() (in the 6129 * enqueue of curr) will have resulted in resched being set. This 6130 * prevents us from potentially nominating it as a false LAST_BUDDY 6131 * below. 6132 */ 6133 if (test_tsk_need_resched(curr)) 6134 return; 6135 6136 /* Idle tasks are by definition preempted by non-idle tasks. */ 6137 if (unlikely(curr->policy == SCHED_IDLE) && 6138 likely(p->policy != SCHED_IDLE)) 6139 goto preempt; 6140 6141 /* 6142 * Batch and idle tasks do not preempt non-idle tasks (their preemption 6143 * is driven by the tick): 6144 */ 6145 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) 6146 return; 6147 6148 find_matching_se(&se, &pse); 6149 update_curr(cfs_rq_of(se)); 6150 BUG_ON(!pse); 6151 if (wakeup_preempt_entity(se, pse) == 1) { 6152 /* 6153 * Bias pick_next to pick the sched entity that is 6154 * triggering this preemption. 
6155 */ 6156 if (!next_buddy_marked) 6157 set_next_buddy(pse); 6158 goto preempt; 6159 } 6160 6161 return; 6162 6163 preempt: 6164 resched_curr(rq); 6165 /* 6166 * Only set the backward buddy when the current task is still 6167 * on the rq. This can happen when a wakeup gets interleaved 6168 * with schedule on the ->pre_schedule() or idle_balance() 6169 * point, either of which can * drop the rq lock. 6170 * 6171 * Also, during early boot the idle thread is in the fair class, 6172 * for obvious reasons its a bad idea to schedule back to it. 6173 */ 6174 if (unlikely(!se->on_rq || curr == rq->idle)) 6175 return; 6176 6177 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) 6178 set_last_buddy(se); 6179 } 6180 6181 static struct task_struct * 6182 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6183 { 6184 struct cfs_rq *cfs_rq = &rq->cfs; 6185 struct sched_entity *se; 6186 struct task_struct *p; 6187 int new_tasks; 6188 6189 again: 6190 #ifdef CONFIG_FAIR_GROUP_SCHED 6191 if (!cfs_rq->nr_running) 6192 goto idle; 6193 6194 if (prev->sched_class != &fair_sched_class) 6195 goto simple; 6196 6197 /* 6198 * Because of the set_next_buddy() in dequeue_task_fair() it is rather 6199 * likely that a next task is from the same cgroup as the current. 6200 * 6201 * Therefore attempt to avoid putting and setting the entire cgroup 6202 * hierarchy, only change the part that actually changes. 6203 */ 6204 6205 do { 6206 struct sched_entity *curr = cfs_rq->curr; 6207 6208 /* 6209 * Since we got here without doing put_prev_entity() we also 6210 * have to consider cfs_rq->curr. If it is still a runnable 6211 * entity, update_curr() will update its vruntime, otherwise 6212 * forget we've ever seen it. 6213 */ 6214 if (curr) { 6215 if (curr->on_rq) 6216 update_curr(cfs_rq); 6217 else 6218 curr = NULL; 6219 6220 /* 6221 * This call to check_cfs_rq_runtime() will do the 6222 * throttle and dequeue its entity in the parent(s). 6223 * Therefore the 'simple' nr_running test will indeed 6224 * be correct. 6225 */ 6226 if (unlikely(check_cfs_rq_runtime(cfs_rq))) 6227 goto simple; 6228 } 6229 6230 se = pick_next_entity(cfs_rq, curr); 6231 cfs_rq = group_cfs_rq(se); 6232 } while (cfs_rq); 6233 6234 p = task_of(se); 6235 6236 /* 6237 * Since we haven't yet done put_prev_entity and if the selected task 6238 * is a different task than we started out with, try and touch the 6239 * least amount of cfs_rqs. 
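	 *
	 * For example (hypothetical cgroup hierarchy): if prev ran in /A/B
	 * and the newly picked task runs in /A/C, the walk below only puts
	 * prev's entities queued in B and in A and sets the new entities in
	 * C and in A; A's own entity and the root cfs_rq are not touched at
	 * all, since that part of the hierarchy did not change.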
6240 */ 6241 if (prev != p) { 6242 struct sched_entity *pse = &prev->se; 6243 6244 while (!(cfs_rq = is_same_group(se, pse))) { 6245 int se_depth = se->depth; 6246 int pse_depth = pse->depth; 6247 6248 if (se_depth <= pse_depth) { 6249 put_prev_entity(cfs_rq_of(pse), pse); 6250 pse = parent_entity(pse); 6251 } 6252 if (se_depth >= pse_depth) { 6253 set_next_entity(cfs_rq_of(se), se); 6254 se = parent_entity(se); 6255 } 6256 } 6257 6258 put_prev_entity(cfs_rq, pse); 6259 set_next_entity(cfs_rq, se); 6260 } 6261 6262 if (hrtick_enabled(rq)) 6263 hrtick_start_fair(rq, p); 6264 6265 return p; 6266 simple: 6267 cfs_rq = &rq->cfs; 6268 #endif 6269 6270 if (!cfs_rq->nr_running) 6271 goto idle; 6272 6273 put_prev_task(rq, prev); 6274 6275 do { 6276 se = pick_next_entity(cfs_rq, NULL); 6277 set_next_entity(cfs_rq, se); 6278 cfs_rq = group_cfs_rq(se); 6279 } while (cfs_rq); 6280 6281 p = task_of(se); 6282 6283 if (hrtick_enabled(rq)) 6284 hrtick_start_fair(rq, p); 6285 6286 return p; 6287 6288 idle: 6289 new_tasks = idle_balance(rq, rf); 6290 6291 /* 6292 * Because idle_balance() releases (and re-acquires) rq->lock, it is 6293 * possible for any higher priority task to appear. In that case we 6294 * must re-start the pick_next_entity() loop. 6295 */ 6296 if (new_tasks < 0) 6297 return RETRY_TASK; 6298 6299 if (new_tasks > 0) 6300 goto again; 6301 6302 return NULL; 6303 } 6304 6305 /* 6306 * Account for a descheduled task: 6307 */ 6308 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) 6309 { 6310 struct sched_entity *se = &prev->se; 6311 struct cfs_rq *cfs_rq; 6312 6313 for_each_sched_entity(se) { 6314 cfs_rq = cfs_rq_of(se); 6315 put_prev_entity(cfs_rq, se); 6316 } 6317 } 6318 6319 /* 6320 * sched_yield() is very simple 6321 * 6322 * The magic of dealing with the ->skip buddy is in pick_next_entity. 6323 */ 6324 static void yield_task_fair(struct rq *rq) 6325 { 6326 struct task_struct *curr = rq->curr; 6327 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 6328 struct sched_entity *se = &curr->se; 6329 6330 /* 6331 * Are we the only task in the tree? 6332 */ 6333 if (unlikely(rq->nr_running == 1)) 6334 return; 6335 6336 clear_buddies(cfs_rq, se); 6337 6338 if (curr->policy != SCHED_BATCH) { 6339 update_rq_clock(rq); 6340 /* 6341 * Update run-time statistics of the 'current'. 6342 */ 6343 update_curr(cfs_rq); 6344 /* 6345 * Tell update_rq_clock() that we've just updated, 6346 * so we don't do microscopic update in schedule() 6347 * and double the fastpath cost. 6348 */ 6349 rq_clock_skip_update(rq, true); 6350 } 6351 6352 set_skip_buddy(se); 6353 } 6354 6355 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) 6356 { 6357 struct sched_entity *se = &p->se; 6358 6359 /* throttled hierarchies are not runnable */ 6360 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) 6361 return false; 6362 6363 /* Tell the scheduler that we'd really like pse to run next. */ 6364 set_next_buddy(se); 6365 6366 yield_task_fair(rq); 6367 6368 return true; 6369 } 6370 6371 #ifdef CONFIG_SMP 6372 /************************************************** 6373 * Fair scheduling class load-balancing methods. 6374 * 6375 * BASICS 6376 * 6377 * The purpose of load-balancing is to achieve the same basic fairness the 6378 * per-cpu scheduler provides, namely provide a proportional amount of compute 6379 * time to each task. This is expressed in the following equation: 6380 * 6381 * W_i,n/P_i == W_j,n/P_j for all i,j (1) 6382 * 6383 * Where W_i,n is the n-th weight average for cpu i. 
The instantaneous weight 6384 * W_i,0 is defined as: 6385 * 6386 * W_i,0 = \Sum_j w_i,j (2) 6387 * 6388 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight 6389 * is derived from the nice value as per sched_prio_to_weight[]. 6390 * 6391 * The weight average is an exponential decay average of the instantaneous 6392 * weight: 6393 * 6394 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) 6395 * 6396 * C_i is the compute capacity of cpu i, typically it is the 6397 * fraction of 'recent' time available for SCHED_OTHER task execution. But it 6398 * can also include other factors [XXX]. 6399 * 6400 * To achieve this balance we define a measure of imbalance which follows 6401 * directly from (1): 6402 * 6403 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) 6404 * 6405 * We them move tasks around to minimize the imbalance. In the continuous 6406 * function space it is obvious this converges, in the discrete case we get 6407 * a few fun cases generally called infeasible weight scenarios. 6408 * 6409 * [XXX expand on: 6410 * - infeasible weights; 6411 * - local vs global optima in the discrete case. ] 6412 * 6413 * 6414 * SCHED DOMAINS 6415 * 6416 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) 6417 * for all i,j solution, we create a tree of cpus that follows the hardware 6418 * topology where each level pairs two lower groups (or better). This results 6419 * in O(log n) layers. Furthermore we reduce the number of cpus going up the 6420 * tree to only the first of the previous level and we decrease the frequency 6421 * of load-balance at each level inv. proportional to the number of cpus in 6422 * the groups. 6423 * 6424 * This yields: 6425 * 6426 * log_2 n 1 n 6427 * \Sum { --- * --- * 2^i } = O(n) (5) 6428 * i = 0 2^i 2^i 6429 * `- size of each group 6430 * | | `- number of cpus doing load-balance 6431 * | `- freq 6432 * `- sum over all levels 6433 * 6434 * Coupled with a limit on how many tasks we can migrate every balance pass, 6435 * this makes (5) the runtime complexity of the balancer. 6436 * 6437 * An important property here is that each CPU is still (indirectly) connected 6438 * to every other cpu in at most O(log n) steps: 6439 * 6440 * The adjacency matrix of the resulting graph is given by: 6441 * 6442 * log_2 n 6443 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6) 6444 * k = 0 6445 * 6446 * And you'll find that: 6447 * 6448 * A^(log_2 n)_i,j != 0 for all i,j (7) 6449 * 6450 * Showing there's indeed a path between every cpu in at most O(log n) steps. 6451 * The task movement gives a factor of O(m), giving a convergence complexity 6452 * of: 6453 * 6454 * O(nm log n), n := nr_cpus, m := nr_tasks (8) 6455 * 6456 * 6457 * WORK CONSERVING 6458 * 6459 * In order to avoid CPUs going idle while there's still work to do, new idle 6460 * balancing is more aggressive and has the newly idle cpu iterate up the domain 6461 * tree itself instead of relying on other CPUs to bring it work. 6462 * 6463 * This adds some complexity to both (5) and (8) but it reduces the total idle 6464 * time. 6465 * 6466 * [XXX more?] 6467 * 6468 * 6469 * CGROUPS 6470 * 6471 * Cgroups make a horror show out of (2), instead of a simple sum we get: 6472 * 6473 * s_k,i 6474 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9) 6475 * S_k 6476 * 6477 * Where 6478 * 6479 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) 6480 * 6481 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i. 
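 *
 * A small worked example of (9) with made-up numbers: take a single
 * cgroup k with shares w_k = 1024, running two nice-0 tasks on cpu i and
 * two more on cpu j.  Then s_k,i = s_k,j = 2048 and S_k = 4096, so the
 * group contributes 1024 * 2048/4096 = 512 to both W_i,0 and W_j,0; its
 * weight is split across CPUs in proportion to where its runnable tasks
 * sit.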
6482 * 6483 * The big problem is S_k, its a global sum needed to compute a local (W_i) 6484 * property. 6485 * 6486 * [XXX write more on how we solve this.. _after_ merging pjt's patches that 6487 * rewrite all of this once again.] 6488 */ 6489 6490 static unsigned long __read_mostly max_load_balance_interval = HZ/10; 6491 6492 enum fbq_type { regular, remote, all }; 6493 6494 #define LBF_ALL_PINNED 0x01 6495 #define LBF_NEED_BREAK 0x02 6496 #define LBF_DST_PINNED 0x04 6497 #define LBF_SOME_PINNED 0x08 6498 6499 struct lb_env { 6500 struct sched_domain *sd; 6501 6502 struct rq *src_rq; 6503 int src_cpu; 6504 6505 int dst_cpu; 6506 struct rq *dst_rq; 6507 6508 struct cpumask *dst_grpmask; 6509 int new_dst_cpu; 6510 enum cpu_idle_type idle; 6511 long imbalance; 6512 /* The set of CPUs under consideration for load-balancing */ 6513 struct cpumask *cpus; 6514 6515 unsigned int flags; 6516 6517 unsigned int loop; 6518 unsigned int loop_break; 6519 unsigned int loop_max; 6520 6521 enum fbq_type fbq_type; 6522 struct list_head tasks; 6523 }; 6524 6525 /* 6526 * Is this task likely cache-hot: 6527 */ 6528 static int task_hot(struct task_struct *p, struct lb_env *env) 6529 { 6530 s64 delta; 6531 6532 lockdep_assert_held(&env->src_rq->lock); 6533 6534 if (p->sched_class != &fair_sched_class) 6535 return 0; 6536 6537 if (unlikely(p->policy == SCHED_IDLE)) 6538 return 0; 6539 6540 /* 6541 * Buddy candidates are cache hot: 6542 */ 6543 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && 6544 (&p->se == cfs_rq_of(&p->se)->next || 6545 &p->se == cfs_rq_of(&p->se)->last)) 6546 return 1; 6547 6548 if (sysctl_sched_migration_cost == -1) 6549 return 1; 6550 if (sysctl_sched_migration_cost == 0) 6551 return 0; 6552 6553 delta = rq_clock_task(env->src_rq) - p->se.exec_start; 6554 6555 return delta < (s64)sysctl_sched_migration_cost; 6556 } 6557 6558 #ifdef CONFIG_NUMA_BALANCING 6559 /* 6560 * Returns 1, if task migration degrades locality 6561 * Returns 0, if task migration improves locality i.e migration preferred. 6562 * Returns -1, if task migration is not affected by locality. 6563 */ 6564 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) 6565 { 6566 struct numa_group *numa_group = rcu_dereference(p->numa_group); 6567 unsigned long src_faults, dst_faults; 6568 int src_nid, dst_nid; 6569 6570 if (!static_branch_likely(&sched_numa_balancing)) 6571 return -1; 6572 6573 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) 6574 return -1; 6575 6576 src_nid = cpu_to_node(env->src_cpu); 6577 dst_nid = cpu_to_node(env->dst_cpu); 6578 6579 if (src_nid == dst_nid) 6580 return -1; 6581 6582 /* Migrating away from the preferred node is always bad. */ 6583 if (src_nid == p->numa_preferred_nid) { 6584 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) 6585 return 1; 6586 else 6587 return -1; 6588 } 6589 6590 /* Encourage migration to the preferred node. */ 6591 if (dst_nid == p->numa_preferred_nid) 6592 return 0; 6593 6594 /* Leaving a core idle is often worse than degrading locality. 
*/ 6595 if (env->idle != CPU_NOT_IDLE) 6596 return -1; 6597 6598 if (numa_group) { 6599 src_faults = group_faults(p, src_nid); 6600 dst_faults = group_faults(p, dst_nid); 6601 } else { 6602 src_faults = task_faults(p, src_nid); 6603 dst_faults = task_faults(p, dst_nid); 6604 } 6605 6606 return dst_faults < src_faults; 6607 } 6608 6609 #else 6610 static inline int migrate_degrades_locality(struct task_struct *p, 6611 struct lb_env *env) 6612 { 6613 return -1; 6614 } 6615 #endif 6616 6617 /* 6618 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? 6619 */ 6620 static 6621 int can_migrate_task(struct task_struct *p, struct lb_env *env) 6622 { 6623 int tsk_cache_hot; 6624 6625 lockdep_assert_held(&env->src_rq->lock); 6626 6627 /* 6628 * We do not migrate tasks that are: 6629 * 1) throttled_lb_pair, or 6630 * 2) cannot be migrated to this CPU due to cpus_allowed, or 6631 * 3) running (obviously), or 6632 * 4) are cache-hot on their current CPU. 6633 */ 6634 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) 6635 return 0; 6636 6637 if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { 6638 int cpu; 6639 6640 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); 6641 6642 env->flags |= LBF_SOME_PINNED; 6643 6644 /* 6645 * Remember if this task can be migrated to any other cpu in 6646 * our sched_group. We may want to revisit it if we couldn't 6647 * meet load balance goals by pulling other tasks on src_cpu. 6648 * 6649 * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have 6650 * already computed one in current iteration. 6651 */ 6652 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) 6653 return 0; 6654 6655 /* Prevent to re-select dst_cpu via env's cpus */ 6656 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { 6657 if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { 6658 env->flags |= LBF_DST_PINNED; 6659 env->new_dst_cpu = cpu; 6660 break; 6661 } 6662 } 6663 6664 return 0; 6665 } 6666 6667 /* Record that we found atleast one task that could run on dst_cpu */ 6668 env->flags &= ~LBF_ALL_PINNED; 6669 6670 if (task_running(env->src_rq, p)) { 6671 schedstat_inc(p->se.statistics.nr_failed_migrations_running); 6672 return 0; 6673 } 6674 6675 /* 6676 * Aggressive migration if: 6677 * 1) destination numa is preferred 6678 * 2) task is cache cold, or 6679 * 3) too many balance attempts have failed. 6680 */ 6681 tsk_cache_hot = migrate_degrades_locality(p, env); 6682 if (tsk_cache_hot == -1) 6683 tsk_cache_hot = task_hot(p, env); 6684 6685 if (tsk_cache_hot <= 0 || 6686 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { 6687 if (tsk_cache_hot == 1) { 6688 schedstat_inc(env->sd->lb_hot_gained[env->idle]); 6689 schedstat_inc(p->se.statistics.nr_forced_migrations); 6690 } 6691 return 1; 6692 } 6693 6694 schedstat_inc(p->se.statistics.nr_failed_migrations_hot); 6695 return 0; 6696 } 6697 6698 /* 6699 * detach_task() -- detach the task for the migration specified in env 6700 */ 6701 static void detach_task(struct task_struct *p, struct lb_env *env) 6702 { 6703 lockdep_assert_held(&env->src_rq->lock); 6704 6705 p->on_rq = TASK_ON_RQ_MIGRATING; 6706 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); 6707 set_task_cpu(p, env->dst_cpu); 6708 } 6709 6710 /* 6711 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as 6712 * part of active balancing operations within "domain". 6713 * 6714 * Returns a task if successful and NULL otherwise. 
6715 */ 6716 static struct task_struct *detach_one_task(struct lb_env *env) 6717 { 6718 struct task_struct *p, *n; 6719 6720 lockdep_assert_held(&env->src_rq->lock); 6721 6722 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) { 6723 if (!can_migrate_task(p, env)) 6724 continue; 6725 6726 detach_task(p, env); 6727 6728 /* 6729 * Right now, this is only the second place where 6730 * lb_gained[env->idle] is updated (other is detach_tasks) 6731 * so we can safely collect stats here rather than 6732 * inside detach_tasks(). 6733 */ 6734 schedstat_inc(env->sd->lb_gained[env->idle]); 6735 return p; 6736 } 6737 return NULL; 6738 } 6739 6740 static const unsigned int sched_nr_migrate_break = 32; 6741 6742 /* 6743 * detach_tasks() -- tries to detach up to imbalance weighted load from 6744 * busiest_rq, as part of a balancing operation within domain "sd". 6745 * 6746 * Returns number of detached tasks if successful and 0 otherwise. 6747 */ 6748 static int detach_tasks(struct lb_env *env) 6749 { 6750 struct list_head *tasks = &env->src_rq->cfs_tasks; 6751 struct task_struct *p; 6752 unsigned long load; 6753 int detached = 0; 6754 6755 lockdep_assert_held(&env->src_rq->lock); 6756 6757 if (env->imbalance <= 0) 6758 return 0; 6759 6760 while (!list_empty(tasks)) { 6761 /* 6762 * We don't want to steal all, otherwise we may be treated likewise, 6763 * which could at worst lead to a livelock crash. 6764 */ 6765 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) 6766 break; 6767 6768 p = list_first_entry(tasks, struct task_struct, se.group_node); 6769 6770 env->loop++; 6771 /* We've more or less seen every task there is, call it quits */ 6772 if (env->loop > env->loop_max) 6773 break; 6774 6775 /* take a breather every nr_migrate tasks */ 6776 if (env->loop > env->loop_break) { 6777 env->loop_break += sched_nr_migrate_break; 6778 env->flags |= LBF_NEED_BREAK; 6779 break; 6780 } 6781 6782 if (!can_migrate_task(p, env)) 6783 goto next; 6784 6785 load = task_h_load(p); 6786 6787 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) 6788 goto next; 6789 6790 if ((load / 2) > env->imbalance) 6791 goto next; 6792 6793 detach_task(p, env); 6794 list_add(&p->se.group_node, &env->tasks); 6795 6796 detached++; 6797 env->imbalance -= load; 6798 6799 #ifdef CONFIG_PREEMPT 6800 /* 6801 * NEWIDLE balancing is a source of latency, so preemptible 6802 * kernels will stop after the first task is detached to minimize 6803 * the critical section. 6804 */ 6805 if (env->idle == CPU_NEWLY_IDLE) 6806 break; 6807 #endif 6808 6809 /* 6810 * We only want to steal up to the prescribed amount of 6811 * weighted load. 6812 */ 6813 if (env->imbalance <= 0) 6814 break; 6815 6816 continue; 6817 next: 6818 list_move_tail(&p->se.group_node, tasks); 6819 } 6820 6821 /* 6822 * Right now, this is one of only two places we collect this stat 6823 * so we can safely collect detach_one_task() stats here rather 6824 * than inside detach_one_task(). 6825 */ 6826 schedstat_add(env->sd->lb_gained[env->idle], detached); 6827 6828 return detached; 6829 } 6830 6831 /* 6832 * attach_task() -- attach the task detached by detach_task() to its new rq. 
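 *
 * As an aside, a simplified sketch of how callers pair the detach and
 * attach helpers (illustrative only, not a copy of the in-kernel
 * callers; attach_one_task() is defined a little further below):
 */

/*
 * Interrupts stay disabled from the moment the task is taken off the
 * source runqueue until it has been attached to the destination,
 * mirroring what the callers further down in this file do.
 */
static void example_move_one_task(struct lb_env *env)
{
	struct task_struct *p;
	struct rq_flags rf;

	rq_lock_irqsave(env->src_rq, &rf);
	update_rq_clock(env->src_rq);
	p = detach_one_task(env);		/* p is now off any runqueue */
	rq_unlock(env->src_rq, &rf);

	if (p)
		attach_one_task(env->dst_rq, p);	/* takes dst_rq->lock itself */

	local_irq_restore(rf.flags);
}

/*
 * attach_task() itself: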
6833 */ 6834 static void attach_task(struct rq *rq, struct task_struct *p) 6835 { 6836 lockdep_assert_held(&rq->lock); 6837 6838 BUG_ON(task_rq(p) != rq); 6839 activate_task(rq, p, ENQUEUE_NOCLOCK); 6840 p->on_rq = TASK_ON_RQ_QUEUED; 6841 check_preempt_curr(rq, p, 0); 6842 } 6843 6844 /* 6845 * attach_one_task() -- attaches the task returned from detach_one_task() to 6846 * its new rq. 6847 */ 6848 static void attach_one_task(struct rq *rq, struct task_struct *p) 6849 { 6850 struct rq_flags rf; 6851 6852 rq_lock(rq, &rf); 6853 update_rq_clock(rq); 6854 attach_task(rq, p); 6855 rq_unlock(rq, &rf); 6856 } 6857 6858 /* 6859 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their 6860 * new rq. 6861 */ 6862 static void attach_tasks(struct lb_env *env) 6863 { 6864 struct list_head *tasks = &env->tasks; 6865 struct task_struct *p; 6866 struct rq_flags rf; 6867 6868 rq_lock(env->dst_rq, &rf); 6869 update_rq_clock(env->dst_rq); 6870 6871 while (!list_empty(tasks)) { 6872 p = list_first_entry(tasks, struct task_struct, se.group_node); 6873 list_del_init(&p->se.group_node); 6874 6875 attach_task(env->dst_rq, p); 6876 } 6877 6878 rq_unlock(env->dst_rq, &rf); 6879 } 6880 6881 #ifdef CONFIG_FAIR_GROUP_SCHED 6882 6883 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) 6884 { 6885 if (cfs_rq->load.weight) 6886 return false; 6887 6888 if (cfs_rq->avg.load_sum) 6889 return false; 6890 6891 if (cfs_rq->avg.util_sum) 6892 return false; 6893 6894 if (cfs_rq->runnable_load_sum) 6895 return false; 6896 6897 return true; 6898 } 6899 6900 static void update_blocked_averages(int cpu) 6901 { 6902 struct rq *rq = cpu_rq(cpu); 6903 struct cfs_rq *cfs_rq, *pos; 6904 struct rq_flags rf; 6905 6906 rq_lock_irqsave(rq, &rf); 6907 update_rq_clock(rq); 6908 6909 /* 6910 * Iterates the task_group tree in a bottom up fashion, see 6911 * list_add_leaf_cfs_rq() for details. 6912 */ 6913 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { 6914 struct sched_entity *se; 6915 6916 /* throttled entities do not contribute to load */ 6917 if (throttled_hierarchy(cfs_rq)) 6918 continue; 6919 6920 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true)) 6921 update_tg_load_avg(cfs_rq, 0); 6922 6923 /* Propagate pending load changes to the parent, if any: */ 6924 se = cfs_rq->tg->se[cpu]; 6925 if (se && !skip_blocked_update(se)) 6926 update_load_avg(se, 0); 6927 6928 /* 6929 * There can be a lot of idle CPU cgroups. Don't let fully 6930 * decayed cfs_rqs linger on the list. 6931 */ 6932 if (cfs_rq_is_decayed(cfs_rq)) 6933 list_del_leaf_cfs_rq(cfs_rq); 6934 } 6935 rq_unlock_irqrestore(rq, &rf); 6936 } 6937 6938 /* 6939 * Compute the hierarchical load factor for cfs_rq and all its ascendants. 6940 * This needs to be done in a top-down fashion because the load of a child 6941 * group is a fraction of its parents load. 
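 *
 * A standalone sketch of that top-down scaling (not kernel code, names
 * invented for illustration): each level multiplies the parent's
 * hierarchical load by this level's share of its parent runqueue.  With
 * a root load of 2048 and a group entity contributing 1024 of a parent
 * load of 2048, the group's h_load comes out at ~1024; a task owning
 * half of that group's load then ends up at ~512.
 */

static unsigned long example_h_load(unsigned long root_load,
				    const unsigned long *se_load_avg,
				    const unsigned long *parent_load_avg,
				    int depth)
{
	unsigned long h_load = root_load;
	int level;

	for (level = 0; level < depth; level++) {
		/* scale by this level's share of its parent (+1 avoids /0) */
		h_load = h_load * se_load_avg[level] /
			 (parent_load_avg[level] + 1);
	}

	return h_load;
}

/*
 * The in-kernel version walks the sched_entity chain instead and caches
 * the result for one jiffy: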
6942 */ 6943 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) 6944 { 6945 struct rq *rq = rq_of(cfs_rq); 6946 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; 6947 unsigned long now = jiffies; 6948 unsigned long load; 6949 6950 if (cfs_rq->last_h_load_update == now) 6951 return; 6952 6953 cfs_rq->h_load_next = NULL; 6954 for_each_sched_entity(se) { 6955 cfs_rq = cfs_rq_of(se); 6956 cfs_rq->h_load_next = se; 6957 if (cfs_rq->last_h_load_update == now) 6958 break; 6959 } 6960 6961 if (!se) { 6962 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); 6963 cfs_rq->last_h_load_update = now; 6964 } 6965 6966 while ((se = cfs_rq->h_load_next) != NULL) { 6967 load = cfs_rq->h_load; 6968 load = div64_ul(load * se->avg.load_avg, 6969 cfs_rq_load_avg(cfs_rq) + 1); 6970 cfs_rq = group_cfs_rq(se); 6971 cfs_rq->h_load = load; 6972 cfs_rq->last_h_load_update = now; 6973 } 6974 } 6975 6976 static unsigned long task_h_load(struct task_struct *p) 6977 { 6978 struct cfs_rq *cfs_rq = task_cfs_rq(p); 6979 6980 update_cfs_rq_h_load(cfs_rq); 6981 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, 6982 cfs_rq_load_avg(cfs_rq) + 1); 6983 } 6984 #else 6985 static inline void update_blocked_averages(int cpu) 6986 { 6987 struct rq *rq = cpu_rq(cpu); 6988 struct cfs_rq *cfs_rq = &rq->cfs; 6989 struct rq_flags rf; 6990 6991 rq_lock_irqsave(rq, &rf); 6992 update_rq_clock(rq); 6993 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true); 6994 rq_unlock_irqrestore(rq, &rf); 6995 } 6996 6997 static unsigned long task_h_load(struct task_struct *p) 6998 { 6999 return p->se.avg.load_avg; 7000 } 7001 #endif 7002 7003 /********** Helpers for find_busiest_group ************************/ 7004 7005 enum group_type { 7006 group_other = 0, 7007 group_imbalanced, 7008 group_overloaded, 7009 }; 7010 7011 /* 7012 * sg_lb_stats - stats of a sched_group required for load_balancing 7013 */ 7014 struct sg_lb_stats { 7015 unsigned long avg_load; /*Avg load across the CPUs of the group */ 7016 unsigned long group_load; /* Total load over the CPUs of the group */ 7017 unsigned long sum_weighted_load; /* Weighted load of group's tasks */ 7018 unsigned long load_per_task; 7019 unsigned long group_capacity; 7020 unsigned long group_util; /* Total utilization of the group */ 7021 unsigned int sum_nr_running; /* Nr tasks running in the group */ 7022 unsigned int idle_cpus; 7023 unsigned int group_weight; 7024 enum group_type group_type; 7025 int group_no_capacity; 7026 #ifdef CONFIG_NUMA_BALANCING 7027 unsigned int nr_numa_running; 7028 unsigned int nr_preferred_running; 7029 #endif 7030 }; 7031 7032 /* 7033 * sd_lb_stats - Structure to store the statistics of a sched_domain 7034 * during load balancing. 7035 */ 7036 struct sd_lb_stats { 7037 struct sched_group *busiest; /* Busiest group in this sd */ 7038 struct sched_group *local; /* Local group in this sd */ 7039 unsigned long total_load; /* Total load of all groups in sd */ 7040 unsigned long total_capacity; /* Total capacity of all groups in sd */ 7041 unsigned long avg_load; /* Average load across all groups in sd */ 7042 7043 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ 7044 struct sg_lb_stats local_stat; /* Statistics of the local group */ 7045 }; 7046 7047 static inline void init_sd_lb_stats(struct sd_lb_stats *sds) 7048 { 7049 /* 7050 * Skimp on the clearing to avoid duplicate work. We can avoid clearing 7051 * local_stat because update_sg_lb_stats() does a full clear/assignment. 
7052 * We must however clear busiest_stat::avg_load because 7053 * update_sd_pick_busiest() reads this before assignment. 7054 */ 7055 *sds = (struct sd_lb_stats){ 7056 .busiest = NULL, 7057 .local = NULL, 7058 .total_load = 0UL, 7059 .total_capacity = 0UL, 7060 .busiest_stat = { 7061 .avg_load = 0UL, 7062 .sum_nr_running = 0, 7063 .group_type = group_other, 7064 }, 7065 }; 7066 } 7067 7068 /** 7069 * get_sd_load_idx - Obtain the load index for a given sched domain. 7070 * @sd: The sched_domain whose load_idx is to be obtained. 7071 * @idle: The idle status of the CPU for whose sd load_idx is obtained. 7072 * 7073 * Return: The load index. 7074 */ 7075 static inline int get_sd_load_idx(struct sched_domain *sd, 7076 enum cpu_idle_type idle) 7077 { 7078 int load_idx; 7079 7080 switch (idle) { 7081 case CPU_NOT_IDLE: 7082 load_idx = sd->busy_idx; 7083 break; 7084 7085 case CPU_NEWLY_IDLE: 7086 load_idx = sd->newidle_idx; 7087 break; 7088 default: 7089 load_idx = sd->idle_idx; 7090 break; 7091 } 7092 7093 return load_idx; 7094 } 7095 7096 static unsigned long scale_rt_capacity(int cpu) 7097 { 7098 struct rq *rq = cpu_rq(cpu); 7099 u64 total, used, age_stamp, avg; 7100 s64 delta; 7101 7102 /* 7103 * Since we're reading these variables without serialization make sure 7104 * we read them once before doing sanity checks on them. 7105 */ 7106 age_stamp = READ_ONCE(rq->age_stamp); 7107 avg = READ_ONCE(rq->rt_avg); 7108 delta = __rq_clock_broken(rq) - age_stamp; 7109 7110 if (unlikely(delta < 0)) 7111 delta = 0; 7112 7113 total = sched_avg_period() + delta; 7114 7115 used = div_u64(avg, total); 7116 7117 if (likely(used < SCHED_CAPACITY_SCALE)) 7118 return SCHED_CAPACITY_SCALE - used; 7119 7120 return 1; 7121 } 7122 7123 static void update_cpu_capacity(struct sched_domain *sd, int cpu) 7124 { 7125 unsigned long capacity = arch_scale_cpu_capacity(sd, cpu); 7126 struct sched_group *sdg = sd->groups; 7127 7128 cpu_rq(cpu)->cpu_capacity_orig = capacity; 7129 7130 capacity *= scale_rt_capacity(cpu); 7131 capacity >>= SCHED_CAPACITY_SHIFT; 7132 7133 if (!capacity) 7134 capacity = 1; 7135 7136 cpu_rq(cpu)->cpu_capacity = capacity; 7137 sdg->sgc->capacity = capacity; 7138 sdg->sgc->min_capacity = capacity; 7139 } 7140 7141 void update_group_capacity(struct sched_domain *sd, int cpu) 7142 { 7143 struct sched_domain *child = sd->child; 7144 struct sched_group *group, *sdg = sd->groups; 7145 unsigned long capacity, min_capacity; 7146 unsigned long interval; 7147 7148 interval = msecs_to_jiffies(sd->balance_interval); 7149 interval = clamp(interval, 1UL, max_load_balance_interval); 7150 sdg->sgc->next_update = jiffies + interval; 7151 7152 if (!child) { 7153 update_cpu_capacity(sd, cpu); 7154 return; 7155 } 7156 7157 capacity = 0; 7158 min_capacity = ULONG_MAX; 7159 7160 if (child->flags & SD_OVERLAP) { 7161 /* 7162 * SD_OVERLAP domains cannot assume that child groups 7163 * span the current group. 7164 */ 7165 7166 for_each_cpu(cpu, sched_group_span(sdg)) { 7167 struct sched_group_capacity *sgc; 7168 struct rq *rq = cpu_rq(cpu); 7169 7170 /* 7171 * build_sched_domains() -> init_sched_groups_capacity() 7172 * gets here before we've attached the domains to the 7173 * runqueues. 7174 * 7175 * Use capacity_of(), which is set irrespective of domains 7176 * in update_cpu_capacity(). 7177 * 7178 * This avoids capacity from being 0 and 7179 * causing divide-by-zero issues on boot. 
7180 */ 7181 if (unlikely(!rq->sd)) { 7182 capacity += capacity_of(cpu); 7183 } else { 7184 sgc = rq->sd->groups->sgc; 7185 capacity += sgc->capacity; 7186 } 7187 7188 min_capacity = min(capacity, min_capacity); 7189 } 7190 } else { 7191 /* 7192 * !SD_OVERLAP domains can assume that child groups 7193 * span the current group. 7194 */ 7195 7196 group = child->groups; 7197 do { 7198 struct sched_group_capacity *sgc = group->sgc; 7199 7200 capacity += sgc->capacity; 7201 min_capacity = min(sgc->min_capacity, min_capacity); 7202 group = group->next; 7203 } while (group != child->groups); 7204 } 7205 7206 sdg->sgc->capacity = capacity; 7207 sdg->sgc->min_capacity = min_capacity; 7208 } 7209 7210 /* 7211 * Check whether the capacity of the rq has been noticeably reduced by side 7212 * activity. The imbalance_pct is used for the threshold. 7213 * Return true is the capacity is reduced 7214 */ 7215 static inline int 7216 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) 7217 { 7218 return ((rq->cpu_capacity * sd->imbalance_pct) < 7219 (rq->cpu_capacity_orig * 100)); 7220 } 7221 7222 /* 7223 * Group imbalance indicates (and tries to solve) the problem where balancing 7224 * groups is inadequate due to ->cpus_allowed constraints. 7225 * 7226 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a 7227 * cpumask covering 1 cpu of the first group and 3 cpus of the second group. 7228 * Something like: 7229 * 7230 * { 0 1 2 3 } { 4 5 6 7 } 7231 * * * * * 7232 * 7233 * If we were to balance group-wise we'd place two tasks in the first group and 7234 * two tasks in the second group. Clearly this is undesired as it will overload 7235 * cpu 3 and leave one of the cpus in the second group unused. 7236 * 7237 * The current solution to this issue is detecting the skew in the first group 7238 * by noticing the lower domain failed to reach balance and had difficulty 7239 * moving tasks due to affinity constraints. 7240 * 7241 * When this is so detected; this group becomes a candidate for busiest; see 7242 * update_sd_pick_busiest(). And calculate_imbalance() and 7243 * find_busiest_group() avoid some of the usual balance conditions to allow it 7244 * to create an effective group imbalance. 7245 * 7246 * This is a somewhat tricky proposition since the next run might not find the 7247 * group imbalance and decide the groups need to be balanced again. A most 7248 * subtle and fragile situation. 7249 */ 7250 7251 static inline int sg_imbalanced(struct sched_group *group) 7252 { 7253 return group->sgc->imbalance; 7254 } 7255 7256 /* 7257 * group_has_capacity returns true if the group has spare capacity that could 7258 * be used by some tasks. 7259 * We consider that a group has spare capacity if the * number of task is 7260 * smaller than the number of CPUs or if the utilization is lower than the 7261 * available capacity for CFS tasks. 7262 * For the latter, we use a threshold to stabilize the state, to take into 7263 * account the variance of the tasks' load and to return true if the available 7264 * capacity in meaningful for the load balancer. 7265 * As an example, an available capacity of 1% can appear but it doesn't make 7266 * any benefit for the load balance. 
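 *
 * Worked example with made-up numbers: a 4-CPU group (group_weight = 4)
 * running 3 tasks has spare capacity by the first test alone.  With 5
 * tasks the utilization test decides: at group_capacity = 4096,
 * group_util = 3000 and imbalance_pct = 117, 4096 * 100 = 409600 is
 * greater than 3000 * 117 = 351000, so the group still counts as having
 * capacity; at group_util = 3600 (421200) it no longer does.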
7267 */ 7268 static inline bool 7269 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs) 7270 { 7271 if (sgs->sum_nr_running < sgs->group_weight) 7272 return true; 7273 7274 if ((sgs->group_capacity * 100) > 7275 (sgs->group_util * env->sd->imbalance_pct)) 7276 return true; 7277 7278 return false; 7279 } 7280 7281 /* 7282 * group_is_overloaded returns true if the group has more tasks than it can 7283 * handle. 7284 * group_is_overloaded is not equals to !group_has_capacity because a group 7285 * with the exact right number of tasks, has no more spare capacity but is not 7286 * overloaded so both group_has_capacity and group_is_overloaded return 7287 * false. 7288 */ 7289 static inline bool 7290 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) 7291 { 7292 if (sgs->sum_nr_running <= sgs->group_weight) 7293 return false; 7294 7295 if ((sgs->group_capacity * 100) < 7296 (sgs->group_util * env->sd->imbalance_pct)) 7297 return true; 7298 7299 return false; 7300 } 7301 7302 /* 7303 * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller 7304 * per-CPU capacity than sched_group ref. 7305 */ 7306 static inline bool 7307 group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref) 7308 { 7309 return sg->sgc->min_capacity * capacity_margin < 7310 ref->sgc->min_capacity * 1024; 7311 } 7312 7313 static inline enum 7314 group_type group_classify(struct sched_group *group, 7315 struct sg_lb_stats *sgs) 7316 { 7317 if (sgs->group_no_capacity) 7318 return group_overloaded; 7319 7320 if (sg_imbalanced(group)) 7321 return group_imbalanced; 7322 7323 return group_other; 7324 } 7325 7326 /** 7327 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 7328 * @env: The load balancing environment. 7329 * @group: sched_group whose statistics are to be updated. 7330 * @load_idx: Load index of sched_domain of this_cpu for load calc. 7331 * @local_group: Does group contain this_cpu. 7332 * @sgs: variable to hold the statistics for this group. 7333 * @overload: Indicate more than one runnable task for any CPU. 
7334 */ 7335 static inline void update_sg_lb_stats(struct lb_env *env, 7336 struct sched_group *group, int load_idx, 7337 int local_group, struct sg_lb_stats *sgs, 7338 bool *overload) 7339 { 7340 unsigned long load; 7341 int i, nr_running; 7342 7343 memset(sgs, 0, sizeof(*sgs)); 7344 7345 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 7346 struct rq *rq = cpu_rq(i); 7347 7348 /* Bias balancing toward cpus of our domain */ 7349 if (local_group) 7350 load = target_load(i, load_idx); 7351 else 7352 load = source_load(i, load_idx); 7353 7354 sgs->group_load += load; 7355 sgs->group_util += cpu_util(i); 7356 sgs->sum_nr_running += rq->cfs.h_nr_running; 7357 7358 nr_running = rq->nr_running; 7359 if (nr_running > 1) 7360 *overload = true; 7361 7362 #ifdef CONFIG_NUMA_BALANCING 7363 sgs->nr_numa_running += rq->nr_numa_running; 7364 sgs->nr_preferred_running += rq->nr_preferred_running; 7365 #endif 7366 sgs->sum_weighted_load += weighted_cpuload(i); 7367 /* 7368 * No need to call idle_cpu() if nr_running is not 0 7369 */ 7370 if (!nr_running && idle_cpu(i)) 7371 sgs->idle_cpus++; 7372 } 7373 7374 /* Adjust by relative CPU capacity of the group */ 7375 sgs->group_capacity = group->sgc->capacity; 7376 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity; 7377 7378 if (sgs->sum_nr_running) 7379 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; 7380 7381 sgs->group_weight = group->group_weight; 7382 7383 sgs->group_no_capacity = group_is_overloaded(env, sgs); 7384 sgs->group_type = group_classify(group, sgs); 7385 } 7386 7387 /** 7388 * update_sd_pick_busiest - return 1 on busiest group 7389 * @env: The load balancing environment. 7390 * @sds: sched_domain statistics 7391 * @sg: sched_group candidate to be checked for being the busiest 7392 * @sgs: sched_group statistics 7393 * 7394 * Determine if @sg is a busier group than the previously selected 7395 * busiest group. 7396 * 7397 * Return: %true if @sg is a busier group than the previously selected 7398 * busiest group. %false otherwise. 7399 */ 7400 static bool update_sd_pick_busiest(struct lb_env *env, 7401 struct sd_lb_stats *sds, 7402 struct sched_group *sg, 7403 struct sg_lb_stats *sgs) 7404 { 7405 struct sg_lb_stats *busiest = &sds->busiest_stat; 7406 7407 if (sgs->group_type > busiest->group_type) 7408 return true; 7409 7410 if (sgs->group_type < busiest->group_type) 7411 return false; 7412 7413 if (sgs->avg_load <= busiest->avg_load) 7414 return false; 7415 7416 if (!(env->sd->flags & SD_ASYM_CPUCAPACITY)) 7417 goto asym_packing; 7418 7419 /* 7420 * Candidate sg has no more than one task per CPU and 7421 * has higher per-CPU capacity. Migrating tasks to less 7422 * capable CPUs may harm throughput. Maximize throughput, 7423 * power/energy consequences are not considered. 7424 */ 7425 if (sgs->sum_nr_running <= sgs->group_weight && 7426 group_smaller_cpu_capacity(sds->local, sg)) 7427 return false; 7428 7429 asym_packing: 7430 /* This is the busiest node in its class. */ 7431 if (!(env->sd->flags & SD_ASYM_PACKING)) 7432 return true; 7433 7434 /* No ASYM_PACKING if target cpu is already busy */ 7435 if (env->idle == CPU_NOT_IDLE) 7436 return true; 7437 /* 7438 * ASYM_PACKING needs to move all the work to the highest 7439 * prority CPUs in the group, therefore mark all groups 7440 * of lower priority than ourself as busy. 
7441 */ 7442 if (sgs->sum_nr_running && 7443 sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) { 7444 if (!sds->busiest) 7445 return true; 7446 7447 /* Prefer to move from lowest priority cpu's work */ 7448 if (sched_asym_prefer(sds->busiest->asym_prefer_cpu, 7449 sg->asym_prefer_cpu)) 7450 return true; 7451 } 7452 7453 return false; 7454 } 7455 7456 #ifdef CONFIG_NUMA_BALANCING 7457 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 7458 { 7459 if (sgs->sum_nr_running > sgs->nr_numa_running) 7460 return regular; 7461 if (sgs->sum_nr_running > sgs->nr_preferred_running) 7462 return remote; 7463 return all; 7464 } 7465 7466 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 7467 { 7468 if (rq->nr_running > rq->nr_numa_running) 7469 return regular; 7470 if (rq->nr_running > rq->nr_preferred_running) 7471 return remote; 7472 return all; 7473 } 7474 #else 7475 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) 7476 { 7477 return all; 7478 } 7479 7480 static inline enum fbq_type fbq_classify_rq(struct rq *rq) 7481 { 7482 return regular; 7483 } 7484 #endif /* CONFIG_NUMA_BALANCING */ 7485 7486 /** 7487 * update_sd_lb_stats - Update sched_domain's statistics for load balancing. 7488 * @env: The load balancing environment. 7489 * @sds: variable to hold the statistics for this sched_domain. 7490 */ 7491 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) 7492 { 7493 struct sched_domain *child = env->sd->child; 7494 struct sched_group *sg = env->sd->groups; 7495 struct sg_lb_stats *local = &sds->local_stat; 7496 struct sg_lb_stats tmp_sgs; 7497 int load_idx, prefer_sibling = 0; 7498 bool overload = false; 7499 7500 if (child && child->flags & SD_PREFER_SIBLING) 7501 prefer_sibling = 1; 7502 7503 load_idx = get_sd_load_idx(env->sd, env->idle); 7504 7505 do { 7506 struct sg_lb_stats *sgs = &tmp_sgs; 7507 int local_group; 7508 7509 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); 7510 if (local_group) { 7511 sds->local = sg; 7512 sgs = local; 7513 7514 if (env->idle != CPU_NEWLY_IDLE || 7515 time_after_eq(jiffies, sg->sgc->next_update)) 7516 update_group_capacity(env->sd, env->dst_cpu); 7517 } 7518 7519 update_sg_lb_stats(env, sg, load_idx, local_group, sgs, 7520 &overload); 7521 7522 if (local_group) 7523 goto next_group; 7524 7525 /* 7526 * In case the child domain prefers tasks go to siblings 7527 * first, lower the sg capacity so that we'll try 7528 * and move all the excess tasks away. We lower the capacity 7529 * of a group only if the local group has the capacity to fit 7530 * these excess tasks. The extra check prevents the case where 7531 * you always pull from the heaviest group when it is already 7532 * under-utilized (possible with a large weight task outweighs 7533 * the tasks on the system). 
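		 *
		 * Concretely (hypothetical numbers): with an SMT child domain
		 * setting SD_PREFER_SIBLING, a local core running 1 task and a
		 * sibling core running 3, the sibling satisfies 3 > 1 + 1 and
		 * is force-classified as group_overloaded below, so one of its
		 * tasks gets pulled even though its raw capacity/utilization
		 * figures alone might not have triggered a balance.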
7534 */ 7535 if (prefer_sibling && sds->local && 7536 group_has_capacity(env, local) && 7537 (sgs->sum_nr_running > local->sum_nr_running + 1)) { 7538 sgs->group_no_capacity = 1; 7539 sgs->group_type = group_classify(sg, sgs); 7540 } 7541 7542 if (update_sd_pick_busiest(env, sds, sg, sgs)) { 7543 sds->busiest = sg; 7544 sds->busiest_stat = *sgs; 7545 } 7546 7547 next_group: 7548 /* Now, start updating sd_lb_stats */ 7549 sds->total_load += sgs->group_load; 7550 sds->total_capacity += sgs->group_capacity; 7551 7552 sg = sg->next; 7553 } while (sg != env->sd->groups); 7554 7555 if (env->sd->flags & SD_NUMA) 7556 env->fbq_type = fbq_classify_group(&sds->busiest_stat); 7557 7558 if (!env->sd->parent) { 7559 /* update overload indicator if we are at root domain */ 7560 if (env->dst_rq->rd->overload != overload) 7561 env->dst_rq->rd->overload = overload; 7562 } 7563 7564 } 7565 7566 /** 7567 * check_asym_packing - Check to see if the group is packed into the 7568 * sched domain. 7569 * 7570 * This is primarily intended to used at the sibling level. Some 7571 * cores like POWER7 prefer to use lower numbered SMT threads. In the 7572 * case of POWER7, it can move to lower SMT modes only when higher 7573 * threads are idle. When in lower SMT modes, the threads will 7574 * perform better since they share less core resources. Hence when we 7575 * have idle threads, we want them to be the higher ones. 7576 * 7577 * This packing function is run on idle threads. It checks to see if 7578 * the busiest CPU in this domain (core in the P7 case) has a higher 7579 * CPU number than the packing function is being run on. Here we are 7580 * assuming lower CPU number will be equivalent to lower a SMT thread 7581 * number. 7582 * 7583 * Return: 1 when packing is required and a task should be moved to 7584 * this CPU. The amount of the imbalance is returned in *imbalance. 7585 * 7586 * @env: The load balancing environment. 7587 * @sds: Statistics of the sched_domain which is to be packed 7588 */ 7589 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) 7590 { 7591 int busiest_cpu; 7592 7593 if (!(env->sd->flags & SD_ASYM_PACKING)) 7594 return 0; 7595 7596 if (env->idle == CPU_NOT_IDLE) 7597 return 0; 7598 7599 if (!sds->busiest) 7600 return 0; 7601 7602 busiest_cpu = sds->busiest->asym_prefer_cpu; 7603 if (sched_asym_prefer(busiest_cpu, env->dst_cpu)) 7604 return 0; 7605 7606 env->imbalance = DIV_ROUND_CLOSEST( 7607 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity, 7608 SCHED_CAPACITY_SCALE); 7609 7610 return 1; 7611 } 7612 7613 /** 7614 * fix_small_imbalance - Calculate the minor imbalance that exists 7615 * amongst the groups of a sched_domain, during 7616 * load balancing. 7617 * @env: The load balancing environment. 7618 * @sds: Statistics of the sched_domain whose imbalance is to be calculated. 
7619 */ 7620 static inline 7621 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 7622 { 7623 unsigned long tmp, capa_now = 0, capa_move = 0; 7624 unsigned int imbn = 2; 7625 unsigned long scaled_busy_load_per_task; 7626 struct sg_lb_stats *local, *busiest; 7627 7628 local = &sds->local_stat; 7629 busiest = &sds->busiest_stat; 7630 7631 if (!local->sum_nr_running) 7632 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu); 7633 else if (busiest->load_per_task > local->load_per_task) 7634 imbn = 1; 7635 7636 scaled_busy_load_per_task = 7637 (busiest->load_per_task * SCHED_CAPACITY_SCALE) / 7638 busiest->group_capacity; 7639 7640 if (busiest->avg_load + scaled_busy_load_per_task >= 7641 local->avg_load + (scaled_busy_load_per_task * imbn)) { 7642 env->imbalance = busiest->load_per_task; 7643 return; 7644 } 7645 7646 /* 7647 * OK, we don't have enough imbalance to justify moving tasks, 7648 * however we may be able to increase total CPU capacity used by 7649 * moving them. 7650 */ 7651 7652 capa_now += busiest->group_capacity * 7653 min(busiest->load_per_task, busiest->avg_load); 7654 capa_now += local->group_capacity * 7655 min(local->load_per_task, local->avg_load); 7656 capa_now /= SCHED_CAPACITY_SCALE; 7657 7658 /* Amount of load we'd subtract */ 7659 if (busiest->avg_load > scaled_busy_load_per_task) { 7660 capa_move += busiest->group_capacity * 7661 min(busiest->load_per_task, 7662 busiest->avg_load - scaled_busy_load_per_task); 7663 } 7664 7665 /* Amount of load we'd add */ 7666 if (busiest->avg_load * busiest->group_capacity < 7667 busiest->load_per_task * SCHED_CAPACITY_SCALE) { 7668 tmp = (busiest->avg_load * busiest->group_capacity) / 7669 local->group_capacity; 7670 } else { 7671 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / 7672 local->group_capacity; 7673 } 7674 capa_move += local->group_capacity * 7675 min(local->load_per_task, local->avg_load + tmp); 7676 capa_move /= SCHED_CAPACITY_SCALE; 7677 7678 /* Move if we gain throughput */ 7679 if (capa_move > capa_now) 7680 env->imbalance = busiest->load_per_task; 7681 } 7682 7683 /** 7684 * calculate_imbalance - Calculate the amount of imbalance present within the 7685 * groups of a given sched_domain during load balance. 7686 * @env: load balance environment 7687 * @sds: statistics of the sched_domain whose imbalance is to be calculated. 7688 */ 7689 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 7690 { 7691 unsigned long max_pull, load_above_capacity = ~0UL; 7692 struct sg_lb_stats *local, *busiest; 7693 7694 local = &sds->local_stat; 7695 busiest = &sds->busiest_stat; 7696 7697 if (busiest->group_type == group_imbalanced) { 7698 /* 7699 * In the group_imb case we cannot rely on group-wide averages 7700 * to ensure cpu-load equilibrium, look at wider averages. XXX 7701 */ 7702 busiest->load_per_task = 7703 min(busiest->load_per_task, sds->avg_load); 7704 } 7705 7706 /* 7707 * Avg load of busiest sg can be less and avg load of local sg can 7708 * be greater than avg load across all sgs of sd because avg load 7709 * factors in sg capacity and sgs with smaller group_type are 7710 * skipped when updating the busiest sg: 7711 */ 7712 if (busiest->avg_load <= sds->avg_load || 7713 local->avg_load >= sds->avg_load) { 7714 env->imbalance = 0; 7715 return fix_small_imbalance(env, sds); 7716 } 7717 7718 /* 7719 * If there aren't any idle cpus, avoid creating some. 
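 *
 * (Illustrative example, editorial, assuming SCHED_CAPACITY_SCALE and
 * scale_load_down(NICE_0_LOAD) are both 1024: with
 * busiest->sum_nr_running = 5 on a group whose group_capacity is 4096,
 * i.e. four full CPUs,
 *
 *   load_above_capacity = (5 * 1024 - 4096) * 1024 / 4096 = 256
 *
 * so roughly a quarter of one CPU's worth of load counts as "excess" that
 * may be pulled without leaving the busiest group with an idle CPU.)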
 */
	if (busiest->group_type == group_overloaded &&
	    local->group_type == group_overloaded) {
		load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
		if (load_above_capacity > busiest->group_capacity) {
			load_above_capacity -= busiest->group_capacity;
			load_above_capacity *= scale_load_down(NICE_0_LOAD);
			load_above_capacity /= busiest->group_capacity;
		} else
			load_above_capacity = ~0UL;
	}

	/*
	 * We're trying to get all the cpus to the average_load, so we don't
	 * want to push ourselves above the average load, nor do we wish to
	 * reduce the max loaded cpu below the average load. At the same time,
	 * we also don't want to reduce the group load below the group
	 * capacity. Thus we look for the minimum possible imbalance.
	 */
	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);

	/* How much load to actually move to equalise the imbalance */
	env->imbalance = min(
		max_pull * busiest->group_capacity,
		(sds->avg_load - local->avg_load) * local->group_capacity
	) / SCHED_CAPACITY_SCALE;

	/*
	 * If *imbalance is less than the average load per runnable task,
	 * there is no guarantee that any tasks will be moved, so fall back
	 * to fix_small_imbalance(), which may bump the imbalance to force
	 * at least one task to be moved.
	 */
	if (env->imbalance < busiest->load_per_task)
		return fix_small_imbalance(env, sds);
}

/******* find_busiest_group() helpers end here *********************/

/**
 * find_busiest_group - Returns the busiest group within the sched_domain
 * if there is an imbalance.
 *
 * Also calculates the amount of weighted load which should be moved
 * to restore balance.
 *
 * @env: The load balancing environment.
 *
 * Return: The busiest group if imbalance exists.
 */
static struct sched_group *find_busiest_group(struct lb_env *env)
{
	struct sg_lb_stats *local, *busiest;
	struct sd_lb_stats sds;

	init_sd_lb_stats(&sds);

	/*
	 * Compute the various statistics relevant for load balancing at
	 * this level.
	 */
	update_sd_lb_stats(env, &sds);
	local = &sds.local_stat;
	busiest = &sds.busiest_stat;

	/* ASYM feature bypasses nice load balance check */
	if (check_asym_packing(env, &sds))
		return sds.busiest;

	/* There is no busy sibling group to pull tasks from */
	if (!sds.busiest || busiest->sum_nr_running == 0)
		goto out_balanced;

	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
						/ sds.total_capacity;

	/*
	 * If the busiest group is imbalanced the below checks don't
	 * work because they assume all things are equal, which typically
	 * isn't true due to cpus_allowed constraints and the like.
	 */
	if (busiest->group_type == group_imbalanced)
		goto force_balance;

	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
	if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
	    busiest->group_no_capacity)
		goto force_balance;

	/*
	 * If the local group is busier than the selected busiest group
	 * don't try and pull any tasks.
	 */
	if (local->avg_load >= busiest->avg_load)
		goto out_balanced;

	/*
	 * Don't pull any tasks if this group is already above the domain
	 * average load.
7819 */ 7820 if (local->avg_load >= sds.avg_load) 7821 goto out_balanced; 7822 7823 if (env->idle == CPU_IDLE) { 7824 /* 7825 * This cpu is idle. If the busiest group is not overloaded 7826 * and there is no imbalance between this and busiest group 7827 * wrt idle cpus, it is balanced. The imbalance becomes 7828 * significant if the diff is greater than 1 otherwise we 7829 * might end up to just move the imbalance on another group 7830 */ 7831 if ((busiest->group_type != group_overloaded) && 7832 (local->idle_cpus <= (busiest->idle_cpus + 1))) 7833 goto out_balanced; 7834 } else { 7835 /* 7836 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use 7837 * imbalance_pct to be conservative. 7838 */ 7839 if (100 * busiest->avg_load <= 7840 env->sd->imbalance_pct * local->avg_load) 7841 goto out_balanced; 7842 } 7843 7844 force_balance: 7845 /* Looks like there is an imbalance. Compute it */ 7846 calculate_imbalance(env, &sds); 7847 return sds.busiest; 7848 7849 out_balanced: 7850 env->imbalance = 0; 7851 return NULL; 7852 } 7853 7854 /* 7855 * find_busiest_queue - find the busiest runqueue among the cpus in group. 7856 */ 7857 static struct rq *find_busiest_queue(struct lb_env *env, 7858 struct sched_group *group) 7859 { 7860 struct rq *busiest = NULL, *rq; 7861 unsigned long busiest_load = 0, busiest_capacity = 1; 7862 int i; 7863 7864 for_each_cpu_and(i, sched_group_span(group), env->cpus) { 7865 unsigned long capacity, wl; 7866 enum fbq_type rt; 7867 7868 rq = cpu_rq(i); 7869 rt = fbq_classify_rq(rq); 7870 7871 /* 7872 * We classify groups/runqueues into three groups: 7873 * - regular: there are !numa tasks 7874 * - remote: there are numa tasks that run on the 'wrong' node 7875 * - all: there is no distinction 7876 * 7877 * In order to avoid migrating ideally placed numa tasks, 7878 * ignore those when there's better options. 7879 * 7880 * If we ignore the actual busiest queue to migrate another 7881 * task, the next balance pass can still reduce the busiest 7882 * queue by moving tasks around inside the node. 7883 * 7884 * If we cannot move enough load due to this classification 7885 * the next pass will adjust the group classification and 7886 * allow migration of more tasks. 7887 * 7888 * Both cases only affect the total convergence complexity. 7889 */ 7890 if (rt > env->fbq_type) 7891 continue; 7892 7893 capacity = capacity_of(i); 7894 7895 wl = weighted_cpuload(i); 7896 7897 /* 7898 * When comparing with imbalance, use weighted_cpuload() 7899 * which is not scaled with the cpu capacity. 7900 */ 7901 7902 if (rq->nr_running == 1 && wl > env->imbalance && 7903 !check_cpu_capacity(rq, env->sd)) 7904 continue; 7905 7906 /* 7907 * For the load comparisons with the other cpu's, consider 7908 * the weighted_cpuload() scaled with the cpu capacity, so 7909 * that the load can be moved away from the cpu that is 7910 * potentially running at a lower capacity. 7911 * 7912 * Thus we're looking for max(wl_i / capacity_i), crosswise 7913 * multiplication to rid ourselves of the division works out 7914 * to: wl_i * capacity_j > wl_j * capacity_i; where j is 7915 * our previous maximum. 7916 */ 7917 if (wl * busiest_capacity > busiest_load * capacity) { 7918 busiest_load = wl; 7919 busiest_capacity = capacity; 7920 busiest = rq; 7921 } 7922 } 7923 7924 return busiest; 7925 } 7926 7927 /* 7928 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but 7929 * so long as it is large enough. 
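 *
 * (Editorial note: balance_interval is kept in milliseconds before being
 * converted with msecs_to_jiffies(), so this cap means a domain that keeps
 * finding only pinned tasks backs off to retrying roughly every half
 * second.)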
 */
#define MAX_PINNED_INTERVAL	512

static int need_active_balance(struct lb_env *env)
{
	struct sched_domain *sd = env->sd;

	if (env->idle == CPU_NEWLY_IDLE) {

		/*
		 * ASYM_PACKING needs to force migrate tasks from busy but
		 * lower priority CPUs in order to pack all tasks in the
		 * highest priority CPUs.
		 */
		if ((sd->flags & SD_ASYM_PACKING) &&
		    sched_asym_prefer(env->dst_cpu, env->src_cpu))
			return 1;
	}

	/*
	 * The dst_cpu is idle and the src_cpu has only 1 CFS task.
	 * It's worth migrating the task if the src_cpu's capacity is reduced
	 * because of other sched_class or IRQs, and if more capacity stays
	 * available on dst_cpu.
	 */
	if ((env->idle != CPU_NOT_IDLE) &&
	    (env->src_rq->cfs.h_nr_running == 1)) {
		if ((check_cpu_capacity(env->src_rq, sd)) &&
		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
			return 1;
	}

	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}

static int active_load_balance_cpu_stop(void *data);

static int should_we_balance(struct lb_env *env)
{
	struct sched_group *sg = env->sd->groups;
	int cpu, balance_cpu = -1;

	/*
	 * In the newly idle case, we will allow all the cpus
	 * to do the newly idle load balance.
	 */
	if (env->idle == CPU_NEWLY_IDLE)
		return 1;

	/* Try to find the first idle cpu */
	for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
		if (!idle_cpu(cpu))
			continue;

		balance_cpu = cpu;
		break;
	}

	if (balance_cpu == -1)
		balance_cpu = group_balance_cpu(sg);

	/*
	 * The first idle cpu, or the first cpu (busiest) in this sched group,
	 * is eligible for doing load balancing at this and above domains.
	 */
	return balance_cpu == env->dst_cpu;
}

/*
 * Check this_cpu to ensure it is balanced within domain. Attempt to move
 * tasks if there is an imbalance.
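 *
 * (Editorial overview, not part of the original comment: should_we_balance()
 * first decides whether this CPU is the one that should act at this domain
 * level; find_busiest_group()/find_busiest_queue() then pick a source
 * runqueue, detach_tasks()/attach_tasks() move the load, and if nothing
 * could be moved an active balance may be kicked on the busiest CPU as a
 * last resort.)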
8001 */ 8002 static int load_balance(int this_cpu, struct rq *this_rq, 8003 struct sched_domain *sd, enum cpu_idle_type idle, 8004 int *continue_balancing) 8005 { 8006 int ld_moved, cur_ld_moved, active_balance = 0; 8007 struct sched_domain *sd_parent = sd->parent; 8008 struct sched_group *group; 8009 struct rq *busiest; 8010 struct rq_flags rf; 8011 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); 8012 8013 struct lb_env env = { 8014 .sd = sd, 8015 .dst_cpu = this_cpu, 8016 .dst_rq = this_rq, 8017 .dst_grpmask = sched_group_span(sd->groups), 8018 .idle = idle, 8019 .loop_break = sched_nr_migrate_break, 8020 .cpus = cpus, 8021 .fbq_type = all, 8022 .tasks = LIST_HEAD_INIT(env.tasks), 8023 }; 8024 8025 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); 8026 8027 schedstat_inc(sd->lb_count[idle]); 8028 8029 redo: 8030 if (!should_we_balance(&env)) { 8031 *continue_balancing = 0; 8032 goto out_balanced; 8033 } 8034 8035 group = find_busiest_group(&env); 8036 if (!group) { 8037 schedstat_inc(sd->lb_nobusyg[idle]); 8038 goto out_balanced; 8039 } 8040 8041 busiest = find_busiest_queue(&env, group); 8042 if (!busiest) { 8043 schedstat_inc(sd->lb_nobusyq[idle]); 8044 goto out_balanced; 8045 } 8046 8047 BUG_ON(busiest == env.dst_rq); 8048 8049 schedstat_add(sd->lb_imbalance[idle], env.imbalance); 8050 8051 env.src_cpu = busiest->cpu; 8052 env.src_rq = busiest; 8053 8054 ld_moved = 0; 8055 if (busiest->nr_running > 1) { 8056 /* 8057 * Attempt to move tasks. If find_busiest_group has found 8058 * an imbalance but busiest->nr_running <= 1, the group is 8059 * still unbalanced. ld_moved simply stays zero, so it is 8060 * correctly treated as an imbalance. 8061 */ 8062 env.flags |= LBF_ALL_PINNED; 8063 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); 8064 8065 more_balance: 8066 rq_lock_irqsave(busiest, &rf); 8067 update_rq_clock(busiest); 8068 8069 /* 8070 * cur_ld_moved - load moved in current iteration 8071 * ld_moved - cumulative load moved across iterations 8072 */ 8073 cur_ld_moved = detach_tasks(&env); 8074 8075 /* 8076 * We've detached some tasks from busiest_rq. Every 8077 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely 8078 * unlock busiest->lock, and we are able to be sure 8079 * that nobody can manipulate the tasks in parallel. 8080 * See task_rq_lock() family for the details. 8081 */ 8082 8083 rq_unlock(busiest, &rf); 8084 8085 if (cur_ld_moved) { 8086 attach_tasks(&env); 8087 ld_moved += cur_ld_moved; 8088 } 8089 8090 local_irq_restore(rf.flags); 8091 8092 if (env.flags & LBF_NEED_BREAK) { 8093 env.flags &= ~LBF_NEED_BREAK; 8094 goto more_balance; 8095 } 8096 8097 /* 8098 * Revisit (affine) tasks on src_cpu that couldn't be moved to 8099 * us and move them to an alternate dst_cpu in our sched_group 8100 * where they can run. The upper limit on how many times we 8101 * iterate on same src_cpu is dependent on number of cpus in our 8102 * sched_group. 8103 * 8104 * This changes load balance semantics a bit on who can move 8105 * load to a given_cpu. In addition to the given_cpu itself 8106 * (or a ilb_cpu acting on its behalf where given_cpu is 8107 * nohz-idle), we now have balance_cpu in a position to move 8108 * load to given_cpu. In rare situations, this may cause 8109 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding 8110 * _independently_ and at _same_ time to move some load to 8111 * given_cpu) causing exceess load to be moved to given_cpu. 
8112 * This however should not happen so much in practice and 8113 * moreover subsequent load balance cycles should correct the 8114 * excess load moved. 8115 */ 8116 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { 8117 8118 /* Prevent to re-select dst_cpu via env's cpus */ 8119 cpumask_clear_cpu(env.dst_cpu, env.cpus); 8120 8121 env.dst_rq = cpu_rq(env.new_dst_cpu); 8122 env.dst_cpu = env.new_dst_cpu; 8123 env.flags &= ~LBF_DST_PINNED; 8124 env.loop = 0; 8125 env.loop_break = sched_nr_migrate_break; 8126 8127 /* 8128 * Go back to "more_balance" rather than "redo" since we 8129 * need to continue with same src_cpu. 8130 */ 8131 goto more_balance; 8132 } 8133 8134 /* 8135 * We failed to reach balance because of affinity. 8136 */ 8137 if (sd_parent) { 8138 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 8139 8140 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) 8141 *group_imbalance = 1; 8142 } 8143 8144 /* All tasks on this runqueue were pinned by CPU affinity */ 8145 if (unlikely(env.flags & LBF_ALL_PINNED)) { 8146 cpumask_clear_cpu(cpu_of(busiest), cpus); 8147 /* 8148 * Attempting to continue load balancing at the current 8149 * sched_domain level only makes sense if there are 8150 * active CPUs remaining as possible busiest CPUs to 8151 * pull load from which are not contained within the 8152 * destination group that is receiving any migrated 8153 * load. 8154 */ 8155 if (!cpumask_subset(cpus, env.dst_grpmask)) { 8156 env.loop = 0; 8157 env.loop_break = sched_nr_migrate_break; 8158 goto redo; 8159 } 8160 goto out_all_pinned; 8161 } 8162 } 8163 8164 if (!ld_moved) { 8165 schedstat_inc(sd->lb_failed[idle]); 8166 /* 8167 * Increment the failure counter only on periodic balance. 8168 * We do not want newidle balance, which can be very 8169 * frequent, pollute the failure counter causing 8170 * excessive cache_hot migrations and active balances. 8171 */ 8172 if (idle != CPU_NEWLY_IDLE) 8173 sd->nr_balance_failed++; 8174 8175 if (need_active_balance(&env)) { 8176 unsigned long flags; 8177 8178 raw_spin_lock_irqsave(&busiest->lock, flags); 8179 8180 /* don't kick the active_load_balance_cpu_stop, 8181 * if the curr task on busiest cpu can't be 8182 * moved to this_cpu 8183 */ 8184 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { 8185 raw_spin_unlock_irqrestore(&busiest->lock, 8186 flags); 8187 env.flags |= LBF_ALL_PINNED; 8188 goto out_one_pinned; 8189 } 8190 8191 /* 8192 * ->active_balance synchronizes accesses to 8193 * ->active_balance_work. Once set, it's cleared 8194 * only after active load balance is finished. 8195 */ 8196 if (!busiest->active_balance) { 8197 busiest->active_balance = 1; 8198 busiest->push_cpu = this_cpu; 8199 active_balance = 1; 8200 } 8201 raw_spin_unlock_irqrestore(&busiest->lock, flags); 8202 8203 if (active_balance) { 8204 stop_one_cpu_nowait(cpu_of(busiest), 8205 active_load_balance_cpu_stop, busiest, 8206 &busiest->active_balance_work); 8207 } 8208 8209 /* We've kicked active balancing, force task migration. */ 8210 sd->nr_balance_failed = sd->cache_nice_tries+1; 8211 } 8212 } else 8213 sd->nr_balance_failed = 0; 8214 8215 if (likely(!active_balance)) { 8216 /* We were unbalanced, so reset the balancing interval */ 8217 sd->balance_interval = sd->min_interval; 8218 } else { 8219 /* 8220 * If we've begun active balancing, start to back off. This 8221 * case may not be covered by the all_pinned logic if there 8222 * is only 1 task on the busy runqueue (because we don't call 8223 * detach_tasks). 
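 *
 * (Illustrative, editorial: if min_interval were 8 ms, repeated active
 * balancing would grow balance_interval to 16, 32, 64 ms and so on up to
 * max_interval, spacing out further balance attempts on this domain.)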
8224 */ 8225 if (sd->balance_interval < sd->max_interval) 8226 sd->balance_interval *= 2; 8227 } 8228 8229 goto out; 8230 8231 out_balanced: 8232 /* 8233 * We reach balance although we may have faced some affinity 8234 * constraints. Clear the imbalance flag if it was set. 8235 */ 8236 if (sd_parent) { 8237 int *group_imbalance = &sd_parent->groups->sgc->imbalance; 8238 8239 if (*group_imbalance) 8240 *group_imbalance = 0; 8241 } 8242 8243 out_all_pinned: 8244 /* 8245 * We reach balance because all tasks are pinned at this level so 8246 * we can't migrate them. Let the imbalance flag set so parent level 8247 * can try to migrate them. 8248 */ 8249 schedstat_inc(sd->lb_balanced[idle]); 8250 8251 sd->nr_balance_failed = 0; 8252 8253 out_one_pinned: 8254 /* tune up the balancing interval */ 8255 if (((env.flags & LBF_ALL_PINNED) && 8256 sd->balance_interval < MAX_PINNED_INTERVAL) || 8257 (sd->balance_interval < sd->max_interval)) 8258 sd->balance_interval *= 2; 8259 8260 ld_moved = 0; 8261 out: 8262 return ld_moved; 8263 } 8264 8265 static inline unsigned long 8266 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) 8267 { 8268 unsigned long interval = sd->balance_interval; 8269 8270 if (cpu_busy) 8271 interval *= sd->busy_factor; 8272 8273 /* scale ms to jiffies */ 8274 interval = msecs_to_jiffies(interval); 8275 interval = clamp(interval, 1UL, max_load_balance_interval); 8276 8277 return interval; 8278 } 8279 8280 static inline void 8281 update_next_balance(struct sched_domain *sd, unsigned long *next_balance) 8282 { 8283 unsigned long interval, next; 8284 8285 /* used by idle balance, so cpu_busy = 0 */ 8286 interval = get_sd_balance_interval(sd, 0); 8287 next = sd->last_balance + interval; 8288 8289 if (time_after(*next_balance, next)) 8290 *next_balance = next; 8291 } 8292 8293 /* 8294 * idle_balance is called by schedule() if this_cpu is about to become 8295 * idle. Attempts to pull tasks from other CPUs. 8296 */ 8297 static int idle_balance(struct rq *this_rq, struct rq_flags *rf) 8298 { 8299 unsigned long next_balance = jiffies + HZ; 8300 int this_cpu = this_rq->cpu; 8301 struct sched_domain *sd; 8302 int pulled_task = 0; 8303 u64 curr_cost = 0; 8304 8305 /* 8306 * We must set idle_stamp _before_ calling idle_balance(), such that we 8307 * measure the duration of idle_balance() as idle time. 8308 */ 8309 this_rq->idle_stamp = rq_clock(this_rq); 8310 8311 /* 8312 * This is OK, because current is on_cpu, which avoids it being picked 8313 * for load-balance and preemption/IRQs are still disabled avoiding 8314 * further scheduler activity on it and we're being very careful to 8315 * re-start the picking loop. 
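 *
 * (Editorial note: right below, if this rq's expected idle time (avg_idle)
 * is shorter than sysctl_sched_migration_cost, or the root domain reports
 * no overloaded CPUs, the newidle balance is skipped entirely since it
 * would likely cost more than the idle period it is trying to fill.)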
8316 */ 8317 rq_unpin_lock(this_rq, rf); 8318 8319 if (this_rq->avg_idle < sysctl_sched_migration_cost || 8320 !this_rq->rd->overload) { 8321 rcu_read_lock(); 8322 sd = rcu_dereference_check_sched_domain(this_rq->sd); 8323 if (sd) 8324 update_next_balance(sd, &next_balance); 8325 rcu_read_unlock(); 8326 8327 goto out; 8328 } 8329 8330 raw_spin_unlock(&this_rq->lock); 8331 8332 update_blocked_averages(this_cpu); 8333 rcu_read_lock(); 8334 for_each_domain(this_cpu, sd) { 8335 int continue_balancing = 1; 8336 u64 t0, domain_cost; 8337 8338 if (!(sd->flags & SD_LOAD_BALANCE)) 8339 continue; 8340 8341 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { 8342 update_next_balance(sd, &next_balance); 8343 break; 8344 } 8345 8346 if (sd->flags & SD_BALANCE_NEWIDLE) { 8347 t0 = sched_clock_cpu(this_cpu); 8348 8349 pulled_task = load_balance(this_cpu, this_rq, 8350 sd, CPU_NEWLY_IDLE, 8351 &continue_balancing); 8352 8353 domain_cost = sched_clock_cpu(this_cpu) - t0; 8354 if (domain_cost > sd->max_newidle_lb_cost) 8355 sd->max_newidle_lb_cost = domain_cost; 8356 8357 curr_cost += domain_cost; 8358 } 8359 8360 update_next_balance(sd, &next_balance); 8361 8362 /* 8363 * Stop searching for tasks to pull if there are 8364 * now runnable tasks on this rq. 8365 */ 8366 if (pulled_task || this_rq->nr_running > 0) 8367 break; 8368 } 8369 rcu_read_unlock(); 8370 8371 raw_spin_lock(&this_rq->lock); 8372 8373 if (curr_cost > this_rq->max_idle_balance_cost) 8374 this_rq->max_idle_balance_cost = curr_cost; 8375 8376 /* 8377 * While browsing the domains, we released the rq lock, a task could 8378 * have been enqueued in the meantime. Since we're not going idle, 8379 * pretend we pulled a task. 8380 */ 8381 if (this_rq->cfs.h_nr_running && !pulled_task) 8382 pulled_task = 1; 8383 8384 out: 8385 /* Move the next balance forward */ 8386 if (time_after(this_rq->next_balance, next_balance)) 8387 this_rq->next_balance = next_balance; 8388 8389 /* Is there a task of a high priority class? */ 8390 if (this_rq->nr_running != this_rq->cfs.h_nr_running) 8391 pulled_task = -1; 8392 8393 if (pulled_task) 8394 this_rq->idle_stamp = 0; 8395 8396 rq_repin_lock(this_rq, rf); 8397 8398 return pulled_task; 8399 } 8400 8401 /* 8402 * active_load_balance_cpu_stop is run by cpu stopper. It pushes 8403 * running tasks off the busiest CPU onto idle CPUs. It requires at 8404 * least 1 task to be running on each physical CPU where possible, and 8405 * avoids physical / logical imbalances. 8406 */ 8407 static int active_load_balance_cpu_stop(void *data) 8408 { 8409 struct rq *busiest_rq = data; 8410 int busiest_cpu = cpu_of(busiest_rq); 8411 int target_cpu = busiest_rq->push_cpu; 8412 struct rq *target_rq = cpu_rq(target_cpu); 8413 struct sched_domain *sd; 8414 struct task_struct *p = NULL; 8415 struct rq_flags rf; 8416 8417 rq_lock_irq(busiest_rq, &rf); 8418 8419 /* make sure the requested cpu hasn't gone down in the meantime */ 8420 if (unlikely(busiest_cpu != smp_processor_id() || 8421 !busiest_rq->active_balance)) 8422 goto out_unlock; 8423 8424 /* Is there any task to move? */ 8425 if (busiest_rq->nr_running <= 1) 8426 goto out_unlock; 8427 8428 /* 8429 * This condition is "impossible", if it occurs 8430 * we need to fix it. Originally reported by 8431 * Bjorn Helgaas on a 128-cpu setup. 8432 */ 8433 BUG_ON(busiest_rq == target_rq); 8434 8435 /* Search for an sd spanning us and the target CPU. 
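 * (Editorial note: i.e. walk up from the target CPU's domains and stop at
 * the first SD_LOAD_BALANCE domain whose span also contains busiest_cpu,
 * typically the lowest level that covers both CPUs.)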
*/ 8436 rcu_read_lock(); 8437 for_each_domain(target_cpu, sd) { 8438 if ((sd->flags & SD_LOAD_BALANCE) && 8439 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) 8440 break; 8441 } 8442 8443 if (likely(sd)) { 8444 struct lb_env env = { 8445 .sd = sd, 8446 .dst_cpu = target_cpu, 8447 .dst_rq = target_rq, 8448 .src_cpu = busiest_rq->cpu, 8449 .src_rq = busiest_rq, 8450 .idle = CPU_IDLE, 8451 /* 8452 * can_migrate_task() doesn't need to compute new_dst_cpu 8453 * for active balancing. Since we have CPU_IDLE, but no 8454 * @dst_grpmask we need to make that test go away with lying 8455 * about DST_PINNED. 8456 */ 8457 .flags = LBF_DST_PINNED, 8458 }; 8459 8460 schedstat_inc(sd->alb_count); 8461 update_rq_clock(busiest_rq); 8462 8463 p = detach_one_task(&env); 8464 if (p) { 8465 schedstat_inc(sd->alb_pushed); 8466 /* Active balancing done, reset the failure counter. */ 8467 sd->nr_balance_failed = 0; 8468 } else { 8469 schedstat_inc(sd->alb_failed); 8470 } 8471 } 8472 rcu_read_unlock(); 8473 out_unlock: 8474 busiest_rq->active_balance = 0; 8475 rq_unlock(busiest_rq, &rf); 8476 8477 if (p) 8478 attach_one_task(target_rq, p); 8479 8480 local_irq_enable(); 8481 8482 return 0; 8483 } 8484 8485 static inline int on_null_domain(struct rq *rq) 8486 { 8487 return unlikely(!rcu_dereference_sched(rq->sd)); 8488 } 8489 8490 #ifdef CONFIG_NO_HZ_COMMON 8491 /* 8492 * idle load balancing details 8493 * - When one of the busy CPUs notice that there may be an idle rebalancing 8494 * needed, they will kick the idle load balancer, which then does idle 8495 * load balancing for all the idle CPUs. 8496 */ 8497 static struct { 8498 cpumask_var_t idle_cpus_mask; 8499 atomic_t nr_cpus; 8500 unsigned long next_balance; /* in jiffy units */ 8501 } nohz ____cacheline_aligned; 8502 8503 static inline int find_new_ilb(void) 8504 { 8505 int ilb = cpumask_first(nohz.idle_cpus_mask); 8506 8507 if (ilb < nr_cpu_ids && idle_cpu(ilb)) 8508 return ilb; 8509 8510 return nr_cpu_ids; 8511 } 8512 8513 /* 8514 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the 8515 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle 8516 * CPU (if there is one). 8517 */ 8518 static void nohz_balancer_kick(void) 8519 { 8520 int ilb_cpu; 8521 8522 nohz.next_balance++; 8523 8524 ilb_cpu = find_new_ilb(); 8525 8526 if (ilb_cpu >= nr_cpu_ids) 8527 return; 8528 8529 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu))) 8530 return; 8531 /* 8532 * Use smp_send_reschedule() instead of resched_cpu(). 8533 * This way we generate a sched IPI on the target cpu which 8534 * is idle. And the softirq performing nohz idle load balance 8535 * will be run before returning from the IPI. 8536 */ 8537 smp_send_reschedule(ilb_cpu); 8538 return; 8539 } 8540 8541 void nohz_balance_exit_idle(unsigned int cpu) 8542 { 8543 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { 8544 /* 8545 * Completely isolated CPUs don't ever set, so we must test. 
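 * (Editorial clarification: "set" here refers to the cpu's bit in
 * nohz.idle_cpus_mask; isolated CPUs never enter the nohz idle-balance
 * bookkeeping via nohz_balance_enter_idle(), hence the extra test before
 * clearing the bit and decrementing nohz.nr_cpus.)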
8546 */ 8547 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) { 8548 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); 8549 atomic_dec(&nohz.nr_cpus); 8550 } 8551 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 8552 } 8553 } 8554 8555 static inline void set_cpu_sd_state_busy(void) 8556 { 8557 struct sched_domain *sd; 8558 int cpu = smp_processor_id(); 8559 8560 rcu_read_lock(); 8561 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 8562 8563 if (!sd || !sd->nohz_idle) 8564 goto unlock; 8565 sd->nohz_idle = 0; 8566 8567 atomic_inc(&sd->shared->nr_busy_cpus); 8568 unlock: 8569 rcu_read_unlock(); 8570 } 8571 8572 void set_cpu_sd_state_idle(void) 8573 { 8574 struct sched_domain *sd; 8575 int cpu = smp_processor_id(); 8576 8577 rcu_read_lock(); 8578 sd = rcu_dereference(per_cpu(sd_llc, cpu)); 8579 8580 if (!sd || sd->nohz_idle) 8581 goto unlock; 8582 sd->nohz_idle = 1; 8583 8584 atomic_dec(&sd->shared->nr_busy_cpus); 8585 unlock: 8586 rcu_read_unlock(); 8587 } 8588 8589 /* 8590 * This routine will record that the cpu is going idle with tick stopped. 8591 * This info will be used in performing idle load balancing in the future. 8592 */ 8593 void nohz_balance_enter_idle(int cpu) 8594 { 8595 /* 8596 * If this cpu is going down, then nothing needs to be done. 8597 */ 8598 if (!cpu_active(cpu)) 8599 return; 8600 8601 /* Spare idle load balancing on CPUs that don't want to be disturbed: */ 8602 if (!is_housekeeping_cpu(cpu)) 8603 return; 8604 8605 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) 8606 return; 8607 8608 /* 8609 * If we're a completely isolated CPU, we don't play. 8610 */ 8611 if (on_null_domain(cpu_rq(cpu))) 8612 return; 8613 8614 cpumask_set_cpu(cpu, nohz.idle_cpus_mask); 8615 atomic_inc(&nohz.nr_cpus); 8616 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 8617 } 8618 #endif 8619 8620 static DEFINE_SPINLOCK(balancing); 8621 8622 /* 8623 * Scale the max load_balance interval with the number of CPUs in the system. 8624 * This trades load-balance latency on larger machines for less cross talk. 8625 */ 8626 void update_max_interval(void) 8627 { 8628 max_load_balance_interval = HZ*num_online_cpus()/10; 8629 } 8630 8631 /* 8632 * It checks each scheduling domain to see if it is due to be balanced, 8633 * and initiates a balancing operation if so. 8634 * 8635 * Balancing parameters are set up in init_sched_domains. 8636 */ 8637 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) 8638 { 8639 int continue_balancing = 1; 8640 int cpu = rq->cpu; 8641 unsigned long interval; 8642 struct sched_domain *sd; 8643 /* Earliest time when we have to do rebalance again */ 8644 unsigned long next_balance = jiffies + 60*HZ; 8645 int update_next_balance = 0; 8646 int need_serialize, need_decay = 0; 8647 u64 max_cost = 0; 8648 8649 update_blocked_averages(cpu); 8650 8651 rcu_read_lock(); 8652 for_each_domain(cpu, sd) { 8653 /* 8654 * Decay the newidle max times here because this is a regular 8655 * visit to all the domains. Decay ~1% per second. 8656 */ 8657 if (time_after(jiffies, sd->next_decay_max_lb_cost)) { 8658 sd->max_newidle_lb_cost = 8659 (sd->max_newidle_lb_cost * 253) / 256; 8660 sd->next_decay_max_lb_cost = jiffies + HZ; 8661 need_decay = 1; 8662 } 8663 max_cost += sd->max_newidle_lb_cost; 8664 8665 if (!(sd->flags & SD_LOAD_BALANCE)) 8666 continue; 8667 8668 /* 8669 * Stop the load balance at this level. There is another 8670 * CPU in our sched group which is doing load balancing more 8671 * actively. 
8672 */ 8673 if (!continue_balancing) { 8674 if (need_decay) 8675 continue; 8676 break; 8677 } 8678 8679 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); 8680 8681 need_serialize = sd->flags & SD_SERIALIZE; 8682 if (need_serialize) { 8683 if (!spin_trylock(&balancing)) 8684 goto out; 8685 } 8686 8687 if (time_after_eq(jiffies, sd->last_balance + interval)) { 8688 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { 8689 /* 8690 * The LBF_DST_PINNED logic could have changed 8691 * env->dst_cpu, so we can't know our idle 8692 * state even if we migrated tasks. Update it. 8693 */ 8694 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; 8695 } 8696 sd->last_balance = jiffies; 8697 interval = get_sd_balance_interval(sd, idle != CPU_IDLE); 8698 } 8699 if (need_serialize) 8700 spin_unlock(&balancing); 8701 out: 8702 if (time_after(next_balance, sd->last_balance + interval)) { 8703 next_balance = sd->last_balance + interval; 8704 update_next_balance = 1; 8705 } 8706 } 8707 if (need_decay) { 8708 /* 8709 * Ensure the rq-wide value also decays but keep it at a 8710 * reasonable floor to avoid funnies with rq->avg_idle. 8711 */ 8712 rq->max_idle_balance_cost = 8713 max((u64)sysctl_sched_migration_cost, max_cost); 8714 } 8715 rcu_read_unlock(); 8716 8717 /* 8718 * next_balance will be updated only when there is a need. 8719 * When the cpu is attached to null domain for ex, it will not be 8720 * updated. 8721 */ 8722 if (likely(update_next_balance)) { 8723 rq->next_balance = next_balance; 8724 8725 #ifdef CONFIG_NO_HZ_COMMON 8726 /* 8727 * If this CPU has been elected to perform the nohz idle 8728 * balance. Other idle CPUs have already rebalanced with 8729 * nohz_idle_balance() and nohz.next_balance has been 8730 * updated accordingly. This CPU is now running the idle load 8731 * balance for itself and we need to update the 8732 * nohz.next_balance accordingly. 8733 */ 8734 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) 8735 nohz.next_balance = rq->next_balance; 8736 #endif 8737 } 8738 } 8739 8740 #ifdef CONFIG_NO_HZ_COMMON 8741 /* 8742 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the 8743 * rebalancing for all the cpus for whom scheduler ticks are stopped. 8744 */ 8745 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) 8746 { 8747 int this_cpu = this_rq->cpu; 8748 struct rq *rq; 8749 int balance_cpu; 8750 /* Earliest time when we have to do rebalance again */ 8751 unsigned long next_balance = jiffies + 60*HZ; 8752 int update_next_balance = 0; 8753 8754 if (idle != CPU_IDLE || 8755 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu))) 8756 goto end; 8757 8758 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { 8759 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) 8760 continue; 8761 8762 /* 8763 * If this cpu gets work to do, stop the load balancing 8764 * work being done for other cpus. Next load 8765 * balancing owner will pick it up. 8766 */ 8767 if (need_resched()) 8768 break; 8769 8770 rq = cpu_rq(balance_cpu); 8771 8772 /* 8773 * If time for next balance is due, 8774 * do the balance. 
8775 */ 8776 if (time_after_eq(jiffies, rq->next_balance)) { 8777 struct rq_flags rf; 8778 8779 rq_lock_irq(rq, &rf); 8780 update_rq_clock(rq); 8781 cpu_load_update_idle(rq); 8782 rq_unlock_irq(rq, &rf); 8783 8784 rebalance_domains(rq, CPU_IDLE); 8785 } 8786 8787 if (time_after(next_balance, rq->next_balance)) { 8788 next_balance = rq->next_balance; 8789 update_next_balance = 1; 8790 } 8791 } 8792 8793 /* 8794 * next_balance will be updated only when there is a need. 8795 * When the CPU is attached to null domain for ex, it will not be 8796 * updated. 8797 */ 8798 if (likely(update_next_balance)) 8799 nohz.next_balance = next_balance; 8800 end: 8801 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); 8802 } 8803 8804 /* 8805 * Current heuristic for kicking the idle load balancer in the presence 8806 * of an idle cpu in the system. 8807 * - This rq has more than one task. 8808 * - This rq has at least one CFS task and the capacity of the CPU is 8809 * significantly reduced because of RT tasks or IRQs. 8810 * - At parent of LLC scheduler domain level, this cpu's scheduler group has 8811 * multiple busy cpu. 8812 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler 8813 * domain span are idle. 8814 */ 8815 static inline bool nohz_kick_needed(struct rq *rq) 8816 { 8817 unsigned long now = jiffies; 8818 struct sched_domain_shared *sds; 8819 struct sched_domain *sd; 8820 int nr_busy, i, cpu = rq->cpu; 8821 bool kick = false; 8822 8823 if (unlikely(rq->idle_balance)) 8824 return false; 8825 8826 /* 8827 * We may be recently in ticked or tickless idle mode. At the first 8828 * busy tick after returning from idle, we will update the busy stats. 8829 */ 8830 set_cpu_sd_state_busy(); 8831 nohz_balance_exit_idle(cpu); 8832 8833 /* 8834 * None are in tickless mode and hence no need for NOHZ idle load 8835 * balancing. 8836 */ 8837 if (likely(!atomic_read(&nohz.nr_cpus))) 8838 return false; 8839 8840 if (time_before(now, nohz.next_balance)) 8841 return false; 8842 8843 if (rq->nr_running >= 2) 8844 return true; 8845 8846 rcu_read_lock(); 8847 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 8848 if (sds) { 8849 /* 8850 * XXX: write a coherent comment on why we do this. 8851 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com 8852 */ 8853 nr_busy = atomic_read(&sds->nr_busy_cpus); 8854 if (nr_busy > 1) { 8855 kick = true; 8856 goto unlock; 8857 } 8858 8859 } 8860 8861 sd = rcu_dereference(rq->sd); 8862 if (sd) { 8863 if ((rq->cfs.h_nr_running >= 1) && 8864 check_cpu_capacity(rq, sd)) { 8865 kick = true; 8866 goto unlock; 8867 } 8868 } 8869 8870 sd = rcu_dereference(per_cpu(sd_asym, cpu)); 8871 if (sd) { 8872 for_each_cpu(i, sched_domain_span(sd)) { 8873 if (i == cpu || 8874 !cpumask_test_cpu(i, nohz.idle_cpus_mask)) 8875 continue; 8876 8877 if (sched_asym_prefer(i, cpu)) { 8878 kick = true; 8879 goto unlock; 8880 } 8881 } 8882 } 8883 unlock: 8884 rcu_read_unlock(); 8885 return kick; 8886 } 8887 #else 8888 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { } 8889 #endif 8890 8891 /* 8892 * run_rebalance_domains is triggered when needed from the scheduler tick. 8893 * Also triggered for nohz idle balancing (with nohz_balancing_kick set). 8894 */ 8895 static __latent_entropy void run_rebalance_domains(struct softirq_action *h) 8896 { 8897 struct rq *this_rq = this_rq(); 8898 enum cpu_idle_type idle = this_rq->idle_balance ? 
8899 CPU_IDLE : CPU_NOT_IDLE; 8900 8901 /* 8902 * If this cpu has a pending nohz_balance_kick, then do the 8903 * balancing on behalf of the other idle cpus whose ticks are 8904 * stopped. Do nohz_idle_balance *before* rebalance_domains to 8905 * give the idle cpus a chance to load balance. Else we may 8906 * load balance only within the local sched_domain hierarchy 8907 * and abort nohz_idle_balance altogether if we pull some load. 8908 */ 8909 nohz_idle_balance(this_rq, idle); 8910 rebalance_domains(this_rq, idle); 8911 } 8912 8913 /* 8914 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 8915 */ 8916 void trigger_load_balance(struct rq *rq) 8917 { 8918 /* Don't need to rebalance while attached to NULL domain */ 8919 if (unlikely(on_null_domain(rq))) 8920 return; 8921 8922 if (time_after_eq(jiffies, rq->next_balance)) 8923 raise_softirq(SCHED_SOFTIRQ); 8924 #ifdef CONFIG_NO_HZ_COMMON 8925 if (nohz_kick_needed(rq)) 8926 nohz_balancer_kick(); 8927 #endif 8928 } 8929 8930 static void rq_online_fair(struct rq *rq) 8931 { 8932 update_sysctl(); 8933 8934 update_runtime_enabled(rq); 8935 } 8936 8937 static void rq_offline_fair(struct rq *rq) 8938 { 8939 update_sysctl(); 8940 8941 /* Ensure any throttled groups are reachable by pick_next_task */ 8942 unthrottle_offline_cfs_rqs(rq); 8943 } 8944 8945 #endif /* CONFIG_SMP */ 8946 8947 /* 8948 * scheduler tick hitting a task of our scheduling class: 8949 */ 8950 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) 8951 { 8952 struct cfs_rq *cfs_rq; 8953 struct sched_entity *se = &curr->se; 8954 8955 for_each_sched_entity(se) { 8956 cfs_rq = cfs_rq_of(se); 8957 entity_tick(cfs_rq, se, queued); 8958 } 8959 8960 if (static_branch_unlikely(&sched_numa_balancing)) 8961 task_tick_numa(rq, curr); 8962 } 8963 8964 /* 8965 * called on fork with the child task as argument from the parent's context 8966 * - child not yet on the tasklist 8967 * - preemption disabled 8968 */ 8969 static void task_fork_fair(struct task_struct *p) 8970 { 8971 struct cfs_rq *cfs_rq; 8972 struct sched_entity *se = &p->se, *curr; 8973 struct rq *rq = this_rq(); 8974 struct rq_flags rf; 8975 8976 rq_lock(rq, &rf); 8977 update_rq_clock(rq); 8978 8979 cfs_rq = task_cfs_rq(current); 8980 curr = cfs_rq->curr; 8981 if (curr) { 8982 update_curr(cfs_rq); 8983 se->vruntime = curr->vruntime; 8984 } 8985 place_entity(cfs_rq, se, 1); 8986 8987 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { 8988 /* 8989 * Upon rescheduling, sched_class::put_prev_task() will place 8990 * 'current' within the tree based on its new key value. 8991 */ 8992 swap(curr->vruntime, se->vruntime); 8993 resched_curr(rq); 8994 } 8995 8996 se->vruntime -= cfs_rq->min_vruntime; 8997 rq_unlock(rq, &rf); 8998 } 8999 9000 /* 9001 * Priority of the task has changed. Check to see if we preempt 9002 * the current task. 
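 *
 * (Editorial note: a lower numeric prio means a higher scheduling priority,
 * so the "p->prio > oldprio" test below detects a priority *decrease* for
 * the currently running task.)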
9003 */ 9004 static void 9005 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) 9006 { 9007 if (!task_on_rq_queued(p)) 9008 return; 9009 9010 /* 9011 * Reschedule if we are currently running on this runqueue and 9012 * our priority decreased, or if we are not currently running on 9013 * this runqueue and our priority is higher than the current's 9014 */ 9015 if (rq->curr == p) { 9016 if (p->prio > oldprio) 9017 resched_curr(rq); 9018 } else 9019 check_preempt_curr(rq, p, 0); 9020 } 9021 9022 static inline bool vruntime_normalized(struct task_struct *p) 9023 { 9024 struct sched_entity *se = &p->se; 9025 9026 /* 9027 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases, 9028 * the dequeue_entity(.flags=0) will already have normalized the 9029 * vruntime. 9030 */ 9031 if (p->on_rq) 9032 return true; 9033 9034 /* 9035 * When !on_rq, vruntime of the task has usually NOT been normalized. 9036 * But there are some cases where it has already been normalized: 9037 * 9038 * - A forked child which is waiting for being woken up by 9039 * wake_up_new_task(). 9040 * - A task which has been woken up by try_to_wake_up() and 9041 * waiting for actually being woken up by sched_ttwu_pending(). 9042 */ 9043 if (!se->sum_exec_runtime || p->state == TASK_WAKING) 9044 return true; 9045 9046 return false; 9047 } 9048 9049 #ifdef CONFIG_FAIR_GROUP_SCHED 9050 /* 9051 * Propagate the changes of the sched_entity across the tg tree to make it 9052 * visible to the root 9053 */ 9054 static void propagate_entity_cfs_rq(struct sched_entity *se) 9055 { 9056 struct cfs_rq *cfs_rq; 9057 9058 /* Start to propagate at parent */ 9059 se = se->parent; 9060 9061 for_each_sched_entity(se) { 9062 cfs_rq = cfs_rq_of(se); 9063 9064 if (cfs_rq_throttled(cfs_rq)) 9065 break; 9066 9067 update_load_avg(se, UPDATE_TG); 9068 } 9069 } 9070 #else 9071 static void propagate_entity_cfs_rq(struct sched_entity *se) { } 9072 #endif 9073 9074 static void detach_entity_cfs_rq(struct sched_entity *se) 9075 { 9076 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9077 9078 /* Catch up with the cfs_rq and remove our load when we leave */ 9079 update_load_avg(se, 0); 9080 detach_entity_load_avg(cfs_rq, se); 9081 update_tg_load_avg(cfs_rq, false); 9082 propagate_entity_cfs_rq(se); 9083 } 9084 9085 static void attach_entity_cfs_rq(struct sched_entity *se) 9086 { 9087 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9088 9089 #ifdef CONFIG_FAIR_GROUP_SCHED 9090 /* 9091 * Since the real-depth could have been changed (only FAIR 9092 * class maintain depth value), reset depth properly. 9093 */ 9094 se->depth = se->parent ? se->parent->depth + 1 : 0; 9095 #endif 9096 9097 /* Synchronize entity with its cfs_rq */ 9098 update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); 9099 attach_entity_load_avg(cfs_rq, se); 9100 update_tg_load_avg(cfs_rq, false); 9101 propagate_entity_cfs_rq(se); 9102 } 9103 9104 static void detach_task_cfs_rq(struct task_struct *p) 9105 { 9106 struct sched_entity *se = &p->se; 9107 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9108 9109 if (!vruntime_normalized(p)) { 9110 /* 9111 * Fix up our vruntime so that the current sleep doesn't 9112 * cause 'unlimited' sleep bonus. 
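 *
 * (Editorial note: place_entity() below re-anchors the entity's vruntime
 * around the cfs_rq's min_vruntime, minus any sleeper credit, before it is
 * made cfs_rq-relative, so a long sleep cannot translate into an unbounded
 * bonus when the task later re-attaches to a cfs_rq.)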
9113 */ 9114 place_entity(cfs_rq, se, 0); 9115 se->vruntime -= cfs_rq->min_vruntime; 9116 } 9117 9118 detach_entity_cfs_rq(se); 9119 } 9120 9121 static void attach_task_cfs_rq(struct task_struct *p) 9122 { 9123 struct sched_entity *se = &p->se; 9124 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9125 9126 attach_entity_cfs_rq(se); 9127 9128 if (!vruntime_normalized(p)) 9129 se->vruntime += cfs_rq->min_vruntime; 9130 } 9131 9132 static void switched_from_fair(struct rq *rq, struct task_struct *p) 9133 { 9134 detach_task_cfs_rq(p); 9135 } 9136 9137 static void switched_to_fair(struct rq *rq, struct task_struct *p) 9138 { 9139 attach_task_cfs_rq(p); 9140 9141 if (task_on_rq_queued(p)) { 9142 /* 9143 * We were most likely switched from sched_rt, so 9144 * kick off the schedule if running, otherwise just see 9145 * if we can still preempt the current task. 9146 */ 9147 if (rq->curr == p) 9148 resched_curr(rq); 9149 else 9150 check_preempt_curr(rq, p, 0); 9151 } 9152 } 9153 9154 /* Account for a task changing its policy or group. 9155 * 9156 * This routine is mostly called to set cfs_rq->curr field when a task 9157 * migrates between groups/classes. 9158 */ 9159 static void set_curr_task_fair(struct rq *rq) 9160 { 9161 struct sched_entity *se = &rq->curr->se; 9162 9163 for_each_sched_entity(se) { 9164 struct cfs_rq *cfs_rq = cfs_rq_of(se); 9165 9166 set_next_entity(cfs_rq, se); 9167 /* ensure bandwidth has been allocated on our new cfs_rq */ 9168 account_cfs_rq_runtime(cfs_rq, 0); 9169 } 9170 } 9171 9172 void init_cfs_rq(struct cfs_rq *cfs_rq) 9173 { 9174 cfs_rq->tasks_timeline = RB_ROOT; 9175 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); 9176 #ifndef CONFIG_64BIT 9177 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; 9178 #endif 9179 #ifdef CONFIG_SMP 9180 #ifdef CONFIG_FAIR_GROUP_SCHED 9181 cfs_rq->propagate_avg = 0; 9182 #endif 9183 atomic_long_set(&cfs_rq->removed_load_avg, 0); 9184 atomic_long_set(&cfs_rq->removed_util_avg, 0); 9185 #endif 9186 } 9187 9188 #ifdef CONFIG_FAIR_GROUP_SCHED 9189 static void task_set_group_fair(struct task_struct *p) 9190 { 9191 struct sched_entity *se = &p->se; 9192 9193 set_task_rq(p, task_cpu(p)); 9194 se->depth = se->parent ? 
se->parent->depth + 1 : 0; 9195 } 9196 9197 static void task_move_group_fair(struct task_struct *p) 9198 { 9199 detach_task_cfs_rq(p); 9200 set_task_rq(p, task_cpu(p)); 9201 9202 #ifdef CONFIG_SMP 9203 /* Tell se's cfs_rq has been changed -- migrated */ 9204 p->se.avg.last_update_time = 0; 9205 #endif 9206 attach_task_cfs_rq(p); 9207 } 9208 9209 static void task_change_group_fair(struct task_struct *p, int type) 9210 { 9211 switch (type) { 9212 case TASK_SET_GROUP: 9213 task_set_group_fair(p); 9214 break; 9215 9216 case TASK_MOVE_GROUP: 9217 task_move_group_fair(p); 9218 break; 9219 } 9220 } 9221 9222 void free_fair_sched_group(struct task_group *tg) 9223 { 9224 int i; 9225 9226 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); 9227 9228 for_each_possible_cpu(i) { 9229 if (tg->cfs_rq) 9230 kfree(tg->cfs_rq[i]); 9231 if (tg->se) 9232 kfree(tg->se[i]); 9233 } 9234 9235 kfree(tg->cfs_rq); 9236 kfree(tg->se); 9237 } 9238 9239 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 9240 { 9241 struct sched_entity *se; 9242 struct cfs_rq *cfs_rq; 9243 int i; 9244 9245 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); 9246 if (!tg->cfs_rq) 9247 goto err; 9248 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); 9249 if (!tg->se) 9250 goto err; 9251 9252 tg->shares = NICE_0_LOAD; 9253 9254 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); 9255 9256 for_each_possible_cpu(i) { 9257 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), 9258 GFP_KERNEL, cpu_to_node(i)); 9259 if (!cfs_rq) 9260 goto err; 9261 9262 se = kzalloc_node(sizeof(struct sched_entity), 9263 GFP_KERNEL, cpu_to_node(i)); 9264 if (!se) 9265 goto err_free_rq; 9266 9267 init_cfs_rq(cfs_rq); 9268 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); 9269 init_entity_runnable_average(se); 9270 } 9271 9272 return 1; 9273 9274 err_free_rq: 9275 kfree(cfs_rq); 9276 err: 9277 return 0; 9278 } 9279 9280 void online_fair_sched_group(struct task_group *tg) 9281 { 9282 struct sched_entity *se; 9283 struct rq *rq; 9284 int i; 9285 9286 for_each_possible_cpu(i) { 9287 rq = cpu_rq(i); 9288 se = tg->se[i]; 9289 9290 raw_spin_lock_irq(&rq->lock); 9291 update_rq_clock(rq); 9292 attach_entity_cfs_rq(se); 9293 sync_throttle(tg, i); 9294 raw_spin_unlock_irq(&rq->lock); 9295 } 9296 } 9297 9298 void unregister_fair_sched_group(struct task_group *tg) 9299 { 9300 unsigned long flags; 9301 struct rq *rq; 9302 int cpu; 9303 9304 for_each_possible_cpu(cpu) { 9305 if (tg->se[cpu]) 9306 remove_entity_load_avg(tg->se[cpu]); 9307 9308 /* 9309 * Only empty task groups can be destroyed; so we can speculatively 9310 * check on_list without danger of it being re-added. 
9311 */ 9312 if (!tg->cfs_rq[cpu]->on_list) 9313 continue; 9314 9315 rq = cpu_rq(cpu); 9316 9317 raw_spin_lock_irqsave(&rq->lock, flags); 9318 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); 9319 raw_spin_unlock_irqrestore(&rq->lock, flags); 9320 } 9321 } 9322 9323 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 9324 struct sched_entity *se, int cpu, 9325 struct sched_entity *parent) 9326 { 9327 struct rq *rq = cpu_rq(cpu); 9328 9329 cfs_rq->tg = tg; 9330 cfs_rq->rq = rq; 9331 init_cfs_rq_runtime(cfs_rq); 9332 9333 tg->cfs_rq[cpu] = cfs_rq; 9334 tg->se[cpu] = se; 9335 9336 /* se could be NULL for root_task_group */ 9337 if (!se) 9338 return; 9339 9340 if (!parent) { 9341 se->cfs_rq = &rq->cfs; 9342 se->depth = 0; 9343 } else { 9344 se->cfs_rq = parent->my_q; 9345 se->depth = parent->depth + 1; 9346 } 9347 9348 se->my_q = cfs_rq; 9349 /* guarantee group entities always have weight */ 9350 update_load_set(&se->load, NICE_0_LOAD); 9351 se->parent = parent; 9352 } 9353 9354 static DEFINE_MUTEX(shares_mutex); 9355 9356 int sched_group_set_shares(struct task_group *tg, unsigned long shares) 9357 { 9358 int i; 9359 9360 /* 9361 * We can't change the weight of the root cgroup. 9362 */ 9363 if (!tg->se[0]) 9364 return -EINVAL; 9365 9366 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); 9367 9368 mutex_lock(&shares_mutex); 9369 if (tg->shares == shares) 9370 goto done; 9371 9372 tg->shares = shares; 9373 for_each_possible_cpu(i) { 9374 struct rq *rq = cpu_rq(i); 9375 struct sched_entity *se = tg->se[i]; 9376 struct rq_flags rf; 9377 9378 /* Propagate contribution to hierarchy */ 9379 rq_lock_irqsave(rq, &rf); 9380 update_rq_clock(rq); 9381 for_each_sched_entity(se) { 9382 update_load_avg(se, UPDATE_TG); 9383 update_cfs_shares(se); 9384 } 9385 rq_unlock_irqrestore(rq, &rf); 9386 } 9387 9388 done: 9389 mutex_unlock(&shares_mutex); 9390 return 0; 9391 } 9392 #else /* CONFIG_FAIR_GROUP_SCHED */ 9393 9394 void free_fair_sched_group(struct task_group *tg) { } 9395 9396 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 9397 { 9398 return 1; 9399 } 9400 9401 void online_fair_sched_group(struct task_group *tg) { } 9402 9403 void unregister_fair_sched_group(struct task_group *tg) { } 9404 9405 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9406 9407 9408 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) 9409 { 9410 struct sched_entity *se = &task->se; 9411 unsigned int rr_interval = 0; 9412 9413 /* 9414 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise 9415 * idle runqueue: 9416 */ 9417 if (rq->cfs.load.weight) 9418 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); 9419 9420 return rr_interval; 9421 } 9422 9423 /* 9424 * All the scheduling class methods: 9425 */ 9426 const struct sched_class fair_sched_class = { 9427 .next = &idle_sched_class, 9428 .enqueue_task = enqueue_task_fair, 9429 .dequeue_task = dequeue_task_fair, 9430 .yield_task = yield_task_fair, 9431 .yield_to_task = yield_to_task_fair, 9432 9433 .check_preempt_curr = check_preempt_wakeup, 9434 9435 .pick_next_task = pick_next_task_fair, 9436 .put_prev_task = put_prev_task_fair, 9437 9438 #ifdef CONFIG_SMP 9439 .select_task_rq = select_task_rq_fair, 9440 .migrate_task_rq = migrate_task_rq_fair, 9441 9442 .rq_online = rq_online_fair, 9443 .rq_offline = rq_offline_fair, 9444 9445 .task_dead = task_dead_fair, 9446 .set_cpus_allowed = set_cpus_allowed_common, 9447 #endif 9448 9449 .set_curr_task = set_curr_task_fair, 9450 .task_tick = 
task_tick_fair, 9451 .task_fork = task_fork_fair, 9452 9453 .prio_changed = prio_changed_fair, 9454 .switched_from = switched_from_fair, 9455 .switched_to = switched_to_fair, 9456 9457 .get_rr_interval = get_rr_interval_fair, 9458 9459 .update_curr = update_curr_fair, 9460 9461 #ifdef CONFIG_FAIR_GROUP_SCHED 9462 .task_change_group = task_change_group_fair, 9463 #endif 9464 }; 9465 9466 #ifdef CONFIG_SCHED_DEBUG 9467 void print_cfs_stats(struct seq_file *m, int cpu) 9468 { 9469 struct cfs_rq *cfs_rq, *pos; 9470 9471 rcu_read_lock(); 9472 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) 9473 print_cfs_rq(m, cpu, cfs_rq); 9474 rcu_read_unlock(); 9475 } 9476 9477 #ifdef CONFIG_NUMA_BALANCING 9478 void show_numa_stats(struct task_struct *p, struct seq_file *m) 9479 { 9480 int node; 9481 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0; 9482 9483 for_each_online_node(node) { 9484 if (p->numa_faults) { 9485 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; 9486 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; 9487 } 9488 if (p->numa_group) { 9489 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)], 9490 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)]; 9491 } 9492 print_numa_stats(m, node, tsf, tpf, gsf, gpf); 9493 } 9494 } 9495 #endif /* CONFIG_NUMA_BALANCING */ 9496 #endif /* CONFIG_SCHED_DEBUG */ 9497 9498 __init void init_sched_fair_class(void) 9499 { 9500 #ifdef CONFIG_SMP 9501 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); 9502 9503 #ifdef CONFIG_NO_HZ_COMMON 9504 nohz.next_balance = jiffies; 9505 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); 9506 #endif 9507 #endif /* SMP */ 9508 9509 } 9510