#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs of increasing resolution on 32-bit are
 * pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
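
/*
 * Worked example (editor's illustration, assuming the usual
 * SCHED_FIXEDPOINT_SHIFT of 10 from <linux/sched.h>): the nice-0 weight
 * visible to users is sched_prio_to_weight[20] == 1024. On 64-bit,
 *
 *	scale_load(1024) == 1024 << 10 == 1048576 == 1L << 20 == NICE_0_LOAD
 *
 * while on 32-bit scale_load() is a no-op and NICE_0_LOAD stays at
 * 1024 == 1L << 10. scale_load_down() undoes the shift whenever a load
 * value has to be handed back to a user-visible interface.
 */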

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 *  9 -> just above 0.5us
 */
#define DL_SCALE	(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It can, in turn, be changed by writing to its own control.
 */
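
/*
 * Worked example (editor's illustration): admission control works on
 * bandwidth fractions scaled by 2^20, the fixed-point scale used by
 * to_ratio() in core.c. A task with dl_runtime = 10ms and
 * dl_period = 100ms contributes
 *
 *	tsk_bw = (10 << 20) / 100 ~= 104857, i.e. ~10% of (1 << 20)
 *
 * __dl_add()/__dl_clear() below account that amount in dl_bw::total_bw,
 * and __dl_overflow() rejects any change that would push total_bw above
 * bw * cpus.
 */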
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of a single entity must not be too
 * large; the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);
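
/*
 * Usage sketch (editor's illustration; dump_tg is a hypothetical visitor):
 * a visitor returns 0 to continue the walk and non-zero to abort it, and
 * tg_nop() (declared below) can stand in for a direction the caller does
 * not care about:
 *
 *	static int dump_tg(struct task_group *tg, void *data)
 *	{
 *		pr_info("tg %p shares %lu\n", tg, tg->shares);
 *		return 0;
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, dump_tg, tg_nop, NULL);
 *	rcu_read_unlock();
 *
 * (tg->shares assumes CONFIG_FAIR_GROUP_SCHED.)
 */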

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */
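
/*
 * Example of the cfs_bandwidth quota/period semantics (editor's
 * illustration, via the cgroup cpu controller's cfs_quota_us and
 * cfs_period_us knobs): quota = 50ms with period = 100ms caps the group
 * at half a CPU's worth of runtime per period, while quota = RUNTIME_INF
 * disables throttling altogether. Each period_timer expiry refills
 * cfs_bandwidth::runtime from the quota, and cfs_rqs that exhaust their
 * share are throttled until the next refill.
 */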

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};
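
/*
 * Sketch of how rt_rq::active is consumed (editor's illustration,
 * mirroring pick_next_rt_entity() in rt.c): the bitmap always has the
 * MAX_RT_PRIO delimiter bit set, so the search terminates without an
 * explicit emptiness check:
 *
 *	struct rt_prio_array *array = &rt_rq->active;
 *	int idx = sched_find_first_bit(array->bitmap);
 *	struct list_head *queue = array->queue + idx;
 *
 * idx is the highest (numerically lowest) queued RT priority, and the
 * first entry of *queue is the next entity of that priority to run.
 */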

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */
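
/*
 * Illustration of the "RT overload" machinery (editor's sketch of what
 * rt_set_overload() in rt.c does when a second RT task becomes runnable
 * on a CPU):
 *
 *	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 *	smp_wmb();
 *	atomic_inc(&rq->rd->rto_count);
 *
 * Other CPUs in the same root-domain test rto_count/rto_mask to decide
 * whether there are queued RT tasks worth pulling.
 */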

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that needs to lock multiple runqueues (such as the
 * load balancing or thread migration code) must acquire the locks in
 * ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif /* CONFIG_SMP */
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif
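
/*
 * Typical pattern for reading a runqueue clock (editor's sketch; both
 * helpers above assert rq->lock via lockdep, and update_rq_clock() is
 * declared further down in this header):
 *
 *	struct rq *rq = cpu_rq(cpu);
 *
 *	raw_spin_lock(&rq->lock);
 *	update_rq_clock(rq);
 *	now = rq_clock_task(rq);	(the task clock excludes irq/steal time)
 *	raw_spin_unlock(&rq->lock);
 */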

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};
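
/*
 * Example of how the helpers above feed the per-cpu shortcuts (editor's
 * sketch of update_top_cache_domain() in core.c): the last-level-cache
 * domain is the highest domain still sharing cache with @cpu:
 *
 *	struct sched_domain *sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *
 *	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 *	per_cpu(sd_llc_size, cpu) = sd ? cpumask_weight(sched_domain_span(sd)) : 1;
 *	per_cpu(sd_llc_id, cpu) = sd ? cpumask_first(sched_domain_span(sd)) : cpu;
 */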

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
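
/*
 * Worked example (editor's illustration, using the default sysctl values
 * from kernel/sched/core.c: sched_rt_period_us = 1000000 and
 * sched_rt_runtime_us = 950000): global_rt_period() yields 1s and
 * global_rt_runtime() 0.95s, i.e. RT tasks may consume at most 95% of
 * each period, leaving 5% for non-RT work. Writing -1 to
 * sched_rt_runtime_us turns the limit into RUNTIME_INF and disables RT
 * throttling.
 */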

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch() must
	 * happen before this.
	 *
	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int sched_prio_to_weight[40];
extern const u32 sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04

#define ENQUEUE_HEAD		0x08
#define ENQUEUE_REPLENISH	0x10
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x20
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)
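
/*
 * Pairing sketch for DEQUEUE_SAVE/ENQUEUE_RESTORE (editor's illustration
 * of the pattern used by e.g. __sched_setscheduler() and sched_move_task()
 * in core.c, assuming rq->lock is held):
 *
 *	queued = task_on_rq_queued(p);
 *	if (queued)
 *		dequeue_task(rq, p, DEQUEUE_SAVE);
 *
 *	... change p's scheduling attributes ...
 *
 *	if (queued)
 *		enqueue_task(rq, p, ENQUEUE_RESTORE);
 */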

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * The pick_next_task() method that returns the next task is
	 * responsible for calling put_prev_task() on the @prev task, or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev,
						struct pin_cookie cookie);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#define TASK_SET_GROUP	0
#define TASK_MOVE_GROUP	1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group) (struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;


#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
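
/*
 * Class-iteration sketch (editor's condensed version of the slow path of
 * pick_next_task() in core.c): classes are tried in priority order, and a
 * RETRY_TASK result restarts the walk from the top:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 * again:
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev, cookie);
 *		if (p) {
 *			if (unlikely(p == RETRY_TASK))
 *				goto again;
 *			return p;
 *		}
 *	}
 */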

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If tick is needed, let's send the target an IPI to kick it
 * out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu;

	if (!tick_nohz_full_enabled())
		return;

	cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif
	}

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
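
/*
 * Illustration (editor's note): arch_scale_freq_capacity() is the
 * frequency-invariance hook. The generic fallback above always reports
 * SCHED_CAPACITY_SCALE (1024); an architecture running a CPU at half its
 * maximum frequency would instead return ~512, so that load and
 * utilization sums accrue at half speed, as in sched_rt_avg_update()
 * below.
 */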

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
};

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	lockdep_unpin_lock(&rq->lock, rf->cookie);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	lockdep_unpin_lock(&rq->lock, rf->cookie);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					     SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					     SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */
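
/*
 * Usage sketch for the rq_flags API above (editor's illustration):
 * task_rq_lock() pins p's runqueue against migration by taking both
 * p->pi_lock and rq->lock, and the cookie in rq_flags keeps lockdep's
 * pinning honest:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... p cannot change runqueue here ...
 *	task_rq_unlock(rq, p, &rf);
 */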

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}
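
/*
 * Why the address comparison above (editor's note): taking two rq locks
 * in arbitrary order can ABBA-deadlock against another CPU doing the same
 * in the opposite order. Ordering by ascending lock address makes the
 * order global; callers such as migrate_swap_stop() in core.c do:
 *
 *	double_rq_lock(src_rq, dst_rq);
 *	... move or swap tasks between the two runqueues ...
 *	double_rq_unlock(src_rq, dst_rq);
 */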

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);

#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
	unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(unsigned int cpu);
#else
static inline void nohz_balance_exit_idle(unsigned int cpu) { }
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
DECLARE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
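
/*
 * Writer-side sketch (editor's illustration of the pairing expected by
 * irq_time_read() above; irqtime_account_irq() in cputime.c follows this
 * shape when it accounts a hardirq slice on 32-bit):
 *
 *	irq_time_write_begin();
 *	__this_cpu_add(cpu_hardirq_time, delta);
 *	irq_time_write_end();
 *
 * The odd/even sequence count lets readers detect a torn 64-bit update
 * and retry.
 */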

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @time: Current time.
 * @util: Current utilization.
 * @max: Utilization ceiling.
 *
 * This function is called by the scheduler on every invocation of
 * update_load_avg() on the CPU whose utilization is being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 */
static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
	if (data)
		data->func(data, time, util, max);
}

/**
 * cpufreq_trigger_update - Trigger CPU performance state evaluation if needed.
 * @time: Current time.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS,
 * though, because they may not be coming in if RT or deadline tasks are active
 * all the time (or there are RT and DL tasks only).
 *
 * As a workaround for that issue, this function is called by the RT and DL
 * sched classes to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid. Going forward it should be replaced with
 * solutions targeted more specifically at RT and DL tasks.
 */
static inline void cpufreq_trigger_update(u64 time)
{
	cpufreq_update_util(time, ULONG_MAX, 0);
}
#else
static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max) {}
static inline void cpufreq_trigger_update(u64 time) {}
#endif /* CONFIG_CPU_FREQ */

#ifdef arch_scale_freq_capacity
#ifndef arch_scale_freq_invariant
#define arch_scale_freq_invariant()	(true)
#endif
#else /* arch_scale_freq_capacity */
#define arch_scale_freq_invariant()	(false)
#endif
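
/*
 * Callback shape implied by data->func() above (editor's sketch; the
 * name sugov_update is hypothetical, and a governor registers such a
 * callback by publishing an update_util_data pointer in the per-cpu
 * cpufreq_update_util_data slot):
 *
 *	static void sugov_update(struct update_util_data *data, u64 time,
 *				 unsigned long util, unsigned long max)
 *	{
 *		... pick a frequency from util/max and program it ...
 *	}
 *
 * A util of ULONG_MAX, as passed by cpufreq_trigger_update(), exceeds any
 * ceiling and so conventionally asks the governor for its top frequency.
 */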