/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stackprotector.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down() to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
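/*
 * Worked example (illustrative): with SCHED_FIXEDPOINT_SHIFT == 10, the
 * nice-0 user weight of 1024 becomes, on 64-bit:
 *
 *	NICE_0_LOAD_SHIFT = 10 + 10 = 20
 *	NICE_0_LOAD       = 1L << 20 = 1048576
 *	scale_load(1024)  = 1024 << 10 = 1048576 == NICE_0_LOAD
 *
 * while on 32-bit scale_load() is a no-op and NICE_0_LOAD stays at 1024.
 */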
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is available
 * on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}
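/*
 * Example (illustrative): under EDF, the entity with the earlier absolute
 * deadline wins. If a->deadline expires at t = 2ms and b->deadline at
 * t = 5ms, dl_time_before(a->deadline, b->deadline) is true and @a
 * preempts @b. A special entity (a sugov kworker) preempts anything,
 * regardless of deadlines.
 */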
/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It in turn can be changed by writing on its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
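/*
 * Worked example (illustrative): bandwidths are fixed-point fractions with
 * BW_SHIFT (20) fractional bits, see to_ratio() below. With the default
 * 95% global limit (runtime 950000us over a 1000000us period):
 *
 *	dl_b->bw = to_ratio(1000000, 950000) ~= 0.95 << 20 = 996147
 *
 * On a 4-CPU root domain, __dl_overflow() then rejects a task requesting
 * bandwidth new_bw whenever:
 *
 *	996147 * 4 < dl_b->total_bw - old_bw + new_bw
 */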
extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	s64			hierarchical_quota;
	u64			runtime_expires;

	int			idle;
	int			period_active;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};
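/*
 * Example (illustrative): the cgroup interface expresses the cap as
 * quota/period. Writing cpu.cfs_quota_us = 50000 and
 * cpu.cfs_period_us = 100000 allows the group 50ms of CPU time per
 * 100ms window (a 50% cap); cfs_rq's draw from @runtime until it is
 * exhausted, are throttled for the rest of the period, and
 * period_timer refills @runtime from @quota.
 */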
/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so neither the weight of an entity nor the shares
 * value of a task group should be too large.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL <<  1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
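/*
 * Usage sketch (illustrative; count_tg() is a hypothetical visitor, not
 * part of this header). A visitor returns 0 to continue the walk and
 * non-zero to abort it; tg_nop() can stand in for an uninteresting
 * direction:
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &nr_groups);
 *	rcu_read_unlock();
 */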
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned long		runnable_weight;
	unsigned int		nr_running;
	unsigned int		h_nr_running;

	u64			exec_clock;
	u64			min_vruntime;
#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif
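	/*
	 * Illustrative sketch: on 32-bit, the 64-bit min_vruntime cannot be
	 * read atomically, so remote readers in the fair class pair the two
	 * fields roughly like a seqcount (assuming the writer stores
	 * min_vruntime_copy after min_vruntime with a barrier in between):
	 *
	 *	do {
	 *		copy = cfs_rq->min_vruntime_copy;
	 *		smp_rmb();
	 *		min_vruntime = cfs_rq->min_vruntime;
	 *	} while (min_vruntime != copy);
	 */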
	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;
	struct sched_entity	*last;
	struct sched_entity	*skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			load_last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_sum;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	u64			runtime_expires;
	s64			runtime_remaining;

	u64			throttled_clock;
	u64			throttled_clock_task;
	u64			throttled_clock_task_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int		curr;	/* highest queued rt task prio */
#ifdef CONFIG_SMP
		int		next;	/* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long		rt_nr_migratory;
	unsigned long		rt_nr_total;
	int			overloaded;
	struct plist_head	pushable_tasks;
#endif /* CONFIG_SMP */
	int			rt_queued;

	int			rt_throttled;
	u64			rt_time;
	u64			rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long		rt_nr_boosted;

	struct rq		*rq;
	struct task_group	*tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned long		dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	unsigned long		dl_nr_migratory;
	int			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;
#else
	struct dl_bw		dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};

#ifdef CONFIG_SMP

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/* Indicate more than one runnable task for any CPU */
	bool			overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	unsigned long		max_cpu_capacity;
};

extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code) must
 * acquire the locks in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long		cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long		last_load_update_tick;
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
#endif /* CONFIG_SMP */
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

	/* capture load from *all* tasks on this CPU: */
	struct load_weight	load;
	unsigned long		nr_load_updates;
	u64			nr_switches;

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long		nr_uninterruptible;

	struct task_struct	*curr;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	u64			clock_task;

	atomic_t		nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain	*rd;
	struct sched_domain	*sd;

	unsigned long		cpu_capacity;
	unsigned long		cpu_capacity_orig;

	struct callback_head	*balance_callback;

	unsigned char		idle_balance;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head	cfs_tasks;

	u64			rt_avg;
	u64			age_stamp;
	u64			idle_stamp;
	u64			avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int			hrtick_csd_pending;
	call_single_data_t	hrtick_csd;
#endif
	struct hrtimer		hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head	wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state	*idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04
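/*
 * Example (illustrative): the promotion really is just the shift.
 * Entering __schedule() does clock_update_flags <<= 1, so a pending
 * RQCF_REQ_SKIP (0x01) becomes RQCF_ACT_SKIP (0x02), and a stale
 * RQCF_UPDATED (0x04) becomes 0x08 - which is why the >= RQCF_UPDATED
 * comparison above is used instead of a plain bit test.
 */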
static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_update_flags |= RQCF_REQ_SKIP;
	else
		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
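/*
 * Usage sketch (illustrative): the RT and deadline classes keep a
 * per-CPU callback_head and queue their push operations from under
 * the rq lock; the callback then runs once the lock is dropped,
 * roughly:
 *
 *	static DEFINE_PER_CPU(struct callback_head, rt_push_head);
 *
 *	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
 *			       push_rt_tasks);
 */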
extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The CPU whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given CPU.
 *
 * Returns the highest sched_domain of a CPU which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
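/*
 * Example (illustrative): sd_llc above is derived this way when the
 * domains are rebuilt - the highest domain whose CPUs still share a
 * last-level cache:
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	if (sd)
 *		rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 */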
struct sched_group_capacity {
	atomic_t		ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long		capacity;
	unsigned long		min_capacity;	/* Min per-CPU capacity in group */
	unsigned long		next_update;
	int			imbalance;	/* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int			id;
#endif

	unsigned long		cpumask[0];	/* Balance mask */
};

struct sched_group {
	struct sched_group	*next;		/* Must be a circular list */
	atomic_t		ref;

	unsigned int		group_weight;
	struct sched_group_capacity *sgc;
	int			asym_prefer_cpu;	/* CPU of highest priority in group */

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long		cpumask[0];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
 * @group: The group whose first CPU is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_span(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void dirty_sched_domain_sysctl(int cpu);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	p->cpu = cpu;
#else
	task_thread_info(p)->cpu = cpu;
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)

/*
 * To support run-time toggling of sched features, all the translation units
 * (but core.c) reference the sysctl_sched_features defined in core.c.
 */
extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"
#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))

#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */

/*
 * Each translation unit has its own copy of sysctl_sched_features to allow
 * constants propagation at compile time and compiler optimization based on
 * features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
static const_debug __maybe_unused unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
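/*
 * Example (illustrative): features.h consists of lines such as
 *
 *	SCHED_FEAT(HRTICK, false)
 *
 * which the enum above turns into __SCHED_FEAT_HRTICK, and which the
 * non-jump-label variant folds into the sysctl_sched_features constant
 * as (1UL << __SCHED_FEAT_HRTICK) * false. Call sites then simply test
 * sched_feat(HRTICK).
 */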
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

/*
 * wake flags
 */
#define WF_SYNC			0x01	/* Waker goes to sleep after wakeup */
#define WF_FORK			0x02	/* Child wakeup after fork */
#define WF_MIGRATED		0x04	/* Internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int		sched_prio_to_weight[40];
extern const u32		sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04
#define ENQUEUE_NOCLOCK		0x08

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task)   (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task)(struct rq *rq,
					       struct task_struct *prev,
					       struct rq_flags *rf);
	void (*put_prev_task)(struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task)(struct rq *rq);
	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork)(struct task_struct *p);
	void (*task_dead)(struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval)(struct rq *rq,
					struct task_struct *task);

	void (*update_curr)(struct rq *rq);

#define TASK_SET_GROUP		0
#define TASK_MOVE_GROUP		1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group)(struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
{
	curr->sched_class->set_curr_task(rq);
}

#ifdef CONFIG_SMP
#define sched_class_highest (&stop_sched_class)
#else
#define sched_class_highest (&dl_sched_class)
#endif
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
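/*
 * Usage sketch (illustrative, simplified from the core pick loop): the
 * classes form a strict priority list, stop > dl > rt > fair > idle,
 * and the scheduler asks each in turn for a runnable task:
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev, rf);
 *		if (p) {
 *			if (unlikely(p == RETRY_TASK))
 *				goto again;
 *			return p;
 *		}
 *	}
 */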
#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());

	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void schedule_idle(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void reweight_task(struct task_struct *p, int prio);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

#define BW_SHIFT		20
#define BW_UNIT			(1 << BW_SHIFT)
#define RATIO_SHIFT		8
unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
extern int __init sched_tick_offload_init(void);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If the tick is needed, send the target CPU an IPI to kick
 * it out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu;

	if (!tick_nohz_full_enabled())
		return;

	cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline int sched_tick_offload_init(void) { return 0; }
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif
	}

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif
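/*
 * Worked example (illustrative): at the SMT level (SD_SHARE_CPUCAPACITY)
 * with the common default smt_gain of 1178 (~15% above
 * SCHED_CAPACITY_SCALE) and two hardware threads per core
 * (span_weight == 2), each thread is reported as 1178 / 2 = 589
 * capacity units, reflecting that siblings share one core. Outside the
 * SMT level the full 1024 is returned.
 */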
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
	sched_avg_update(rq);
}
#else
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irqsave(&rq->lock, rf->flags);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irq(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_relock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_repin_lock(rq, rf);
}

static inline void
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
}

static inline void
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irq(&rq->lock);
}

static inline void
rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}
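/*
 * Usage sketch (illustrative): the canonical pattern for operating on a
 * (possibly migrating) task; task_rq_lock() takes p->pi_lock plus the
 * task's rq->lock, and @rf carries the IRQ flags and pin cookie:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	update_rq_clock(rq);
 *	...
 *	task_rq_unlock(rq, p, &rf);
 */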
This favors lower CPU-ids and will 1857 * grant the double lock to lower CPUs over higher ids under contention, 1858 * regardless of entry order into the function. 1859 */ 1860 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 1861 __releases(this_rq->lock) 1862 __acquires(busiest->lock) 1863 __acquires(this_rq->lock) 1864 { 1865 int ret = 0; 1866 1867 if (unlikely(!raw_spin_trylock(&busiest->lock))) { 1868 if (busiest < this_rq) { 1869 raw_spin_unlock(&this_rq->lock); 1870 raw_spin_lock(&busiest->lock); 1871 raw_spin_lock_nested(&this_rq->lock, 1872 SINGLE_DEPTH_NESTING); 1873 ret = 1; 1874 } else 1875 raw_spin_lock_nested(&busiest->lock, 1876 SINGLE_DEPTH_NESTING); 1877 } 1878 return ret; 1879 } 1880 1881 #endif /* CONFIG_PREEMPT */ 1882 1883 /* 1884 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1885 */ 1886 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 1887 { 1888 if (unlikely(!irqs_disabled())) { 1889 /* printk() doesn't work well under rq->lock */ 1890 raw_spin_unlock(&this_rq->lock); 1891 BUG_ON(1); 1892 } 1893 1894 return _double_lock_balance(this_rq, busiest); 1895 } 1896 1897 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1898 __releases(busiest->lock) 1899 { 1900 raw_spin_unlock(&busiest->lock); 1901 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 1902 } 1903 1904 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 1905 { 1906 if (l1 > l2) 1907 swap(l1, l2); 1908 1909 spin_lock(l1); 1910 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 1911 } 1912 1913 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 1914 { 1915 if (l1 > l2) 1916 swap(l1, l2); 1917 1918 spin_lock_irq(l1); 1919 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 1920 } 1921 1922 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 1923 { 1924 if (l1 > l2) 1925 swap(l1, l2); 1926 1927 raw_spin_lock(l1); 1928 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 1929 } 1930 1931 /* 1932 * double_rq_lock - safely lock two runqueues 1933 * 1934 * Note this does not disable interrupts like task_rq_lock, 1935 * you need to do so manually before calling. 1936 */ 1937 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 1938 __acquires(rq1->lock) 1939 __acquires(rq2->lock) 1940 { 1941 BUG_ON(!irqs_disabled()); 1942 if (rq1 == rq2) { 1943 raw_spin_lock(&rq1->lock); 1944 __acquire(rq2->lock); /* Fake it out ;) */ 1945 } else { 1946 if (rq1 < rq2) { 1947 raw_spin_lock(&rq1->lock); 1948 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 1949 } else { 1950 raw_spin_lock(&rq2->lock); 1951 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 1952 } 1953 } 1954 } 1955 1956 /* 1957 * double_rq_unlock - safely unlock two runqueues 1958 * 1959 * Note this does not restore interrupts like task_rq_unlock, 1960 * you need to do so manually after calling. 
/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

extern void set_rq_online(struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern bool sched_debug_enabled;

extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
#define NOHZ_BALANCE_KICK_BIT	0
#define NOHZ_STATS_KICK_BIT	1

#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)

#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(struct rq *rq);
#else
static inline void nohz_balance_exit_idle(struct rq *rq) { }
#endif
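/*
 * Illustrative sketch (not part of the original file; assumes
 * rq->nohz_flags is an atomic_t as elsewhere in this file): a remote CPU
 * requests work from an idle CPU by atomically setting kick bits, and
 * only sends the wakeup IPI if no kick was pending yet:
 *
 *	flags = atomic_fetch_or(NOHZ_KICK_MASK, nohz_flags(cpu));
 *	if (!(flags & NOHZ_KICK_MASK))
 *		...		// kick the idle CPU
 */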
#ifdef CONFIG_SMP
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
	int i;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask) {
		struct rq *rq = cpu_rq(i);

		rq->dl.extra_bw += bw;
	}
}
#else
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

	dl->extra_bw += bw;
}
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64			total;
	u64			tick_delta;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted from it and would never move forward.
 */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because they may not be coming in if only RT tasks are
 * active all the time.
 *
 * As a workaround for that issue, this function is called periodically by the
 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid. Going forward it should be replaced with
 * solutions targeted more specifically at RT tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */

#ifdef arch_scale_freq_capacity
# ifndef arch_scale_freq_invariant
#  define arch_scale_freq_invariant()	true
# endif
#else
# define arch_scale_freq_invariant()	false
#endif

#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
static inline unsigned long cpu_util_dl(struct rq *rq)
{
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

static inline unsigned long cpu_util_cfs(struct rq *rq)
{
	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

	if (sched_feat(UTIL_EST)) {
		util = max_t(unsigned long, util,
			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
	}

	return util;
}
#endif
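/*
 * Illustrative sketch (not part of the original file): a schedutil-style
 * consumer would aggregate the two utilization signals and clamp the sum
 * to the CPU's capacity before selecting a frequency:
 *
 *	util = cpu_util_cfs(rq) + cpu_util_dl(rq);
 *	util = min(util, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
 */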