/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stackprotector.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down() to convert between them. The
 * following must be true:
 *
 *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
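/*
 * Illustrative arithmetic (not part of the ABI): with the default
 * SCHED_FIXEDPOINT_SHIFT of 10, a nice-0 task has the user-visible weight
 * sched_prio_to_weight[20] == 1024, so on 64-bit:
 *
 *	scale_load(1024) == 1024 << 10 == 1048576 == 1L << 20 == NICE_0_LOAD
 *	scale_load_down(NICE_0_LOAD) == 1024
 *
 * while on 32-bit both macros are the identity and NICE_0_LOAD == 1024.
 */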
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is available
 * on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}
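/*
 * Worked example (illustrative): if @a's absolute deadline is 150us from
 * now and @b's is 200us, dl_time_before(a->deadline, b->deadline) is true
 * and @a should preempt @b. A SUGOV-special entity wins unconditionally,
 * since dl_entity_is_special() short-circuits the deadline comparison.
 */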
/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - the dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It in turn can be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
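/*
 * Worked example (illustrative; with BW_SHIFT == 20, defined later in this
 * file, BW_UNIT represents a bandwidth of 1.0): on a 4-CPU root domain with
 * dl_b->bw == 0.5 * BW_UNIT per CPU, total capacity is 2.0 * BW_UNIT.
 * Admitting a new reservation of 0.1 * BW_UNIT (new_bw) when total_bw is
 * already 1.95 * BW_UNIT overflows, because 2.0 < 1.95 - 0 + 0.1; replacing
 * an existing reservation (old_bw > 0) frees its share first. A dl_b->bw of
 * -1 means "no limit" and always admits.
 */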
extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	s64			hierarchical_quota;
	u64			runtime_expires;

	int			idle;
	int			period_active;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};
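/*
 * Illustrative configuration (assumed values, not defaults): writing
 * cpu.cfs_quota_us = 50000 and cpu.cfs_period_us = 100000 for a group caps
 * it at 50% of one CPU per period; runtime is refilled to quota each period
 * by period_timer, and throttled cfs_rqs are parked on throttled_cfs_rq
 * until unthrottled.
 */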
/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL <<  1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
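/*
 * Usage sketch (illustrative): a non-zero return from @down stops the walk
 * and propagates the error; tg_nop() below serves as the no-op visitor for
 * the direction a caller does not care about. The CFS bandwidth
 * schedulability check in core.c uses it roughly like so:
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
 *	rcu_read_unlock();
 */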
extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned long		runnable_weight;
	unsigned int		nr_running;
	unsigned int		h_nr_running;

	u64			exec_clock;
	u64			min_vruntime;
#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif

	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;
	struct sched_entity	*last;
	struct sched_entity	*skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			load_last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_sum;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	u64			runtime_expires;
	s64			runtime_remaining;

	u64			throttled_clock;
	u64			throttled_clock_task;
	u64			throttled_clock_task_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};
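/*
 * Worked example for h_load (illustrative): f(tg) is the product of the
 * group's weight fraction at each level up to the root. If a group owns
 * half of its parent's weight and the parent owns half of the root's,
 * f(tg) == 0.25, so a weight-1024 entity queued in that group contributes
 * h_load == 256 to the hierarchical load seen by the load balancer.
 */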
static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int		curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int		next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long		rt_nr_migratory;
	unsigned long		rt_nr_total;
	int			overloaded;
	struct plist_head	pushable_tasks;
#endif /* CONFIG_SMP */
	int			rt_queued;

	int			rt_throttled;
	u64			rt_time;
	u64			rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long		rt_nr_boosted;

	struct rq		*rq;
	struct task_group	*tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned long		dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	unsigned long		dl_nr_migratory;
	int			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;
#else
	struct dl_bw		dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks.
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};
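/*
 * Worked example (illustrative): two -deadline tasks, each with bandwidth
 * 0.25 * BW_UNIT, are assigned to this runqueue and one of them is blocked:
 * this_bw == 0.5 * BW_UNIT while running_bw == 0.25 * BW_UNIT, leaving an
 * inactive utilization of 0.25 * BW_UNIT that the GRUB reclaiming logic
 * takes into account.
 */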
#ifdef CONFIG_SMP

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/* Indicate more than one runnable task for any CPU */
	bool			overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	unsigned long		max_cpu_capacity;
};

extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must order its lock
 * acquire operations by ascending runqueue address.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long		cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long		last_load_update_tick;
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
#endif /* CONFIG_SMP */
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

	/* capture load from *all* tasks on this CPU: */
	struct load_weight	load;
	unsigned long		nr_load_updates;
	u64			nr_switches;

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long		nr_uninterruptible;

	struct task_struct	*curr;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	u64			clock_task;

	atomic_t		nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain	*rd;
	struct sched_domain	*sd;

	unsigned long		cpu_capacity;
	unsigned long		cpu_capacity_orig;

	struct callback_head	*balance_callback;

	unsigned char		idle_balance;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head	cfs_tasks;

	u64			rt_avg;
	u64			age_stamp;
	u64			idle_stamp;
	u64			avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int			hrtick_csd_pending;
	call_single_data_t	hrtick_csd;
#endif
	struct hrtimer		hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head	wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state	*idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}


#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04
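/*
 * Example of the promotion described above (illustrative):
 * rq_clock_skip_update() sets 0x01 (RQCF_REQ_SKIP); on entry, __schedule()
 * shifts the flags left, turning it into 0x02 (RQCF_ACT_SKIP), so
 * update_rq_clock() becomes a no-op until the flags are shifted back.
 */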
static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is cancelled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void
init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
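/*
 * Usage sketch (illustrative; mirrors the RT class in rt.c, which keeps a
 * per-CPU struct callback_head for this purpose):
 *
 *	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
 *			       push_rt_tasks);
 *
 * The callback is not run here; core.c invokes it via balance_callback()
 * after __schedule(), and the head->next check above makes repeated
 * queueing of the same head a no-op.
 */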
extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The CPU whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given CPU.
 *
 * Returns the highest sched_domain of a CPU which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
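/*
 * Example (from update_top_cache_domain() in topology.c): the per-CPU
 * pointers declared below are derived with these helpers, e.g. sd_llc
 * from highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES) and sd_numa from
 * lowest_flag_domain(cpu, SD_NUMA).
 */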
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t		ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long		capacity;
	unsigned long		min_capacity;	/* Min per-CPU capacity in group */
	unsigned long		next_update;
	int			imbalance;	/* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int			id;
#endif

	unsigned long		cpumask[0];	/* Balance mask */
};

struct sched_group {
	struct sched_group	*next;		/* Must be a circular list */
	atomic_t		ref;

	unsigned int		group_weight;
	struct sched_group_capacity *sgc;
	int			asym_prefer_cpu; /* CPU of highest priority in group */

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long		cpumask[0];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
 * @group: The group whose first CPU is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_span(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void dirty_sched_domain_sysctl(int cpu);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	p->cpu = cpu;
#else
	task_thread_info(p)->cpu = cpu;
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)

/*
 * To support run-time toggling of sched features, all the translation units
 * (but core.c) reference the sysctl_sched_features defined in core.c.
 */
extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"
#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))

#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */

/*
 * Each translation unit has its own copy of sysctl_sched_features to allow
 * constant propagation at compile time and compiler optimization based on
 * the features' default values.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
static const_debug __maybe_unused unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
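/*
 * Expansion example (illustrative, hypothetical feature name): an entry
 * SCHED_FEAT(FOO, true) in features.h becomes the enum constant
 * __SCHED_FEAT_FOO above, and here contributes
 * (1UL << __SCHED_FEAT_FOO) * true to the sysctl_sched_features
 * initializer, so sched_feat(FOO) folds to a compile-time constant when
 * run-time toggling is not built in.
 */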
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

/*
 * wake flags
 */
#define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
#define WF_FORK			0x02		/* Child wakeup after fork */
#define WF_MIGRATED		0x04		/* Internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int		sched_prio_to_weight[40];
extern const u32		sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04
#define ENQUEUE_NOCLOCK		0x08

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task)   (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task)(struct rq *rq,
					       struct task_struct *prev,
					       struct rq_flags *rf);
	void (*put_prev_task)(struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task)(struct rq *rq);
	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork)(struct task_struct *p);
	void (*task_dead)(struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval)(struct rq *rq,
					struct task_struct *task);

	void (*update_curr)(struct rq *rq);

#define TASK_SET_GROUP		0
#define TASK_MOVE_GROUP		1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group)(struct task_struct *p, int type);
#endif
};
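/*
 * Pick-loop sketch (illustrative, condensed from pick_next_task() in
 * core.c), showing how the RETRY_TASK contract above is honoured using
 * the for_each_class() iterator defined below:
 *
 *	again:
 *		for_each_class(class) {
 *			p = class->pick_next_task(rq, prev, rf);
 *			if (p) {
 *				if (unlikely(p == RETRY_TASK))
 *					goto again;
 *				return p;
 *			}
 *		}
 */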
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
{
	curr->sched_class->set_curr_task(rq);
}

#ifdef CONFIG_SMP
#define sched_class_highest (&stop_sched_class)
#else
#define sched_class_highest (&dl_sched_class)
#endif
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;


#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());

	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void schedule_idle(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void reweight_task(struct task_struct *p, int prio);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

#define BW_SHIFT		20
#define BW_UNIT			(1 << BW_SHIFT)
#define RATIO_SHIFT		8
unsigned long to_ratio(u64 period, u64 runtime);
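/*
 * Worked example (illustrative): to_ratio() computes
 * (runtime << BW_SHIFT) / period, so a reservation of 10ms every 100ms
 * maps to roughly 0.1 * BW_UNIT == 104857, the fixed-point bandwidth
 * units used by the deadline admission control earlier in this file.
 */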
extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
extern int __init sched_tick_offload_init(void);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If the tick is needed, let's send the target an IPI to kick
 * it out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu;

	if (!tick_nohz_full_enabled())
		return;

	cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline int sched_tick_offload_init(void) { return 0; }
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif
	}

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}
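/*
 * Worked example (illustrative): assuming the default sysctl_sched_time_avg
 * of 1000 (ms), sched_avg_period() returns 500 * NSEC_PER_MSEC, i.e. rt_avg
 * is decayed by sched_avg_update() every half second.
 */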
#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
	sched_avg_update(rq);
}
#else
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irqsave(&rq->lock, rf->flags);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irq(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_relock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_repin_lock(rq, rf);
}

static inline void
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
}

static inline void
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irq(&rq->lock);
}

static inline void
rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower CPU-ids and will
 * grant the double lock to lower CPUs over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */
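/*
 * Walk-through of the unfair variant (illustrative): suppose this_rq has
 * the higher address and busiest->lock is contended. raw_spin_trylock()
 * fails and busiest < this_rq, so this_rq->lock is dropped and both locks
 * are re-taken in ascending address order; returning 1 tells the caller
 * that this_rq was briefly unlocked, so cached state may be stale.
 */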
/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock();
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock();
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

extern void set_rq_online(struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock();
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock(),
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif
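/*
 * Illustrative sketch, not part of the original header;
 * example_double_rq_usage() is a made-up helper. It shows the manual IRQ
 * handling the comments above require: double_rq_lock() and
 * double_rq_unlock() neither disable nor restore interrupts, so the caller
 * brackets the critical section itself.
 */
static inline void example_double_rq_usage(struct rq *rq1, struct rq *rq2)
{
	unsigned long flags;

	local_irq_save(flags);		/* double_rq_lock() demands irqs off */
	double_rq_lock(rq1, rq2);

	/* ... both runqueues are stable here ... */

	double_rq_unlock(rq1, rq2);
	local_irq_restore(flags);	/* restore manually, per the comment */
}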
extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern bool sched_debug_enabled;

extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
#define NOHZ_BALANCE_KICK_BIT	0
#define NOHZ_STATS_KICK_BIT	1

#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)

#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(struct rq *rq);
#else
static inline void nohz_balance_exit_idle(struct rq *rq) { }
#endif

#ifdef CONFIG_SMP
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
	int i;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask) {
		struct rq *rq = cpu_rq(i);

		rq->dl.extra_bw += bw;
	}
}
#else
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

	dl->extra_bw += bw;
}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64			total;
	u64			tick_delta;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted from it and would never move forward.
 */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because they may not be coming in if only RT tasks are
 * active all the time.
 *
 * As a workaround for that issue, this function is called periodically by the
 * RT sched class to trigger extra cpufreq updates and prevent it from
 * stalling, but that really is a band-aid. Going forward it should be
 * replaced with solutions targeted more specifically at RT tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { }
#endif /* CONFIG_CPU_FREQ */

#ifdef arch_scale_freq_capacity
# ifndef arch_scale_freq_invariant
#  define arch_scale_freq_invariant()	true
# endif
#else
# define arch_scale_freq_invariant()	false
#endif

#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
static inline unsigned long cpu_util_dl(struct rq *rq)
{
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

static inline unsigned long cpu_util_cfs(struct rq *rq)
{
	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

	if (sched_feat(UTIL_EST)) {
		util = max_t(unsigned long, util,
			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
	}

	return util;
}
#endif
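/*
 * Illustrative sketch, not part of the original header;
 * example_cpu_total_util() is a made-up helper. It shows a schedutil-style
 * aggregation of the two estimates above: CFS plus DL demand, clamped to
 * the full capacity of the CPU. The real governor logic lives in
 * kernel/sched/cpufreq_schedutil.c and is more involved.
 */
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
static inline unsigned long example_cpu_total_util(struct rq *rq)
{
	unsigned long util = cpu_util_cfs(rq) + cpu_util_dl(rq);

	return min_t(unsigned long, util, SCHED_CAPACITY_SCALE);
}
#endif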