1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Scheduler internal types and methods: 4 */ 5 #include <linux/sched.h> 6 7 #include <linux/sched/autogroup.h> 8 #include <linux/sched/clock.h> 9 #include <linux/sched/coredump.h> 10 #include <linux/sched/cpufreq.h> 11 #include <linux/sched/cputime.h> 12 #include <linux/sched/deadline.h> 13 #include <linux/sched/debug.h> 14 #include <linux/sched/hotplug.h> 15 #include <linux/sched/idle.h> 16 #include <linux/sched/init.h> 17 #include <linux/sched/isolation.h> 18 #include <linux/sched/jobctl.h> 19 #include <linux/sched/loadavg.h> 20 #include <linux/sched/mm.h> 21 #include <linux/sched/nohz.h> 22 #include <linux/sched/numa_balancing.h> 23 #include <linux/sched/prio.h> 24 #include <linux/sched/rt.h> 25 #include <linux/sched/signal.h> 26 #include <linux/sched/smt.h> 27 #include <linux/sched/stat.h> 28 #include <linux/sched/sysctl.h> 29 #include <linux/sched/task.h> 30 #include <linux/sched/task_stack.h> 31 #include <linux/sched/topology.h> 32 #include <linux/sched/user.h> 33 #include <linux/sched/wake_q.h> 34 #include <linux/sched/xacct.h> 35 36 #include <uapi/linux/sched/types.h> 37 38 #include <linux/binfmts.h> 39 #include <linux/blkdev.h> 40 #include <linux/compat.h> 41 #include <linux/context_tracking.h> 42 #include <linux/cpufreq.h> 43 #include <linux/cpuidle.h> 44 #include <linux/cpuset.h> 45 #include <linux/ctype.h> 46 #include <linux/debugfs.h> 47 #include <linux/delayacct.h> 48 #include <linux/energy_model.h> 49 #include <linux/init_task.h> 50 #include <linux/kprobes.h> 51 #include <linux/kthread.h> 52 #include <linux/membarrier.h> 53 #include <linux/migrate.h> 54 #include <linux/mmu_context.h> 55 #include <linux/nmi.h> 56 #include <linux/proc_fs.h> 57 #include <linux/prefetch.h> 58 #include <linux/profile.h> 59 #include <linux/psi.h> 60 #include <linux/rcupdate_wait.h> 61 #include <linux/security.h> 62 #include <linux/stop_machine.h> 63 #include <linux/suspend.h> 64 #include <linux/swait.h> 65 #include <linux/syscalls.h> 66 #include <linux/task_work.h> 67 #include <linux/tsacct_kern.h> 68 69 #include <asm/tlb.h> 70 71 #ifdef CONFIG_PARAVIRT 72 # include <asm/paravirt.h> 73 #endif 74 75 #include "cpupri.h" 76 #include "cpudeadline.h" 77 78 #ifdef CONFIG_SCHED_DEBUG 79 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) 80 #else 81 # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) 82 #endif 83 84 struct rq; 85 struct cpuidle_state; 86 87 /* task_struct::on_rq states: */ 88 #define TASK_ON_RQ_QUEUED 1 89 #define TASK_ON_RQ_MIGRATING 2 90 91 extern __read_mostly int scheduler_running; 92 93 extern unsigned long calc_load_update; 94 extern atomic_long_t calc_load_tasks; 95 96 extern void calc_global_load_tick(struct rq *this_rq); 97 extern long calc_load_fold_active(struct rq *this_rq, long adjust); 98 99 #ifdef CONFIG_SMP 100 extern void cpu_load_update_active(struct rq *this_rq); 101 #else 102 static inline void cpu_load_update_active(struct rq *this_rq) { } 103 #endif 104 105 /* 106 * Helpers for converting nanosecond timing to jiffy resolution 107 */ 108 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) 109 110 /* 111 * Increase resolution of nice-level calculations for 64-bit architectures. 112 * The extra resolution improves shares distribution and load balancing of 113 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 114 * hierarchies, especially on larger systems. This is not a user-visible change 115 * and does not change the user-interface for setting shares/weights. 
116 * 117 * We increase resolution only if we have enough bits to allow this increased 118 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit 119 * are pretty high and the returns do not justify the increased costs. 120 * 121 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to 122 * increase coverage and consistency always enable it on 64-bit platforms. 123 */ 124 #ifdef CONFIG_64BIT 125 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) 126 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) 127 # define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT) 128 #else 129 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) 130 # define scale_load(w) (w) 131 # define scale_load_down(w) (w) 132 #endif 133 134 /* 135 * Task weight (visible to users) and its load (invisible to users) have 136 * independent resolution, but they should be well calibrated. We use 137 * scale_load() and scale_load_down() to convert between them. The 138 * following must be true: 139 * 140 * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD 141 * 142 */ 143 #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) 144 145 /* 146 * Single value that decides SCHED_DEADLINE internal math precision. 147 * 10 -> just above 1us 148 * 9 -> just above 0.5us 149 */ 150 #define DL_SCALE 10 151 152 /* 153 * Single value that denotes runtime == period, i.e. unlimited time. 154 */ 155 #define RUNTIME_INF ((u64)~0ULL) 156 157 static inline int idle_policy(int policy) 158 { 159 return policy == SCHED_IDLE; 160 } 161 static inline int fair_policy(int policy) 162 { 163 return policy == SCHED_NORMAL || policy == SCHED_BATCH; 164 } 165 166 static inline int rt_policy(int policy) 167 { 168 return policy == SCHED_FIFO || policy == SCHED_RR; 169 } 170 171 static inline int dl_policy(int policy) 172 { 173 return policy == SCHED_DEADLINE; 174 } 175 static inline bool valid_policy(int policy) 176 { 177 return idle_policy(policy) || fair_policy(policy) || 178 rt_policy(policy) || dl_policy(policy); 179 } 180 181 static inline int task_has_idle_policy(struct task_struct *p) 182 { 183 return idle_policy(p->policy); 184 } 185 186 static inline int task_has_rt_policy(struct task_struct *p) 187 { 188 return rt_policy(p->policy); 189 } 190 191 static inline int task_has_dl_policy(struct task_struct *p) 192 { 193 return dl_policy(p->policy); 194 } 195 196 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 197 198 /* 199 * !! For sched_setattr_nocheck() (kernel) only !! 200 * 201 * This is actually gross. :( 202 * 203 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE 204 * tasks, but still be able to sleep. We need this on platforms that cannot 205 * atomically change clock frequency. Remove once fast switching is 206 * available on such platforms. 207 * 208 * SUGOV stands for SchedUtil GOVernor. 209 */ 210 #define SCHED_FLAG_SUGOV 0x10000000 211 212 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se) 213 { 214 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 215 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); 216 #else 217 return false; 218 #endif 219 } 220 221 /* 222 * Tells whether entity @a should preempt entity @b.
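* (Illustrative example, numbers made up: with a->deadline = 150us and b->deadline = 200us, both in rq-clock time, dl_entity_preempt(a, b) below is true - plain EDF ordering - while a special SUGOV entity preempts regardless of its deadline.)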
223 */ 224 static inline bool 225 dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b) 226 { 227 return dl_entity_is_special(a) || 228 dl_time_before(a->deadline, b->deadline); 229 } 230 231 /* 232 * This is the priority-queue data structure of the RT scheduling class: 233 */ 234 struct rt_prio_array { 235 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ 236 struct list_head queue[MAX_RT_PRIO]; 237 }; 238 239 struct rt_bandwidth { 240 /* nests inside the rq lock: */ 241 raw_spinlock_t rt_runtime_lock; 242 ktime_t rt_period; 243 u64 rt_runtime; 244 struct hrtimer rt_period_timer; 245 unsigned int rt_period_active; 246 }; 247 248 void __dl_clear_params(struct task_struct *p); 249 250 /* 251 * To keep the bandwidth of -deadline tasks and groups under control 252 * we need some place to: 253 * - store the maximum -deadline bandwidth of the system (the group); 254 * - cache the fraction of that bandwidth that is currently allocated. 255 * 256 * This is all done in the data structure below. It is similar to the 257 * one used for RT-throttling (rt_bandwidth), with the main difference 258 * that, since here we are only interested in admission control, we 259 * do not decrease any runtime while the group "executes", nor do we 260 * need a timer to replenish it. 261 * 262 * With respect to SMP, the bandwidth is given on a per-CPU basis, 263 * meaning that: 264 * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU; 265 * - dl_total_bw array contains, in the i-th element, the currently 266 * allocated bandwidth on the i-th CPU. 267 * Moreover, groups consume bandwidth on each CPU, while tasks only 268 * consume bandwidth on the CPU they're running on. 269 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw 270 * that will be shown the next time the proc or cgroup controls are 271 * read. This in turn can be changed by writing on its own 272 * control.
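* As a worked example (illustrative numbers): with the default rt_runtime/rt_period of 950000us/1000000us each CPU offers 95% of -deadline bandwidth, so a 4-CPU root domain keeps admitting a task of bandwidth new_bw only while total_bw - old_bw + new_bw <= bw * cpus, i.e. up to 380% aggregate utilization; see __dl_overflow() below.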
273 */ 274 struct dl_bandwidth { 275 raw_spinlock_t dl_runtime_lock; 276 u64 dl_runtime; 277 u64 dl_period; 278 }; 279 280 static inline int dl_bandwidth_enabled(void) 281 { 282 return sysctl_sched_rt_runtime >= 0; 283 } 284 285 struct dl_bw { 286 raw_spinlock_t lock; 287 u64 bw; 288 u64 total_bw; 289 }; 290 291 static inline void __dl_update(struct dl_bw *dl_b, s64 bw); 292 293 static inline 294 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 295 { 296 dl_b->total_bw -= tsk_bw; 297 __dl_update(dl_b, (s32)tsk_bw / cpus); 298 } 299 300 static inline 301 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 302 { 303 dl_b->total_bw += tsk_bw; 304 __dl_update(dl_b, -((s32)tsk_bw / cpus)); 305 } 306 307 static inline 308 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw) 309 { 310 return dl_b->bw != -1 && 311 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw; 312 } 313 314 extern void dl_change_utilization(struct task_struct *p, u64 new_bw); 315 extern void init_dl_bw(struct dl_bw *dl_b); 316 extern int sched_dl_global_validate(void); 317 extern void sched_dl_do_global(void); 318 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); 319 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); 320 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); 321 extern bool __checkparam_dl(const struct sched_attr *attr); 322 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); 323 extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); 324 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 325 extern bool dl_cpu_busy(unsigned int cpu); 326 327 #ifdef CONFIG_CGROUP_SCHED 328 329 #include <linux/cgroup.h> 330 #include <linux/psi.h> 331 332 struct cfs_rq; 333 struct rt_rq; 334 335 extern struct list_head task_groups; 336 337 struct cfs_bandwidth { 338 #ifdef CONFIG_CFS_BANDWIDTH 339 raw_spinlock_t lock; 340 ktime_t period; 341 u64 quota; 342 u64 runtime; 343 s64 hierarchical_quota; 344 u64 runtime_expires; 345 int expires_seq; 346 347 short idle; 348 short period_active; 349 struct hrtimer period_timer; 350 struct hrtimer slack_timer; 351 struct list_head throttled_cfs_rq; 352 353 /* Statistics: */ 354 int nr_periods; 355 int nr_throttled; 356 u64 throttled_time; 357 358 bool distribute_running; 359 #endif 360 }; 361 362 /* Task group related information */ 363 struct task_group { 364 struct cgroup_subsys_state css; 365 366 #ifdef CONFIG_FAIR_GROUP_SCHED 367 /* schedulable entities of this group on each CPU */ 368 struct sched_entity **se; 369 /* runqueue "owned" by this group on each CPU */ 370 struct cfs_rq **cfs_rq; 371 unsigned long shares; 372 373 #ifdef CONFIG_SMP 374 /* 375 * load_avg can be heavily contended at clock tick time, so put 376 * it in its own cacheline separated from the fields above which 377 * will also be accessed at each tick. 
378 */ 379 atomic_long_t load_avg ____cacheline_aligned; 380 #endif 381 #endif 382 383 #ifdef CONFIG_RT_GROUP_SCHED 384 struct sched_rt_entity **rt_se; 385 struct rt_rq **rt_rq; 386 387 struct rt_bandwidth rt_bandwidth; 388 #endif 389 390 struct rcu_head rcu; 391 struct list_head list; 392 393 struct task_group *parent; 394 struct list_head siblings; 395 struct list_head children; 396 397 #ifdef CONFIG_SCHED_AUTOGROUP 398 struct autogroup *autogroup; 399 #endif 400 401 struct cfs_bandwidth cfs_bandwidth; 402 }; 403 404 #ifdef CONFIG_FAIR_GROUP_SCHED 405 #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD 406 407 /* 408 * A weight of 0 or 1 can cause arithmetics problems. 409 * A weight of a cfs_rq is the sum of weights of which entities 410 * are queued on this cfs_rq, so a weight of a entity should not be 411 * too large, so as the shares value of a task group. 412 * (The default weight is 1024 - so there's no practical 413 * limitation from this.) 414 */ 415 #define MIN_SHARES (1UL << 1) 416 #define MAX_SHARES (1UL << 18) 417 #endif 418 419 typedef int (*tg_visitor)(struct task_group *, void *); 420 421 extern int walk_tg_tree_from(struct task_group *from, 422 tg_visitor down, tg_visitor up, void *data); 423 424 /* 425 * Iterate the full tree, calling @down when first entering a node and @up when 426 * leaving it for the final time. 427 * 428 * Caller must hold rcu_lock or sufficient equivalent. 429 */ 430 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) 431 { 432 return walk_tg_tree_from(&root_task_group, down, up, data); 433 } 434 435 extern int tg_nop(struct task_group *tg, void *data); 436 437 extern void free_fair_sched_group(struct task_group *tg); 438 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); 439 extern void online_fair_sched_group(struct task_group *tg); 440 extern void unregister_fair_sched_group(struct task_group *tg); 441 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 442 struct sched_entity *se, int cpu, 443 struct sched_entity *parent); 444 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 445 446 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); 447 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 448 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); 449 450 extern void free_rt_sched_group(struct task_group *tg); 451 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); 452 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, 453 struct sched_rt_entity *rt_se, int cpu, 454 struct sched_rt_entity *parent); 455 extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); 456 extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); 457 extern long sched_group_rt_runtime(struct task_group *tg); 458 extern long sched_group_rt_period(struct task_group *tg); 459 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 460 461 extern struct task_group *sched_create_group(struct task_group *parent); 462 extern void sched_online_group(struct task_group *tg, 463 struct task_group *parent); 464 extern void sched_destroy_group(struct task_group *tg); 465 extern void sched_offline_group(struct task_group *tg); 466 467 extern void sched_move_task(struct task_struct *tsk); 468 469 #ifdef CONFIG_FAIR_GROUP_SCHED 470 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 471 472 #ifdef CONFIG_SMP 473 extern void 
set_task_rq_fair(struct sched_entity *se, 474 struct cfs_rq *prev, struct cfs_rq *next); 475 #else /* !CONFIG_SMP */ 476 static inline void set_task_rq_fair(struct sched_entity *se, 477 struct cfs_rq *prev, struct cfs_rq *next) { } 478 #endif /* CONFIG_SMP */ 479 #endif /* CONFIG_FAIR_GROUP_SCHED */ 480 481 #else /* CONFIG_CGROUP_SCHED */ 482 483 struct cfs_bandwidth { }; 484 485 #endif /* CONFIG_CGROUP_SCHED */ 486 487 /* CFS-related fields in a runqueue */ 488 struct cfs_rq { 489 struct load_weight load; 490 unsigned long runnable_weight; 491 unsigned int nr_running; 492 unsigned int h_nr_running; 493 494 u64 exec_clock; 495 u64 min_vruntime; 496 #ifndef CONFIG_64BIT 497 u64 min_vruntime_copy; 498 #endif 499 500 struct rb_root_cached tasks_timeline; 501 502 /* 503 * 'curr' points to currently running entity on this cfs_rq. 504 * It is set to NULL otherwise (i.e when none are currently running). 505 */ 506 struct sched_entity *curr; 507 struct sched_entity *next; 508 struct sched_entity *last; 509 struct sched_entity *skip; 510 511 #ifdef CONFIG_SCHED_DEBUG 512 unsigned int nr_spread_over; 513 #endif 514 515 #ifdef CONFIG_SMP 516 /* 517 * CFS load tracking 518 */ 519 struct sched_avg avg; 520 #ifndef CONFIG_64BIT 521 u64 load_last_update_time_copy; 522 #endif 523 struct { 524 raw_spinlock_t lock ____cacheline_aligned; 525 int nr; 526 unsigned long load_avg; 527 unsigned long util_avg; 528 unsigned long runnable_sum; 529 } removed; 530 531 #ifdef CONFIG_FAIR_GROUP_SCHED 532 unsigned long tg_load_avg_contrib; 533 long propagate; 534 long prop_runnable_sum; 535 536 /* 537 * h_load = weight * f(tg) 538 * 539 * Where f(tg) is the recursive weight fraction assigned to 540 * this group. 541 */ 542 unsigned long h_load; 543 u64 last_h_load_update; 544 struct sched_entity *h_load_next; 545 #endif /* CONFIG_FAIR_GROUP_SCHED */ 546 #endif /* CONFIG_SMP */ 547 548 #ifdef CONFIG_FAIR_GROUP_SCHED 549 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ 550 551 /* 552 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 553 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 554 * (like users, containers etc.) 555 * 556 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. 557 * This list is used during load balance. 
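* (Usage sketch: fair.c walks this list via for_each_leaf_cfs_rq() when updating blocked load and when rebalancing; rq->tmp_alone_branch keeps the list ordered children-before-parents while a new branch of the hierarchy is being attached.)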
558 */ 559 int on_list; 560 struct list_head leaf_cfs_rq_list; 561 struct task_group *tg; /* group that "owns" this runqueue */ 562 563 #ifdef CONFIG_CFS_BANDWIDTH 564 int runtime_enabled; 565 int expires_seq; 566 u64 runtime_expires; 567 s64 runtime_remaining; 568 569 u64 throttled_clock; 570 u64 throttled_clock_task; 571 u64 throttled_clock_task_time; 572 int throttled; 573 int throttle_count; 574 struct list_head throttled_list; 575 #endif /* CONFIG_CFS_BANDWIDTH */ 576 #endif /* CONFIG_FAIR_GROUP_SCHED */ 577 }; 578 579 static inline int rt_bandwidth_enabled(void) 580 { 581 return sysctl_sched_rt_runtime >= 0; 582 } 583 584 /* RT IPI pull logic requires IRQ_WORK */ 585 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) 586 # define HAVE_RT_PUSH_IPI 587 #endif 588 589 /* Real-Time classes' related field in a runqueue: */ 590 struct rt_rq { 591 struct rt_prio_array active; 592 unsigned int rt_nr_running; 593 unsigned int rr_nr_running; 594 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 595 struct { 596 int curr; /* highest queued rt task prio */ 597 #ifdef CONFIG_SMP 598 int next; /* next highest */ 599 #endif 600 } highest_prio; 601 #endif 602 #ifdef CONFIG_SMP 603 unsigned long rt_nr_migratory; 604 unsigned long rt_nr_total; 605 int overloaded; 606 struct plist_head pushable_tasks; 607 608 #endif /* CONFIG_SMP */ 609 int rt_queued; 610 611 int rt_throttled; 612 u64 rt_time; 613 u64 rt_runtime; 614 /* Nests inside the rq lock: */ 615 raw_spinlock_t rt_runtime_lock; 616 617 #ifdef CONFIG_RT_GROUP_SCHED 618 unsigned long rt_nr_boosted; 619 620 struct rq *rq; 621 struct task_group *tg; 622 #endif 623 }; 624 625 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) 626 { 627 return rt_rq->rt_queued && rt_rq->rt_nr_running; 628 } 629 630 /* Deadline class' related fields in a runqueue */ 631 struct dl_rq { 632 /* runqueue is an rbtree, ordered by deadline */ 633 struct rb_root_cached root; 634 635 unsigned long dl_nr_running; 636 637 #ifdef CONFIG_SMP 638 /* 639 * Deadline values of the currently executing and the 640 * earliest ready task on this rq. Caching these facilitates 641 * the decision whether or not a ready but not running task 642 * should migrate somewhere else. 643 */ 644 struct { 645 u64 curr; 646 u64 next; 647 } earliest_dl; 648 649 unsigned long dl_nr_migratory; 650 int overloaded; 651 652 /* 653 * Tasks on this rq that can be pushed away. They are kept in 654 * an rb-tree, ordered by tasks' deadlines, with caching 655 * of the leftmost (earliest deadline) element. 656 */ 657 struct rb_root_cached pushable_dl_tasks_root; 658 #else 659 struct dl_bw dl_bw; 660 #endif 661 /* 662 * "Active utilization" for this runqueue: increased when a 663 * task wakes up (becomes TASK_RUNNING) and decreased when a 664 * task blocks 665 */ 666 u64 running_bw; 667 668 /* 669 * Utilization of the tasks "assigned" to this runqueue (including 670 * the tasks that are in runqueue and the tasks that executed on this 671 * CPU and blocked). Increased when a task moves to this runqueue, and 672 * decreased when the task moves away (migrates, changes scheduling 673 * policy, or terminates). 674 * This is needed to compute the "inactive utilization" for the 675 * runqueue (inactive utilization = this_bw - running_bw). 676 */ 677 u64 this_bw; 678 u64 extra_bw; 679 680 /* 681 * Inverse of the fraction of CPU utilization that can be reclaimed 682 * by the GRUB algorithm. 
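* For example, if 95% of the CPU bandwidth may be reclaimed, bw_ratio caches roughly 1/0.95 in RATIO_SHIFT fixed point (cf. init_dl_rq_bw_ratio() and to_ratio()), so GRUB can multiply instead of divide in the reclaiming hot path.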
683 */ 684 u64 bw_ratio; 685 }; 686 687 #ifdef CONFIG_FAIR_GROUP_SCHED 688 /* An entity is a task if it doesn't "own" a runqueue */ 689 #define entity_is_task(se) (!se->my_q) 690 #else 691 #define entity_is_task(se) 1 692 #endif 693 694 #ifdef CONFIG_SMP 695 /* 696 * XXX we want to get rid of these helpers and use the full load resolution. 697 */ 698 static inline long se_weight(struct sched_entity *se) 699 { 700 return scale_load_down(se->load.weight); 701 } 702 703 static inline long se_runnable(struct sched_entity *se) 704 { 705 return scale_load_down(se->runnable_weight); 706 } 707 708 static inline bool sched_asym_prefer(int a, int b) 709 { 710 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); 711 } 712 713 struct perf_domain { 714 struct em_perf_domain *em_pd; 715 struct perf_domain *next; 716 struct rcu_head rcu; 717 }; 718 719 /* Scheduling group status flags */ 720 #define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ 721 #define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ 722 723 /* 724 * We add the notion of a root-domain which will be used to define per-domain 725 * variables. Each exclusive cpuset essentially defines an island domain by 726 * fully partitioning the member CPUs from any other cpuset. Whenever a new 727 * exclusive cpuset is created, we also create and attach a new root-domain 728 * object. 729 * 730 */ 731 struct root_domain { 732 atomic_t refcount; 733 atomic_t rto_count; 734 struct rcu_head rcu; 735 cpumask_var_t span; 736 cpumask_var_t online; 737 738 /* 739 * Indicate pullable load on at least one CPU, e.g: 740 * - More than one runnable task 741 * - Running task is misfit 742 */ 743 int overload; 744 745 /* Indicate one or more cpus over-utilized (tipping point) */ 746 int overutilized; 747 748 /* 749 * The bit corresponding to a CPU gets set here if such CPU has more 750 * than one runnable -deadline task (as it is below for RT tasks). 751 */ 752 cpumask_var_t dlo_mask; 753 atomic_t dlo_count; 754 struct dl_bw dl_bw; 755 struct cpudl cpudl; 756 757 #ifdef HAVE_RT_PUSH_IPI 758 /* 759 * For IPI pull requests, loop across the rto_mask. 760 */ 761 struct irq_work rto_push_work; 762 raw_spinlock_t rto_lock; 763 /* These are only updated and read within rto_lock */ 764 int rto_loop; 765 int rto_cpu; 766 /* These atomics are updated outside of a lock */ 767 atomic_t rto_loop_next; 768 atomic_t rto_loop_start; 769 #endif 770 /* 771 * The "RT overload" flag: it gets set if a CPU has more than 772 * one runnable RT task. 773 */ 774 cpumask_var_t rto_mask; 775 struct cpupri cpupri; 776 777 unsigned long max_cpu_capacity; 778 779 /* 780 * NULL-terminated list of performance domains intersecting with the 781 * CPUs of the rd. Protected by RCU. 782 */ 783 struct perf_domain *pd; 784 }; 785 786 extern struct root_domain def_root_domain; 787 extern struct mutex sched_domains_mutex; 788 789 extern void init_defrootdomain(void); 790 extern int sched_init_domains(const struct cpumask *cpu_map); 791 extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 792 extern void sched_get_rd(struct root_domain *rd); 793 extern void sched_put_rd(struct root_domain *rd); 794 795 #ifdef HAVE_RT_PUSH_IPI 796 extern void rto_push_irq_work_func(struct irq_work *work); 797 #endif 798 #endif /* CONFIG_SMP */ 799 800 /* 801 * This is the main, per-CPU runqueue data structure. 
802 * 803 * Locking rule: those places that want to lock multiple runqueues 804 * (such as the load balancing or the thread migration code), lock 805 * acquire operations must be ordered by ascending &runqueue. 806 */ 807 struct rq { 808 /* runqueue lock: */ 809 raw_spinlock_t lock; 810 811 /* 812 * nr_running and cpu_load should be in the same cacheline because 813 * remote CPUs use both these fields when doing load calculation. 814 */ 815 unsigned int nr_running; 816 #ifdef CONFIG_NUMA_BALANCING 817 unsigned int nr_numa_running; 818 unsigned int nr_preferred_running; 819 unsigned int numa_migrate_on; 820 #endif 821 #define CPU_LOAD_IDX_MAX 5 822 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 823 #ifdef CONFIG_NO_HZ_COMMON 824 #ifdef CONFIG_SMP 825 unsigned long last_load_update_tick; 826 unsigned long last_blocked_load_update_tick; 827 unsigned int has_blocked_load; 828 #endif /* CONFIG_SMP */ 829 unsigned int nohz_tick_stopped; 830 atomic_t nohz_flags; 831 #endif /* CONFIG_NO_HZ_COMMON */ 832 833 /* capture load from *all* tasks on this CPU: */ 834 struct load_weight load; 835 unsigned long nr_load_updates; 836 u64 nr_switches; 837 838 struct cfs_rq cfs; 839 struct rt_rq rt; 840 struct dl_rq dl; 841 842 #ifdef CONFIG_FAIR_GROUP_SCHED 843 /* list of leaf cfs_rq on this CPU: */ 844 struct list_head leaf_cfs_rq_list; 845 struct list_head *tmp_alone_branch; 846 #endif /* CONFIG_FAIR_GROUP_SCHED */ 847 848 /* 849 * This is part of a global counter where only the total sum 850 * over all CPUs matters. A task can increase this counter on 851 * one CPU and if it got migrated afterwards it may decrease 852 * it on another CPU. Always updated under the runqueue lock: 853 */ 854 unsigned long nr_uninterruptible; 855 856 struct task_struct *curr; 857 struct task_struct *idle; 858 struct task_struct *stop; 859 unsigned long next_balance; 860 struct mm_struct *prev_mm; 861 862 unsigned int clock_update_flags; 863 u64 clock; 864 u64 clock_task; 865 866 atomic_t nr_iowait; 867 868 #ifdef CONFIG_SMP 869 struct root_domain *rd; 870 struct sched_domain *sd; 871 872 unsigned long cpu_capacity; 873 unsigned long cpu_capacity_orig; 874 875 struct callback_head *balance_callback; 876 877 unsigned char idle_balance; 878 879 unsigned long misfit_task_load; 880 881 /* For active balancing */ 882 int active_balance; 883 int push_cpu; 884 struct cpu_stop_work active_balance_work; 885 886 /* CPU of this runqueue: */ 887 int cpu; 888 int online; 889 890 struct list_head cfs_tasks; 891 892 struct sched_avg avg_rt; 893 struct sched_avg avg_dl; 894 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 895 struct sched_avg avg_irq; 896 #endif 897 u64 idle_stamp; 898 u64 avg_idle; 899 900 /* This is used to determine avg_idle's max value */ 901 u64 max_idle_balance_cost; 902 #endif 903 904 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 905 u64 prev_irq_time; 906 #endif 907 #ifdef CONFIG_PARAVIRT 908 u64 prev_steal_time; 909 #endif 910 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 911 u64 prev_steal_time_rq; 912 #endif 913 914 /* calc_load related fields */ 915 unsigned long calc_load_update; 916 long calc_load_active; 917 918 #ifdef CONFIG_SCHED_HRTICK 919 #ifdef CONFIG_SMP 920 int hrtick_csd_pending; 921 call_single_data_t hrtick_csd; 922 #endif 923 struct hrtimer hrtick_timer; 924 #endif 925 926 #ifdef CONFIG_SCHEDSTATS 927 /* latency stats */ 928 struct sched_info rq_sched_info; 929 unsigned long long rq_cpu_time; 930 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
*/ 931 932 /* sys_sched_yield() stats */ 933 unsigned int yld_count; 934 935 /* schedule() stats */ 936 unsigned int sched_count; 937 unsigned int sched_goidle; 938 939 /* try_to_wake_up() stats */ 940 unsigned int ttwu_count; 941 unsigned int ttwu_local; 942 #endif 943 944 #ifdef CONFIG_SMP 945 struct llist_head wake_list; 946 #endif 947 948 #ifdef CONFIG_CPU_IDLE 949 /* Must be inspected within a rcu lock section */ 950 struct cpuidle_state *idle_state; 951 #endif 952 }; 953 954 static inline int cpu_of(struct rq *rq) 955 { 956 #ifdef CONFIG_SMP 957 return rq->cpu; 958 #else 959 return 0; 960 #endif 961 } 962 963 964 #ifdef CONFIG_SCHED_SMT 965 extern void __update_idle_core(struct rq *rq); 966 967 static inline void update_idle_core(struct rq *rq) 968 { 969 if (static_branch_unlikely(&sched_smt_present)) 970 __update_idle_core(rq); 971 } 972 973 #else 974 static inline void update_idle_core(struct rq *rq) { } 975 #endif 976 977 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 978 979 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 980 #define this_rq() this_cpu_ptr(&runqueues) 981 #define task_rq(p) cpu_rq(task_cpu(p)) 982 #define cpu_curr(cpu) (cpu_rq(cpu)->curr) 983 #define raw_rq() raw_cpu_ptr(&runqueues) 984 985 extern void update_rq_clock(struct rq *rq); 986 987 static inline u64 __rq_clock_broken(struct rq *rq) 988 { 989 return READ_ONCE(rq->clock); 990 } 991 992 /* 993 * rq::clock_update_flags bits 994 * 995 * %RQCF_REQ_SKIP - will request skipping of clock update on the next 996 * call to __schedule(). This is an optimisation to avoid 997 * neighbouring rq clock updates. 998 * 999 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is 1000 * in effect and calls to update_rq_clock() are being ignored. 1001 * 1002 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been 1003 * made to update_rq_clock() since the last time rq::lock was pinned. 1004 * 1005 * If inside of __schedule(), clock_update_flags will have been 1006 * shifted left (a left shift is a cheap operation for the fast path 1007 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, 1008 * 1009 * if (rq->clock_update_flags >= RQCF_UPDATED) 1010 * 1011 * to check if %RQCF_UPDATED is set. It'll never be shifted more than 1012 * one position though, because the next rq_unpin_lock() will shift it 1013 * back. 1014 */ 1015 #define RQCF_REQ_SKIP 0x01 1016 #define RQCF_ACT_SKIP 0x02 1017 #define RQCF_UPDATED 0x04 1018 1019 static inline void assert_clock_updated(struct rq *rq) 1020 { 1021 /* 1022 * The only reason for not seeing a clock update since the 1023 * last rq_pin_lock() is if we're currently skipping updates. 1024 */ 1025 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1026 } 1027 1028 static inline u64 rq_clock(struct rq *rq) 1029 { 1030 lockdep_assert_held(&rq->lock); 1031 assert_clock_updated(rq); 1032 1033 return rq->clock; 1034 } 1035 1036 static inline u64 rq_clock_task(struct rq *rq) 1037 { 1038 lockdep_assert_held(&rq->lock); 1039 assert_clock_updated(rq); 1040 1041 return rq->clock_task; 1042 } 1043 1044 static inline void rq_clock_skip_update(struct rq *rq) 1045 { 1046 lockdep_assert_held(&rq->lock); 1047 rq->clock_update_flags |= RQCF_REQ_SKIP; 1048 } 1049 1050 /* 1051 * See rt task throttling, which is the only time a skip 1052 * request is cancelled. 
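* (Typical use of the skip request itself, cf. yield_task_fair(): a path that has just done update_rq_clock() and knows __schedule() is imminent may call rq_clock_skip_update(rq) so the fastpath does not pay for a second, microscopic clock update.)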
1053 */ 1054 static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1055 { 1056 lockdep_assert_held(&rq->lock); 1057 rq->clock_update_flags &= ~RQCF_REQ_SKIP; 1058 } 1059 1060 struct rq_flags { 1061 unsigned long flags; 1062 struct pin_cookie cookie; 1063 #ifdef CONFIG_SCHED_DEBUG 1064 /* 1065 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1066 * current pin context is stashed here in case it needs to be 1067 * restored in rq_repin_lock(). 1068 */ 1069 unsigned int clock_update_flags; 1070 #endif 1071 }; 1072 1073 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1074 { 1075 rf->cookie = lockdep_pin_lock(&rq->lock); 1076 1077 #ifdef CONFIG_SCHED_DEBUG 1078 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1079 rf->clock_update_flags = 0; 1080 #endif 1081 } 1082 1083 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1084 { 1085 #ifdef CONFIG_SCHED_DEBUG 1086 if (rq->clock_update_flags > RQCF_ACT_SKIP) 1087 rf->clock_update_flags = RQCF_UPDATED; 1088 #endif 1089 1090 lockdep_unpin_lock(&rq->lock, rf->cookie); 1091 } 1092 1093 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1094 { 1095 lockdep_repin_lock(&rq->lock, rf->cookie); 1096 1097 #ifdef CONFIG_SCHED_DEBUG 1098 /* 1099 * Restore the value we stashed in @rf for this pin context. 1100 */ 1101 rq->clock_update_flags |= rf->clock_update_flags; 1102 #endif 1103 } 1104 1105 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1106 __acquires(rq->lock); 1107 1108 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1109 __acquires(p->pi_lock) 1110 __acquires(rq->lock); 1111 1112 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 1113 __releases(rq->lock) 1114 { 1115 rq_unpin_lock(rq, rf); 1116 raw_spin_unlock(&rq->lock); 1117 } 1118 1119 static inline void 1120 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1121 __releases(rq->lock) 1122 __releases(p->pi_lock) 1123 { 1124 rq_unpin_lock(rq, rf); 1125 raw_spin_unlock(&rq->lock); 1126 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 1127 } 1128 1129 static inline void 1130 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 1131 __acquires(rq->lock) 1132 { 1133 raw_spin_lock_irqsave(&rq->lock, rf->flags); 1134 rq_pin_lock(rq, rf); 1135 } 1136 1137 static inline void 1138 rq_lock_irq(struct rq *rq, struct rq_flags *rf) 1139 __acquires(rq->lock) 1140 { 1141 raw_spin_lock_irq(&rq->lock); 1142 rq_pin_lock(rq, rf); 1143 } 1144 1145 static inline void 1146 rq_lock(struct rq *rq, struct rq_flags *rf) 1147 __acquires(rq->lock) 1148 { 1149 raw_spin_lock(&rq->lock); 1150 rq_pin_lock(rq, rf); 1151 } 1152 1153 static inline void 1154 rq_relock(struct rq *rq, struct rq_flags *rf) 1155 __acquires(rq->lock) 1156 { 1157 raw_spin_lock(&rq->lock); 1158 rq_repin_lock(rq, rf); 1159 } 1160 1161 static inline void 1162 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 1163 __releases(rq->lock) 1164 { 1165 rq_unpin_lock(rq, rf); 1166 raw_spin_unlock_irqrestore(&rq->lock, rf->flags); 1167 } 1168 1169 static inline void 1170 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 1171 __releases(rq->lock) 1172 { 1173 rq_unpin_lock(rq, rf); 1174 raw_spin_unlock_irq(&rq->lock); 1175 } 1176 1177 static inline void 1178 rq_unlock(struct rq *rq, struct rq_flags *rf) 1179 __releases(rq->lock) 1180 { 1181 rq_unpin_lock(rq, rf); 1182 raw_spin_unlock(&rq->lock); 1183 } 1184 1185 static inline struct rq * 1186 this_rq_lock_irq(struct rq_flags *rf) 1187 
__acquires(rq->lock) 1188 { 1189 struct rq *rq; 1190 1191 local_irq_disable(); 1192 rq = this_rq(); 1193 rq_lock(rq, rf); 1194 return rq; 1195 } 1196 1197 #ifdef CONFIG_NUMA 1198 enum numa_topology_type { 1199 NUMA_DIRECT, 1200 NUMA_GLUELESS_MESH, 1201 NUMA_BACKPLANE, 1202 }; 1203 extern enum numa_topology_type sched_numa_topology_type; 1204 extern int sched_max_numa_distance; 1205 extern bool find_numa_distance(int distance); 1206 #endif 1207 1208 #ifdef CONFIG_NUMA 1209 extern void sched_init_numa(void); 1210 extern void sched_domains_numa_masks_set(unsigned int cpu); 1211 extern void sched_domains_numa_masks_clear(unsigned int cpu); 1212 #else 1213 static inline void sched_init_numa(void) { } 1214 static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1215 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1216 #endif 1217 1218 #ifdef CONFIG_NUMA_BALANCING 1219 /* The regions in numa_faults array from task_struct */ 1220 enum numa_faults_stats { 1221 NUMA_MEM = 0, 1222 NUMA_CPU, 1223 NUMA_MEMBUF, 1224 NUMA_CPUBUF 1225 }; 1226 extern void sched_setnuma(struct task_struct *p, int node); 1227 extern int migrate_task_to(struct task_struct *p, int cpu); 1228 extern int migrate_swap(struct task_struct *p, struct task_struct *t, 1229 int cpu, int scpu); 1230 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 1231 #else 1232 static inline void 1233 init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 1234 { 1235 } 1236 #endif /* CONFIG_NUMA_BALANCING */ 1237 1238 #ifdef CONFIG_SMP 1239 1240 static inline void 1241 queue_balance_callback(struct rq *rq, 1242 struct callback_head *head, 1243 void (*func)(struct rq *rq)) 1244 { 1245 lockdep_assert_held(&rq->lock); 1246 1247 if (unlikely(head->next)) 1248 return; 1249 1250 head->func = (void (*)(struct callback_head *))func; 1251 head->next = rq->balance_callback; 1252 rq->balance_callback = head; 1253 } 1254 1255 extern void sched_ttwu_pending(void); 1256 1257 #define rcu_dereference_check_sched_domain(p) \ 1258 rcu_dereference_check((p), \ 1259 lockdep_is_held(&sched_domains_mutex)) 1260 1261 /* 1262 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1263 * See detach_destroy_domains: synchronize_sched for details. 1264 * 1265 * The domain tree of any CPU may only be accessed from within 1266 * preempt-disabled sections. 1267 */ 1268 #define for_each_domain(cpu, __sd) \ 1269 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1270 __sd; __sd = __sd->parent) 1271 1272 #define for_each_lower_domain(sd) for (; sd; sd = sd->child) 1273 1274 /** 1275 * highest_flag_domain - Return highest sched_domain containing flag. 1276 * @cpu: The CPU whose highest level of sched domain is to 1277 * be returned. 1278 * @flag: The flag to check for the highest sched_domain 1279 * for the given CPU. 1280 * 1281 * Returns the highest sched_domain of a CPU which contains the given flag. 
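* For example, sd_llc below is set up by looking for the highest domain with SD_SHARE_PKG_RESOURCES, i.e. the widest span of CPUs sharing a cache (cf. update_top_cache_domain() in topology.c); the early break in the loop works because such flags are set bottom-up in the domain tree.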
1282 */ 1283 static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1284 { 1285 struct sched_domain *sd, *hsd = NULL; 1286 1287 for_each_domain(cpu, sd) { 1288 if (!(sd->flags & flag)) 1289 break; 1290 hsd = sd; 1291 } 1292 1293 return hsd; 1294 } 1295 1296 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1297 { 1298 struct sched_domain *sd; 1299 1300 for_each_domain(cpu, sd) { 1301 if (sd->flags & flag) 1302 break; 1303 } 1304 1305 return sd; 1306 } 1307 1308 DECLARE_PER_CPU(struct sched_domain *, sd_llc); 1309 DECLARE_PER_CPU(int, sd_llc_size); 1310 DECLARE_PER_CPU(int, sd_llc_id); 1311 DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); 1312 DECLARE_PER_CPU(struct sched_domain *, sd_numa); 1313 DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing); 1314 DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity); 1315 extern struct static_key_false sched_asym_cpucapacity; 1316 1317 struct sched_group_capacity { 1318 atomic_t ref; 1319 /* 1320 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 1321 * for a single CPU. 1322 */ 1323 unsigned long capacity; 1324 unsigned long min_capacity; /* Min per-CPU capacity in group */ 1325 unsigned long max_capacity; /* Max per-CPU capacity in group */ 1326 unsigned long next_update; 1327 int imbalance; /* XXX unrelated to capacity but shared group state */ 1328 1329 #ifdef CONFIG_SCHED_DEBUG 1330 int id; 1331 #endif 1332 1333 unsigned long cpumask[0]; /* Balance mask */ 1334 }; 1335 1336 struct sched_group { 1337 struct sched_group *next; /* Must be a circular list */ 1338 atomic_t ref; 1339 1340 unsigned int group_weight; 1341 struct sched_group_capacity *sgc; 1342 int asym_prefer_cpu; /* CPU of highest priority in group */ 1343 1344 /* 1345 * The CPUs this group covers. 1346 * 1347 * NOTE: this field is variable length. (Allocated dynamically 1348 * by attaching extra space to the end of the structure, 1349 * depending on how many CPUs the kernel has booted up with) 1350 */ 1351 unsigned long cpumask[0]; 1352 }; 1353 1354 static inline struct cpumask *sched_group_span(struct sched_group *sg) 1355 { 1356 return to_cpumask(sg->cpumask); 1357 } 1358 1359 /* 1360 * See build_balance_mask(). 1361 */ 1362 static inline struct cpumask *group_balance_mask(struct sched_group *sg) 1363 { 1364 return to_cpumask(sg->sgc->cpumask); 1365 } 1366 1367 /** 1368 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. 1369 * @group: The group whose first CPU is to be returned. 1370 */ 1371 static inline unsigned int group_first_cpu(struct sched_group *group) 1372 { 1373 return cpumask_first(sched_group_span(group)); 1374 } 1375 1376 extern int group_balance_cpu(struct sched_group *sg); 1377 1378 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 1379 void register_sched_domain_sysctl(void); 1380 void dirty_sched_domain_sysctl(int cpu); 1381 void unregister_sched_domain_sysctl(void); 1382 #else 1383 static inline void register_sched_domain_sysctl(void) 1384 { 1385 } 1386 static inline void dirty_sched_domain_sysctl(int cpu) 1387 { 1388 } 1389 static inline void unregister_sched_domain_sysctl(void) 1390 { 1391 } 1392 #endif 1393 1394 #else 1395 1396 static inline void sched_ttwu_pending(void) { } 1397 1398 #endif /* CONFIG_SMP */ 1399 1400 #include "stats.h" 1401 #include "autogroup.h" 1402 1403 #ifdef CONFIG_CGROUP_SCHED 1404 1405 /* 1406 * Return the group to which this task belongs. 
1407 * 1408 * We cannot use task_css() and friends because the cgroup subsystem 1409 * changes that value before the cgroup_subsys::attach() method is called, 1410 * therefore we cannot pin it and might observe the wrong value. 1411 * 1412 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup 1413 * core changes this before calling sched_move_task(). 1414 * 1415 * Instead we use a 'copy' which is updated from sched_move_task() while 1416 * holding both task_struct::pi_lock and rq::lock. 1417 */ 1418 static inline struct task_group *task_group(struct task_struct *p) 1419 { 1420 return p->sched_task_group; 1421 } 1422 1423 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1424 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1425 { 1426 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1427 struct task_group *tg = task_group(p); 1428 #endif 1429 1430 #ifdef CONFIG_FAIR_GROUP_SCHED 1431 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1432 p->se.cfs_rq = tg->cfs_rq[cpu]; 1433 p->se.parent = tg->se[cpu]; 1434 #endif 1435 1436 #ifdef CONFIG_RT_GROUP_SCHED 1437 p->rt.rt_rq = tg->rt_rq[cpu]; 1438 p->rt.parent = tg->rt_se[cpu]; 1439 #endif 1440 } 1441 1442 #else /* CONFIG_CGROUP_SCHED */ 1443 1444 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 1445 static inline struct task_group *task_group(struct task_struct *p) 1446 { 1447 return NULL; 1448 } 1449 1450 #endif /* CONFIG_CGROUP_SCHED */ 1451 1452 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1453 { 1454 set_task_rq(p, cpu); 1455 #ifdef CONFIG_SMP 1456 /* 1457 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1458 * successfully executed on another CPU. We must ensure that updates of 1459 * per-task data have been completed by this moment. 1460 */ 1461 smp_wmb(); 1462 #ifdef CONFIG_THREAD_INFO_IN_TASK 1463 p->cpu = cpu; 1464 #else 1465 task_thread_info(p)->cpu = cpu; 1466 #endif 1467 p->wake_cpu = cpu; 1468 #endif 1469 } 1470 1471 /* 1472 * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1473 */ 1474 #ifdef CONFIG_SCHED_DEBUG 1475 # include <linux/static_key.h> 1476 # define const_debug __read_mostly 1477 #else 1478 # define const_debug const 1479 #endif 1480 1481 #define SCHED_FEAT(name, enabled) \ 1482 __SCHED_FEAT_##name , 1483 1484 enum { 1485 #include "features.h" 1486 __SCHED_FEAT_NR, 1487 }; 1488 1489 #undef SCHED_FEAT 1490 1491 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) 1492 1493 /* 1494 * To support run-time toggling of sched features, all the translation units 1495 * (but core.c) reference the sysctl_sched_features defined in core.c. 1496 */ 1497 extern const_debug unsigned int sysctl_sched_features; 1498 1499 #define SCHED_FEAT(name, enabled) \ 1500 static __always_inline bool static_branch_##name(struct static_key *key) \ 1501 { \ 1502 return static_key_##enabled(key); \ 1503 } 1504 1505 #include "features.h" 1506 #undef SCHED_FEAT 1507 1508 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1509 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1510 1511 #else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ 1512 1513 /* 1514 * Each translation unit has its own copy of sysctl_sched_features to allow 1515 * constants propagation at compile time and compiler optimization based on 1516 * features default. 
1517 */ 1518 #define SCHED_FEAT(name, enabled) \ 1519 (1UL << __SCHED_FEAT_##name) * enabled | 1520 static const_debug __maybe_unused unsigned int sysctl_sched_features = 1521 #include "features.h" 1522 0; 1523 #undef SCHED_FEAT 1524 1525 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1526 1527 #endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ 1528 1529 extern struct static_key_false sched_numa_balancing; 1530 extern struct static_key_false sched_schedstats; 1531 1532 static inline u64 global_rt_period(void) 1533 { 1534 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1535 } 1536 1537 static inline u64 global_rt_runtime(void) 1538 { 1539 if (sysctl_sched_rt_runtime < 0) 1540 return RUNTIME_INF; 1541 1542 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 1543 } 1544 1545 static inline int task_current(struct rq *rq, struct task_struct *p) 1546 { 1547 return rq->curr == p; 1548 } 1549 1550 static inline int task_running(struct rq *rq, struct task_struct *p) 1551 { 1552 #ifdef CONFIG_SMP 1553 return p->on_cpu; 1554 #else 1555 return task_current(rq, p); 1556 #endif 1557 } 1558 1559 static inline int task_on_rq_queued(struct task_struct *p) 1560 { 1561 return p->on_rq == TASK_ON_RQ_QUEUED; 1562 } 1563 1564 static inline int task_on_rq_migrating(struct task_struct *p) 1565 { 1566 return p->on_rq == TASK_ON_RQ_MIGRATING; 1567 } 1568 1569 /* 1570 * wake flags 1571 */ 1572 #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ 1573 #define WF_FORK 0x02 /* Child wakeup after fork */ 1574 #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ 1575 1576 /* 1577 * To aid in avoiding the subversion of "niceness" due to uneven distribution 1578 * of tasks with abnormal "nice" values across CPUs the contribution that 1579 * each task makes to its run queue's load is weighted according to its 1580 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 1581 * scaled version of the new time slice allocation that they receive on time 1582 * slice expiry etc. 1583 */ 1584 1585 #define WEIGHT_IDLEPRIO 3 1586 #define WMULT_IDLEPRIO 1431655765 1587 1588 extern const int sched_prio_to_weight[40]; 1589 extern const u32 sched_prio_to_wmult[40]; 1590 1591 /* 1592 * {de,en}queue flags: 1593 * 1594 * DEQUEUE_SLEEP - task is no longer runnable 1595 * ENQUEUE_WAKEUP - task just became runnable 1596 * 1597 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 1598 * are in a known state which allows modification. Such pairs 1599 * should preserve as much state as possible. 1600 * 1601 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 1602 * in the runqueue. 
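* A typical SAVE/RESTORE pair (sketched after __sched_setscheduler(), which also ORs in the MOVE and NOCLOCK flags as needed) looks like: queued = task_on_rq_queued(p); if (queued) dequeue_task(rq, p, DEQUEUE_SAVE); ...change p's scheduling properties...; if (queued) enqueue_task(rq, p, ENQUEUE_RESTORE);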
1603 * 1604 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 1605 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 1606 * ENQUEUE_MIGRATED - the task was migrated during wakeup 1607 * 1608 */ 1609 1610 #define DEQUEUE_SLEEP 0x01 1611 #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 1612 #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 1613 #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 1614 1615 #define ENQUEUE_WAKEUP 0x01 1616 #define ENQUEUE_RESTORE 0x02 1617 #define ENQUEUE_MOVE 0x04 1618 #define ENQUEUE_NOCLOCK 0x08 1619 1620 #define ENQUEUE_HEAD 0x10 1621 #define ENQUEUE_REPLENISH 0x20 1622 #ifdef CONFIG_SMP 1623 #define ENQUEUE_MIGRATED 0x40 1624 #else 1625 #define ENQUEUE_MIGRATED 0x00 1626 #endif 1627 1628 #define RETRY_TASK ((void *)-1UL) 1629 1630 struct sched_class { 1631 const struct sched_class *next; 1632 1633 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 1634 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 1635 void (*yield_task) (struct rq *rq); 1636 bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt); 1637 1638 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 1639 1640 /* 1641 * It is the responsibility of the pick_next_task() method that will 1642 * return the next task to call put_prev_task() on the @prev task or 1643 * something equivalent. 1644 * 1645 * May return RETRY_TASK when it finds a higher prio class has runnable 1646 * tasks. 1647 */ 1648 struct task_struct * (*pick_next_task)(struct rq *rq, 1649 struct task_struct *prev, 1650 struct rq_flags *rf); 1651 void (*put_prev_task)(struct rq *rq, struct task_struct *p); 1652 1653 #ifdef CONFIG_SMP 1654 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 1655 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 1656 1657 void (*task_woken)(struct rq *this_rq, struct task_struct *task); 1658 1659 void (*set_cpus_allowed)(struct task_struct *p, 1660 const struct cpumask *newmask); 1661 1662 void (*rq_online)(struct rq *rq); 1663 void (*rq_offline)(struct rq *rq); 1664 #endif 1665 1666 void (*set_curr_task)(struct rq *rq); 1667 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 1668 void (*task_fork)(struct task_struct *p); 1669 void (*task_dead)(struct task_struct *p); 1670 1671 /* 1672 * The switched_from() call is allowed to drop rq->lock, therefore we 1673 * cannot assume the switched_from/switched_to pair is serliazed by 1674 * rq->lock. They are however serialized by p->pi_lock. 
1675 */ 1676 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 1677 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 1678 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1679 int oldprio); 1680 1681 unsigned int (*get_rr_interval)(struct rq *rq, 1682 struct task_struct *task); 1683 1684 void (*update_curr)(struct rq *rq); 1685 1686 #define TASK_SET_GROUP 0 1687 #define TASK_MOVE_GROUP 1 1688 1689 #ifdef CONFIG_FAIR_GROUP_SCHED 1690 void (*task_change_group)(struct task_struct *p, int type); 1691 #endif 1692 }; 1693 1694 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 1695 { 1696 prev->sched_class->put_prev_task(rq, prev); 1697 } 1698 1699 static inline void set_curr_task(struct rq *rq, struct task_struct *curr) 1700 { 1701 curr->sched_class->set_curr_task(rq); 1702 } 1703 1704 #ifdef CONFIG_SMP 1705 #define sched_class_highest (&stop_sched_class) 1706 #else 1707 #define sched_class_highest (&dl_sched_class) 1708 #endif 1709 #define for_each_class(class) \ 1710 for (class = sched_class_highest; class; class = class->next) 1711 1712 extern const struct sched_class stop_sched_class; 1713 extern const struct sched_class dl_sched_class; 1714 extern const struct sched_class rt_sched_class; 1715 extern const struct sched_class fair_sched_class; 1716 extern const struct sched_class idle_sched_class; 1717 1718 1719 #ifdef CONFIG_SMP 1720 1721 extern void update_group_capacity(struct sched_domain *sd, int cpu); 1722 1723 extern void trigger_load_balance(struct rq *rq); 1724 1725 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); 1726 1727 #endif 1728 1729 #ifdef CONFIG_CPU_IDLE 1730 static inline void idle_set_state(struct rq *rq, 1731 struct cpuidle_state *idle_state) 1732 { 1733 rq->idle_state = idle_state; 1734 } 1735 1736 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1737 { 1738 SCHED_WARN_ON(!rcu_read_lock_held()); 1739 1740 return rq->idle_state; 1741 } 1742 #else 1743 static inline void idle_set_state(struct rq *rq, 1744 struct cpuidle_state *idle_state) 1745 { 1746 } 1747 1748 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1749 { 1750 return NULL; 1751 } 1752 #endif 1753 1754 extern void schedule_idle(void); 1755 1756 extern void sysrq_sched_debug_show(void); 1757 extern void sched_init_granularity(void); 1758 extern void update_max_interval(void); 1759 1760 extern void init_sched_dl_class(void); 1761 extern void init_sched_rt_class(void); 1762 extern void init_sched_fair_class(void); 1763 1764 extern void reweight_task(struct task_struct *p, int prio); 1765 1766 extern void resched_curr(struct rq *rq); 1767 extern void resched_cpu(int cpu); 1768 1769 extern struct rt_bandwidth def_rt_bandwidth; 1770 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 1771 1772 extern struct dl_bandwidth def_dl_bandwidth; 1773 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 1774 extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 1775 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 1776 extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); 1777 1778 #define BW_SHIFT 20 1779 #define BW_UNIT (1 << BW_SHIFT) 1780 #define RATIO_SHIFT 8 1781 unsigned long to_ratio(u64 period, u64 runtime); 1782 1783 extern void init_entity_runnable_average(struct sched_entity *se); 1784 extern void post_init_entity_util_avg(struct sched_entity *se); 1785 1786 #ifdef 
CONFIG_NO_HZ_FULL 1787 extern bool sched_can_stop_tick(struct rq *rq); 1788 extern int __init sched_tick_offload_init(void); 1789 1790 /* 1791 * The tick may be needed by tasks in the runqueue depending on their policy 1792 * and requirements. If the tick is needed, let's send the target CPU an IPI 1793 * to kick it out of nohz mode if necessary. 1794 */ 1795 static inline void sched_update_tick_dependency(struct rq *rq) 1796 { 1797 int cpu; 1798 1799 if (!tick_nohz_full_enabled()) 1800 return; 1801 1802 cpu = cpu_of(rq); 1803 1804 if (!tick_nohz_full_cpu(cpu)) 1805 return; 1806 1807 if (sched_can_stop_tick(rq)) 1808 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 1809 else 1810 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 1811 } 1812 #else 1813 static inline int sched_tick_offload_init(void) { return 0; } 1814 static inline void sched_update_tick_dependency(struct rq *rq) { } 1815 #endif 1816 1817 static inline void add_nr_running(struct rq *rq, unsigned count) 1818 { 1819 unsigned prev_nr = rq->nr_running; 1820 1821 rq->nr_running = prev_nr + count; 1822 1823 #ifdef CONFIG_SMP 1824 if (prev_nr < 2 && rq->nr_running >= 2) { 1825 if (!READ_ONCE(rq->rd->overload)) 1826 WRITE_ONCE(rq->rd->overload, 1); 1827 } 1828 #endif 1829 1830 sched_update_tick_dependency(rq); 1831 } 1832 1833 static inline void sub_nr_running(struct rq *rq, unsigned count) 1834 { 1835 rq->nr_running -= count; 1836 /* Check if we still need preemption */ 1837 sched_update_tick_dependency(rq); 1838 } 1839 1840 extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 1841 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 1842 1843 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 1844 1845 extern const_debug unsigned int sysctl_sched_nr_migrate; 1846 extern const_debug unsigned int sysctl_sched_migration_cost; 1847 1848 #ifdef CONFIG_SCHED_HRTICK 1849 1850 /* 1851 * Use hrtick when: 1852 * - enabled by features 1853 * - hrtimer is actually high res 1854 */ 1855 static inline int hrtick_enabled(struct rq *rq) 1856 { 1857 if (!sched_feat(HRTICK)) 1858 return 0; 1859 if (!cpu_active(cpu_of(rq))) 1860 return 0; 1861 return hrtimer_is_hres_active(&rq->hrtick_timer); 1862 } 1863 1864 void hrtick_start(struct rq *rq, u64 delay); 1865 1866 #else 1867 1868 static inline int hrtick_enabled(struct rq *rq) 1869 { 1870 return 0; 1871 } 1872 1873 #endif /* CONFIG_SCHED_HRTICK */ 1874 1875 #ifndef arch_scale_freq_capacity 1876 static __always_inline 1877 unsigned long arch_scale_freq_capacity(int cpu) 1878 { 1879 return SCHED_CAPACITY_SCALE; 1880 } 1881 #endif 1882 1883 #ifdef CONFIG_SMP 1884 #ifdef CONFIG_PREEMPT 1885 1886 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); 1887 1888 /* 1889 * fair double_lock_balance: Safely acquires both rq->locks in a fair 1890 * way at the expense of forcing extra atomic operations in all 1891 * invocations. This assures that the double_lock is acquired using the 1892 * same underlying policy as the spinlock_t on this architecture, which 1893 * reduces latency compared to the unfair variant below. However, it 1894 * also adds more overhead and therefore may reduce throughput. 
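* Either way, a nonzero return from double_lock_balance() means this_rq->lock was dropped in the middle, so callers such as the RT push path (cf. find_lock_lowest_rq()) must re-check that the task they meant to migrate is still eligible, and must pair the call with double_unlock_balance(this_rq, busiest).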
1895 */ 1896 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 1897 __releases(this_rq->lock) 1898 __acquires(busiest->lock) 1899 __acquires(this_rq->lock) 1900 { 1901 raw_spin_unlock(&this_rq->lock); 1902 double_rq_lock(this_rq, busiest); 1903 1904 return 1; 1905 } 1906 1907 #else 1908 /* 1909 * Unfair double_lock_balance: Optimizes throughput at the expense of 1910 * latency by eliminating extra atomic operations when the locks are 1911 * already in proper order on entry. This favors lower CPU-ids and will 1912 * grant the double lock to lower CPUs over higher ids under contention, 1913 * regardless of entry order into the function. 1914 */ 1915 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 1916 __releases(this_rq->lock) 1917 __acquires(busiest->lock) 1918 __acquires(this_rq->lock) 1919 { 1920 int ret = 0; 1921 1922 if (unlikely(!raw_spin_trylock(&busiest->lock))) { 1923 if (busiest < this_rq) { 1924 raw_spin_unlock(&this_rq->lock); 1925 raw_spin_lock(&busiest->lock); 1926 raw_spin_lock_nested(&this_rq->lock, 1927 SINGLE_DEPTH_NESTING); 1928 ret = 1; 1929 } else 1930 raw_spin_lock_nested(&busiest->lock, 1931 SINGLE_DEPTH_NESTING); 1932 } 1933 return ret; 1934 } 1935 1936 #endif /* CONFIG_PREEMPT */ 1937 1938 /* 1939 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1940 */ 1941 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 1942 { 1943 if (unlikely(!irqs_disabled())) { 1944 /* printk() doesn't work well under rq->lock */ 1945 raw_spin_unlock(&this_rq->lock); 1946 BUG_ON(1); 1947 } 1948 1949 return _double_lock_balance(this_rq, busiest); 1950 } 1951 1952 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1953 __releases(busiest->lock) 1954 { 1955 raw_spin_unlock(&busiest->lock); 1956 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 1957 } 1958 1959 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 1960 { 1961 if (l1 > l2) 1962 swap(l1, l2); 1963 1964 spin_lock(l1); 1965 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 1966 } 1967 1968 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 1969 { 1970 if (l1 > l2) 1971 swap(l1, l2); 1972 1973 spin_lock_irq(l1); 1974 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 1975 } 1976 1977 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 1978 { 1979 if (l1 > l2) 1980 swap(l1, l2); 1981 1982 raw_spin_lock(l1); 1983 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 1984 } 1985 1986 /* 1987 * double_rq_lock - safely lock two runqueues 1988 * 1989 * Note this does not disable interrupts like task_rq_lock, 1990 * you need to do so manually before calling. 1991 */ 1992 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 1993 __acquires(rq1->lock) 1994 __acquires(rq2->lock) 1995 { 1996 BUG_ON(!irqs_disabled()); 1997 if (rq1 == rq2) { 1998 raw_spin_lock(&rq1->lock); 1999 __acquire(rq2->lock); /* Fake it out ;) */ 2000 } else { 2001 if (rq1 < rq2) { 2002 raw_spin_lock(&rq1->lock); 2003 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 2004 } else { 2005 raw_spin_lock(&rq2->lock); 2006 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 2007 } 2008 } 2009 } 2010 2011 /* 2012 * double_rq_unlock - safely unlock two runqueues 2013 * 2014 * Note this does not restore interrupts like task_rq_unlock, 2015 * you need to do so manually after calling. 
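* The expected calling pattern around the pair is, e.g.: local_irq_disable(); double_rq_lock(rq1, rq2); ...move state between rq1 and rq2...; double_rq_unlock(rq1, rq2); local_irq_enable();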
static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}
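/*
 * Illustrative sketch, not part of the kernel: acquiring two runqueue locks
 * with double_rq_lock(). The locks are always taken in ascending rq-address
 * order, so two CPUs locking the same pair concurrently can never deadlock
 * ABBA-style. sched_example_lock_pair() is a hypothetical name.
 */
static inline void sched_example_lock_pair(struct rq *src, struct rq *dst)
{
	unsigned long flags;

	local_irq_save(flags);		/* double_rq_lock() requires IRQs off */
	double_rq_lock(src, dst);

	/* ... both rq->locks held: e.g. move accounting from src to dst ... */

	double_rq_unlock(src, dst);
	local_irq_restore(flags);
}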
extern void set_rq_online (struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern bool sched_debug_enabled;

extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
#define NOHZ_BALANCE_KICK_BIT	0
#define NOHZ_STATS_KICK_BIT	1

#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)

#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(struct rq *rq);
#else
static inline void nohz_balance_exit_idle(struct rq *rq) { }
#endif

#ifdef CONFIG_SMP
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
	int i;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask) {
		struct rq *rq = cpu_rq(i);

		rq->dl.extra_bw += bw;
	}
}
#else
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

	dl->extra_bw += bw;
}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64			total;
	u64			tick_delta;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted from it and would never move forward.
 */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
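/*
 * Illustrative sketch, not part of the kernel: irq_time_read() uses the
 * u64_stats seqcount retry loop above so that a 32-bit reader always sees
 * an untorn 64-bit value. A hypothetical consumer summing IRQ time across
 * all possible CPUs could look like this (sched_example_total_irq_time()
 * is not a real kernel API):
 */
static inline u64 sched_example_total_irq_time(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += irq_time_read(cpu);	/* consistent per-CPU snapshot */

	return sum;
}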
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because they may not be coming in if only RT tasks are
 * active all the time.
 *
 * As a workaround for that issue, this function is called periodically by the
 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid. Going forward it should be replaced with
 * solutions targeted more specifically at RT tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */

#ifdef arch_scale_freq_capacity
# ifndef arch_scale_freq_invariant
#  define arch_scale_freq_invariant()	true
# endif
#else
# define arch_scale_freq_invariant()	false
#endif

#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
/**
 * enum schedutil_type - CPU utilization type
 * @FREQUENCY_UTIL:	Utilization used to select frequency
 * @ENERGY_UTIL:	Utilization used during energy calculation
 *
 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
 * need to be aggregated differently depending on the usage made of them. This
 * enum is used within schedutil_freq_util() to differentiate the types of
 * utilization expected by the callers, and adjust the aggregation accordingly.
 */
enum schedutil_type {
	FREQUENCY_UTIL,
	ENERGY_UTIL,
};

unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
				  unsigned long max, enum schedutil_type type);

static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
{
	unsigned long max = arch_scale_cpu_capacity(NULL, cpu);

	return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL);
}

static inline unsigned long cpu_bw_dl(struct rq *rq)
{
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

static inline unsigned long cpu_util_dl(struct rq *rq)
{
	return READ_ONCE(rq->avg_dl.util_avg);
}

static inline unsigned long cpu_util_cfs(struct rq *rq)
{
	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

	if (sched_feat(UTIL_EST)) {
		util = max_t(unsigned long, util,
			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
	}

	return util;
}

static inline unsigned long cpu_util_rt(struct rq *rq)
{
	return READ_ONCE(rq->avg_rt.util_avg);
}
#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
{
	return cfs;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return rq->avg_irq.util_avg;
}

static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	util *= (max - irq);
	util /= max;

	return util;
}
#else
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return 0;
}

static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	return util;
}
#endif

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
#else
#define perf_domain_span(pd) NULL
#endif

#ifdef CONFIG_SMP
extern struct static_key_false sched_energy_present;
#endif
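/*
 * Illustrative sketch, not part of the kernel: one plausible way to combine
 * the per-class utilization helpers above, loosely modeled on the
 * ENERGY_UTIL-style aggregation performed by schedutil. Task-driven
 * utilization only accounts time the CPU was not serving IRQs, so it is
 * scaled by the IRQ pressure before the IRQ utilization itself is added
 * back on top. sched_example_cpu_total_util() is a hypothetical name.
 */
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
static inline unsigned long sched_example_cpu_total_util(struct rq *rq)
{
	unsigned long max = arch_scale_cpu_capacity(NULL, cpu_of(rq));
	unsigned long irq = cpu_util_irq(rq);
	unsigned long util;

	if (irq >= max)
		return max;	/* IRQs alone saturate the CPU */

	util = cpu_util_cfs(rq) + cpu_util_rt(rq) + cpu_util_dl(rq);

	/* util *= (max - irq) / max: discount the time stolen by IRQs */
	util = scale_irq_capacity(util, irq, max);

	return min(util + irq, max);
}
#endif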