1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Scheduler internal types and methods: 4 */ 5 #include <linux/sched.h> 6 7 #include <linux/sched/autogroup.h> 8 #include <linux/sched/clock.h> 9 #include <linux/sched/coredump.h> 10 #include <linux/sched/cpufreq.h> 11 #include <linux/sched/cputime.h> 12 #include <linux/sched/deadline.h> 13 #include <linux/sched/debug.h> 14 #include <linux/sched/hotplug.h> 15 #include <linux/sched/idle.h> 16 #include <linux/sched/init.h> 17 #include <linux/sched/isolation.h> 18 #include <linux/sched/jobctl.h> 19 #include <linux/sched/loadavg.h> 20 #include <linux/sched/mm.h> 21 #include <linux/sched/nohz.h> 22 #include <linux/sched/numa_balancing.h> 23 #include <linux/sched/prio.h> 24 #include <linux/sched/rt.h> 25 #include <linux/sched/signal.h> 26 #include <linux/sched/smt.h> 27 #include <linux/sched/stat.h> 28 #include <linux/sched/sysctl.h> 29 #include <linux/sched/task.h> 30 #include <linux/sched/task_stack.h> 31 #include <linux/sched/topology.h> 32 #include <linux/sched/user.h> 33 #include <linux/sched/wake_q.h> 34 #include <linux/sched/xacct.h> 35 36 #include <uapi/linux/sched/types.h> 37 38 #include <linux/binfmts.h> 39 #include <linux/blkdev.h> 40 #include <linux/compat.h> 41 #include <linux/context_tracking.h> 42 #include <linux/cpufreq.h> 43 #include <linux/cpuidle.h> 44 #include <linux/cpuset.h> 45 #include <linux/ctype.h> 46 #include <linux/debugfs.h> 47 #include <linux/delayacct.h> 48 #include <linux/energy_model.h> 49 #include <linux/init_task.h> 50 #include <linux/kprobes.h> 51 #include <linux/kthread.h> 52 #include <linux/membarrier.h> 53 #include <linux/migrate.h> 54 #include <linux/mmu_context.h> 55 #include <linux/nmi.h> 56 #include <linux/proc_fs.h> 57 #include <linux/prefetch.h> 58 #include <linux/profile.h> 59 #include <linux/psi.h> 60 #include <linux/rcupdate_wait.h> 61 #include <linux/security.h> 62 #include <linux/stop_machine.h> 63 #include <linux/suspend.h> 64 #include <linux/swait.h> 65 #include <linux/syscalls.h> 66 #include <linux/task_work.h> 67 #include <linux/tsacct_kern.h> 68 69 #include <asm/tlb.h> 70 71 #ifdef CONFIG_PARAVIRT 72 # include <asm/paravirt.h> 73 #endif 74 75 #include "cpupri.h" 76 #include "cpudeadline.h" 77 78 #ifdef CONFIG_SCHED_DEBUG 79 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) 80 #else 81 # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) 82 #endif 83 84 struct rq; 85 struct cpuidle_state; 86 87 /* task_struct::on_rq states: */ 88 #define TASK_ON_RQ_QUEUED 1 89 #define TASK_ON_RQ_MIGRATING 2 90 91 extern __read_mostly int scheduler_running; 92 93 extern unsigned long calc_load_update; 94 extern atomic_long_t calc_load_tasks; 95 96 extern void calc_global_load_tick(struct rq *this_rq); 97 extern long calc_load_fold_active(struct rq *this_rq, long adjust); 98 99 /* 100 * Helpers for converting nanosecond timing to jiffy resolution 101 */ 102 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) 103 104 /* 105 * Increase resolution of nice-level calculations for 64-bit architectures. 106 * The extra resolution improves shares distribution and load balancing of 107 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 108 * hierarchies, especially on larger systems. This is not a user-visible change 109 * and does not change the user-interface for setting shares/weights. 110 * 111 * We increase resolution only if we have enough bits to allow this increased 112 * resolution (i.e. 64-bit). 
The costs for increasing resolution when 32-bit 113 * are pretty high and the returns do not justify the increased costs. 114 * 115 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to 116 * increase coverage and consistency always enable it on 64-bit platforms. 117 */ 118 #ifdef CONFIG_64BIT 119 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) 120 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) 121 # define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT) 122 #else 123 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) 124 # define scale_load(w) (w) 125 # define scale_load_down(w) (w) 126 #endif 127 128 /* 129 * Task weight (visible to users) and its load (invisible to users) have 130 * independent resolution, but they should be well calibrated. We use 131 * scale_load() and scale_load_down(w) to convert between them. The 132 * following must be true: 133 * 134 * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD 135 * 136 */ 137 #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) 138 139 /* 140 * Single value that decides SCHED_DEADLINE internal math precision. 141 * 10 -> just above 1us 142 * 9 -> just above 0.5us 143 */ 144 #define DL_SCALE 10 145 146 /* 147 * Single value that denotes runtime == period, ie unlimited time. 148 */ 149 #define RUNTIME_INF ((u64)~0ULL) 150 151 static inline int idle_policy(int policy) 152 { 153 return policy == SCHED_IDLE; 154 } 155 static inline int fair_policy(int policy) 156 { 157 return policy == SCHED_NORMAL || policy == SCHED_BATCH; 158 } 159 160 static inline int rt_policy(int policy) 161 { 162 return policy == SCHED_FIFO || policy == SCHED_RR; 163 } 164 165 static inline int dl_policy(int policy) 166 { 167 return policy == SCHED_DEADLINE; 168 } 169 static inline bool valid_policy(int policy) 170 { 171 return idle_policy(policy) || fair_policy(policy) || 172 rt_policy(policy) || dl_policy(policy); 173 } 174 175 static inline int task_has_idle_policy(struct task_struct *p) 176 { 177 return idle_policy(p->policy); 178 } 179 180 static inline int task_has_rt_policy(struct task_struct *p) 181 { 182 return rt_policy(p->policy); 183 } 184 185 static inline int task_has_dl_policy(struct task_struct *p) 186 { 187 return dl_policy(p->policy); 188 } 189 190 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 191 192 /* 193 * !! For sched_setattr_nocheck() (kernel) only !! 194 * 195 * This is actually gross. :( 196 * 197 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE 198 * tasks, but still be able to sleep. We need this on platforms that cannot 199 * atomically change clock frequency. Remove once fast switching will be 200 * available on such platforms. 201 * 202 * SUGOV stands for SchedUtil GOVernor. 203 */ 204 #define SCHED_FLAG_SUGOV 0x10000000 205 206 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se) 207 { 208 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 209 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); 210 #else 211 return false; 212 #endif 213 } 214 215 /* 216 * Tells if entity @a should preempt entity @b. 
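 *
 * Ignoring the special SUGOV case, this is plain EDF: the entity with
 * the earlier absolute deadline wins. For example, with deadlines in
 * the rq clock domain (ns):
 *
 *	a->deadline = 100000, b->deadline = 200000
 *	dl_entity_preempt(a, b) -> true, @a may preempt @b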
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It can in turn be changed by writing to its own control.
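 *
 * As a worked example of the admission test below: with the default 95%
 * limit (dl_b->bw ~ 0.95 << BW_SHIFT) on a 4-CPU root domain,
 * __dl_overflow() rejects a change whenever
 *
 *	total_bw - old_bw + new_bw  >  4 * dl_b->bw
 *
 * i.e. whenever the admitted utilizations would exceed 3.8 CPUs worth
 * of bandwidth.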
267 */ 268 struct dl_bandwidth { 269 raw_spinlock_t dl_runtime_lock; 270 u64 dl_runtime; 271 u64 dl_period; 272 }; 273 274 static inline int dl_bandwidth_enabled(void) 275 { 276 return sysctl_sched_rt_runtime >= 0; 277 } 278 279 struct dl_bw { 280 raw_spinlock_t lock; 281 u64 bw; 282 u64 total_bw; 283 }; 284 285 static inline void __dl_update(struct dl_bw *dl_b, s64 bw); 286 287 static inline 288 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 289 { 290 dl_b->total_bw -= tsk_bw; 291 __dl_update(dl_b, (s32)tsk_bw / cpus); 292 } 293 294 static inline 295 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 296 { 297 dl_b->total_bw += tsk_bw; 298 __dl_update(dl_b, -((s32)tsk_bw / cpus)); 299 } 300 301 static inline 302 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw) 303 { 304 return dl_b->bw != -1 && 305 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw; 306 } 307 308 extern void dl_change_utilization(struct task_struct *p, u64 new_bw); 309 extern void init_dl_bw(struct dl_bw *dl_b); 310 extern int sched_dl_global_validate(void); 311 extern void sched_dl_do_global(void); 312 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); 313 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); 314 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); 315 extern bool __checkparam_dl(const struct sched_attr *attr); 316 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); 317 extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); 318 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 319 extern bool dl_cpu_busy(unsigned int cpu); 320 321 #ifdef CONFIG_CGROUP_SCHED 322 323 #include <linux/cgroup.h> 324 #include <linux/psi.h> 325 326 struct cfs_rq; 327 struct rt_rq; 328 329 extern struct list_head task_groups; 330 331 struct cfs_bandwidth { 332 #ifdef CONFIG_CFS_BANDWIDTH 333 raw_spinlock_t lock; 334 ktime_t period; 335 u64 quota; 336 u64 runtime; 337 s64 hierarchical_quota; 338 339 u8 idle; 340 u8 period_active; 341 u8 distribute_running; 342 u8 slack_started; 343 struct hrtimer period_timer; 344 struct hrtimer slack_timer; 345 struct list_head throttled_cfs_rq; 346 347 /* Statistics: */ 348 int nr_periods; 349 int nr_throttled; 350 u64 throttled_time; 351 #endif 352 }; 353 354 /* Task group related information */ 355 struct task_group { 356 struct cgroup_subsys_state css; 357 358 #ifdef CONFIG_FAIR_GROUP_SCHED 359 /* schedulable entities of this group on each CPU */ 360 struct sched_entity **se; 361 /* runqueue "owned" by this group on each CPU */ 362 struct cfs_rq **cfs_rq; 363 unsigned long shares; 364 365 #ifdef CONFIG_SMP 366 /* 367 * load_avg can be heavily contended at clock tick time, so put 368 * it in its own cacheline separated from the fields above which 369 * will also be accessed at each tick. 
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* The two decimal precision [%] value requested from user-space */
	unsigned int		uclamp_pct[UCLAMP_CNT];
	/* Clamp values requested for a task group */
	struct uclamp_se	uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a task group */
	struct uclamp_se	uclamp[UCLAMP_CNT];
#endif

};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL << 1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group
*parent); 466 extern void sched_destroy_group(struct task_group *tg); 467 extern void sched_offline_group(struct task_group *tg); 468 469 extern void sched_move_task(struct task_struct *tsk); 470 471 #ifdef CONFIG_FAIR_GROUP_SCHED 472 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 473 474 #ifdef CONFIG_SMP 475 extern void set_task_rq_fair(struct sched_entity *se, 476 struct cfs_rq *prev, struct cfs_rq *next); 477 #else /* !CONFIG_SMP */ 478 static inline void set_task_rq_fair(struct sched_entity *se, 479 struct cfs_rq *prev, struct cfs_rq *next) { } 480 #endif /* CONFIG_SMP */ 481 #endif /* CONFIG_FAIR_GROUP_SCHED */ 482 483 #else /* CONFIG_CGROUP_SCHED */ 484 485 struct cfs_bandwidth { }; 486 487 #endif /* CONFIG_CGROUP_SCHED */ 488 489 /* CFS-related fields in a runqueue */ 490 struct cfs_rq { 491 struct load_weight load; 492 unsigned long runnable_weight; 493 unsigned int nr_running; 494 unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ 495 unsigned int idle_h_nr_running; /* SCHED_IDLE */ 496 497 u64 exec_clock; 498 u64 min_vruntime; 499 #ifndef CONFIG_64BIT 500 u64 min_vruntime_copy; 501 #endif 502 503 struct rb_root_cached tasks_timeline; 504 505 /* 506 * 'curr' points to currently running entity on this cfs_rq. 507 * It is set to NULL otherwise (i.e when none are currently running). 508 */ 509 struct sched_entity *curr; 510 struct sched_entity *next; 511 struct sched_entity *last; 512 struct sched_entity *skip; 513 514 #ifdef CONFIG_SCHED_DEBUG 515 unsigned int nr_spread_over; 516 #endif 517 518 #ifdef CONFIG_SMP 519 /* 520 * CFS load tracking 521 */ 522 struct sched_avg avg; 523 #ifndef CONFIG_64BIT 524 u64 load_last_update_time_copy; 525 #endif 526 struct { 527 raw_spinlock_t lock ____cacheline_aligned; 528 int nr; 529 unsigned long load_avg; 530 unsigned long util_avg; 531 unsigned long runnable_sum; 532 } removed; 533 534 #ifdef CONFIG_FAIR_GROUP_SCHED 535 unsigned long tg_load_avg_contrib; 536 long propagate; 537 long prop_runnable_sum; 538 539 /* 540 * h_load = weight * f(tg) 541 * 542 * Where f(tg) is the recursive weight fraction assigned to 543 * this group. 544 */ 545 unsigned long h_load; 546 u64 last_h_load_update; 547 struct sched_entity *h_load_next; 548 #endif /* CONFIG_FAIR_GROUP_SCHED */ 549 #endif /* CONFIG_SMP */ 550 551 #ifdef CONFIG_FAIR_GROUP_SCHED 552 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ 553 554 /* 555 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 556 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 557 * (like users, containers etc.) 558 * 559 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. 560 * This list is used during load balance. 
561 */ 562 int on_list; 563 struct list_head leaf_cfs_rq_list; 564 struct task_group *tg; /* group that "owns" this runqueue */ 565 566 #ifdef CONFIG_CFS_BANDWIDTH 567 int runtime_enabled; 568 s64 runtime_remaining; 569 570 u64 throttled_clock; 571 u64 throttled_clock_task; 572 u64 throttled_clock_task_time; 573 int throttled; 574 int throttle_count; 575 struct list_head throttled_list; 576 #endif /* CONFIG_CFS_BANDWIDTH */ 577 #endif /* CONFIG_FAIR_GROUP_SCHED */ 578 }; 579 580 static inline int rt_bandwidth_enabled(void) 581 { 582 return sysctl_sched_rt_runtime >= 0; 583 } 584 585 /* RT IPI pull logic requires IRQ_WORK */ 586 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) 587 # define HAVE_RT_PUSH_IPI 588 #endif 589 590 /* Real-Time classes' related field in a runqueue: */ 591 struct rt_rq { 592 struct rt_prio_array active; 593 unsigned int rt_nr_running; 594 unsigned int rr_nr_running; 595 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 596 struct { 597 int curr; /* highest queued rt task prio */ 598 #ifdef CONFIG_SMP 599 int next; /* next highest */ 600 #endif 601 } highest_prio; 602 #endif 603 #ifdef CONFIG_SMP 604 unsigned long rt_nr_migratory; 605 unsigned long rt_nr_total; 606 int overloaded; 607 struct plist_head pushable_tasks; 608 609 #endif /* CONFIG_SMP */ 610 int rt_queued; 611 612 int rt_throttled; 613 u64 rt_time; 614 u64 rt_runtime; 615 /* Nests inside the rq lock: */ 616 raw_spinlock_t rt_runtime_lock; 617 618 #ifdef CONFIG_RT_GROUP_SCHED 619 unsigned long rt_nr_boosted; 620 621 struct rq *rq; 622 struct task_group *tg; 623 #endif 624 }; 625 626 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) 627 { 628 return rt_rq->rt_queued && rt_rq->rt_nr_running; 629 } 630 631 /* Deadline class' related fields in a runqueue */ 632 struct dl_rq { 633 /* runqueue is an rbtree, ordered by deadline */ 634 struct rb_root_cached root; 635 636 unsigned long dl_nr_running; 637 638 #ifdef CONFIG_SMP 639 /* 640 * Deadline values of the currently executing and the 641 * earliest ready task on this rq. Caching these facilitates 642 * the decision whether or not a ready but not running task 643 * should migrate somewhere else. 644 */ 645 struct { 646 u64 curr; 647 u64 next; 648 } earliest_dl; 649 650 unsigned long dl_nr_migratory; 651 int overloaded; 652 653 /* 654 * Tasks on this rq that can be pushed away. They are kept in 655 * an rb-tree, ordered by tasks' deadlines, with caching 656 * of the leftmost (earliest deadline) element. 657 */ 658 struct rb_root_cached pushable_dl_tasks_root; 659 #else 660 struct dl_bw dl_bw; 661 #endif 662 /* 663 * "Active utilization" for this runqueue: increased when a 664 * task wakes up (becomes TASK_RUNNING) and decreased when a 665 * task blocks 666 */ 667 u64 running_bw; 668 669 /* 670 * Utilization of the tasks "assigned" to this runqueue (including 671 * the tasks that are in runqueue and the tasks that executed on this 672 * CPU and blocked). Increased when a task moves to this runqueue, and 673 * decreased when the task moves away (migrates, changes scheduling 674 * policy, or terminates). 675 * This is needed to compute the "inactive utilization" for the 676 * runqueue (inactive utilization = this_bw - running_bw). 677 */ 678 u64 this_bw; 679 u64 extra_bw; 680 681 /* 682 * Inverse of the fraction of CPU utilization that can be reclaimed 683 * by the GRUB algorithm. 
684 */ 685 u64 bw_ratio; 686 }; 687 688 #ifdef CONFIG_FAIR_GROUP_SCHED 689 /* An entity is a task if it doesn't "own" a runqueue */ 690 #define entity_is_task(se) (!se->my_q) 691 #else 692 #define entity_is_task(se) 1 693 #endif 694 695 #ifdef CONFIG_SMP 696 /* 697 * XXX we want to get rid of these helpers and use the full load resolution. 698 */ 699 static inline long se_weight(struct sched_entity *se) 700 { 701 return scale_load_down(se->load.weight); 702 } 703 704 static inline long se_runnable(struct sched_entity *se) 705 { 706 return scale_load_down(se->runnable_weight); 707 } 708 709 static inline bool sched_asym_prefer(int a, int b) 710 { 711 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); 712 } 713 714 struct perf_domain { 715 struct em_perf_domain *em_pd; 716 struct perf_domain *next; 717 struct rcu_head rcu; 718 }; 719 720 /* Scheduling group status flags */ 721 #define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ 722 #define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ 723 724 /* 725 * We add the notion of a root-domain which will be used to define per-domain 726 * variables. Each exclusive cpuset essentially defines an island domain by 727 * fully partitioning the member CPUs from any other cpuset. Whenever a new 728 * exclusive cpuset is created, we also create and attach a new root-domain 729 * object. 730 * 731 */ 732 struct root_domain { 733 atomic_t refcount; 734 atomic_t rto_count; 735 struct rcu_head rcu; 736 cpumask_var_t span; 737 cpumask_var_t online; 738 739 /* 740 * Indicate pullable load on at least one CPU, e.g: 741 * - More than one runnable task 742 * - Running task is misfit 743 */ 744 int overload; 745 746 /* Indicate one or more cpus over-utilized (tipping point) */ 747 int overutilized; 748 749 /* 750 * The bit corresponding to a CPU gets set here if such CPU has more 751 * than one runnable -deadline task (as it is below for RT tasks). 752 */ 753 cpumask_var_t dlo_mask; 754 atomic_t dlo_count; 755 struct dl_bw dl_bw; 756 struct cpudl cpudl; 757 758 #ifdef HAVE_RT_PUSH_IPI 759 /* 760 * For IPI pull requests, loop across the rto_mask. 761 */ 762 struct irq_work rto_push_work; 763 raw_spinlock_t rto_lock; 764 /* These are only updated and read within rto_lock */ 765 int rto_loop; 766 int rto_cpu; 767 /* These atomics are updated outside of a lock */ 768 atomic_t rto_loop_next; 769 atomic_t rto_loop_start; 770 #endif 771 /* 772 * The "RT overload" flag: it gets set if a CPU has more than 773 * one runnable RT task. 774 */ 775 cpumask_var_t rto_mask; 776 struct cpupri cpupri; 777 778 unsigned long max_cpu_capacity; 779 780 /* 781 * NULL-terminated list of performance domains intersecting with the 782 * CPUs of the rd. Protected by RCU. 783 */ 784 struct perf_domain __rcu *pd; 785 }; 786 787 extern void init_defrootdomain(void); 788 extern int sched_init_domains(const struct cpumask *cpu_map); 789 extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 790 extern void sched_get_rd(struct root_domain *rd); 791 extern void sched_put_rd(struct root_domain *rd); 792 793 #ifdef HAVE_RT_PUSH_IPI 794 extern void rto_push_irq_work_func(struct irq_work *work); 795 #endif 796 #endif /* CONFIG_SMP */ 797 798 #ifdef CONFIG_UCLAMP_TASK 799 /* 800 * struct uclamp_bucket - Utilization clamp bucket 801 * @value: utilization clamp value for tasks on this clamp bucket 802 * @tasks: number of RUNNABLE tasks on this clamp bucket 803 * 804 * Keep track of how many tasks are RUNNABLE for a given utilization 805 * clamp value. 
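 *
 * With SCHED_CAPACITY_SCALE == 1024, bits_per(SCHED_CAPACITY_SCALE) is 11,
 * so on a 64-bit machine a bucket packs into a single unsigned long as:
 *
 *	value : 11 bits (enough for clamp values 0..1024)
 *	tasks : 53 bits (refcount of RUNNABLE tasks in this bucket)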
806 */ 807 struct uclamp_bucket { 808 unsigned long value : bits_per(SCHED_CAPACITY_SCALE); 809 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); 810 }; 811 812 /* 813 * struct uclamp_rq - rq's utilization clamp 814 * @value: currently active clamp values for a rq 815 * @bucket: utilization clamp buckets affecting a rq 816 * 817 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. 818 * A clamp value is affecting a rq when there is at least one task RUNNABLE 819 * (or actually running) with that value. 820 * 821 * There are up to UCLAMP_CNT possible different clamp values, currently there 822 * are only two: minimum utilization and maximum utilization. 823 * 824 * All utilization clamping values are MAX aggregated, since: 825 * - for util_min: we want to run the CPU at least at the max of the minimum 826 * utilization required by its currently RUNNABLE tasks. 827 * - for util_max: we want to allow the CPU to run up to the max of the 828 * maximum utilization allowed by its currently RUNNABLE tasks. 829 * 830 * Since on each system we expect only a limited number of different 831 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track 832 * the metrics required to compute all the per-rq utilization clamp values. 833 */ 834 struct uclamp_rq { 835 unsigned int value; 836 struct uclamp_bucket bucket[UCLAMP_BUCKETS]; 837 }; 838 #endif /* CONFIG_UCLAMP_TASK */ 839 840 /* 841 * This is the main, per-CPU runqueue data structure. 842 * 843 * Locking rule: those places that want to lock multiple runqueues 844 * (such as the load balancing or the thread migration code), lock 845 * acquire operations must be ordered by ascending &runqueue. 846 */ 847 struct rq { 848 /* runqueue lock: */ 849 raw_spinlock_t lock; 850 851 /* 852 * nr_running and cpu_load should be in the same cacheline because 853 * remote CPUs use both these fields when doing load calculation. 854 */ 855 unsigned int nr_running; 856 #ifdef CONFIG_NUMA_BALANCING 857 unsigned int nr_numa_running; 858 unsigned int nr_preferred_running; 859 unsigned int numa_migrate_on; 860 #endif 861 #ifdef CONFIG_NO_HZ_COMMON 862 #ifdef CONFIG_SMP 863 unsigned long last_load_update_tick; 864 unsigned long last_blocked_load_update_tick; 865 unsigned int has_blocked_load; 866 #endif /* CONFIG_SMP */ 867 unsigned int nohz_tick_stopped; 868 atomic_t nohz_flags; 869 #endif /* CONFIG_NO_HZ_COMMON */ 870 871 unsigned long nr_load_updates; 872 u64 nr_switches; 873 874 #ifdef CONFIG_UCLAMP_TASK 875 /* Utilization clamp values based on CPU's RUNNABLE tasks */ 876 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; 877 unsigned int uclamp_flags; 878 #define UCLAMP_FLAG_IDLE 0x01 879 #endif 880 881 struct cfs_rq cfs; 882 struct rt_rq rt; 883 struct dl_rq dl; 884 885 #ifdef CONFIG_FAIR_GROUP_SCHED 886 /* list of leaf cfs_rq on this CPU: */ 887 struct list_head leaf_cfs_rq_list; 888 struct list_head *tmp_alone_branch; 889 #endif /* CONFIG_FAIR_GROUP_SCHED */ 890 891 /* 892 * This is part of a global counter where only the total sum 893 * over all CPUs matters. A task can increase this counter on 894 * one CPU and if it got migrated afterwards it may decrease 895 * it on another CPU. 
Always updated under the runqueue lock: 896 */ 897 unsigned long nr_uninterruptible; 898 899 struct task_struct *curr; 900 struct task_struct *idle; 901 struct task_struct *stop; 902 unsigned long next_balance; 903 struct mm_struct *prev_mm; 904 905 unsigned int clock_update_flags; 906 u64 clock; 907 /* Ensure that all clocks are in the same cache line */ 908 u64 clock_task ____cacheline_aligned; 909 u64 clock_pelt; 910 unsigned long lost_idle_time; 911 912 atomic_t nr_iowait; 913 914 #ifdef CONFIG_MEMBARRIER 915 int membarrier_state; 916 #endif 917 918 #ifdef CONFIG_SMP 919 struct root_domain *rd; 920 struct sched_domain __rcu *sd; 921 922 unsigned long cpu_capacity; 923 unsigned long cpu_capacity_orig; 924 925 struct callback_head *balance_callback; 926 927 unsigned char idle_balance; 928 929 unsigned long misfit_task_load; 930 931 /* For active balancing */ 932 int active_balance; 933 int push_cpu; 934 struct cpu_stop_work active_balance_work; 935 936 /* CPU of this runqueue: */ 937 int cpu; 938 int online; 939 940 struct list_head cfs_tasks; 941 942 struct sched_avg avg_rt; 943 struct sched_avg avg_dl; 944 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 945 struct sched_avg avg_irq; 946 #endif 947 u64 idle_stamp; 948 u64 avg_idle; 949 950 /* This is used to determine avg_idle's max value */ 951 u64 max_idle_balance_cost; 952 #endif 953 954 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 955 u64 prev_irq_time; 956 #endif 957 #ifdef CONFIG_PARAVIRT 958 u64 prev_steal_time; 959 #endif 960 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 961 u64 prev_steal_time_rq; 962 #endif 963 964 /* calc_load related fields */ 965 unsigned long calc_load_update; 966 long calc_load_active; 967 968 #ifdef CONFIG_SCHED_HRTICK 969 #ifdef CONFIG_SMP 970 int hrtick_csd_pending; 971 call_single_data_t hrtick_csd; 972 #endif 973 struct hrtimer hrtick_timer; 974 #endif 975 976 #ifdef CONFIG_SCHEDSTATS 977 /* latency stats */ 978 struct sched_info rq_sched_info; 979 unsigned long long rq_cpu_time; 980 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
 */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head	wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU lock section */
	struct cpuidle_state	*idle_state;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

#else

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}


#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

extern void update_rq_clock(struct rq *rq);

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
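	 *
	 * The expected read-side pattern is, roughly:
	 *
	 *	rq_lock(rq, &rf);
	 *	update_rq_clock(rq);
	 *	... use rq_clock(rq) / rq_clock_task(rq) ...
	 *	rq_unlock(rq, &rf);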
1090 */ 1091 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1092 } 1093 1094 static inline u64 rq_clock(struct rq *rq) 1095 { 1096 lockdep_assert_held(&rq->lock); 1097 assert_clock_updated(rq); 1098 1099 return rq->clock; 1100 } 1101 1102 static inline u64 rq_clock_task(struct rq *rq) 1103 { 1104 lockdep_assert_held(&rq->lock); 1105 assert_clock_updated(rq); 1106 1107 return rq->clock_task; 1108 } 1109 1110 static inline void rq_clock_skip_update(struct rq *rq) 1111 { 1112 lockdep_assert_held(&rq->lock); 1113 rq->clock_update_flags |= RQCF_REQ_SKIP; 1114 } 1115 1116 /* 1117 * See rt task throttling, which is the only time a skip 1118 * request is cancelled. 1119 */ 1120 static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1121 { 1122 lockdep_assert_held(&rq->lock); 1123 rq->clock_update_flags &= ~RQCF_REQ_SKIP; 1124 } 1125 1126 struct rq_flags { 1127 unsigned long flags; 1128 struct pin_cookie cookie; 1129 #ifdef CONFIG_SCHED_DEBUG 1130 /* 1131 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1132 * current pin context is stashed here in case it needs to be 1133 * restored in rq_repin_lock(). 1134 */ 1135 unsigned int clock_update_flags; 1136 #endif 1137 }; 1138 1139 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1140 { 1141 rf->cookie = lockdep_pin_lock(&rq->lock); 1142 1143 #ifdef CONFIG_SCHED_DEBUG 1144 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1145 rf->clock_update_flags = 0; 1146 #endif 1147 } 1148 1149 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1150 { 1151 #ifdef CONFIG_SCHED_DEBUG 1152 if (rq->clock_update_flags > RQCF_ACT_SKIP) 1153 rf->clock_update_flags = RQCF_UPDATED; 1154 #endif 1155 1156 lockdep_unpin_lock(&rq->lock, rf->cookie); 1157 } 1158 1159 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1160 { 1161 lockdep_repin_lock(&rq->lock, rf->cookie); 1162 1163 #ifdef CONFIG_SCHED_DEBUG 1164 /* 1165 * Restore the value we stashed in @rf for this pin context. 
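	 *
	 * A typical caller drops and re-takes rq->lock around a section
	 * that cannot run with it held, e.g. (sketch):
	 *
	 *	rq_unpin_lock(rq, &rf);
	 *	raw_spin_unlock(&rq->lock);
	 *	...
	 *	raw_spin_lock(&rq->lock);
	 *	rq_repin_lock(rq, &rf);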
1166 */ 1167 rq->clock_update_flags |= rf->clock_update_flags; 1168 #endif 1169 } 1170 1171 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1172 __acquires(rq->lock); 1173 1174 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1175 __acquires(p->pi_lock) 1176 __acquires(rq->lock); 1177 1178 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 1179 __releases(rq->lock) 1180 { 1181 rq_unpin_lock(rq, rf); 1182 raw_spin_unlock(&rq->lock); 1183 } 1184 1185 static inline void 1186 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1187 __releases(rq->lock) 1188 __releases(p->pi_lock) 1189 { 1190 rq_unpin_lock(rq, rf); 1191 raw_spin_unlock(&rq->lock); 1192 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 1193 } 1194 1195 static inline void 1196 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 1197 __acquires(rq->lock) 1198 { 1199 raw_spin_lock_irqsave(&rq->lock, rf->flags); 1200 rq_pin_lock(rq, rf); 1201 } 1202 1203 static inline void 1204 rq_lock_irq(struct rq *rq, struct rq_flags *rf) 1205 __acquires(rq->lock) 1206 { 1207 raw_spin_lock_irq(&rq->lock); 1208 rq_pin_lock(rq, rf); 1209 } 1210 1211 static inline void 1212 rq_lock(struct rq *rq, struct rq_flags *rf) 1213 __acquires(rq->lock) 1214 { 1215 raw_spin_lock(&rq->lock); 1216 rq_pin_lock(rq, rf); 1217 } 1218 1219 static inline void 1220 rq_relock(struct rq *rq, struct rq_flags *rf) 1221 __acquires(rq->lock) 1222 { 1223 raw_spin_lock(&rq->lock); 1224 rq_repin_lock(rq, rf); 1225 } 1226 1227 static inline void 1228 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 1229 __releases(rq->lock) 1230 { 1231 rq_unpin_lock(rq, rf); 1232 raw_spin_unlock_irqrestore(&rq->lock, rf->flags); 1233 } 1234 1235 static inline void 1236 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 1237 __releases(rq->lock) 1238 { 1239 rq_unpin_lock(rq, rf); 1240 raw_spin_unlock_irq(&rq->lock); 1241 } 1242 1243 static inline void 1244 rq_unlock(struct rq *rq, struct rq_flags *rf) 1245 __releases(rq->lock) 1246 { 1247 rq_unpin_lock(rq, rf); 1248 raw_spin_unlock(&rq->lock); 1249 } 1250 1251 static inline struct rq * 1252 this_rq_lock_irq(struct rq_flags *rf) 1253 __acquires(rq->lock) 1254 { 1255 struct rq *rq; 1256 1257 local_irq_disable(); 1258 rq = this_rq(); 1259 rq_lock(rq, rf); 1260 return rq; 1261 } 1262 1263 #ifdef CONFIG_NUMA 1264 enum numa_topology_type { 1265 NUMA_DIRECT, 1266 NUMA_GLUELESS_MESH, 1267 NUMA_BACKPLANE, 1268 }; 1269 extern enum numa_topology_type sched_numa_topology_type; 1270 extern int sched_max_numa_distance; 1271 extern bool find_numa_distance(int distance); 1272 extern void sched_init_numa(void); 1273 extern void sched_domains_numa_masks_set(unsigned int cpu); 1274 extern void sched_domains_numa_masks_clear(unsigned int cpu); 1275 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1276 #else 1277 static inline void sched_init_numa(void) { } 1278 static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1279 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1280 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1281 { 1282 return nr_cpu_ids; 1283 } 1284 #endif 1285 1286 #ifdef CONFIG_NUMA_BALANCING 1287 /* The regions in numa_faults array from task_struct */ 1288 enum numa_faults_stats { 1289 NUMA_MEM = 0, 1290 NUMA_CPU, 1291 NUMA_MEMBUF, 1292 NUMA_CPUBUF 1293 }; 1294 extern void sched_setnuma(struct task_struct *p, int node); 1295 extern int migrate_task_to(struct 
task_struct *p, int cpu); 1296 extern int migrate_swap(struct task_struct *p, struct task_struct *t, 1297 int cpu, int scpu); 1298 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 1299 #else 1300 static inline void 1301 init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 1302 { 1303 } 1304 #endif /* CONFIG_NUMA_BALANCING */ 1305 1306 #ifdef CONFIG_SMP 1307 1308 static inline void 1309 queue_balance_callback(struct rq *rq, 1310 struct callback_head *head, 1311 void (*func)(struct rq *rq)) 1312 { 1313 lockdep_assert_held(&rq->lock); 1314 1315 if (unlikely(head->next)) 1316 return; 1317 1318 head->func = (void (*)(struct callback_head *))func; 1319 head->next = rq->balance_callback; 1320 rq->balance_callback = head; 1321 } 1322 1323 extern void sched_ttwu_pending(void); 1324 1325 #define rcu_dereference_check_sched_domain(p) \ 1326 rcu_dereference_check((p), \ 1327 lockdep_is_held(&sched_domains_mutex)) 1328 1329 /* 1330 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1331 * See destroy_sched_domains: call_rcu for details. 1332 * 1333 * The domain tree of any CPU may only be accessed from within 1334 * preempt-disabled sections. 1335 */ 1336 #define for_each_domain(cpu, __sd) \ 1337 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1338 __sd; __sd = __sd->parent) 1339 1340 #define for_each_lower_domain(sd) for (; sd; sd = sd->child) 1341 1342 /** 1343 * highest_flag_domain - Return highest sched_domain containing flag. 1344 * @cpu: The CPU whose highest level of sched domain is to 1345 * be returned. 1346 * @flag: The flag to check for the highest sched_domain 1347 * for the given CPU. 1348 * 1349 * Returns the highest sched_domain of a CPU which contains the given flag. 1350 */ 1351 static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1352 { 1353 struct sched_domain *sd, *hsd = NULL; 1354 1355 for_each_domain(cpu, sd) { 1356 if (!(sd->flags & flag)) 1357 break; 1358 hsd = sd; 1359 } 1360 1361 return hsd; 1362 } 1363 1364 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1365 { 1366 struct sched_domain *sd; 1367 1368 for_each_domain(cpu, sd) { 1369 if (sd->flags & flag) 1370 break; 1371 } 1372 1373 return sd; 1374 } 1375 1376 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 1377 DECLARE_PER_CPU(int, sd_llc_size); 1378 DECLARE_PER_CPU(int, sd_llc_id); 1379 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1380 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1381 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1382 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1383 extern struct static_key_false sched_asym_cpucapacity; 1384 1385 struct sched_group_capacity { 1386 atomic_t ref; 1387 /* 1388 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 1389 * for a single CPU. 
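	 *
	 * e.g. a group spanning four CPUs running at full capacity has
	 * capacity == 4 * SCHED_CAPACITY_SCALE == 4096, while four CPUs
	 * rated at half capacity sum to 2048.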
	 */
	unsigned long		capacity;
	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
	unsigned long		max_capacity;		/* Max per-CPU capacity in group */
	unsigned long		next_update;
	int			imbalance;		/* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int			id;
#endif

	unsigned long		cpumask[0];		/* Balance mask */
};

struct sched_group {
	struct sched_group	*next;			/* Must be a circular list */
	atomic_t		ref;

	unsigned int		group_weight;
	struct sched_group_capacity *sgc;
	int			asym_prefer_cpu;	/* CPU of highest priority in group */

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long		cpumask[0];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
 * @group: The group whose first CPU is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_span(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void dirty_sched_domain_sysctl(int cpu);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);

#else

static inline void sched_ttwu_pending(void) { }

static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
1489 */ 1490 static inline struct task_group *task_group(struct task_struct *p) 1491 { 1492 return p->sched_task_group; 1493 } 1494 1495 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1496 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1497 { 1498 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1499 struct task_group *tg = task_group(p); 1500 #endif 1501 1502 #ifdef CONFIG_FAIR_GROUP_SCHED 1503 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1504 p->se.cfs_rq = tg->cfs_rq[cpu]; 1505 p->se.parent = tg->se[cpu]; 1506 #endif 1507 1508 #ifdef CONFIG_RT_GROUP_SCHED 1509 p->rt.rt_rq = tg->rt_rq[cpu]; 1510 p->rt.parent = tg->rt_se[cpu]; 1511 #endif 1512 } 1513 1514 #else /* CONFIG_CGROUP_SCHED */ 1515 1516 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 1517 static inline struct task_group *task_group(struct task_struct *p) 1518 { 1519 return NULL; 1520 } 1521 1522 #endif /* CONFIG_CGROUP_SCHED */ 1523 1524 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1525 { 1526 set_task_rq(p, cpu); 1527 #ifdef CONFIG_SMP 1528 /* 1529 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1530 * successfully executed on another CPU. We must ensure that updates of 1531 * per-task data have been completed by this moment. 1532 */ 1533 smp_wmb(); 1534 #ifdef CONFIG_THREAD_INFO_IN_TASK 1535 WRITE_ONCE(p->cpu, cpu); 1536 #else 1537 WRITE_ONCE(task_thread_info(p)->cpu, cpu); 1538 #endif 1539 p->wake_cpu = cpu; 1540 #endif 1541 } 1542 1543 /* 1544 * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1545 */ 1546 #ifdef CONFIG_SCHED_DEBUG 1547 # include <linux/static_key.h> 1548 # define const_debug __read_mostly 1549 #else 1550 # define const_debug const 1551 #endif 1552 1553 #define SCHED_FEAT(name, enabled) \ 1554 __SCHED_FEAT_##name , 1555 1556 enum { 1557 #include "features.h" 1558 __SCHED_FEAT_NR, 1559 }; 1560 1561 #undef SCHED_FEAT 1562 1563 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) 1564 1565 /* 1566 * To support run-time toggling of sched features, all the translation units 1567 * (but core.c) reference the sysctl_sched_features defined in core.c. 1568 */ 1569 extern const_debug unsigned int sysctl_sched_features; 1570 1571 #define SCHED_FEAT(name, enabled) \ 1572 static __always_inline bool static_branch_##name(struct static_key *key) \ 1573 { \ 1574 return static_key_##enabled(key); \ 1575 } 1576 1577 #include "features.h" 1578 #undef SCHED_FEAT 1579 1580 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1581 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1582 1583 #else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ 1584 1585 /* 1586 * Each translation unit has its own copy of sysctl_sched_features to allow 1587 * constants propagation at compile time and compiler optimization based on 1588 * features default. 
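 *
 * For instance, if features.h contained only
 *
 *	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 *	SCHED_FEAT(START_DEBIT, true)
 *
 * the initializer below would expand to
 *
 *	(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *	(1UL << __SCHED_FEAT_START_DEBIT) * true |
 *	0;
 *
 * giving each translation unit a compile-time constant to fold
 * sched_feat() against.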
1589 */ 1590 #define SCHED_FEAT(name, enabled) \ 1591 (1UL << __SCHED_FEAT_##name) * enabled | 1592 static const_debug __maybe_unused unsigned int sysctl_sched_features = 1593 #include "features.h" 1594 0; 1595 #undef SCHED_FEAT 1596 1597 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1598 1599 #endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ 1600 1601 extern struct static_key_false sched_numa_balancing; 1602 extern struct static_key_false sched_schedstats; 1603 1604 static inline u64 global_rt_period(void) 1605 { 1606 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1607 } 1608 1609 static inline u64 global_rt_runtime(void) 1610 { 1611 if (sysctl_sched_rt_runtime < 0) 1612 return RUNTIME_INF; 1613 1614 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 1615 } 1616 1617 static inline int task_current(struct rq *rq, struct task_struct *p) 1618 { 1619 return rq->curr == p; 1620 } 1621 1622 static inline int task_running(struct rq *rq, struct task_struct *p) 1623 { 1624 #ifdef CONFIG_SMP 1625 return p->on_cpu; 1626 #else 1627 return task_current(rq, p); 1628 #endif 1629 } 1630 1631 static inline int task_on_rq_queued(struct task_struct *p) 1632 { 1633 return p->on_rq == TASK_ON_RQ_QUEUED; 1634 } 1635 1636 static inline int task_on_rq_migrating(struct task_struct *p) 1637 { 1638 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 1639 } 1640 1641 /* 1642 * wake flags 1643 */ 1644 #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ 1645 #define WF_FORK 0x02 /* Child wakeup after fork */ 1646 #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ 1647 1648 /* 1649 * To aid in avoiding the subversion of "niceness" due to uneven distribution 1650 * of tasks with abnormal "nice" values across CPUs the contribution that 1651 * each task makes to its run queue's load is weighted according to its 1652 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 1653 * scaled version of the new time slice allocation that they receive on time 1654 * slice expiry etc. 1655 */ 1656 1657 #define WEIGHT_IDLEPRIO 3 1658 #define WMULT_IDLEPRIO 1431655765 1659 1660 extern const int sched_prio_to_weight[40]; 1661 extern const u32 sched_prio_to_wmult[40]; 1662 1663 /* 1664 * {de,en}queue flags: 1665 * 1666 * DEQUEUE_SLEEP - task is no longer runnable 1667 * ENQUEUE_WAKEUP - task just became runnable 1668 * 1669 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 1670 * are in a known state which allows modification. Such pairs 1671 * should preserve as much state as possible. 1672 * 1673 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 1674 * in the runqueue. 
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04
#define ENQUEUE_NOCLOCK		0x08

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

#ifdef CONFIG_UCLAMP_TASK
	int uclamp_enabled;
#endif

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task)   (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	struct task_struct *(*pick_next_task)(struct rq *rq);

	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

#ifdef CONFIG_SMP
	int  (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork)(struct task_struct *p);
	void (*task_dead)(struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
1743 */ 1744 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 1745 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 1746 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1747 int oldprio); 1748 1749 unsigned int (*get_rr_interval)(struct rq *rq, 1750 struct task_struct *task); 1751 1752 void (*update_curr)(struct rq *rq); 1753 1754 #define TASK_SET_GROUP 0 1755 #define TASK_MOVE_GROUP 1 1756 1757 #ifdef CONFIG_FAIR_GROUP_SCHED 1758 void (*task_change_group)(struct task_struct *p, int type); 1759 #endif 1760 }; 1761 1762 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 1763 { 1764 WARN_ON_ONCE(rq->curr != prev); 1765 prev->sched_class->put_prev_task(rq, prev); 1766 } 1767 1768 static inline void set_next_task(struct rq *rq, struct task_struct *next) 1769 { 1770 WARN_ON_ONCE(rq->curr != next); 1771 next->sched_class->set_next_task(rq, next, false); 1772 } 1773 1774 #ifdef CONFIG_SMP 1775 #define sched_class_highest (&stop_sched_class) 1776 #else 1777 #define sched_class_highest (&dl_sched_class) 1778 #endif 1779 1780 #define for_class_range(class, _from, _to) \ 1781 for (class = (_from); class != (_to); class = class->next) 1782 1783 #define for_each_class(class) \ 1784 for_class_range(class, sched_class_highest, NULL) 1785 1786 extern const struct sched_class stop_sched_class; 1787 extern const struct sched_class dl_sched_class; 1788 extern const struct sched_class rt_sched_class; 1789 extern const struct sched_class fair_sched_class; 1790 extern const struct sched_class idle_sched_class; 1791 1792 static inline bool sched_stop_runnable(struct rq *rq) 1793 { 1794 return rq->stop && task_on_rq_queued(rq->stop); 1795 } 1796 1797 static inline bool sched_dl_runnable(struct rq *rq) 1798 { 1799 return rq->dl.dl_nr_running > 0; 1800 } 1801 1802 static inline bool sched_rt_runnable(struct rq *rq) 1803 { 1804 return rq->rt.rt_queued > 0; 1805 } 1806 1807 static inline bool sched_fair_runnable(struct rq *rq) 1808 { 1809 return rq->cfs.nr_running > 0; 1810 } 1811 1812 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 1813 extern struct task_struct *pick_next_task_idle(struct rq *rq); 1814 1815 #ifdef CONFIG_SMP 1816 1817 extern void update_group_capacity(struct sched_domain *sd, int cpu); 1818 1819 extern void trigger_load_balance(struct rq *rq); 1820 1821 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); 1822 1823 #endif 1824 1825 #ifdef CONFIG_CPU_IDLE 1826 static inline void idle_set_state(struct rq *rq, 1827 struct cpuidle_state *idle_state) 1828 { 1829 rq->idle_state = idle_state; 1830 } 1831 1832 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1833 { 1834 SCHED_WARN_ON(!rcu_read_lock_held()); 1835 1836 return rq->idle_state; 1837 } 1838 #else 1839 static inline void idle_set_state(struct rq *rq, 1840 struct cpuidle_state *idle_state) 1841 { 1842 } 1843 1844 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1845 { 1846 return NULL; 1847 } 1848 #endif 1849 1850 extern void schedule_idle(void); 1851 1852 extern void sysrq_sched_debug_show(void); 1853 extern void sched_init_granularity(void); 1854 extern void update_max_interval(void); 1855 1856 extern void init_sched_dl_class(void); 1857 extern void init_sched_rt_class(void); 1858 extern void init_sched_fair_class(void); 1859 1860 extern void reweight_task(struct task_struct *p, int prio); 1861 1862 extern void resched_curr(struct 
rq *rq); 1863 extern void resched_cpu(int cpu); 1864 1865 extern struct rt_bandwidth def_rt_bandwidth; 1866 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 1867 1868 extern struct dl_bandwidth def_dl_bandwidth; 1869 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 1870 extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 1871 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 1872 extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); 1873 1874 #define BW_SHIFT 20 1875 #define BW_UNIT (1 << BW_SHIFT) 1876 #define RATIO_SHIFT 8 1877 unsigned long to_ratio(u64 period, u64 runtime); 1878 1879 extern void init_entity_runnable_average(struct sched_entity *se); 1880 extern void post_init_entity_util_avg(struct task_struct *p); 1881 1882 #ifdef CONFIG_NO_HZ_FULL 1883 extern bool sched_can_stop_tick(struct rq *rq); 1884 extern int __init sched_tick_offload_init(void); 1885 1886 /* 1887 * Tick may be needed by tasks in the runqueue depending on their policy and 1888 * requirements. If tick is needed, lets send the target an IPI to kick it out of 1889 * nohz mode if necessary. 1890 */ 1891 static inline void sched_update_tick_dependency(struct rq *rq) 1892 { 1893 int cpu; 1894 1895 if (!tick_nohz_full_enabled()) 1896 return; 1897 1898 cpu = cpu_of(rq); 1899 1900 if (!tick_nohz_full_cpu(cpu)) 1901 return; 1902 1903 if (sched_can_stop_tick(rq)) 1904 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 1905 else 1906 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 1907 } 1908 #else 1909 static inline int sched_tick_offload_init(void) { return 0; } 1910 static inline void sched_update_tick_dependency(struct rq *rq) { } 1911 #endif 1912 1913 static inline void add_nr_running(struct rq *rq, unsigned count) 1914 { 1915 unsigned prev_nr = rq->nr_running; 1916 1917 rq->nr_running = prev_nr + count; 1918 1919 #ifdef CONFIG_SMP 1920 if (prev_nr < 2 && rq->nr_running >= 2) { 1921 if (!READ_ONCE(rq->rd->overload)) 1922 WRITE_ONCE(rq->rd->overload, 1); 1923 } 1924 #endif 1925 1926 sched_update_tick_dependency(rq); 1927 } 1928 1929 static inline void sub_nr_running(struct rq *rq, unsigned count) 1930 { 1931 rq->nr_running -= count; 1932 /* Check if we still need preemption */ 1933 sched_update_tick_dependency(rq); 1934 } 1935 1936 extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 1937 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 1938 1939 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 1940 1941 extern const_debug unsigned int sysctl_sched_nr_migrate; 1942 extern const_debug unsigned int sysctl_sched_migration_cost; 1943 1944 #ifdef CONFIG_SCHED_HRTICK 1945 1946 /* 1947 * Use hrtick when: 1948 * - enabled by features 1949 * - hrtimer is actually high res 1950 */ 1951 static inline int hrtick_enabled(struct rq *rq) 1952 { 1953 if (!sched_feat(HRTICK)) 1954 return 0; 1955 if (!cpu_active(cpu_of(rq))) 1956 return 0; 1957 return hrtimer_is_hres_active(&rq->hrtick_timer); 1958 } 1959 1960 void hrtick_start(struct rq *rq, u64 delay); 1961 1962 #else 1963 1964 static inline int hrtick_enabled(struct rq *rq) 1965 { 1966 return 0; 1967 } 1968 1969 #endif /* CONFIG_SCHED_HRTICK */ 1970 1971 #ifndef arch_scale_freq_capacity 1972 static __always_inline 1973 unsigned long arch_scale_freq_capacity(int cpu) 1974 { 1975 return SCHED_CAPACITY_SCALE; 1976 } 1977 #endif 1978 1979 #ifdef CONFIG_SMP 1980 #ifdef CONFIG_PREEMPTION 1981 1982 static 
inline void double_rq_lock(struct rq *rq1, struct rq *rq2); 1983 1984 /* 1985 * fair double_lock_balance: Safely acquires both rq->locks in a fair 1986 * way at the expense of forcing extra atomic operations in all 1987 * invocations. This assures that the double_lock is acquired using the 1988 * same underlying policy as the spinlock_t on this architecture, which 1989 * reduces latency compared to the unfair variant below. However, it 1990 * also adds more overhead and therefore may reduce throughput. 1991 */ 1992 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 1993 __releases(this_rq->lock) 1994 __acquires(busiest->lock) 1995 __acquires(this_rq->lock) 1996 { 1997 raw_spin_unlock(&this_rq->lock); 1998 double_rq_lock(this_rq, busiest); 1999 2000 return 1; 2001 } 2002 2003 #else 2004 /* 2005 * Unfair double_lock_balance: Optimizes throughput at the expense of 2006 * latency by eliminating extra atomic operations when the locks are 2007 * already in proper order on entry. This favors lower CPU-ids and will 2008 * grant the double lock to lower CPUs over higher ids under contention, 2009 * regardless of entry order into the function. 2010 */ 2011 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2012 __releases(this_rq->lock) 2013 __acquires(busiest->lock) 2014 __acquires(this_rq->lock) 2015 { 2016 int ret = 0; 2017 2018 if (unlikely(!raw_spin_trylock(&busiest->lock))) { 2019 if (busiest < this_rq) { 2020 raw_spin_unlock(&this_rq->lock); 2021 raw_spin_lock(&busiest->lock); 2022 raw_spin_lock_nested(&this_rq->lock, 2023 SINGLE_DEPTH_NESTING); 2024 ret = 1; 2025 } else 2026 raw_spin_lock_nested(&busiest->lock, 2027 SINGLE_DEPTH_NESTING); 2028 } 2029 return ret; 2030 } 2031 2032 #endif /* CONFIG_PREEMPTION */ 2033 2034 /* 2035 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 2036 */ 2037 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2038 { 2039 if (unlikely(!irqs_disabled())) { 2040 /* printk() doesn't work well under rq->lock */ 2041 raw_spin_unlock(&this_rq->lock); 2042 BUG_ON(1); 2043 } 2044 2045 return _double_lock_balance(this_rq, busiest); 2046 } 2047 2048 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2049 __releases(busiest->lock) 2050 { 2051 raw_spin_unlock(&busiest->lock); 2052 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 2053 } 2054 2055 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 2056 { 2057 if (l1 > l2) 2058 swap(l1, l2); 2059 2060 spin_lock(l1); 2061 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2062 } 2063 2064 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 2065 { 2066 if (l1 > l2) 2067 swap(l1, l2); 2068 2069 spin_lock_irq(l1); 2070 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2071 } 2072 2073 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2074 { 2075 if (l1 > l2) 2076 swap(l1, l2); 2077 2078 raw_spin_lock(l1); 2079 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2080 } 2081 2082 /* 2083 * double_rq_lock - safely lock two runqueues 2084 * 2085 * Note this does not disable interrupts like task_rq_lock, 2086 * you need to do so manually before calling. 
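 *
 * A minimal usage sketch (illustrative only, not lifted from a particular
 * caller; the middle step stands in for whatever work needs both locks):
 *
 *	local_irq_disable();
 *	double_rq_lock(rq1, rq2);
 *	... move or inspect tasks on rq1 and rq2 ...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_enable();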
2087 */ 2088 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2089 __acquires(rq1->lock) 2090 __acquires(rq2->lock) 2091 { 2092 BUG_ON(!irqs_disabled()); 2093 if (rq1 == rq2) { 2094 raw_spin_lock(&rq1->lock); 2095 __acquire(rq2->lock); /* Fake it out ;) */ 2096 } else { 2097 if (rq1 < rq2) { 2098 raw_spin_lock(&rq1->lock); 2099 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 2100 } else { 2101 raw_spin_lock(&rq2->lock); 2102 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 2103 } 2104 } 2105 } 2106 2107 /* 2108 * double_rq_unlock - safely unlock two runqueues 2109 * 2110 * Note this does not restore interrupts like task_rq_unlock, 2111 * you need to do so manually after calling. 2112 */ 2113 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2114 __releases(rq1->lock) 2115 __releases(rq2->lock) 2116 { 2117 raw_spin_unlock(&rq1->lock); 2118 if (rq1 != rq2) 2119 raw_spin_unlock(&rq2->lock); 2120 else 2121 __release(rq2->lock); 2122 } 2123 2124 extern void set_rq_online (struct rq *rq); 2125 extern void set_rq_offline(struct rq *rq); 2126 extern bool sched_smp_initialized; 2127 2128 #else /* CONFIG_SMP */ 2129 2130 /* 2131 * double_rq_lock - safely lock two runqueues 2132 * 2133 * Note this does not disable interrupts like task_rq_lock, 2134 * you need to do so manually before calling. 2135 */ 2136 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2137 __acquires(rq1->lock) 2138 __acquires(rq2->lock) 2139 { 2140 BUG_ON(!irqs_disabled()); 2141 BUG_ON(rq1 != rq2); 2142 raw_spin_lock(&rq1->lock); 2143 __acquire(rq2->lock); /* Fake it out ;) */ 2144 } 2145 2146 /* 2147 * double_rq_unlock - safely unlock two runqueues 2148 * 2149 * Note this does not restore interrupts like task_rq_unlock, 2150 * you need to do so manually after calling. 
2151 */ 2152 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2153 __releases(rq1->lock) 2154 __releases(rq2->lock) 2155 { 2156 BUG_ON(rq1 != rq2); 2157 raw_spin_unlock(&rq1->lock); 2158 __release(rq2->lock); 2159 } 2160 2161 #endif 2162 2163 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2164 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 2165 2166 #ifdef CONFIG_SCHED_DEBUG 2167 extern bool sched_debug_enabled; 2168 2169 extern void print_cfs_stats(struct seq_file *m, int cpu); 2170 extern void print_rt_stats(struct seq_file *m, int cpu); 2171 extern void print_dl_stats(struct seq_file *m, int cpu); 2172 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2173 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2174 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2175 #ifdef CONFIG_NUMA_BALANCING 2176 extern void 2177 show_numa_stats(struct task_struct *p, struct seq_file *m); 2178 extern void 2179 print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2180 unsigned long tpf, unsigned long gsf, unsigned long gpf); 2181 #endif /* CONFIG_NUMA_BALANCING */ 2182 #endif /* CONFIG_SCHED_DEBUG */ 2183 2184 extern void init_cfs_rq(struct cfs_rq *cfs_rq); 2185 extern void init_rt_rq(struct rt_rq *rt_rq); 2186 extern void init_dl_rq(struct dl_rq *dl_rq); 2187 2188 extern void cfs_bandwidth_usage_inc(void); 2189 extern void cfs_bandwidth_usage_dec(void); 2190 2191 #ifdef CONFIG_NO_HZ_COMMON 2192 #define NOHZ_BALANCE_KICK_BIT 0 2193 #define NOHZ_STATS_KICK_BIT 1 2194 2195 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2196 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2197 2198 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) 2199 2200 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 2201 2202 extern void nohz_balance_exit_idle(struct rq *rq); 2203 #else 2204 static inline void nohz_balance_exit_idle(struct rq *rq) { } 2205 #endif 2206 2207 2208 #ifdef CONFIG_SMP 2209 static inline 2210 void __dl_update(struct dl_bw *dl_b, s64 bw) 2211 { 2212 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 2213 int i; 2214 2215 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2216 "sched RCU must be held"); 2217 for_each_cpu_and(i, rd->span, cpu_active_mask) { 2218 struct rq *rq = cpu_rq(i); 2219 2220 rq->dl.extra_bw += bw; 2221 } 2222 } 2223 #else 2224 static inline 2225 void __dl_update(struct dl_bw *dl_b, s64 bw) 2226 { 2227 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 2228 2229 dl->extra_bw += bw; 2230 } 2231 #endif 2232 2233 2234 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 2235 struct irqtime { 2236 u64 total; 2237 u64 tick_delta; 2238 u64 irq_start_time; 2239 struct u64_stats_sync sync; 2240 }; 2241 2242 DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 2243 2244 /* 2245 * Returns the irqtime minus the softirq time computed by ksoftirqd. 2246 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime 2247 * subtracted from it and would never move forward.
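 *
 * A typical consumer keeps the previously read value and works with deltas,
 * roughly along these lines (sketch only; update_rq_clock_task() does
 * something similar with rq->prev_irq_time):
 *
 *	u64 now = irq_time_read(cpu_of(rq));
 *	u64 irq_delta = now - rq->prev_irq_time;
 *
 *	rq->prev_irq_time = now;
 *	... charge irq_delta against the rq clock ...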
2248 */ 2249 static inline u64 irq_time_read(int cpu) 2250 { 2251 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 2252 unsigned int seq; 2253 u64 total; 2254 2255 do { 2256 seq = __u64_stats_fetch_begin(&irqtime->sync); 2257 total = irqtime->total; 2258 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 2259 2260 return total; 2261 } 2262 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2263 2264 #ifdef CONFIG_CPU_FREQ 2265 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2266 2267 /** 2268 * cpufreq_update_util - Take a note about CPU utilization changes. 2269 * @rq: Runqueue to carry out the update for. 2270 * @flags: Update reason flags. 2271 * 2272 * This function is called by the scheduler on the CPU whose utilization is 2273 * being updated. 2274 * 2275 * It can only be called from RCU-sched read-side critical sections. 2276 * 2277 * The way cpufreq is currently arranged requires it to evaluate the CPU 2278 * performance state (frequency/voltage) on a regular basis to prevent it from 2279 * being stuck in a completely inadequate performance level for too long. 2280 * That is not guaranteed to happen if the updates are only triggered from CFS 2281 * and DL, though, because they may not be coming in if only RT tasks are 2282 * active all the time. 2283 * 2284 * As a workaround for that issue, this function is called periodically by the 2285 * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2286 * but that really is a band-aid. Going forward it should be replaced with 2287 * solutions targeted more specifically at RT tasks. 2288 */ 2289 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2290 { 2291 struct update_util_data *data; 2292 2293 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2294 cpu_of(rq))); 2295 if (data) 2296 data->func(data, rq_clock(rq), flags); 2297 } 2298 #else 2299 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2300 #endif /* CONFIG_CPU_FREQ */ 2301 2302 #ifdef CONFIG_UCLAMP_TASK 2303 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 2304 2305 static __always_inline 2306 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 2307 struct task_struct *p) 2308 { 2309 unsigned long min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); 2310 unsigned long max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); 2311 2312 if (p) { 2313 min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN)); 2314 max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX)); 2315 } 2316 2317 /* 2318 * Since CPU's {min,max}_util clamps are MAX aggregated considering 2319 * RUNNABLE tasks with _different_ clamps, we can end up with an 2320 * inversion. Fix it now when the clamps are applied.
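	 *
	 * For example (made-up numbers): if the aggregation yields
	 * min_util = 512 and max_util = 256, the clamp range is empty; the
	 * check below returns the boost value (512) rather than clamping
	 * @util into a nonsensical [512, 256] interval.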
2321 */ 2322 if (unlikely(min_util >= max_util)) 2323 return min_util; 2324 2325 return clamp(util, min_util, max_util); 2326 } 2327 #else /* CONFIG_UCLAMP_TASK */ 2328 static inline 2329 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 2330 struct task_struct *p) 2331 { 2332 return util; 2333 } 2334 #endif /* CONFIG_UCLAMP_TASK */ 2335 2336 #ifdef arch_scale_freq_capacity 2337 # ifndef arch_scale_freq_invariant 2338 # define arch_scale_freq_invariant() true 2339 # endif 2340 #else 2341 # define arch_scale_freq_invariant() false 2342 #endif 2343 2344 #ifdef CONFIG_SMP 2345 static inline unsigned long capacity_orig_of(int cpu) 2346 { 2347 return cpu_rq(cpu)->cpu_capacity_orig; 2348 } 2349 #endif 2350 2351 /** 2352 * enum schedutil_type - CPU utilization type 2353 * @FREQUENCY_UTIL: Utilization used to select frequency 2354 * @ENERGY_UTIL: Utilization used during energy calculation 2355 * 2356 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 2357 * need to be aggregated differently depending on the usage made of them. This 2358 * enum is used within schedutil_cpu_util() to differentiate the types of 2359 * utilization expected by the callers, and adjust the aggregation accordingly. 2360 */ 2361 enum schedutil_type { 2362 FREQUENCY_UTIL, 2363 ENERGY_UTIL, 2364 }; 2365 2366 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 2367 2368 unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2369 unsigned long max, enum schedutil_type type, 2370 struct task_struct *p); 2371 2372 static inline unsigned long cpu_bw_dl(struct rq *rq) 2373 { 2374 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2375 } 2376 2377 static inline unsigned long cpu_util_dl(struct rq *rq) 2378 { 2379 return READ_ONCE(rq->avg_dl.util_avg); 2380 } 2381 2382 static inline unsigned long cpu_util_cfs(struct rq *rq) 2383 { 2384 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); 2385 2386 if (sched_feat(UTIL_EST)) { 2387 util = max_t(unsigned long, util, 2388 READ_ONCE(rq->cfs.avg.util_est.enqueued)); 2389 } 2390 2391 return util; 2392 } 2393 2394 static inline unsigned long cpu_util_rt(struct rq *rq) 2395 { 2396 return READ_ONCE(rq->avg_rt.util_avg); 2397 } 2398 #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2399 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2400 unsigned long max, enum schedutil_type type, 2401 struct task_struct *p) 2402 { 2403 return 0; 2404 } 2405 #endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2406 2407 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 2408 static inline unsigned long cpu_util_irq(struct rq *rq) 2409 { 2410 return rq->avg_irq.util_avg; 2411 } 2412 2413 static inline 2414 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2415 { 2416 util *= (max - irq); 2417 util /= max; 2418 2419 return util; 2420 2421 } 2422 #else 2423 static inline unsigned long cpu_util_irq(struct rq *rq) 2424 { 2425 return 0; 2426 } 2427 2428 static inline 2429 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2430 { 2431 return util; 2432 } 2433 #endif 2434 2435 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 2436 2437 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 2438 2439 DECLARE_STATIC_KEY_FALSE(sched_energy_present); 2440 2441 static inline bool sched_energy_enabled(void) 2442 { 2443 return static_branch_unlikely(&sched_energy_present); 2444 } 2445 2446 #else /* !
(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 2447 2448 #define perf_domain_span(pd) NULL 2449 static inline bool sched_energy_enabled(void) { return false; } 2450 2451 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2452 2453 #ifdef CONFIG_MEMBARRIER 2454 /* 2455 * The scheduler provides memory barriers required by membarrier between: 2456 * - prior user-space memory accesses and store to rq->membarrier_state, 2457 * - store to rq->membarrier_state and following user-space memory accesses. 2458 * In the same way it provides those guarantees around store to rq->curr. 2459 */ 2460 static inline void membarrier_switch_mm(struct rq *rq, 2461 struct mm_struct *prev_mm, 2462 struct mm_struct *next_mm) 2463 { 2464 int membarrier_state; 2465 2466 if (prev_mm == next_mm) 2467 return; 2468 2469 membarrier_state = atomic_read(&next_mm->membarrier_state); 2470 if (READ_ONCE(rq->membarrier_state) == membarrier_state) 2471 return; 2472 2473 WRITE_ONCE(rq->membarrier_state, membarrier_state); 2474 } 2475 #else 2476 static inline void membarrier_switch_mm(struct rq *rq, 2477 struct mm_struct *prev_mm, 2478 struct mm_struct *next_mm) 2479 { 2480 } 2481 #endif 2482
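
/*
 * Illustrative sketch, not actual kernel code: a rough approximation of how
 * the per-class utilization helpers above can be combined when estimating the
 * utilization that drives frequency selection for a CPU, assuming an SMP
 * configuration with schedutil and IRQ-time averaging enabled.
 * schedutil_cpu_util() is the real implementation and additionally handles
 * saturation, the FREQUENCY_UTIL vs ENERGY_UTIL distinction and the exact
 * treatment of DL; this only shows which signal comes from where.
 *
 *	static unsigned long example_frequency_util(int cpu)
 *	{
 *		struct rq *rq = cpu_rq(cpu);
 *		unsigned long max = capacity_orig_of(cpu);
 *		unsigned long util, irq;
 *
 *		irq = cpu_util_irq(rq);
 *		if (irq >= max)
 *			return max;
 *
 *		util = cpu_util_cfs(rq) + cpu_util_rt(rq);
 *		util = uclamp_rq_util_with(rq, util, NULL);
 *
 *		util = scale_irq_capacity(util, irq, max);
 *		util += irq;
 *
 *		util += cpu_bw_dl(rq);
 *
 *		return min(util, max);
 *	}
 */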