1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Scheduler internal types and methods: 4 */ 5 #include <linux/sched.h> 6 7 #include <linux/sched/autogroup.h> 8 #include <linux/sched/clock.h> 9 #include <linux/sched/coredump.h> 10 #include <linux/sched/cpufreq.h> 11 #include <linux/sched/cputime.h> 12 #include <linux/sched/deadline.h> 13 #include <linux/sched/debug.h> 14 #include <linux/sched/hotplug.h> 15 #include <linux/sched/idle.h> 16 #include <linux/sched/init.h> 17 #include <linux/sched/isolation.h> 18 #include <linux/sched/jobctl.h> 19 #include <linux/sched/loadavg.h> 20 #include <linux/sched/mm.h> 21 #include <linux/sched/nohz.h> 22 #include <linux/sched/numa_balancing.h> 23 #include <linux/sched/prio.h> 24 #include <linux/sched/rt.h> 25 #include <linux/sched/signal.h> 26 #include <linux/sched/smt.h> 27 #include <linux/sched/stat.h> 28 #include <linux/sched/sysctl.h> 29 #include <linux/sched/task.h> 30 #include <linux/sched/task_stack.h> 31 #include <linux/sched/topology.h> 32 #include <linux/sched/user.h> 33 #include <linux/sched/wake_q.h> 34 #include <linux/sched/xacct.h> 35 36 #include <uapi/linux/sched/types.h> 37 38 #include <linux/binfmts.h> 39 #include <linux/bitops.h> 40 #include <linux/blkdev.h> 41 #include <linux/compat.h> 42 #include <linux/context_tracking.h> 43 #include <linux/cpufreq.h> 44 #include <linux/cpuidle.h> 45 #include <linux/cpuset.h> 46 #include <linux/ctype.h> 47 #include <linux/debugfs.h> 48 #include <linux/delayacct.h> 49 #include <linux/energy_model.h> 50 #include <linux/init_task.h> 51 #include <linux/kprobes.h> 52 #include <linux/kthread.h> 53 #include <linux/membarrier.h> 54 #include <linux/migrate.h> 55 #include <linux/mmu_context.h> 56 #include <linux/nmi.h> 57 #include <linux/proc_fs.h> 58 #include <linux/prefetch.h> 59 #include <linux/profile.h> 60 #include <linux/psi.h> 61 #include <linux/ratelimit.h> 62 #include <linux/rcupdate_wait.h> 63 #include <linux/security.h> 64 #include <linux/stop_machine.h> 65 #include <linux/suspend.h> 66 #include <linux/swait.h> 67 #include <linux/syscalls.h> 68 #include <linux/task_work.h> 69 #include <linux/tsacct_kern.h> 70 71 #include <asm/tlb.h> 72 73 #ifdef CONFIG_PARAVIRT 74 # include <asm/paravirt.h> 75 #endif 76 77 #include "cpupri.h" 78 #include "cpudeadline.h" 79 80 #include <trace/events/sched.h> 81 82 #ifdef CONFIG_SCHED_DEBUG 83 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) 84 #else 85 # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) 86 #endif 87 88 struct rq; 89 struct cpuidle_state; 90 91 /* task_struct::on_rq states: */ 92 #define TASK_ON_RQ_QUEUED 1 93 #define TASK_ON_RQ_MIGRATING 2 94 95 extern __read_mostly int scheduler_running; 96 97 extern unsigned long calc_load_update; 98 extern atomic_long_t calc_load_tasks; 99 100 extern void calc_global_load_tick(struct rq *this_rq); 101 extern long calc_load_fold_active(struct rq *this_rq, long adjust); 102 103 extern void call_trace_sched_update_nr_running(struct rq *rq, int count); 104 /* 105 * Helpers for converting nanosecond timing to jiffy resolution 106 */ 107 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) 108 109 /* 110 * Increase resolution of nice-level calculations for 64-bit architectures. 111 * The extra resolution improves shares distribution and load balancing of 112 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 113 * hierarchies, especially on larger systems. This is not a user-visible change 114 * and does not change the user-interface for setting shares/weights. 
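 *
 * (Illustrative example, using the in-tree SCHED_FIXEDPOINT_SHIFT of 10:
 *  on 64-bit, NICE_0_LOAD_SHIFT is 20, so NICE_0_LOAD == 1 << 20 and
 *  scale_load(1024) == 1024 << 10 == 1048576 == NICE_0_LOAD, while
 *  scale_load_down() undoes the shift; on 32-bit both helpers are no-ops
 *  and NICE_0_LOAD stays at 1024.)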
115 * 116 * We increase resolution only if we have enough bits to allow this increased 117 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit 118 * are pretty high and the returns do not justify the increased costs. 119 * 120 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to 121 * increase coverage and consistency always enable it on 64-bit platforms. 122 */ 123 #ifdef CONFIG_64BIT 124 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) 125 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) 126 # define scale_load_down(w) \ 127 ({ \ 128 unsigned long __w = (w); \ 129 if (__w) \ 130 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ 131 __w; \ 132 }) 133 #else 134 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) 135 # define scale_load(w) (w) 136 # define scale_load_down(w) (w) 137 #endif 138 139 /* 140 * Task weight (visible to users) and its load (invisible to users) have 141 * independent resolution, but they should be well calibrated. We use 142 * scale_load() and scale_load_down(w) to convert between them. The 143 * following must be true: 144 * 145 * scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD 146 * 147 */ 148 #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) 149 150 /* 151 * Single value that decides SCHED_DEADLINE internal math precision. 152 * 10 -> just above 1us 153 * 9 -> just above 0.5us 154 */ 155 #define DL_SCALE 10 156 157 /* 158 * Single value that denotes runtime == period, ie unlimited time. 159 */ 160 #define RUNTIME_INF ((u64)~0ULL) 161 162 static inline int idle_policy(int policy) 163 { 164 return policy == SCHED_IDLE; 165 } 166 static inline int fair_policy(int policy) 167 { 168 return policy == SCHED_NORMAL || policy == SCHED_BATCH; 169 } 170 171 static inline int rt_policy(int policy) 172 { 173 return policy == SCHED_FIFO || policy == SCHED_RR; 174 } 175 176 static inline int dl_policy(int policy) 177 { 178 return policy == SCHED_DEADLINE; 179 } 180 static inline bool valid_policy(int policy) 181 { 182 return idle_policy(policy) || fair_policy(policy) || 183 rt_policy(policy) || dl_policy(policy); 184 } 185 186 static inline int task_has_idle_policy(struct task_struct *p) 187 { 188 return idle_policy(p->policy); 189 } 190 191 static inline int task_has_rt_policy(struct task_struct *p) 192 { 193 return rt_policy(p->policy); 194 } 195 196 static inline int task_has_dl_policy(struct task_struct *p) 197 { 198 return dl_policy(p->policy); 199 } 200 201 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 202 203 static inline void update_avg(u64 *avg, u64 sample) 204 { 205 s64 diff = sample - *avg; 206 *avg += diff / 8; 207 } 208 209 /* 210 * Shifting a value by an exponent greater *or equal* to the size of said value 211 * is UB; cap at size-1. 212 */ 213 #define shr_bound(val, shift) \ 214 (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1)) 215 216 /* 217 * !! For sched_setattr_nocheck() (kernel) only !! 218 * 219 * This is actually gross. :( 220 * 221 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE 222 * tasks, but still be able to sleep. We need this on platforms that cannot 223 * atomically change clock frequency. Remove once fast switching will be 224 * available on such platforms. 225 * 226 * SUGOV stands for SchedUtil GOVernor. 
227 */ 228 #define SCHED_FLAG_SUGOV 0x10000000 229 230 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se) 231 { 232 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 233 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); 234 #else 235 return false; 236 #endif 237 } 238 239 /* 240 * Tells if entity @a should preempt entity @b. 241 */ 242 static inline bool 243 dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b) 244 { 245 return dl_entity_is_special(a) || 246 dl_time_before(a->deadline, b->deadline); 247 } 248 249 /* 250 * This is the priority-queue data structure of the RT scheduling class: 251 */ 252 struct rt_prio_array { 253 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ 254 struct list_head queue[MAX_RT_PRIO]; 255 }; 256 257 struct rt_bandwidth { 258 /* nests inside the rq lock: */ 259 raw_spinlock_t rt_runtime_lock; 260 ktime_t rt_period; 261 u64 rt_runtime; 262 struct hrtimer rt_period_timer; 263 unsigned int rt_period_active; 264 }; 265 266 void __dl_clear_params(struct task_struct *p); 267 268 struct dl_bandwidth { 269 raw_spinlock_t dl_runtime_lock; 270 u64 dl_runtime; 271 u64 dl_period; 272 }; 273 274 static inline int dl_bandwidth_enabled(void) 275 { 276 return sysctl_sched_rt_runtime >= 0; 277 } 278 279 /* 280 * To keep the bandwidth of -deadline tasks under control 281 * we need some place where we: 282 * - store the maximum -deadline bandwidth of each CPU; 283 * - cache the fraction of bandwidth that is currently allocated in 284 * each root domain; 285 * 286 * This is all done in the data structure below. It is similar to the 287 * one used for RT-throttling (rt_bandwidth), with the main difference 288 * that, since here we are only interested in admission control, we 289 * do not decrease any runtime while the group "executes", nor do we 290 * need a timer to replenish it. 291 * 292 * With respect to SMP, bandwidth is given on a per root domain basis, 293 * meaning that: 294 * - bw (< 100%) is the deadline bandwidth of each CPU; 295 * - total_bw is the currently allocated bandwidth in each root domain; 296 */ 297 struct dl_bw { 298 raw_spinlock_t lock; 299 u64 bw; 300 u64 total_bw; 301 }; 302 303 static inline void __dl_update(struct dl_bw *dl_b, s64 bw); 304 305 static inline 306 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 307 { 308 dl_b->total_bw -= tsk_bw; 309 __dl_update(dl_b, (s32)tsk_bw / cpus); 310 } 311 312 static inline 313 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 314 { 315 dl_b->total_bw += tsk_bw; 316 __dl_update(dl_b, -((s32)tsk_bw / cpus)); 317 } 318 319 static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap, 320 u64 old_bw, u64 new_bw) 321 { 322 return dl_b->bw != -1 && 323 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw; 324 } 325 326 /* 327 * Verify the fitness of task @p to run on @cpu taking into account the 328 * CPU original capacity and the runtime/deadline ratio of the task. 329 * 330 * The function will return true if the CPU original capacity of the 331 * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the 332 * task and false otherwise.
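 *
 * (Worked example with assumed numbers: a task with dl_runtime = 3ms and
 *  dl_deadline = 10ms needs roughly 30% of a full-capacity CPU. On a CPU
 *  with arch_scale_cpu_capacity() == 512, half of SCHED_CAPACITY_SCALE,
 *  cap_scale(10ms, 512) == 5ms >= 3ms, so the task fits; with a capacity
 *  of 256 the scaled deadline would be 2.5ms < 3ms and it would not.)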
333 */ 334 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) 335 { 336 unsigned long cap = arch_scale_cpu_capacity(cpu); 337 338 return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime; 339 } 340 341 extern void init_dl_bw(struct dl_bw *dl_b); 342 extern int sched_dl_global_validate(void); 343 extern void sched_dl_do_global(void); 344 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); 345 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); 346 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); 347 extern bool __checkparam_dl(const struct sched_attr *attr); 348 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); 349 extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); 350 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 351 extern bool dl_cpu_busy(unsigned int cpu); 352 353 #ifdef CONFIG_CGROUP_SCHED 354 355 #include <linux/cgroup.h> 356 #include <linux/psi.h> 357 358 struct cfs_rq; 359 struct rt_rq; 360 361 extern struct list_head task_groups; 362 363 struct cfs_bandwidth { 364 #ifdef CONFIG_CFS_BANDWIDTH 365 raw_spinlock_t lock; 366 ktime_t period; 367 u64 quota; 368 u64 runtime; 369 u64 burst; 370 s64 hierarchical_quota; 371 372 u8 idle; 373 u8 period_active; 374 u8 slack_started; 375 struct hrtimer period_timer; 376 struct hrtimer slack_timer; 377 struct list_head throttled_cfs_rq; 378 379 /* Statistics: */ 380 int nr_periods; 381 int nr_throttled; 382 u64 throttled_time; 383 #endif 384 }; 385 386 /* Task group related information */ 387 struct task_group { 388 struct cgroup_subsys_state css; 389 390 #ifdef CONFIG_FAIR_GROUP_SCHED 391 /* schedulable entities of this group on each CPU */ 392 struct sched_entity **se; 393 /* runqueue "owned" by this group on each CPU */ 394 struct cfs_rq **cfs_rq; 395 unsigned long shares; 396 397 #ifdef CONFIG_SMP 398 /* 399 * load_avg can be heavily contended at clock tick time, so put 400 * it in its own cacheline separated from the fields above which 401 * will also be accessed at each tick. 402 */ 403 atomic_long_t load_avg ____cacheline_aligned; 404 #endif 405 #endif 406 407 #ifdef CONFIG_RT_GROUP_SCHED 408 struct sched_rt_entity **rt_se; 409 struct rt_rq **rt_rq; 410 411 struct rt_bandwidth rt_bandwidth; 412 #endif 413 414 struct rcu_head rcu; 415 struct list_head list; 416 417 struct task_group *parent; 418 struct list_head siblings; 419 struct list_head children; 420 421 #ifdef CONFIG_SCHED_AUTOGROUP 422 struct autogroup *autogroup; 423 #endif 424 425 struct cfs_bandwidth cfs_bandwidth; 426 427 #ifdef CONFIG_UCLAMP_TASK_GROUP 428 /* The two decimal precision [%] value requested from user-space */ 429 unsigned int uclamp_pct[UCLAMP_CNT]; 430 /* Clamp values requested for a task group */ 431 struct uclamp_se uclamp_req[UCLAMP_CNT]; 432 /* Effective clamp values used for a task group */ 433 struct uclamp_se uclamp[UCLAMP_CNT]; 434 #endif 435 436 }; 437 438 #ifdef CONFIG_FAIR_GROUP_SCHED 439 #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD 440 441 /* 442 * A weight of 0 or 1 can cause arithmetic problems. 443 * The weight of a cfs_rq is the sum of the weights of the entities 444 * queued on it, so the weight of a single entity should not be too 445 * large; the same holds for the shares value of a task group. 446 * (The default weight is 1024 - so there's no practical 447 * limitation from this.)
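 *
 * (For instance, a cgroup shares value written as 1 is effectively raised
 *  to MIN_SHARES (2), and values above MAX_SHARES (1 << 18 == 262144) are
 *  capped, before/after the scale_load() conversion above.)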
448 */ 449 #define MIN_SHARES (1UL << 1) 450 #define MAX_SHARES (1UL << 18) 451 #endif 452 453 typedef int (*tg_visitor)(struct task_group *, void *); 454 455 extern int walk_tg_tree_from(struct task_group *from, 456 tg_visitor down, tg_visitor up, void *data); 457 458 /* 459 * Iterate the full tree, calling @down when first entering a node and @up when 460 * leaving it for the final time. 461 * 462 * Caller must hold rcu_lock or sufficient equivalent. 463 */ 464 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) 465 { 466 return walk_tg_tree_from(&root_task_group, down, up, data); 467 } 468 469 extern int tg_nop(struct task_group *tg, void *data); 470 471 extern void free_fair_sched_group(struct task_group *tg); 472 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); 473 extern void online_fair_sched_group(struct task_group *tg); 474 extern void unregister_fair_sched_group(struct task_group *tg); 475 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 476 struct sched_entity *se, int cpu, 477 struct sched_entity *parent); 478 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 479 480 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); 481 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 482 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); 483 484 extern void free_rt_sched_group(struct task_group *tg); 485 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); 486 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, 487 struct sched_rt_entity *rt_se, int cpu, 488 struct sched_rt_entity *parent); 489 extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); 490 extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); 491 extern long sched_group_rt_runtime(struct task_group *tg); 492 extern long sched_group_rt_period(struct task_group *tg); 493 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 494 495 extern struct task_group *sched_create_group(struct task_group *parent); 496 extern void sched_online_group(struct task_group *tg, 497 struct task_group *parent); 498 extern void sched_destroy_group(struct task_group *tg); 499 extern void sched_offline_group(struct task_group *tg); 500 501 extern void sched_move_task(struct task_struct *tsk); 502 503 #ifdef CONFIG_FAIR_GROUP_SCHED 504 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 505 506 #ifdef CONFIG_SMP 507 extern void set_task_rq_fair(struct sched_entity *se, 508 struct cfs_rq *prev, struct cfs_rq *next); 509 #else /* !CONFIG_SMP */ 510 static inline void set_task_rq_fair(struct sched_entity *se, 511 struct cfs_rq *prev, struct cfs_rq *next) { } 512 #endif /* CONFIG_SMP */ 513 #endif /* CONFIG_FAIR_GROUP_SCHED */ 514 515 #else /* CONFIG_CGROUP_SCHED */ 516 517 struct cfs_bandwidth { }; 518 519 #endif /* CONFIG_CGROUP_SCHED */ 520 521 /* CFS-related fields in a runqueue */ 522 struct cfs_rq { 523 struct load_weight load; 524 unsigned int nr_running; 525 unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ 526 unsigned int idle_h_nr_running; /* SCHED_IDLE */ 527 528 u64 exec_clock; 529 u64 min_vruntime; 530 #ifdef CONFIG_SCHED_CORE 531 unsigned int forceidle_seq; 532 u64 min_vruntime_fi; 533 #endif 534 535 #ifndef CONFIG_64BIT 536 u64 min_vruntime_copy; 537 #endif 538 539 struct rb_root_cached tasks_timeline; 540 541 /* 542 * 'curr' points to currently 
running entity on this cfs_rq. 543 * It is set to NULL otherwise (i.e when none are currently running). 544 */ 545 struct sched_entity *curr; 546 struct sched_entity *next; 547 struct sched_entity *last; 548 struct sched_entity *skip; 549 550 #ifdef CONFIG_SCHED_DEBUG 551 unsigned int nr_spread_over; 552 #endif 553 554 #ifdef CONFIG_SMP 555 /* 556 * CFS load tracking 557 */ 558 struct sched_avg avg; 559 #ifndef CONFIG_64BIT 560 u64 load_last_update_time_copy; 561 #endif 562 struct { 563 raw_spinlock_t lock ____cacheline_aligned; 564 int nr; 565 unsigned long load_avg; 566 unsigned long util_avg; 567 unsigned long runnable_avg; 568 } removed; 569 570 #ifdef CONFIG_FAIR_GROUP_SCHED 571 unsigned long tg_load_avg_contrib; 572 long propagate; 573 long prop_runnable_sum; 574 575 /* 576 * h_load = weight * f(tg) 577 * 578 * Where f(tg) is the recursive weight fraction assigned to 579 * this group. 580 */ 581 unsigned long h_load; 582 u64 last_h_load_update; 583 struct sched_entity *h_load_next; 584 #endif /* CONFIG_FAIR_GROUP_SCHED */ 585 #endif /* CONFIG_SMP */ 586 587 #ifdef CONFIG_FAIR_GROUP_SCHED 588 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ 589 590 /* 591 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 592 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 593 * (like users, containers etc.) 594 * 595 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. 596 * This list is used during load balance. 597 */ 598 int on_list; 599 struct list_head leaf_cfs_rq_list; 600 struct task_group *tg; /* group that "owns" this runqueue */ 601 602 #ifdef CONFIG_CFS_BANDWIDTH 603 int runtime_enabled; 604 s64 runtime_remaining; 605 606 u64 throttled_clock; 607 u64 throttled_clock_task; 608 u64 throttled_clock_task_time; 609 int throttled; 610 int throttle_count; 611 struct list_head throttled_list; 612 #endif /* CONFIG_CFS_BANDWIDTH */ 613 #endif /* CONFIG_FAIR_GROUP_SCHED */ 614 }; 615 616 static inline int rt_bandwidth_enabled(void) 617 { 618 return sysctl_sched_rt_runtime >= 0; 619 } 620 621 /* RT IPI pull logic requires IRQ_WORK */ 622 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) 623 # define HAVE_RT_PUSH_IPI 624 #endif 625 626 /* Real-Time classes' related field in a runqueue: */ 627 struct rt_rq { 628 struct rt_prio_array active; 629 unsigned int rt_nr_running; 630 unsigned int rr_nr_running; 631 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 632 struct { 633 int curr; /* highest queued rt task prio */ 634 #ifdef CONFIG_SMP 635 int next; /* next highest */ 636 #endif 637 } highest_prio; 638 #endif 639 #ifdef CONFIG_SMP 640 unsigned int rt_nr_migratory; 641 unsigned int rt_nr_total; 642 int overloaded; 643 struct plist_head pushable_tasks; 644 645 #endif /* CONFIG_SMP */ 646 int rt_queued; 647 648 int rt_throttled; 649 u64 rt_time; 650 u64 rt_runtime; 651 /* Nests inside the rq lock: */ 652 raw_spinlock_t rt_runtime_lock; 653 654 #ifdef CONFIG_RT_GROUP_SCHED 655 unsigned int rt_nr_boosted; 656 657 struct rq *rq; 658 struct task_group *tg; 659 #endif 660 }; 661 662 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) 663 { 664 return rt_rq->rt_queued && rt_rq->rt_nr_running; 665 } 666 667 /* Deadline class' related fields in a runqueue */ 668 struct dl_rq { 669 /* runqueue is an rbtree, ordered by deadline */ 670 struct rb_root_cached root; 671 672 unsigned int dl_nr_running; 673 674 #ifdef CONFIG_SMP 675 /* 676 * Deadline values of the currently executing and the 677 * earliest ready task on this rq. 
Caching these facilitates 678 * the decision whether or not a ready but not running task 679 * should migrate somewhere else. 680 */ 681 struct { 682 u64 curr; 683 u64 next; 684 } earliest_dl; 685 686 unsigned int dl_nr_migratory; 687 int overloaded; 688 689 /* 690 * Tasks on this rq that can be pushed away. They are kept in 691 * an rb-tree, ordered by tasks' deadlines, with caching 692 * of the leftmost (earliest deadline) element. 693 */ 694 struct rb_root_cached pushable_dl_tasks_root; 695 #else 696 struct dl_bw dl_bw; 697 #endif 698 /* 699 * "Active utilization" for this runqueue: increased when a 700 * task wakes up (becomes TASK_RUNNING) and decreased when a 701 * task blocks 702 */ 703 u64 running_bw; 704 705 /* 706 * Utilization of the tasks "assigned" to this runqueue (including 707 * the tasks that are in runqueue and the tasks that executed on this 708 * CPU and blocked). Increased when a task moves to this runqueue, and 709 * decreased when the task moves away (migrates, changes scheduling 710 * policy, or terminates). 711 * This is needed to compute the "inactive utilization" for the 712 * runqueue (inactive utilization = this_bw - running_bw). 713 */ 714 u64 this_bw; 715 u64 extra_bw; 716 717 /* 718 * Inverse of the fraction of CPU utilization that can be reclaimed 719 * by the GRUB algorithm. 720 */ 721 u64 bw_ratio; 722 }; 723 724 #ifdef CONFIG_FAIR_GROUP_SCHED 725 /* An entity is a task if it doesn't "own" a runqueue */ 726 #define entity_is_task(se) (!se->my_q) 727 728 static inline void se_update_runnable(struct sched_entity *se) 729 { 730 if (!entity_is_task(se)) 731 se->runnable_weight = se->my_q->h_nr_running; 732 } 733 734 static inline long se_runnable(struct sched_entity *se) 735 { 736 if (entity_is_task(se)) 737 return !!se->on_rq; 738 else 739 return se->runnable_weight; 740 } 741 742 #else 743 #define entity_is_task(se) 1 744 745 static inline void se_update_runnable(struct sched_entity *se) {} 746 747 static inline long se_runnable(struct sched_entity *se) 748 { 749 return !!se->on_rq; 750 } 751 #endif 752 753 #ifdef CONFIG_SMP 754 /* 755 * XXX we want to get rid of these helpers and use the full load resolution. 756 */ 757 static inline long se_weight(struct sched_entity *se) 758 { 759 return scale_load_down(se->load.weight); 760 } 761 762 763 static inline bool sched_asym_prefer(int a, int b) 764 { 765 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); 766 } 767 768 struct perf_domain { 769 struct em_perf_domain *em_pd; 770 struct perf_domain *next; 771 struct rcu_head rcu; 772 }; 773 774 /* Scheduling group status flags */ 775 #define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ 776 #define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ 777 778 /* 779 * We add the notion of a root-domain which will be used to define per-domain 780 * variables. Each exclusive cpuset essentially defines an island domain by 781 * fully partitioning the member CPUs from any other cpuset. Whenever a new 782 * exclusive cpuset is created, we also create and attach a new root-domain 783 * object. 
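 *
 * (Illustration: splitting an 8-CPU machine into two exclusive cpusets
 *  covering CPUs 0-3 and 4-7 yields two root domains; the overload masks
 *  and the RT/DL push-pull logic below then only ever consider CPUs that
 *  belong to the same root domain.)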
784 * 785 */ 786 struct root_domain { 787 atomic_t refcount; 788 atomic_t rto_count; 789 struct rcu_head rcu; 790 cpumask_var_t span; 791 cpumask_var_t online; 792 793 /* 794 * Indicate pullable load on at least one CPU, e.g: 795 * - More than one runnable task 796 * - Running task is misfit 797 */ 798 int overload; 799 800 /* Indicate one or more CPUs over-utilized (tipping point) */ 801 int overutilized; 802 803 /* 804 * The bit corresponding to a CPU gets set here if such CPU has more 805 * than one runnable -deadline task (as it is below for RT tasks). 806 */ 807 cpumask_var_t dlo_mask; 808 atomic_t dlo_count; 809 struct dl_bw dl_bw; 810 struct cpudl cpudl; 811 812 /* 813 * Indicate whether a root_domain's dl_bw has been checked or 814 * updated. It is a monotonically increasing value. 815 * 816 * Corner cases such as wrap-around would be dangerous, but given 817 * that u64 is 'big enough', that shouldn't be a concern. 818 */ 819 u64 visit_gen; 820 821 #ifdef HAVE_RT_PUSH_IPI 822 /* 823 * For IPI pull requests, loop across the rto_mask. 824 */ 825 struct irq_work rto_push_work; 826 raw_spinlock_t rto_lock; 827 /* These are only updated and read within rto_lock */ 828 int rto_loop; 829 int rto_cpu; 830 /* These atomics are updated outside of a lock */ 831 atomic_t rto_loop_next; 832 atomic_t rto_loop_start; 833 #endif 834 /* 835 * The "RT overload" flag: it gets set if a CPU has more than 836 * one runnable RT task. 837 */ 838 cpumask_var_t rto_mask; 839 struct cpupri cpupri; 840 841 unsigned long max_cpu_capacity; 842 843 /* 844 * NULL-terminated list of performance domains intersecting with the 845 * CPUs of the rd. Protected by RCU. 846 */ 847 struct perf_domain __rcu *pd; 848 }; 849 850 extern void init_defrootdomain(void); 851 extern int sched_init_domains(const struct cpumask *cpu_map); 852 extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 853 extern void sched_get_rd(struct root_domain *rd); 854 extern void sched_put_rd(struct root_domain *rd); 855 856 #ifdef HAVE_RT_PUSH_IPI 857 extern void rto_push_irq_work_func(struct irq_work *work); 858 #endif 859 #endif /* CONFIG_SMP */ 860 861 #ifdef CONFIG_UCLAMP_TASK 862 /* 863 * struct uclamp_bucket - Utilization clamp bucket 864 * @value: utilization clamp value for tasks on this clamp bucket 865 * @tasks: number of RUNNABLE tasks on this clamp bucket 866 * 867 * Keep track of how many tasks are RUNNABLE for a given utilization 868 * clamp value. 869 */ 870 struct uclamp_bucket { 871 unsigned long value : bits_per(SCHED_CAPACITY_SCALE); 872 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); 873 }; 874 875 /* 876 * struct uclamp_rq - rq's utilization clamp 877 * @value: currently active clamp values for a rq 878 * @bucket: utilization clamp buckets affecting a rq 879 * 880 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. 881 * A clamp value is affecting a rq when there is at least one task RUNNABLE 882 * (or actually running) with that value. 883 * 884 * There are up to UCLAMP_CNT possible different clamp values, currently there 885 * are only two: minimum utilization and maximum utilization. 886 * 887 * All utilization clamping values are MAX aggregated, since: 888 * - for util_min: we want to run the CPU at least at the max of the minimum 889 * utilization required by its currently RUNNABLE tasks. 890 * - for util_max: we want to allow the CPU to run up to the max of the 891 * maximum utilization allowed by its currently RUNNABLE tasks.
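 *
 * (Example: with two RUNNABLE tasks requesting util_min = 200 and 512 and
 *  util_max = 300 and 800, the rq-wide clamps become UCLAMP_MIN = 512 and
 *  UCLAMP_MAX = 800.)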
892 * 893 * Since on each system we expect only a limited number of different 894 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track 895 * the metrics required to compute all the per-rq utilization clamp values. 896 */ 897 struct uclamp_rq { 898 unsigned int value; 899 struct uclamp_bucket bucket[UCLAMP_BUCKETS]; 900 }; 901 902 DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); 903 #endif /* CONFIG_UCLAMP_TASK */ 904 905 /* 906 * This is the main, per-CPU runqueue data structure. 907 * 908 * Locking rule: those places that want to lock multiple runqueues 909 * (such as the load balancing or the thread migration code), lock 910 * acquire operations must be ordered by ascending &runqueue. 911 */ 912 struct rq { 913 /* runqueue lock: */ 914 raw_spinlock_t __lock; 915 916 /* 917 * nr_running and cpu_load should be in the same cacheline because 918 * remote CPUs use both these fields when doing load calculation. 919 */ 920 unsigned int nr_running; 921 #ifdef CONFIG_NUMA_BALANCING 922 unsigned int nr_numa_running; 923 unsigned int nr_preferred_running; 924 unsigned int numa_migrate_on; 925 #endif 926 #ifdef CONFIG_NO_HZ_COMMON 927 #ifdef CONFIG_SMP 928 unsigned long last_blocked_load_update_tick; 929 unsigned int has_blocked_load; 930 call_single_data_t nohz_csd; 931 #endif /* CONFIG_SMP */ 932 unsigned int nohz_tick_stopped; 933 atomic_t nohz_flags; 934 #endif /* CONFIG_NO_HZ_COMMON */ 935 936 #ifdef CONFIG_SMP 937 unsigned int ttwu_pending; 938 #endif 939 u64 nr_switches; 940 941 #ifdef CONFIG_UCLAMP_TASK 942 /* Utilization clamp values based on CPU's RUNNABLE tasks */ 943 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; 944 unsigned int uclamp_flags; 945 #define UCLAMP_FLAG_IDLE 0x01 946 #endif 947 948 struct cfs_rq cfs; 949 struct rt_rq rt; 950 struct dl_rq dl; 951 952 #ifdef CONFIG_FAIR_GROUP_SCHED 953 /* list of leaf cfs_rq on this CPU: */ 954 struct list_head leaf_cfs_rq_list; 955 struct list_head *tmp_alone_branch; 956 #endif /* CONFIG_FAIR_GROUP_SCHED */ 957 958 /* 959 * This is part of a global counter where only the total sum 960 * over all CPUs matters. A task can increase this counter on 961 * one CPU and if it got migrated afterwards it may decrease 962 * it on another CPU. 
Always updated under the runqueue lock: 963 */ 964 unsigned int nr_uninterruptible; 965 966 struct task_struct __rcu *curr; 967 struct task_struct *idle; 968 struct task_struct *stop; 969 unsigned long next_balance; 970 struct mm_struct *prev_mm; 971 972 unsigned int clock_update_flags; 973 u64 clock; 974 /* Ensure that all clocks are in the same cache line */ 975 u64 clock_task ____cacheline_aligned; 976 u64 clock_pelt; 977 unsigned long lost_idle_time; 978 979 atomic_t nr_iowait; 980 981 #ifdef CONFIG_SCHED_DEBUG 982 u64 last_seen_need_resched_ns; 983 int ticks_without_resched; 984 #endif 985 986 #ifdef CONFIG_MEMBARRIER 987 int membarrier_state; 988 #endif 989 990 #ifdef CONFIG_SMP 991 struct root_domain *rd; 992 struct sched_domain __rcu *sd; 993 994 unsigned long cpu_capacity; 995 unsigned long cpu_capacity_orig; 996 997 struct callback_head *balance_callback; 998 999 unsigned char nohz_idle_balance; 1000 unsigned char idle_balance; 1001 1002 unsigned long misfit_task_load; 1003 1004 /* For active balancing */ 1005 int active_balance; 1006 int push_cpu; 1007 struct cpu_stop_work active_balance_work; 1008 1009 /* CPU of this runqueue: */ 1010 int cpu; 1011 int online; 1012 1013 struct list_head cfs_tasks; 1014 1015 struct sched_avg avg_rt; 1016 struct sched_avg avg_dl; 1017 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 1018 struct sched_avg avg_irq; 1019 #endif 1020 #ifdef CONFIG_SCHED_THERMAL_PRESSURE 1021 struct sched_avg avg_thermal; 1022 #endif 1023 u64 idle_stamp; 1024 u64 avg_idle; 1025 1026 unsigned long wake_stamp; 1027 u64 wake_avg_idle; 1028 1029 /* This is used to determine avg_idle's max value */ 1030 u64 max_idle_balance_cost; 1031 1032 #ifdef CONFIG_HOTPLUG_CPU 1033 struct rcuwait hotplug_wait; 1034 #endif 1035 #endif /* CONFIG_SMP */ 1036 1037 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 1038 u64 prev_irq_time; 1039 #endif 1040 #ifdef CONFIG_PARAVIRT 1041 u64 prev_steal_time; 1042 #endif 1043 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 1044 u64 prev_steal_time_rq; 1045 #endif 1046 1047 /* calc_load related fields */ 1048 unsigned long calc_load_update; 1049 long calc_load_active; 1050 1051 #ifdef CONFIG_SCHED_HRTICK 1052 #ifdef CONFIG_SMP 1053 call_single_data_t hrtick_csd; 1054 #endif 1055 struct hrtimer hrtick_timer; 1056 ktime_t hrtick_time; 1057 #endif 1058 1059 #ifdef CONFIG_SCHEDSTATS 1060 /* latency stats */ 1061 struct sched_info rq_sched_info; 1062 unsigned long long rq_cpu_time; 1063 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
*/ 1064 1065 /* sys_sched_yield() stats */ 1066 unsigned int yld_count; 1067 1068 /* schedule() stats */ 1069 unsigned int sched_count; 1070 unsigned int sched_goidle; 1071 1072 /* try_to_wake_up() stats */ 1073 unsigned int ttwu_count; 1074 unsigned int ttwu_local; 1075 #endif 1076 1077 #ifdef CONFIG_CPU_IDLE 1078 /* Must be inspected within a rcu lock section */ 1079 struct cpuidle_state *idle_state; 1080 #endif 1081 1082 #ifdef CONFIG_SMP 1083 unsigned int nr_pinned; 1084 #endif 1085 unsigned int push_busy; 1086 struct cpu_stop_work push_work; 1087 1088 #ifdef CONFIG_SCHED_CORE 1089 /* per rq */ 1090 struct rq *core; 1091 struct task_struct *core_pick; 1092 unsigned int core_enabled; 1093 unsigned int core_sched_seq; 1094 struct rb_root core_tree; 1095 1096 /* shared state */ 1097 unsigned int core_task_seq; 1098 unsigned int core_pick_seq; 1099 unsigned long core_cookie; 1100 unsigned char core_forceidle; 1101 unsigned int core_forceidle_seq; 1102 #endif 1103 }; 1104 1105 #ifdef CONFIG_FAIR_GROUP_SCHED 1106 1107 /* CPU runqueue to which this cfs_rq is attached */ 1108 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1109 { 1110 return cfs_rq->rq; 1111 } 1112 1113 #else 1114 1115 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1116 { 1117 return container_of(cfs_rq, struct rq, cfs); 1118 } 1119 #endif 1120 1121 static inline int cpu_of(struct rq *rq) 1122 { 1123 #ifdef CONFIG_SMP 1124 return rq->cpu; 1125 #else 1126 return 0; 1127 #endif 1128 } 1129 1130 #define MDF_PUSH 0x01 1131 1132 static inline bool is_migration_disabled(struct task_struct *p) 1133 { 1134 #ifdef CONFIG_SMP 1135 return p->migration_disabled; 1136 #else 1137 return false; 1138 #endif 1139 } 1140 1141 struct sched_group; 1142 #ifdef CONFIG_SCHED_CORE 1143 static inline struct cpumask *sched_group_span(struct sched_group *sg); 1144 1145 DECLARE_STATIC_KEY_FALSE(__sched_core_enabled); 1146 1147 static inline bool sched_core_enabled(struct rq *rq) 1148 { 1149 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; 1150 } 1151 1152 static inline bool sched_core_disabled(void) 1153 { 1154 return !static_branch_unlikely(&__sched_core_enabled); 1155 } 1156 1157 /* 1158 * Be careful with this function; not for general use. The return value isn't 1159 * stable unless you actually hold a relevant rq->__lock. 1160 */ 1161 static inline raw_spinlock_t *rq_lockp(struct rq *rq) 1162 { 1163 if (sched_core_enabled(rq)) 1164 return &rq->core->__lock; 1165 1166 return &rq->__lock; 1167 } 1168 1169 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) 1170 { 1171 if (rq->core_enabled) 1172 return &rq->core->__lock; 1173 1174 return &rq->__lock; 1175 } 1176 1177 bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi); 1178 1179 /* 1180 * Helpers to check if the CPU's core cookie matches with the task's cookie 1181 * when core scheduling is enabled. 1182 * A special case is that the task's cookie always matches with CPU's core 1183 * cookie if the CPU is in an idle core. 1184 */ 1185 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) 1186 { 1187 /* Ignore cookie match if core scheduler is not enabled on the CPU. */ 1188 if (!sched_core_enabled(rq)) 1189 return true; 1190 1191 return rq->core->core_cookie == p->core_cookie; 1192 } 1193 1194 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) 1195 { 1196 bool idle_core = true; 1197 int cpu; 1198 1199 /* Ignore cookie match if core scheduler is not enabled on the CPU. 
*/ 1200 if (!sched_core_enabled(rq)) 1201 return true; 1202 1203 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { 1204 if (!available_idle_cpu(cpu)) { 1205 idle_core = false; 1206 break; 1207 } 1208 } 1209 1210 /* 1211 * A CPU in an idle core is always the best choice for tasks with 1212 * cookies. 1213 */ 1214 return idle_core || rq->core->core_cookie == p->core_cookie; 1215 } 1216 1217 static inline bool sched_group_cookie_match(struct rq *rq, 1218 struct task_struct *p, 1219 struct sched_group *group) 1220 { 1221 int cpu; 1222 1223 /* Ignore cookie match if core scheduler is not enabled on the CPU. */ 1224 if (!sched_core_enabled(rq)) 1225 return true; 1226 1227 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { 1228 if (sched_core_cookie_match(rq, p)) 1229 return true; 1230 } 1231 return false; 1232 } 1233 1234 extern void queue_core_balance(struct rq *rq); 1235 1236 static inline bool sched_core_enqueued(struct task_struct *p) 1237 { 1238 return !RB_EMPTY_NODE(&p->core_node); 1239 } 1240 1241 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p); 1242 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p); 1243 1244 extern void sched_core_get(void); 1245 extern void sched_core_put(void); 1246 1247 extern unsigned long sched_core_alloc_cookie(void); 1248 extern void sched_core_put_cookie(unsigned long cookie); 1249 extern unsigned long sched_core_get_cookie(unsigned long cookie); 1250 extern unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie); 1251 1252 #else /* !CONFIG_SCHED_CORE */ 1253 1254 static inline bool sched_core_enabled(struct rq *rq) 1255 { 1256 return false; 1257 } 1258 1259 static inline bool sched_core_disabled(void) 1260 { 1261 return true; 1262 } 1263 1264 static inline raw_spinlock_t *rq_lockp(struct rq *rq) 1265 { 1266 return &rq->__lock; 1267 } 1268 1269 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) 1270 { 1271 return &rq->__lock; 1272 } 1273 1274 static inline void queue_core_balance(struct rq *rq) 1275 { 1276 } 1277 1278 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) 1279 { 1280 return true; 1281 } 1282 1283 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) 1284 { 1285 return true; 1286 } 1287 1288 static inline bool sched_group_cookie_match(struct rq *rq, 1289 struct task_struct *p, 1290 struct sched_group *group) 1291 { 1292 return true; 1293 } 1294 #endif /* CONFIG_SCHED_CORE */ 1295 1296 static inline void lockdep_assert_rq_held(struct rq *rq) 1297 { 1298 lockdep_assert_held(__rq_lockp(rq)); 1299 } 1300 1301 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass); 1302 extern bool raw_spin_rq_trylock(struct rq *rq); 1303 extern void raw_spin_rq_unlock(struct rq *rq); 1304 1305 static inline void raw_spin_rq_lock(struct rq *rq) 1306 { 1307 raw_spin_rq_lock_nested(rq, 0); 1308 } 1309 1310 static inline void raw_spin_rq_lock_irq(struct rq *rq) 1311 { 1312 local_irq_disable(); 1313 raw_spin_rq_lock(rq); 1314 } 1315 1316 static inline void raw_spin_rq_unlock_irq(struct rq *rq) 1317 { 1318 raw_spin_rq_unlock(rq); 1319 local_irq_enable(); 1320 } 1321 1322 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) 1323 { 1324 unsigned long flags; 1325 local_irq_save(flags); 1326 raw_spin_rq_lock(rq); 1327 return flags; 1328 } 1329 1330 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) 1331 { 1332 raw_spin_rq_unlock(rq); 1333 local_irq_restore(flags); 1334 } 1335 1336 #define 
raw_spin_rq_lock_irqsave(rq, flags) \ 1337 do { \ 1338 flags = _raw_spin_rq_lock_irqsave(rq); \ 1339 } while (0) 1340 1341 #ifdef CONFIG_SCHED_SMT 1342 extern void __update_idle_core(struct rq *rq); 1343 1344 static inline void update_idle_core(struct rq *rq) 1345 { 1346 if (static_branch_unlikely(&sched_smt_present)) 1347 __update_idle_core(rq); 1348 } 1349 1350 #else 1351 static inline void update_idle_core(struct rq *rq) { } 1352 #endif 1353 1354 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 1355 1356 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 1357 #define this_rq() this_cpu_ptr(&runqueues) 1358 #define task_rq(p) cpu_rq(task_cpu(p)) 1359 #define cpu_curr(cpu) (cpu_rq(cpu)->curr) 1360 #define raw_rq() raw_cpu_ptr(&runqueues) 1361 1362 #ifdef CONFIG_FAIR_GROUP_SCHED 1363 static inline struct task_struct *task_of(struct sched_entity *se) 1364 { 1365 SCHED_WARN_ON(!entity_is_task(se)); 1366 return container_of(se, struct task_struct, se); 1367 } 1368 1369 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) 1370 { 1371 return p->se.cfs_rq; 1372 } 1373 1374 /* runqueue on which this entity is (to be) queued */ 1375 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) 1376 { 1377 return se->cfs_rq; 1378 } 1379 1380 /* runqueue "owned" by this group */ 1381 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) 1382 { 1383 return grp->my_q; 1384 } 1385 1386 #else 1387 1388 static inline struct task_struct *task_of(struct sched_entity *se) 1389 { 1390 return container_of(se, struct task_struct, se); 1391 } 1392 1393 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) 1394 { 1395 return &task_rq(p)->cfs; 1396 } 1397 1398 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) 1399 { 1400 struct task_struct *p = task_of(se); 1401 struct rq *rq = task_rq(p); 1402 1403 return &rq->cfs; 1404 } 1405 1406 /* runqueue "owned" by this group */ 1407 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) 1408 { 1409 return NULL; 1410 } 1411 #endif 1412 1413 extern void update_rq_clock(struct rq *rq); 1414 1415 static inline u64 __rq_clock_broken(struct rq *rq) 1416 { 1417 return READ_ONCE(rq->clock); 1418 } 1419 1420 /* 1421 * rq::clock_update_flags bits 1422 * 1423 * %RQCF_REQ_SKIP - will request skipping of clock update on the next 1424 * call to __schedule(). This is an optimisation to avoid 1425 * neighbouring rq clock updates. 1426 * 1427 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is 1428 * in effect and calls to update_rq_clock() are being ignored. 1429 * 1430 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been 1431 * made to update_rq_clock() since the last time rq::lock was pinned. 1432 * 1433 * If inside of __schedule(), clock_update_flags will have been 1434 * shifted left (a left shift is a cheap operation for the fast path 1435 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, 1436 * 1437 * if (rq->clock_update_flags >= RQCF_UPDATED) 1438 * 1439 * to check if %RQCF_UPDATED is set. It'll never be shifted more than 1440 * one position though, because the next rq_unpin_lock() will shift it 1441 * back. 1442 */ 1443 #define RQCF_REQ_SKIP 0x01 1444 #define RQCF_ACT_SKIP 0x02 1445 #define RQCF_UPDATED 0x04 1446 1447 static inline void assert_clock_updated(struct rq *rq) 1448 { 1449 /* 1450 * The only reason for not seeing a clock update since the 1451 * last rq_pin_lock() is if we're currently skipping updates.
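 *
 * (In flag terms: clock_update_flags must be at least RQCF_ACT_SKIP (0x02)
 *  here; seeing 0 or a bare RQCF_REQ_SKIP (0x01) means rq_clock() or
 *  rq_clock_task() would hand back a stale timestamp.)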
1452 */ 1453 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1454 } 1455 1456 static inline u64 rq_clock(struct rq *rq) 1457 { 1458 lockdep_assert_rq_held(rq); 1459 assert_clock_updated(rq); 1460 1461 return rq->clock; 1462 } 1463 1464 static inline u64 rq_clock_task(struct rq *rq) 1465 { 1466 lockdep_assert_rq_held(rq); 1467 assert_clock_updated(rq); 1468 1469 return rq->clock_task; 1470 } 1471 1472 /** 1473 * By default the decay is the default pelt decay period. 1474 * The decay shift can change the decay period in 1475 * multiples of 32. 1476 * Decay shift Decay period(ms) 1477 * 0 32 1478 * 1 64 1479 * 2 128 1480 * 3 256 1481 * 4 512 1482 */ 1483 extern int sched_thermal_decay_shift; 1484 1485 static inline u64 rq_clock_thermal(struct rq *rq) 1486 { 1487 return rq_clock_task(rq) >> sched_thermal_decay_shift; 1488 } 1489 1490 static inline void rq_clock_skip_update(struct rq *rq) 1491 { 1492 lockdep_assert_rq_held(rq); 1493 rq->clock_update_flags |= RQCF_REQ_SKIP; 1494 } 1495 1496 /* 1497 * See rt task throttling, which is the only time a skip 1498 * request is canceled. 1499 */ 1500 static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1501 { 1502 lockdep_assert_rq_held(rq); 1503 rq->clock_update_flags &= ~RQCF_REQ_SKIP; 1504 } 1505 1506 struct rq_flags { 1507 unsigned long flags; 1508 struct pin_cookie cookie; 1509 #ifdef CONFIG_SCHED_DEBUG 1510 /* 1511 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1512 * current pin context is stashed here in case it needs to be 1513 * restored in rq_repin_lock(). 1514 */ 1515 unsigned int clock_update_flags; 1516 #endif 1517 }; 1518 1519 extern struct callback_head balance_push_callback; 1520 1521 /* 1522 * Lockdep annotation that avoids accidental unlocks; it's like a 1523 * sticky/continuous lockdep_assert_held(). 1524 * 1525 * This avoids code that has access to 'struct rq *rq' (basically everything in 1526 * the scheduler) from accidentally unlocking the rq if they do not also have a 1527 * copy of the (on-stack) 'struct rq_flags rf'. 1528 * 1529 * Also see Documentation/locking/lockdep-design.rst. 1530 */ 1531 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1532 { 1533 rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); 1534 1535 #ifdef CONFIG_SCHED_DEBUG 1536 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1537 rf->clock_update_flags = 0; 1538 #ifdef CONFIG_SMP 1539 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); 1540 #endif 1541 #endif 1542 } 1543 1544 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1545 { 1546 #ifdef CONFIG_SCHED_DEBUG 1547 if (rq->clock_update_flags > RQCF_ACT_SKIP) 1548 rf->clock_update_flags = RQCF_UPDATED; 1549 #endif 1550 1551 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); 1552 } 1553 1554 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1555 { 1556 lockdep_repin_lock(__rq_lockp(rq), rf->cookie); 1557 1558 #ifdef CONFIG_SCHED_DEBUG 1559 /* 1560 * Restore the value we stashed in @rf for this pin context. 
1561 */ 1562 rq->clock_update_flags |= rf->clock_update_flags; 1563 #endif 1564 } 1565 1566 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1567 __acquires(rq->lock); 1568 1569 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1570 __acquires(p->pi_lock) 1571 __acquires(rq->lock); 1572 1573 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 1574 __releases(rq->lock) 1575 { 1576 rq_unpin_lock(rq, rf); 1577 raw_spin_rq_unlock(rq); 1578 } 1579 1580 static inline void 1581 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1582 __releases(rq->lock) 1583 __releases(p->pi_lock) 1584 { 1585 rq_unpin_lock(rq, rf); 1586 raw_spin_rq_unlock(rq); 1587 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 1588 } 1589 1590 static inline void 1591 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 1592 __acquires(rq->lock) 1593 { 1594 raw_spin_rq_lock_irqsave(rq, rf->flags); 1595 rq_pin_lock(rq, rf); 1596 } 1597 1598 static inline void 1599 rq_lock_irq(struct rq *rq, struct rq_flags *rf) 1600 __acquires(rq->lock) 1601 { 1602 raw_spin_rq_lock_irq(rq); 1603 rq_pin_lock(rq, rf); 1604 } 1605 1606 static inline void 1607 rq_lock(struct rq *rq, struct rq_flags *rf) 1608 __acquires(rq->lock) 1609 { 1610 raw_spin_rq_lock(rq); 1611 rq_pin_lock(rq, rf); 1612 } 1613 1614 static inline void 1615 rq_relock(struct rq *rq, struct rq_flags *rf) 1616 __acquires(rq->lock) 1617 { 1618 raw_spin_rq_lock(rq); 1619 rq_repin_lock(rq, rf); 1620 } 1621 1622 static inline void 1623 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 1624 __releases(rq->lock) 1625 { 1626 rq_unpin_lock(rq, rf); 1627 raw_spin_rq_unlock_irqrestore(rq, rf->flags); 1628 } 1629 1630 static inline void 1631 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 1632 __releases(rq->lock) 1633 { 1634 rq_unpin_lock(rq, rf); 1635 raw_spin_rq_unlock_irq(rq); 1636 } 1637 1638 static inline void 1639 rq_unlock(struct rq *rq, struct rq_flags *rf) 1640 __releases(rq->lock) 1641 { 1642 rq_unpin_lock(rq, rf); 1643 raw_spin_rq_unlock(rq); 1644 } 1645 1646 static inline struct rq * 1647 this_rq_lock_irq(struct rq_flags *rf) 1648 __acquires(rq->lock) 1649 { 1650 struct rq *rq; 1651 1652 local_irq_disable(); 1653 rq = this_rq(); 1654 rq_lock(rq, rf); 1655 return rq; 1656 } 1657 1658 #ifdef CONFIG_NUMA 1659 enum numa_topology_type { 1660 NUMA_DIRECT, 1661 NUMA_GLUELESS_MESH, 1662 NUMA_BACKPLANE, 1663 }; 1664 extern enum numa_topology_type sched_numa_topology_type; 1665 extern int sched_max_numa_distance; 1666 extern bool find_numa_distance(int distance); 1667 extern void sched_init_numa(void); 1668 extern void sched_domains_numa_masks_set(unsigned int cpu); 1669 extern void sched_domains_numa_masks_clear(unsigned int cpu); 1670 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1671 #else 1672 static inline void sched_init_numa(void) { } 1673 static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1674 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1675 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1676 { 1677 return nr_cpu_ids; 1678 } 1679 #endif 1680 1681 #ifdef CONFIG_NUMA_BALANCING 1682 /* The regions in numa_faults array from task_struct */ 1683 enum numa_faults_stats { 1684 NUMA_MEM = 0, 1685 NUMA_CPU, 1686 NUMA_MEMBUF, 1687 NUMA_CPUBUF 1688 }; 1689 extern void sched_setnuma(struct task_struct *p, int node); 1690 extern int migrate_task_to(struct task_struct *p, int cpu); 1691 extern int 
migrate_swap(struct task_struct *p, struct task_struct *t, 1692 int cpu, int scpu); 1693 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 1694 #else 1695 static inline void 1696 init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 1697 { 1698 } 1699 #endif /* CONFIG_NUMA_BALANCING */ 1700 1701 #ifdef CONFIG_SMP 1702 1703 static inline void 1704 queue_balance_callback(struct rq *rq, 1705 struct callback_head *head, 1706 void (*func)(struct rq *rq)) 1707 { 1708 lockdep_assert_rq_held(rq); 1709 1710 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) 1711 return; 1712 1713 head->func = (void (*)(struct callback_head *))func; 1714 head->next = rq->balance_callback; 1715 rq->balance_callback = head; 1716 } 1717 1718 #define rcu_dereference_check_sched_domain(p) \ 1719 rcu_dereference_check((p), \ 1720 lockdep_is_held(&sched_domains_mutex)) 1721 1722 /* 1723 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1724 * See destroy_sched_domains: call_rcu for details. 1725 * 1726 * The domain tree of any CPU may only be accessed from within 1727 * preempt-disabled sections. 1728 */ 1729 #define for_each_domain(cpu, __sd) \ 1730 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1731 __sd; __sd = __sd->parent) 1732 1733 /** 1734 * highest_flag_domain - Return highest sched_domain containing flag. 1735 * @cpu: The CPU whose highest level of sched domain is to 1736 * be returned. 1737 * @flag: The flag to check for the highest sched_domain 1738 * for the given CPU. 1739 * 1740 * Returns the highest sched_domain of a CPU which contains the given flag. 1741 */ 1742 static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1743 { 1744 struct sched_domain *sd, *hsd = NULL; 1745 1746 for_each_domain(cpu, sd) { 1747 if (!(sd->flags & flag)) 1748 break; 1749 hsd = sd; 1750 } 1751 1752 return hsd; 1753 } 1754 1755 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1756 { 1757 struct sched_domain *sd; 1758 1759 for_each_domain(cpu, sd) { 1760 if (sd->flags & flag) 1761 break; 1762 } 1763 1764 return sd; 1765 } 1766 1767 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 1768 DECLARE_PER_CPU(int, sd_llc_size); 1769 DECLARE_PER_CPU(int, sd_llc_id); 1770 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1771 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1772 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1773 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1774 extern struct static_key_false sched_asym_cpucapacity; 1775 1776 struct sched_group_capacity { 1777 atomic_t ref; 1778 /* 1779 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 1780 * for a single CPU. 1781 */ 1782 unsigned long capacity; 1783 unsigned long min_capacity; /* Min per-CPU capacity in group */ 1784 unsigned long max_capacity; /* Max per-CPU capacity in group */ 1785 unsigned long next_update; 1786 int imbalance; /* XXX unrelated to capacity but shared group state */ 1787 1788 #ifdef CONFIG_SCHED_DEBUG 1789 int id; 1790 #endif 1791 1792 unsigned long cpumask[]; /* Balance mask */ 1793 }; 1794 1795 struct sched_group { 1796 struct sched_group *next; /* Must be a circular list */ 1797 atomic_t ref; 1798 1799 unsigned int group_weight; 1800 struct sched_group_capacity *sgc; 1801 int asym_prefer_cpu; /* CPU of highest priority in group */ 1802 1803 /* 1804 * The CPUs this group covers. 
1805 * 1806 * NOTE: this field is variable length. (Allocated dynamically 1807 * by attaching extra space to the end of the structure, 1808 * depending on how many CPUs the kernel has booted up with) 1809 */ 1810 unsigned long cpumask[]; 1811 }; 1812 1813 static inline struct cpumask *sched_group_span(struct sched_group *sg) 1814 { 1815 return to_cpumask(sg->cpumask); 1816 } 1817 1818 /* 1819 * See build_balance_mask(). 1820 */ 1821 static inline struct cpumask *group_balance_mask(struct sched_group *sg) 1822 { 1823 return to_cpumask(sg->sgc->cpumask); 1824 } 1825 1826 /** 1827 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. 1828 * @group: The group whose first CPU is to be returned. 1829 */ 1830 static inline unsigned int group_first_cpu(struct sched_group *group) 1831 { 1832 return cpumask_first(sched_group_span(group)); 1833 } 1834 1835 extern int group_balance_cpu(struct sched_group *sg); 1836 1837 #ifdef CONFIG_SCHED_DEBUG 1838 void update_sched_domain_debugfs(void); 1839 void dirty_sched_domain_sysctl(int cpu); 1840 #else 1841 static inline void update_sched_domain_debugfs(void) 1842 { 1843 } 1844 static inline void dirty_sched_domain_sysctl(int cpu) 1845 { 1846 } 1847 #endif 1848 1849 extern int sched_update_scaling(void); 1850 1851 extern void flush_smp_call_function_from_idle(void); 1852 1853 #else /* !CONFIG_SMP: */ 1854 static inline void flush_smp_call_function_from_idle(void) { } 1855 #endif 1856 1857 #include "stats.h" 1858 #include "autogroup.h" 1859 1860 #ifdef CONFIG_CGROUP_SCHED 1861 1862 /* 1863 * Return the group to which this task belongs. 1864 * 1865 * We cannot use task_css() and friends because the cgroup subsystem 1866 * changes that value before the cgroup_subsys::attach() method is called, 1867 * therefore we cannot pin it and might observe the wrong value. 1868 * 1869 * The same is true for autogroup's p->signal->autogroup->tg: the autogroup 1870 * core changes this before calling sched_move_task(). 1871 * 1872 * Instead we use a 'copy' which is updated from sched_move_task() while 1873 * holding both task_struct::pi_lock and rq::lock. 1874 */ 1875 static inline struct task_group *task_group(struct task_struct *p) 1876 { 1877 return p->sched_task_group; 1878 } 1879 1880 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1881 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1882 { 1883 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1884 struct task_group *tg = task_group(p); 1885 #endif 1886 1887 #ifdef CONFIG_FAIR_GROUP_SCHED 1888 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1889 p->se.cfs_rq = tg->cfs_rq[cpu]; 1890 p->se.parent = tg->se[cpu]; 1891 #endif 1892 1893 #ifdef CONFIG_RT_GROUP_SCHED 1894 p->rt.rt_rq = tg->rt_rq[cpu]; 1895 p->rt.parent = tg->rt_se[cpu]; 1896 #endif 1897 } 1898 1899 #else /* CONFIG_CGROUP_SCHED */ 1900 1901 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 1902 static inline struct task_group *task_group(struct task_struct *p) 1903 { 1904 return NULL; 1905 } 1906 1907 #endif /* CONFIG_CGROUP_SCHED */ 1908 1909 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1910 { 1911 set_task_rq(p, cpu); 1912 #ifdef CONFIG_SMP 1913 /* 1914 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1915 * successfully executed on another CPU. We must ensure that updates of 1916 * per-task data have been completed by this moment.
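 *
 * (Illustration: once a remote CPU observes the new ->cpu value and takes
 *  the corresponding rq lock, it must also observe every store to @p made
 *  before this point; the smp_wmb() below provides the write side of that
 *  ordering.)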
1917 */ 1918 smp_wmb(); 1919 #ifdef CONFIG_THREAD_INFO_IN_TASK 1920 WRITE_ONCE(p->cpu, cpu); 1921 #else 1922 WRITE_ONCE(task_thread_info(p)->cpu, cpu); 1923 #endif 1924 p->wake_cpu = cpu; 1925 #endif 1926 } 1927 1928 /* 1929 * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1930 */ 1931 #ifdef CONFIG_SCHED_DEBUG 1932 # include <linux/static_key.h> 1933 # define const_debug __read_mostly 1934 #else 1935 # define const_debug const 1936 #endif 1937 1938 #define SCHED_FEAT(name, enabled) \ 1939 __SCHED_FEAT_##name , 1940 1941 enum { 1942 #include "features.h" 1943 __SCHED_FEAT_NR, 1944 }; 1945 1946 #undef SCHED_FEAT 1947 1948 #ifdef CONFIG_SCHED_DEBUG 1949 1950 /* 1951 * To support run-time toggling of sched features, all the translation units 1952 * (but core.c) reference the sysctl_sched_features defined in core.c. 1953 */ 1954 extern const_debug unsigned int sysctl_sched_features; 1955 1956 #ifdef CONFIG_JUMP_LABEL 1957 #define SCHED_FEAT(name, enabled) \ 1958 static __always_inline bool static_branch_##name(struct static_key *key) \ 1959 { \ 1960 return static_key_##enabled(key); \ 1961 } 1962 1963 #include "features.h" 1964 #undef SCHED_FEAT 1965 1966 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1967 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1968 1969 #else /* !CONFIG_JUMP_LABEL */ 1970 1971 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1972 1973 #endif /* CONFIG_JUMP_LABEL */ 1974 1975 #else /* !SCHED_DEBUG */ 1976 1977 /* 1978 * Each translation unit has its own copy of sysctl_sched_features to allow 1979 * constants propagation at compile time and compiler optimization based on 1980 * features default. 1981 */ 1982 #define SCHED_FEAT(name, enabled) \ 1983 (1UL << __SCHED_FEAT_##name) * enabled | 1984 static const_debug __maybe_unused unsigned int sysctl_sched_features = 1985 #include "features.h" 1986 0; 1987 #undef SCHED_FEAT 1988 1989 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1990 1991 #endif /* SCHED_DEBUG */ 1992 1993 extern struct static_key_false sched_numa_balancing; 1994 extern struct static_key_false sched_schedstats; 1995 1996 static inline u64 global_rt_period(void) 1997 { 1998 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1999 } 2000 2001 static inline u64 global_rt_runtime(void) 2002 { 2003 if (sysctl_sched_rt_runtime < 0) 2004 return RUNTIME_INF; 2005 2006 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 2007 } 2008 2009 static inline int task_current(struct rq *rq, struct task_struct *p) 2010 { 2011 return rq->curr == p; 2012 } 2013 2014 static inline int task_running(struct rq *rq, struct task_struct *p) 2015 { 2016 #ifdef CONFIG_SMP 2017 return p->on_cpu; 2018 #else 2019 return task_current(rq, p); 2020 #endif 2021 } 2022 2023 static inline int task_on_rq_queued(struct task_struct *p) 2024 { 2025 return p->on_rq == TASK_ON_RQ_QUEUED; 2026 } 2027 2028 static inline int task_on_rq_migrating(struct task_struct *p) 2029 { 2030 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 2031 } 2032 2033 /* Wake flags. 
The first three directly map to some SD flag value */ 2034 #define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */ 2035 #define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */ 2036 #define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */ 2037 2038 #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */ 2039 #define WF_MIGRATED 0x20 /* Internal use, task got migrated */ 2040 #define WF_ON_CPU 0x40 /* Wakee is on_cpu */ 2041 2042 #ifdef CONFIG_SMP 2043 static_assert(WF_EXEC == SD_BALANCE_EXEC); 2044 static_assert(WF_FORK == SD_BALANCE_FORK); 2045 static_assert(WF_TTWU == SD_BALANCE_WAKE); 2046 #endif 2047 2048 /* 2049 * To aid in avoiding the subversion of "niceness" due to uneven distribution 2050 * of tasks with abnormal "nice" values across CPUs the contribution that 2051 * each task makes to its run queue's load is weighted according to its 2052 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 2053 * scaled version of the new time slice allocation that they receive on time 2054 * slice expiry etc. 2055 */ 2056 2057 #define WEIGHT_IDLEPRIO 3 2058 #define WMULT_IDLEPRIO 1431655765 2059 2060 extern const int sched_prio_to_weight[40]; 2061 extern const u32 sched_prio_to_wmult[40]; 2062 2063 /* 2064 * {de,en}queue flags: 2065 * 2066 * DEQUEUE_SLEEP - task is no longer runnable 2067 * ENQUEUE_WAKEUP - task just became runnable 2068 * 2069 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 2070 * are in a known state which allows modification. Such pairs 2071 * should preserve as much state as possible. 2072 * 2073 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 2074 * in the runqueue. 2075 * 2076 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 2077 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 2078 * ENQUEUE_MIGRATED - the task was migrated during wakeup 2079 * 2080 */ 2081 2082 #define DEQUEUE_SLEEP 0x01 2083 #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 2084 #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 2085 #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 2086 2087 #define ENQUEUE_WAKEUP 0x01 2088 #define ENQUEUE_RESTORE 0x02 2089 #define ENQUEUE_MOVE 0x04 2090 #define ENQUEUE_NOCLOCK 0x08 2091 2092 #define ENQUEUE_HEAD 0x10 2093 #define ENQUEUE_REPLENISH 0x20 2094 #ifdef CONFIG_SMP 2095 #define ENQUEUE_MIGRATED 0x40 2096 #else 2097 #define ENQUEUE_MIGRATED 0x00 2098 #endif 2099 2100 #define RETRY_TASK ((void *)-1UL) 2101 2102 struct sched_class { 2103 2104 #ifdef CONFIG_UCLAMP_TASK 2105 int uclamp_enabled; 2106 #endif 2107 2108 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 2109 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 2110 void (*yield_task) (struct rq *rq); 2111 bool (*yield_to_task)(struct rq *rq, struct task_struct *p); 2112 2113 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 2114 2115 struct task_struct *(*pick_next_task)(struct rq *rq); 2116 2117 void (*put_prev_task)(struct rq *rq, struct task_struct *p); 2118 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 2119 2120 #ifdef CONFIG_SMP 2121 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2122 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); 2123 2124 struct task_struct * (*pick_task)(struct rq *rq); 2125 2126 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 2127 2128 void 
(*task_woken)(struct rq *this_rq, struct task_struct *task); 2129 2130 void (*set_cpus_allowed)(struct task_struct *p, 2131 const struct cpumask *newmask, 2132 u32 flags); 2133 2134 void (*rq_online)(struct rq *rq); 2135 void (*rq_offline)(struct rq *rq); 2136 2137 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); 2138 #endif 2139 2140 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 2141 void (*task_fork)(struct task_struct *p); 2142 void (*task_dead)(struct task_struct *p); 2143 2144 /* 2145 * The switched_from() call is allowed to drop rq->lock, therefore we 2146 * cannot assume the switched_from/switched_to pair is serialized by 2147 * rq->lock. They are however serialized by p->pi_lock. 2148 */ 2149 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 2150 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 2151 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 2152 int oldprio); 2153 2154 unsigned int (*get_rr_interval)(struct rq *rq, 2155 struct task_struct *task); 2156 2157 void (*update_curr)(struct rq *rq); 2158 2159 #define TASK_SET_GROUP 0 2160 #define TASK_MOVE_GROUP 1 2161 2162 #ifdef CONFIG_FAIR_GROUP_SCHED 2163 void (*task_change_group)(struct task_struct *p, int type); 2164 #endif 2165 }; 2166 2167 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 2168 { 2169 WARN_ON_ONCE(rq->curr != prev); 2170 prev->sched_class->put_prev_task(rq, prev); 2171 } 2172 2173 static inline void set_next_task(struct rq *rq, struct task_struct *next) 2174 { 2175 next->sched_class->set_next_task(rq, next, false); 2176 } 2177 2178 2179 /* 2180 * Helper to define a sched_class instance; each one is placed in a separate 2181 * section which is ordered by the linker script: 2182 * 2183 * include/asm-generic/vmlinux.lds.h 2184 * 2185 * Also enforce alignment on the instance, not the type, to guarantee layout. 
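 *
 * Illustrative use (a sketch of what e.g. fair.c does; the initializer is
 * abridged and only shows members from struct sched_class above):
 *
 *	DEFINE_SCHED_CLASS(fair) = {
 *		.enqueue_task	= enqueue_task_fair,
 *		.dequeue_task	= dequeue_task_fair,
 *		...
 *	};
 *
 * This expands to a 'const struct sched_class fair_sched_class' placed in
 * its own '__fair_sched_class' section, which the linker script orders by
 * priority so that for_each_class() below can walk the classes from
 * highest to lowest.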
2186 */ 2187 #define DEFINE_SCHED_CLASS(name) \ 2188 const struct sched_class name##_sched_class \ 2189 __aligned(__alignof__(struct sched_class)) \ 2190 __section("__" #name "_sched_class") 2191 2192 /* Defined in include/asm-generic/vmlinux.lds.h */ 2193 extern struct sched_class __begin_sched_classes[]; 2194 extern struct sched_class __end_sched_classes[]; 2195 2196 #define sched_class_highest (__end_sched_classes - 1) 2197 #define sched_class_lowest (__begin_sched_classes - 1) 2198 2199 #define for_class_range(class, _from, _to) \ 2200 for (class = (_from); class != (_to); class--) 2201 2202 #define for_each_class(class) \ 2203 for_class_range(class, sched_class_highest, sched_class_lowest) 2204 2205 extern const struct sched_class stop_sched_class; 2206 extern const struct sched_class dl_sched_class; 2207 extern const struct sched_class rt_sched_class; 2208 extern const struct sched_class fair_sched_class; 2209 extern const struct sched_class idle_sched_class; 2210 2211 static inline bool sched_stop_runnable(struct rq *rq) 2212 { 2213 return rq->stop && task_on_rq_queued(rq->stop); 2214 } 2215 2216 static inline bool sched_dl_runnable(struct rq *rq) 2217 { 2218 return rq->dl.dl_nr_running > 0; 2219 } 2220 2221 static inline bool sched_rt_runnable(struct rq *rq) 2222 { 2223 return rq->rt.rt_queued > 0; 2224 } 2225 2226 static inline bool sched_fair_runnable(struct rq *rq) 2227 { 2228 return rq->cfs.nr_running > 0; 2229 } 2230 2231 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2232 extern struct task_struct *pick_next_task_idle(struct rq *rq); 2233 2234 #define SCA_CHECK 0x01 2235 #define SCA_MIGRATE_DISABLE 0x02 2236 #define SCA_MIGRATE_ENABLE 0x04 2237 2238 #ifdef CONFIG_SMP 2239 2240 extern void update_group_capacity(struct sched_domain *sd, int cpu); 2241 2242 extern void trigger_load_balance(struct rq *rq); 2243 2244 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags); 2245 2246 static inline struct task_struct *get_push_task(struct rq *rq) 2247 { 2248 struct task_struct *p = rq->curr; 2249 2250 lockdep_assert_rq_held(rq); 2251 2252 if (rq->push_busy) 2253 return NULL; 2254 2255 if (p->nr_cpus_allowed == 1) 2256 return NULL; 2257 2258 rq->push_busy = true; 2259 return get_task_struct(p); 2260 } 2261 2262 extern int push_cpu_stop(void *arg); 2263 2264 #endif 2265 2266 #ifdef CONFIG_CPU_IDLE 2267 static inline void idle_set_state(struct rq *rq, 2268 struct cpuidle_state *idle_state) 2269 { 2270 rq->idle_state = idle_state; 2271 } 2272 2273 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2274 { 2275 SCHED_WARN_ON(!rcu_read_lock_held()); 2276 2277 return rq->idle_state; 2278 } 2279 #else 2280 static inline void idle_set_state(struct rq *rq, 2281 struct cpuidle_state *idle_state) 2282 { 2283 } 2284 2285 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2286 { 2287 return NULL; 2288 } 2289 #endif 2290 2291 extern void schedule_idle(void); 2292 2293 extern void sysrq_sched_debug_show(void); 2294 extern void sched_init_granularity(void); 2295 extern void update_max_interval(void); 2296 2297 extern void init_sched_dl_class(void); 2298 extern void init_sched_rt_class(void); 2299 extern void init_sched_fair_class(void); 2300 2301 extern void reweight_task(struct task_struct *p, int prio); 2302 2303 extern void resched_curr(struct rq *rq); 2304 extern void resched_cpu(int cpu); 2305 2306 extern struct rt_bandwidth def_rt_bandwidth; 2307 extern void 
init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 2308 2309 extern struct dl_bandwidth def_dl_bandwidth; 2310 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 2311 extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 2312 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 2313 2314 #define BW_SHIFT 20 2315 #define BW_UNIT (1 << BW_SHIFT) 2316 #define RATIO_SHIFT 8 2317 #define MAX_BW_BITS (64 - BW_SHIFT) 2318 #define MAX_BW ((1ULL << MAX_BW_BITS) - 1) 2319 unsigned long to_ratio(u64 period, u64 runtime); 2320 2321 extern void init_entity_runnable_average(struct sched_entity *se); 2322 extern void post_init_entity_util_avg(struct task_struct *p); 2323 2324 #ifdef CONFIG_NO_HZ_FULL 2325 extern bool sched_can_stop_tick(struct rq *rq); 2326 extern int __init sched_tick_offload_init(void); 2327 2328 /* 2329 * Tick may be needed by tasks in the runqueue depending on their policy and 2330 * requirements. If tick is needed, lets send the target an IPI to kick it out of 2331 * nohz mode if necessary. 2332 */ 2333 static inline void sched_update_tick_dependency(struct rq *rq) 2334 { 2335 int cpu = cpu_of(rq); 2336 2337 if (!tick_nohz_full_cpu(cpu)) 2338 return; 2339 2340 if (sched_can_stop_tick(rq)) 2341 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 2342 else 2343 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 2344 } 2345 #else 2346 static inline int sched_tick_offload_init(void) { return 0; } 2347 static inline void sched_update_tick_dependency(struct rq *rq) { } 2348 #endif 2349 2350 static inline void add_nr_running(struct rq *rq, unsigned count) 2351 { 2352 unsigned prev_nr = rq->nr_running; 2353 2354 rq->nr_running = prev_nr + count; 2355 if (trace_sched_update_nr_running_tp_enabled()) { 2356 call_trace_sched_update_nr_running(rq, count); 2357 } 2358 2359 #ifdef CONFIG_SMP 2360 if (prev_nr < 2 && rq->nr_running >= 2) { 2361 if (!READ_ONCE(rq->rd->overload)) 2362 WRITE_ONCE(rq->rd->overload, 1); 2363 } 2364 #endif 2365 2366 sched_update_tick_dependency(rq); 2367 } 2368 2369 static inline void sub_nr_running(struct rq *rq, unsigned count) 2370 { 2371 rq->nr_running -= count; 2372 if (trace_sched_update_nr_running_tp_enabled()) { 2373 call_trace_sched_update_nr_running(rq, -count); 2374 } 2375 2376 /* Check if we still need preemption */ 2377 sched_update_tick_dependency(rq); 2378 } 2379 2380 extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 2381 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 2382 2383 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 2384 2385 extern const_debug unsigned int sysctl_sched_nr_migrate; 2386 extern const_debug unsigned int sysctl_sched_migration_cost; 2387 2388 #ifdef CONFIG_SCHED_HRTICK 2389 2390 /* 2391 * Use hrtick when: 2392 * - enabled by features 2393 * - hrtimer is actually high res 2394 */ 2395 static inline int hrtick_enabled(struct rq *rq) 2396 { 2397 if (!cpu_active(cpu_of(rq))) 2398 return 0; 2399 return hrtimer_is_hres_active(&rq->hrtick_timer); 2400 } 2401 2402 static inline int hrtick_enabled_fair(struct rq *rq) 2403 { 2404 if (!sched_feat(HRTICK)) 2405 return 0; 2406 return hrtick_enabled(rq); 2407 } 2408 2409 static inline int hrtick_enabled_dl(struct rq *rq) 2410 { 2411 if (!sched_feat(HRTICK_DL)) 2412 return 0; 2413 return hrtick_enabled(rq); 2414 } 2415 2416 void hrtick_start(struct rq *rq, u64 delay); 2417 2418 #else 2419 2420 static inline int hrtick_enabled_fair(struct 
rq *rq) 2421 { 2422 return 0; 2423 } 2424 2425 static inline int hrtick_enabled_dl(struct rq *rq) 2426 { 2427 return 0; 2428 } 2429 2430 static inline int hrtick_enabled(struct rq *rq) 2431 { 2432 return 0; 2433 } 2434 2435 #endif /* CONFIG_SCHED_HRTICK */ 2436 2437 #ifndef arch_scale_freq_tick 2438 static __always_inline 2439 void arch_scale_freq_tick(void) 2440 { 2441 } 2442 #endif 2443 2444 #ifndef arch_scale_freq_capacity 2445 /** 2446 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. 2447 * @cpu: the CPU in question. 2448 * 2449 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. 2450 * 2451 * f_curr 2452 * ------ * SCHED_CAPACITY_SCALE 2453 * f_max 2454 */ 2455 static __always_inline 2456 unsigned long arch_scale_freq_capacity(int cpu) 2457 { 2458 return SCHED_CAPACITY_SCALE; 2459 } 2460 #endif 2461 2462 2463 #ifdef CONFIG_SMP 2464 2465 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) 2466 { 2467 #ifdef CONFIG_SCHED_CORE 2468 /* 2469 * In order to not have {0,2},{1,3} turn into an AB-BA, 2470 * order by core-id first and cpu-id second. 2471 * 2472 * Notably: 2473 * 2474 * double_rq_lock(0,3); will take core-0, core-1 lock 2475 * double_rq_lock(1,2); will take core-1, core-0 lock 2476 * 2477 * when only cpu-id is considered. 2478 */ 2479 if (rq1->core->cpu < rq2->core->cpu) 2480 return true; 2481 if (rq1->core->cpu > rq2->core->cpu) 2482 return false; 2483 2484 /* 2485 * __sched_core_flip() relies on SMT having cpu-id lock order. 2486 */ 2487 #endif 2488 return rq1->cpu < rq2->cpu; 2489 } 2490 2491 extern void double_rq_lock(struct rq *rq1, struct rq *rq2); 2492 2493 #ifdef CONFIG_PREEMPTION 2494 2495 /* 2496 * fair double_lock_balance: Safely acquires both rq->locks in a fair 2497 * way at the expense of forcing extra atomic operations in all 2498 * invocations. This assures that the double_lock is acquired using the 2499 * same underlying policy as the spinlock_t on this architecture, which 2500 * reduces latency compared to the unfair variant below. However, it 2501 * also adds more overhead and therefore may reduce throughput. 2502 */ 2503 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2504 __releases(this_rq->lock) 2505 __acquires(busiest->lock) 2506 __acquires(this_rq->lock) 2507 { 2508 raw_spin_rq_unlock(this_rq); 2509 double_rq_lock(this_rq, busiest); 2510 2511 return 1; 2512 } 2513 2514 #else 2515 /* 2516 * Unfair double_lock_balance: Optimizes throughput at the expense of 2517 * latency by eliminating extra atomic operations when the locks are 2518 * already in proper order on entry. This favors lower CPU-ids and will 2519 * grant the double lock to lower CPUs over higher ids under contention, 2520 * regardless of entry order into the function. 2521 */ 2522 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2523 __releases(this_rq->lock) 2524 __acquires(busiest->lock) 2525 __acquires(this_rq->lock) 2526 { 2527 if (__rq_lockp(this_rq) == __rq_lockp(busiest)) 2528 return 0; 2529 2530 if (likely(raw_spin_rq_trylock(busiest))) 2531 return 0; 2532 2533 if (rq_order_less(this_rq, busiest)) { 2534 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); 2535 return 0; 2536 } 2537 2538 raw_spin_rq_unlock(this_rq); 2539 double_rq_lock(this_rq, busiest); 2540 2541 return 1; 2542 } 2543 2544 #endif /* CONFIG_PREEMPTION */ 2545 2546 /* 2547 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
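 *
 * Illustrative call pattern (a sketch, not lifted verbatim from a caller):
 * this_rq->lock is already held and IRQs are disabled; a non-zero return
 * means this_rq->lock was dropped and re-acquired, so any state derived
 * under it must be re-validated:
 *
 *	if (double_lock_balance(this_rq, busiest)) {
 *		... re-check this_rq state, its lock was dropped ...
 *	}
 *	... move tasks between the two runqueues ...
 *	double_unlock_balance(this_rq, busiest);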
2548 */ 2549 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2550 { 2551 lockdep_assert_irqs_disabled(); 2552 2553 return _double_lock_balance(this_rq, busiest); 2554 } 2555 2556 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2557 __releases(busiest->lock) 2558 { 2559 if (__rq_lockp(this_rq) != __rq_lockp(busiest)) 2560 raw_spin_rq_unlock(busiest); 2561 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); 2562 } 2563 2564 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 2565 { 2566 if (l1 > l2) 2567 swap(l1, l2); 2568 2569 spin_lock(l1); 2570 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2571 } 2572 2573 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 2574 { 2575 if (l1 > l2) 2576 swap(l1, l2); 2577 2578 spin_lock_irq(l1); 2579 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2580 } 2581 2582 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2583 { 2584 if (l1 > l2) 2585 swap(l1, l2); 2586 2587 raw_spin_lock(l1); 2588 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2589 } 2590 2591 /* 2592 * double_rq_unlock - safely unlock two runqueues 2593 * 2594 * Note this does not restore interrupts like task_rq_unlock, 2595 * you need to do so manually after calling. 2596 */ 2597 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2598 __releases(rq1->lock) 2599 __releases(rq2->lock) 2600 { 2601 if (__rq_lockp(rq1) != __rq_lockp(rq2)) 2602 raw_spin_rq_unlock(rq2); 2603 else 2604 __release(rq2->lock); 2605 raw_spin_rq_unlock(rq1); 2606 } 2607 2608 extern void set_rq_online (struct rq *rq); 2609 extern void set_rq_offline(struct rq *rq); 2610 extern bool sched_smp_initialized; 2611 2612 #else /* CONFIG_SMP */ 2613 2614 /* 2615 * double_rq_lock - safely lock two runqueues 2616 * 2617 * Note this does not disable interrupts like task_rq_lock, 2618 * you need to do so manually before calling. 2619 */ 2620 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2621 __acquires(rq1->lock) 2622 __acquires(rq2->lock) 2623 { 2624 BUG_ON(!irqs_disabled()); 2625 BUG_ON(rq1 != rq2); 2626 raw_spin_rq_lock(rq1); 2627 __acquire(rq2->lock); /* Fake it out ;) */ 2628 } 2629 2630 /* 2631 * double_rq_unlock - safely unlock two runqueues 2632 * 2633 * Note this does not restore interrupts like task_rq_unlock, 2634 * you need to do so manually after calling. 
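 *
 * A minimal illustrative pairing (in this !CONFIG_SMP variant rq1 == rq2),
 * showing that interrupt state is left entirely to the caller:
 *
 *	local_irq_save(flags);
 *	double_rq_lock(rq1, rq2);
 *	...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_restore(flags);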
2635 */ 2636 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2637 __releases(rq1->lock) 2638 __releases(rq2->lock) 2639 { 2640 BUG_ON(rq1 != rq2); 2641 raw_spin_rq_unlock(rq1); 2642 __release(rq2->lock); 2643 } 2644 2645 #endif 2646 2647 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2648 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 2649 2650 #ifdef CONFIG_SCHED_DEBUG 2651 extern bool sched_debug_verbose; 2652 2653 extern void print_cfs_stats(struct seq_file *m, int cpu); 2654 extern void print_rt_stats(struct seq_file *m, int cpu); 2655 extern void print_dl_stats(struct seq_file *m, int cpu); 2656 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2657 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2658 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2659 2660 extern void resched_latency_warn(int cpu, u64 latency); 2661 #ifdef CONFIG_NUMA_BALANCING 2662 extern void 2663 show_numa_stats(struct task_struct *p, struct seq_file *m); 2664 extern void 2665 print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2666 unsigned long tpf, unsigned long gsf, unsigned long gpf); 2667 #endif /* CONFIG_NUMA_BALANCING */ 2668 #else 2669 static inline void resched_latency_warn(int cpu, u64 latency) {} 2670 #endif /* CONFIG_SCHED_DEBUG */ 2671 2672 extern void init_cfs_rq(struct cfs_rq *cfs_rq); 2673 extern void init_rt_rq(struct rt_rq *rt_rq); 2674 extern void init_dl_rq(struct dl_rq *dl_rq); 2675 2676 extern void cfs_bandwidth_usage_inc(void); 2677 extern void cfs_bandwidth_usage_dec(void); 2678 2679 #ifdef CONFIG_NO_HZ_COMMON 2680 #define NOHZ_BALANCE_KICK_BIT 0 2681 #define NOHZ_STATS_KICK_BIT 1 2682 #define NOHZ_NEWILB_KICK_BIT 2 2683 2684 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2685 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2686 #define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT) 2687 2688 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) 2689 2690 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 2691 2692 extern void nohz_balance_exit_idle(struct rq *rq); 2693 #else 2694 static inline void nohz_balance_exit_idle(struct rq *rq) { } 2695 #endif 2696 2697 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 2698 extern void nohz_run_idle_balance(int cpu); 2699 #else 2700 static inline void nohz_run_idle_balance(int cpu) { } 2701 #endif 2702 2703 #ifdef CONFIG_SMP 2704 static inline 2705 void __dl_update(struct dl_bw *dl_b, s64 bw) 2706 { 2707 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 2708 int i; 2709 2710 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2711 "sched RCU must be held"); 2712 for_each_cpu_and(i, rd->span, cpu_active_mask) { 2713 struct rq *rq = cpu_rq(i); 2714 2715 rq->dl.extra_bw += bw; 2716 } 2717 } 2718 #else 2719 static inline 2720 void __dl_update(struct dl_bw *dl_b, s64 bw) 2721 { 2722 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 2723 2724 dl->extra_bw += bw; 2725 } 2726 #endif 2727 2728 2729 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 2730 struct irqtime { 2731 u64 total; 2732 u64 tick_delta; 2733 u64 irq_start_time; 2734 struct u64_stats_sync sync; 2735 }; 2736 2737 DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 2738 2739 /* 2740 * Returns the irqtime minus the softirq time computed by ksoftirqd. 2741 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime 2742 * subtracted from it and would never move forward.
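 *
 * The reader side below takes a consistent snapshot of ->total via the
 * u64_stats seqcount; the writer side (in cputime.c) updates it under
 * u64_stats_update_begin()/end() on the same ->sync structure.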
2743 */ 2744 static inline u64 irq_time_read(int cpu) 2745 { 2746 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 2747 unsigned int seq; 2748 u64 total; 2749 2750 do { 2751 seq = __u64_stats_fetch_begin(&irqtime->sync); 2752 total = irqtime->total; 2753 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 2754 2755 return total; 2756 } 2757 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2758 2759 #ifdef CONFIG_CPU_FREQ 2760 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2761 2762 /** 2763 * cpufreq_update_util - Take a note about CPU utilization changes. 2764 * @rq: Runqueue to carry out the update for. 2765 * @flags: Update reason flags. 2766 * 2767 * This function is called by the scheduler on the CPU whose utilization is 2768 * being updated. 2769 * 2770 * It can only be called from RCU-sched read-side critical sections. 2771 * 2772 * The way cpufreq is currently arranged requires it to evaluate the CPU 2773 * performance state (frequency/voltage) on a regular basis to prevent it from 2774 * being stuck in a completely inadequate performance level for too long. 2775 * That is not guaranteed to happen if the updates are only triggered from CFS 2776 * and DL, though, because they may not be coming in if only RT tasks are 2777 * active all the time (or there are RT tasks only). 2778 * 2779 * As a workaround for that issue, this function is called periodically by the 2780 * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2781 * but that really is a band-aid. Going forward it should be replaced with 2782 * solutions targeted more specifically at RT tasks. 2783 */ 2784 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2785 { 2786 struct update_util_data *data; 2787 2788 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2789 cpu_of(rq))); 2790 if (data) 2791 data->func(data, rq_clock(rq), flags); 2792 } 2793 #else 2794 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2795 #endif /* CONFIG_CPU_FREQ */ 2796 2797 #ifdef CONFIG_UCLAMP_TASK 2798 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 2799 2800 /** 2801 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. 2802 * @rq: The rq to clamp against. Must not be NULL. 2803 * @util: The util value to clamp. 2804 * @p: The task to clamp against. Can be NULL if you want to clamp 2805 * against @rq only. 2806 * 2807 * Clamps the passed @util to the max(@rq, @p) effective uclamp values. 2808 * 2809 * If sched_uclamp_used static key is disabled, then just return the util 2810 * without any clamping since uclamp aggregation at the rq level in the fast 2811 * path is disabled, rendering this operation a NOP. 2812 * 2813 * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It 2814 * will return the correct effective uclamp value of the task even if the 2815 * static key is disabled. 2816 */ 2817 static __always_inline 2818 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 2819 struct task_struct *p) 2820 { 2821 unsigned long min_util = 0; 2822 unsigned long max_util = 0; 2823 2824 if (!static_branch_likely(&sched_uclamp_used)) 2825 return util; 2826 2827 if (p) { 2828 min_util = uclamp_eff_value(p, UCLAMP_MIN); 2829 max_util = uclamp_eff_value(p, UCLAMP_MAX); 2830 2831 /* 2832 * Ignore last runnable task's max clamp, as this task will 2833 * reset it. Similarly, no need to read the rq's min clamp. 
2834 */ 2835 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) 2836 goto out; 2837 } 2838 2839 min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value)); 2840 max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value)); 2841 out: 2842 /* 2843 * Since CPU's {min,max}_util clamps are MAX aggregated considering 2844 * RUNNABLE tasks with _different_ clamps, we can end up with an 2845 * inversion. Fix it now when the clamps are applied. 2846 */ 2847 if (unlikely(min_util >= max_util)) 2848 return min_util; 2849 2850 return clamp(util, min_util, max_util); 2851 } 2852 2853 /* 2854 * When uclamp is compiled in, the aggregation at rq level is 'turned off' 2855 * by default in the fast path and only gets turned on once userspace performs 2856 * an operation that requires it. 2857 * 2858 * Returns true if userspace opted-in to use uclamp and aggregation at rq level 2859 * hence is active. 2860 */ 2861 static inline bool uclamp_is_used(void) 2862 { 2863 return static_branch_likely(&sched_uclamp_used); 2864 } 2865 #else /* CONFIG_UCLAMP_TASK */ 2866 static inline 2867 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 2868 struct task_struct *p) 2869 { 2870 return util; 2871 } 2872 2873 static inline bool uclamp_is_used(void) 2874 { 2875 return false; 2876 } 2877 #endif /* CONFIG_UCLAMP_TASK */ 2878 2879 #ifdef arch_scale_freq_capacity 2880 # ifndef arch_scale_freq_invariant 2881 # define arch_scale_freq_invariant() true 2882 # endif 2883 #else 2884 # define arch_scale_freq_invariant() false 2885 #endif 2886 2887 #ifdef CONFIG_SMP 2888 static inline unsigned long capacity_orig_of(int cpu) 2889 { 2890 return cpu_rq(cpu)->cpu_capacity_orig; 2891 } 2892 2893 /** 2894 * enum cpu_util_type - CPU utilization type 2895 * @FREQUENCY_UTIL: Utilization used to select frequency 2896 * @ENERGY_UTIL: Utilization used during energy calculation 2897 * 2898 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 2899 * need to be aggregated differently depending on the usage made of them. This 2900 * enum is used within effective_cpu_util() to differentiate the types of 2901 * utilization expected by the callers, and adjust the aggregation accordingly. 
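 *
 * As an illustration (not a verbatim caller), a frequency-selection path
 * would aggregate the signals roughly as:
 *
 *	max  = arch_scale_cpu_capacity(cpu);
 *	util = effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max,
 *				  FREQUENCY_UTIL, NULL);
 *
 * whereas the energy model side passes ENERGY_UTIL, which skips the
 * uclamp restrictions that only matter for frequency selection.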
2902 */ 2903 enum cpu_util_type { 2904 FREQUENCY_UTIL, 2905 ENERGY_UTIL, 2906 }; 2907 2908 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 2909 unsigned long max, enum cpu_util_type type, 2910 struct task_struct *p); 2911 2912 static inline unsigned long cpu_bw_dl(struct rq *rq) 2913 { 2914 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2915 } 2916 2917 static inline unsigned long cpu_util_dl(struct rq *rq) 2918 { 2919 return READ_ONCE(rq->avg_dl.util_avg); 2920 } 2921 2922 static inline unsigned long cpu_util_cfs(struct rq *rq) 2923 { 2924 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); 2925 2926 if (sched_feat(UTIL_EST)) { 2927 util = max_t(unsigned long, util, 2928 READ_ONCE(rq->cfs.avg.util_est.enqueued)); 2929 } 2930 2931 return util; 2932 } 2933 2934 static inline unsigned long cpu_util_rt(struct rq *rq) 2935 { 2936 return READ_ONCE(rq->avg_rt.util_avg); 2937 } 2938 #endif 2939 2940 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 2941 static inline unsigned long cpu_util_irq(struct rq *rq) 2942 { 2943 return rq->avg_irq.util_avg; 2944 } 2945 2946 static inline 2947 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2948 { 2949 util *= (max - irq); 2950 util /= max; 2951 2952 return util; 2953 2954 } 2955 #else 2956 static inline unsigned long cpu_util_irq(struct rq *rq) 2957 { 2958 return 0; 2959 } 2960 2961 static inline 2962 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2963 { 2964 return util; 2965 } 2966 #endif 2967 2968 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 2969 2970 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 2971 2972 DECLARE_STATIC_KEY_FALSE(sched_energy_present); 2973 2974 static inline bool sched_energy_enabled(void) 2975 { 2976 return static_branch_unlikely(&sched_energy_present); 2977 } 2978 2979 #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 2980 2981 #define perf_domain_span(pd) NULL 2982 static inline bool sched_energy_enabled(void) { return false; } 2983 2984 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2985 2986 #ifdef CONFIG_MEMBARRIER 2987 /* 2988 * The scheduler provides memory barriers required by membarrier between: 2989 * - prior user-space memory accesses and store to rq->membarrier_state, 2990 * - store to rq->membarrier_state and following user-space memory accesses. 2991 * In the same way it provides those guarantees around store to rq->curr. 
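 *
 * membarrier_switch_mm() below only snapshots the next mm's
 * membarrier_state into the runqueue; the ordering itself comes from the
 * barriers implied by the context switch (see the membarrier comments in
 * core.c and membarrier.c).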
2992 */ 2993 static inline void membarrier_switch_mm(struct rq *rq, 2994 struct mm_struct *prev_mm, 2995 struct mm_struct *next_mm) 2996 { 2997 int membarrier_state; 2998 2999 if (prev_mm == next_mm) 3000 return; 3001 3002 membarrier_state = atomic_read(&next_mm->membarrier_state); 3003 if (READ_ONCE(rq->membarrier_state) == membarrier_state) 3004 return; 3005 3006 WRITE_ONCE(rq->membarrier_state, membarrier_state); 3007 } 3008 #else 3009 static inline void membarrier_switch_mm(struct rq *rq, 3010 struct mm_struct *prev_mm, 3011 struct mm_struct *next_mm) 3012 { 3013 } 3014 #endif 3015 3016 #ifdef CONFIG_SMP 3017 static inline bool is_per_cpu_kthread(struct task_struct *p) 3018 { 3019 if (!(p->flags & PF_KTHREAD)) 3020 return false; 3021 3022 if (p->nr_cpus_allowed != 1) 3023 return false; 3024 3025 return true; 3026 } 3027 #endif 3028 3029 extern void swake_up_all_locked(struct swait_queue_head *q); 3030 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 3031 3032 #ifdef CONFIG_PREEMPT_DYNAMIC 3033 extern int preempt_dynamic_mode; 3034 extern int sched_dynamic_mode(const char *str); 3035 extern void sched_dynamic_update(int mode); 3036 #endif 3037 3038
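/*
 * Illustrative use of the PREEMPT_DYNAMIC hooks declared above (a sketch of
 * what the "preempt=" boot-parameter handling does, not a verbatim copy):
 *
 *	int mode = sched_dynamic_mode("voluntary");
 *	if (mode >= 0)
 *		sched_dynamic_update(mode);
 */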