1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Scheduler internal types and methods: 4 */ 5 #include <linux/sched.h> 6 7 #include <linux/sched/autogroup.h> 8 #include <linux/sched/clock.h> 9 #include <linux/sched/coredump.h> 10 #include <linux/sched/cpufreq.h> 11 #include <linux/sched/cputime.h> 12 #include <linux/sched/deadline.h> 13 #include <linux/sched/debug.h> 14 #include <linux/sched/hotplug.h> 15 #include <linux/sched/idle.h> 16 #include <linux/sched/init.h> 17 #include <linux/sched/isolation.h> 18 #include <linux/sched/jobctl.h> 19 #include <linux/sched/loadavg.h> 20 #include <linux/sched/mm.h> 21 #include <linux/sched/nohz.h> 22 #include <linux/sched/numa_balancing.h> 23 #include <linux/sched/prio.h> 24 #include <linux/sched/rt.h> 25 #include <linux/sched/signal.h> 26 #include <linux/sched/smt.h> 27 #include <linux/sched/stat.h> 28 #include <linux/sched/sysctl.h> 29 #include <linux/sched/task.h> 30 #include <linux/sched/task_stack.h> 31 #include <linux/sched/topology.h> 32 #include <linux/sched/user.h> 33 #include <linux/sched/wake_q.h> 34 #include <linux/sched/xacct.h> 35 36 #include <uapi/linux/sched/types.h> 37 38 #include <linux/binfmts.h> 39 #include <linux/bitops.h> 40 #include <linux/compat.h> 41 #include <linux/context_tracking.h> 42 #include <linux/cpufreq.h> 43 #include <linux/cpuidle.h> 44 #include <linux/cpuset.h> 45 #include <linux/ctype.h> 46 #include <linux/debugfs.h> 47 #include <linux/delayacct.h> 48 #include <linux/energy_model.h> 49 #include <linux/init_task.h> 50 #include <linux/kprobes.h> 51 #include <linux/kthread.h> 52 #include <linux/membarrier.h> 53 #include <linux/migrate.h> 54 #include <linux/mmu_context.h> 55 #include <linux/nmi.h> 56 #include <linux/proc_fs.h> 57 #include <linux/prefetch.h> 58 #include <linux/profile.h> 59 #include <linux/psi.h> 60 #include <linux/ratelimit.h> 61 #include <linux/rcupdate_wait.h> 62 #include <linux/security.h> 63 #include <linux/stop_machine.h> 64 #include <linux/suspend.h> 65 #include <linux/swait.h> 66 #include <linux/syscalls.h> 67 #include <linux/task_work.h> 68 #include <linux/tsacct_kern.h> 69 70 #include <asm/tlb.h> 71 72 #ifdef CONFIG_PARAVIRT 73 # include <asm/paravirt.h> 74 #endif 75 76 #include "cpupri.h" 77 #include "cpudeadline.h" 78 79 #include <trace/events/sched.h> 80 81 #ifdef CONFIG_SCHED_DEBUG 82 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) 83 #else 84 # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) 85 #endif 86 87 struct rq; 88 struct cpuidle_state; 89 90 /* task_struct::on_rq states: */ 91 #define TASK_ON_RQ_QUEUED 1 92 #define TASK_ON_RQ_MIGRATING 2 93 94 extern __read_mostly int scheduler_running; 95 96 extern unsigned long calc_load_update; 97 extern atomic_long_t calc_load_tasks; 98 99 extern void calc_global_load_tick(struct rq *this_rq); 100 extern long calc_load_fold_active(struct rq *this_rq, long adjust); 101 102 extern void call_trace_sched_update_nr_running(struct rq *rq, int count); 103 /* 104 * Helpers for converting nanosecond timing to jiffy resolution 105 */ 106 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) 107 108 /* 109 * Increase resolution of nice-level calculations for 64-bit architectures. 110 * The extra resolution improves shares distribution and load balancing of 111 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 112 * hierarchies, especially on larger systems. This is not a user-visible change 113 * and does not change the user-interface for setting shares/weights. 
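 *
 * As a worked illustration (assuming a 64-bit kernel and the current
 * SCHED_FIXEDPOINT_SHIFT of 10), the user-visible nice-0 weight of 1024
 * round-trips through the conversion helpers defined below as:
 *
 *	scale_load(1024)	 == 1024 << 10 == 1048576 == NICE_0_LOAD
 *	scale_load_down(1048576) == max(2, 1048576 >> 10) == 1024
 *
 * i.e. only the internal fixed-point resolution grows; the weights that
 * user-space sees are unchanged.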
114 * 115 * We increase resolution only if we have enough bits to allow this increased 116 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit 117 * are pretty high and the returns do not justify the increased costs. 118 * 119 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to 120 * increase coverage and consistency always enable it on 64-bit platforms. 121 */ 122 #ifdef CONFIG_64BIT 123 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) 124 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) 125 # define scale_load_down(w) \ 126 ({ \ 127 unsigned long __w = (w); \ 128 if (__w) \ 129 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ 130 __w; \ 131 }) 132 #else 133 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) 134 # define scale_load(w) (w) 135 # define scale_load_down(w) (w) 136 #endif 137 138 /* 139 * Task weight (visible to users) and its load (invisible to users) have 140 * independent resolution, but they should be well calibrated. We use 141 * scale_load() and scale_load_down(w) to convert between them. The 142 * following must be true: 143 * 144 * scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD 145 * 146 */ 147 #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) 148 149 /* 150 * Single value that decides SCHED_DEADLINE internal math precision. 151 * 10 -> just above 1us 152 * 9 -> just above 0.5us 153 */ 154 #define DL_SCALE 10 155 156 /* 157 * Single value that denotes runtime == period, ie unlimited time. 158 */ 159 #define RUNTIME_INF ((u64)~0ULL) 160 161 static inline int idle_policy(int policy) 162 { 163 return policy == SCHED_IDLE; 164 } 165 static inline int fair_policy(int policy) 166 { 167 return policy == SCHED_NORMAL || policy == SCHED_BATCH; 168 } 169 170 static inline int rt_policy(int policy) 171 { 172 return policy == SCHED_FIFO || policy == SCHED_RR; 173 } 174 175 static inline int dl_policy(int policy) 176 { 177 return policy == SCHED_DEADLINE; 178 } 179 static inline bool valid_policy(int policy) 180 { 181 return idle_policy(policy) || fair_policy(policy) || 182 rt_policy(policy) || dl_policy(policy); 183 } 184 185 static inline int task_has_idle_policy(struct task_struct *p) 186 { 187 return idle_policy(p->policy); 188 } 189 190 static inline int task_has_rt_policy(struct task_struct *p) 191 { 192 return rt_policy(p->policy); 193 } 194 195 static inline int task_has_dl_policy(struct task_struct *p) 196 { 197 return dl_policy(p->policy); 198 } 199 200 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 201 202 static inline void update_avg(u64 *avg, u64 sample) 203 { 204 s64 diff = sample - *avg; 205 *avg += diff / 8; 206 } 207 208 /* 209 * Shifting a value by an exponent greater *or equal* to the size of said value 210 * is UB; cap at size-1. 211 */ 212 #define shr_bound(val, shift) \ 213 (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1)) 214 215 /* 216 * !! For sched_setattr_nocheck() (kernel) only !! 217 * 218 * This is actually gross. :( 219 * 220 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE 221 * tasks, but still be able to sleep. We need this on platforms that cannot 222 * atomically change clock frequency. Remove once fast switching will be 223 * available on such platforms. 224 * 225 * SUGOV stands for SchedUtil GOVernor. 
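 *
 * For reference, the schedutil governor sets its kworker up roughly along
 * these lines (a sketch only; see kernel/sched/cpufreq_schedutil.c for the
 * authoritative version; kworker_task stands in for the per-policy kthread):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_flags	= SCHED_FLAG_SUGOV,
 *	};
 *
 *	sched_setattr_nocheck(kworker_task, &attr);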
 */
#define SCHED_FLAG_SUGOV	0x10000000

#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/*
 * To keep the bandwidth of -deadline tasks under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of each CPU;
 *  - cache the fraction of bandwidth that is currently allocated in
 *    each root domain;
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, bandwidth is given on a per root domain basis,
 * meaning that:
 *  - bw (< 100%) is the deadline bandwidth of each CPU;
 *  - total_bw is the currently allocated bandwidth in each root domain;
 */
struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
				 u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}

/*
 * Verify the fitness of task @p to run on @cpu taking into account the
 * original capacity of the CPU and the runtime/deadline ratio of the task.
 *
 * The function returns true if the original capacity of @cpu, scaled by
 * SCHED_CAPACITY_SCALE, is greater than or equal to the runtime/deadline
 * ratio of the task, and false otherwise.
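 *
 * A worked example (assuming SCHED_CAPACITY_SCALE == 1024): a task with
 * dl_runtime = 4ms and dl_deadline = 10ms needs roughly 40% of a
 * full-capacity CPU. On a CPU of capacity 512 the check below becomes
 *
 *	cap_scale(10ms, 512) == (10ms * 512) >> 10 == 5ms >= 4ms
 *
 * so the task fits, whereas on a capacity-256 CPU the scaled deadline is
 * only 2.5ms < 4ms and the task does not fit.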
334 */ 335 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) 336 { 337 unsigned long cap = arch_scale_cpu_capacity(cpu); 338 339 return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime; 340 } 341 342 extern void init_dl_bw(struct dl_bw *dl_b); 343 extern int sched_dl_global_validate(void); 344 extern void sched_dl_do_global(void); 345 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); 346 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); 347 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); 348 extern bool __checkparam_dl(const struct sched_attr *attr); 349 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); 350 extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); 351 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 352 extern bool dl_cpu_busy(unsigned int cpu); 353 354 #ifdef CONFIG_CGROUP_SCHED 355 356 #include <linux/cgroup.h> 357 #include <linux/psi.h> 358 359 struct cfs_rq; 360 struct rt_rq; 361 362 extern struct list_head task_groups; 363 364 struct cfs_bandwidth { 365 #ifdef CONFIG_CFS_BANDWIDTH 366 raw_spinlock_t lock; 367 ktime_t period; 368 u64 quota; 369 u64 runtime; 370 u64 burst; 371 u64 runtime_snap; 372 s64 hierarchical_quota; 373 374 u8 idle; 375 u8 period_active; 376 u8 slack_started; 377 struct hrtimer period_timer; 378 struct hrtimer slack_timer; 379 struct list_head throttled_cfs_rq; 380 381 /* Statistics: */ 382 int nr_periods; 383 int nr_throttled; 384 int nr_burst; 385 u64 throttled_time; 386 u64 burst_time; 387 #endif 388 }; 389 390 /* Task group related information */ 391 struct task_group { 392 struct cgroup_subsys_state css; 393 394 #ifdef CONFIG_FAIR_GROUP_SCHED 395 /* schedulable entities of this group on each CPU */ 396 struct sched_entity **se; 397 /* runqueue "owned" by this group on each CPU */ 398 struct cfs_rq **cfs_rq; 399 unsigned long shares; 400 401 /* A positive value indicates that this is a SCHED_IDLE group. */ 402 int idle; 403 404 #ifdef CONFIG_SMP 405 /* 406 * load_avg can be heavily contended at clock tick time, so put 407 * it in its own cacheline separated from the fields above which 408 * will also be accessed at each tick. 409 */ 410 atomic_long_t load_avg ____cacheline_aligned; 411 #endif 412 #endif 413 414 #ifdef CONFIG_RT_GROUP_SCHED 415 struct sched_rt_entity **rt_se; 416 struct rt_rq **rt_rq; 417 418 struct rt_bandwidth rt_bandwidth; 419 #endif 420 421 struct rcu_head rcu; 422 struct list_head list; 423 424 struct task_group *parent; 425 struct list_head siblings; 426 struct list_head children; 427 428 #ifdef CONFIG_SCHED_AUTOGROUP 429 struct autogroup *autogroup; 430 #endif 431 432 struct cfs_bandwidth cfs_bandwidth; 433 434 #ifdef CONFIG_UCLAMP_TASK_GROUP 435 /* The two decimal precision [%] value requested from user-space */ 436 unsigned int uclamp_pct[UCLAMP_CNT]; 437 /* Clamp values requested for a task group */ 438 struct uclamp_se uclamp_req[UCLAMP_CNT]; 439 /* Effective clamp values used for a task group */ 440 struct uclamp_se uclamp[UCLAMP_CNT]; 441 #endif 442 443 }; 444 445 #ifdef CONFIG_FAIR_GROUP_SCHED 446 #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD 447 448 /* 449 * A weight of 0 or 1 can cause arithmetics problems. 
450 * A weight of a cfs_rq is the sum of weights of which entities 451 * are queued on this cfs_rq, so a weight of a entity should not be 452 * too large, so as the shares value of a task group. 453 * (The default weight is 1024 - so there's no practical 454 * limitation from this.) 455 */ 456 #define MIN_SHARES (1UL << 1) 457 #define MAX_SHARES (1UL << 18) 458 #endif 459 460 typedef int (*tg_visitor)(struct task_group *, void *); 461 462 extern int walk_tg_tree_from(struct task_group *from, 463 tg_visitor down, tg_visitor up, void *data); 464 465 /* 466 * Iterate the full tree, calling @down when first entering a node and @up when 467 * leaving it for the final time. 468 * 469 * Caller must hold rcu_lock or sufficient equivalent. 470 */ 471 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) 472 { 473 return walk_tg_tree_from(&root_task_group, down, up, data); 474 } 475 476 extern int tg_nop(struct task_group *tg, void *data); 477 478 extern void free_fair_sched_group(struct task_group *tg); 479 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); 480 extern void online_fair_sched_group(struct task_group *tg); 481 extern void unregister_fair_sched_group(struct task_group *tg); 482 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 483 struct sched_entity *se, int cpu, 484 struct sched_entity *parent); 485 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 486 487 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); 488 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 489 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); 490 491 extern void free_rt_sched_group(struct task_group *tg); 492 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); 493 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, 494 struct sched_rt_entity *rt_se, int cpu, 495 struct sched_rt_entity *parent); 496 extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); 497 extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); 498 extern long sched_group_rt_runtime(struct task_group *tg); 499 extern long sched_group_rt_period(struct task_group *tg); 500 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 501 502 extern struct task_group *sched_create_group(struct task_group *parent); 503 extern void sched_online_group(struct task_group *tg, 504 struct task_group *parent); 505 extern void sched_destroy_group(struct task_group *tg); 506 extern void sched_offline_group(struct task_group *tg); 507 508 extern void sched_move_task(struct task_struct *tsk); 509 510 #ifdef CONFIG_FAIR_GROUP_SCHED 511 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 512 513 extern int sched_group_set_idle(struct task_group *tg, long idle); 514 515 #ifdef CONFIG_SMP 516 extern void set_task_rq_fair(struct sched_entity *se, 517 struct cfs_rq *prev, struct cfs_rq *next); 518 #else /* !CONFIG_SMP */ 519 static inline void set_task_rq_fair(struct sched_entity *se, 520 struct cfs_rq *prev, struct cfs_rq *next) { } 521 #endif /* CONFIG_SMP */ 522 #endif /* CONFIG_FAIR_GROUP_SCHED */ 523 524 #else /* CONFIG_CGROUP_SCHED */ 525 526 struct cfs_bandwidth { }; 527 528 #endif /* CONFIG_CGROUP_SCHED */ 529 530 /* CFS-related fields in a runqueue */ 531 struct cfs_rq { 532 struct load_weight load; 533 unsigned int nr_running; 534 unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} 
*/ 535 unsigned int idle_nr_running; /* SCHED_IDLE */ 536 unsigned int idle_h_nr_running; /* SCHED_IDLE */ 537 538 u64 exec_clock; 539 u64 min_vruntime; 540 #ifdef CONFIG_SCHED_CORE 541 unsigned int forceidle_seq; 542 u64 min_vruntime_fi; 543 #endif 544 545 #ifndef CONFIG_64BIT 546 u64 min_vruntime_copy; 547 #endif 548 549 struct rb_root_cached tasks_timeline; 550 551 /* 552 * 'curr' points to currently running entity on this cfs_rq. 553 * It is set to NULL otherwise (i.e when none are currently running). 554 */ 555 struct sched_entity *curr; 556 struct sched_entity *next; 557 struct sched_entity *last; 558 struct sched_entity *skip; 559 560 #ifdef CONFIG_SCHED_DEBUG 561 unsigned int nr_spread_over; 562 #endif 563 564 #ifdef CONFIG_SMP 565 /* 566 * CFS load tracking 567 */ 568 struct sched_avg avg; 569 #ifndef CONFIG_64BIT 570 u64 load_last_update_time_copy; 571 #endif 572 struct { 573 raw_spinlock_t lock ____cacheline_aligned; 574 int nr; 575 unsigned long load_avg; 576 unsigned long util_avg; 577 unsigned long runnable_avg; 578 } removed; 579 580 #ifdef CONFIG_FAIR_GROUP_SCHED 581 unsigned long tg_load_avg_contrib; 582 long propagate; 583 long prop_runnable_sum; 584 585 /* 586 * h_load = weight * f(tg) 587 * 588 * Where f(tg) is the recursive weight fraction assigned to 589 * this group. 590 */ 591 unsigned long h_load; 592 u64 last_h_load_update; 593 struct sched_entity *h_load_next; 594 #endif /* CONFIG_FAIR_GROUP_SCHED */ 595 #endif /* CONFIG_SMP */ 596 597 #ifdef CONFIG_FAIR_GROUP_SCHED 598 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ 599 600 /* 601 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 602 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 603 * (like users, containers etc.) 604 * 605 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. 606 * This list is used during load balance. 
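	 *
	 * A minimal consumer sketch (assumes rq->lock is held, as it is in
	 * the load-balancing paths; decay_one() is a hypothetical callee):
	 *
	 *	struct cfs_rq *it;
	 *
	 *	list_for_each_entry(it, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
	 *		decay_one(it);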
607 */ 608 int on_list; 609 struct list_head leaf_cfs_rq_list; 610 struct task_group *tg; /* group that "owns" this runqueue */ 611 612 /* Locally cached copy of our task_group's idle value */ 613 int idle; 614 615 #ifdef CONFIG_CFS_BANDWIDTH 616 int runtime_enabled; 617 s64 runtime_remaining; 618 619 u64 throttled_clock; 620 u64 throttled_clock_task; 621 u64 throttled_clock_task_time; 622 int throttled; 623 int throttle_count; 624 struct list_head throttled_list; 625 #endif /* CONFIG_CFS_BANDWIDTH */ 626 #endif /* CONFIG_FAIR_GROUP_SCHED */ 627 }; 628 629 static inline int rt_bandwidth_enabled(void) 630 { 631 return sysctl_sched_rt_runtime >= 0; 632 } 633 634 /* RT IPI pull logic requires IRQ_WORK */ 635 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) 636 # define HAVE_RT_PUSH_IPI 637 #endif 638 639 /* Real-Time classes' related field in a runqueue: */ 640 struct rt_rq { 641 struct rt_prio_array active; 642 unsigned int rt_nr_running; 643 unsigned int rr_nr_running; 644 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 645 struct { 646 int curr; /* highest queued rt task prio */ 647 #ifdef CONFIG_SMP 648 int next; /* next highest */ 649 #endif 650 } highest_prio; 651 #endif 652 #ifdef CONFIG_SMP 653 unsigned int rt_nr_migratory; 654 unsigned int rt_nr_total; 655 int overloaded; 656 struct plist_head pushable_tasks; 657 658 #endif /* CONFIG_SMP */ 659 int rt_queued; 660 661 int rt_throttled; 662 u64 rt_time; 663 u64 rt_runtime; 664 /* Nests inside the rq lock: */ 665 raw_spinlock_t rt_runtime_lock; 666 667 #ifdef CONFIG_RT_GROUP_SCHED 668 unsigned int rt_nr_boosted; 669 670 struct rq *rq; 671 struct task_group *tg; 672 #endif 673 }; 674 675 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) 676 { 677 return rt_rq->rt_queued && rt_rq->rt_nr_running; 678 } 679 680 /* Deadline class' related fields in a runqueue */ 681 struct dl_rq { 682 /* runqueue is an rbtree, ordered by deadline */ 683 struct rb_root_cached root; 684 685 unsigned int dl_nr_running; 686 687 #ifdef CONFIG_SMP 688 /* 689 * Deadline values of the currently executing and the 690 * earliest ready task on this rq. Caching these facilitates 691 * the decision whether or not a ready but not running task 692 * should migrate somewhere else. 693 */ 694 struct { 695 u64 curr; 696 u64 next; 697 } earliest_dl; 698 699 unsigned int dl_nr_migratory; 700 int overloaded; 701 702 /* 703 * Tasks on this rq that can be pushed away. They are kept in 704 * an rb-tree, ordered by tasks' deadlines, with caching 705 * of the leftmost (earliest deadline) element. 706 */ 707 struct rb_root_cached pushable_dl_tasks_root; 708 #else 709 struct dl_bw dl_bw; 710 #endif 711 /* 712 * "Active utilization" for this runqueue: increased when a 713 * task wakes up (becomes TASK_RUNNING) and decreased when a 714 * task blocks 715 */ 716 u64 running_bw; 717 718 /* 719 * Utilization of the tasks "assigned" to this runqueue (including 720 * the tasks that are in runqueue and the tasks that executed on this 721 * CPU and blocked). Increased when a task moves to this runqueue, and 722 * decreased when the task moves away (migrates, changes scheduling 723 * policy, or terminates). 724 * This is needed to compute the "inactive utilization" for the 725 * runqueue (inactive utilization = this_bw - running_bw). 726 */ 727 u64 this_bw; 728 u64 extra_bw; 729 730 /* 731 * Inverse of the fraction of CPU utilization that can be reclaimed 732 * by the GRUB algorithm. 
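	 *
	 * Worked example of the bookkeeping above (a sketch, bandwidths
	 * shown as fractions rather than in fixed point): two -deadline
	 * tasks of bandwidth 0.25 each are assigned to this runqueue and
	 * one of them is currently blocked, so
	 *
	 *	this_bw  = 0.50, running_bw = 0.25,
	 *	inactive = this_bw - running_bw = 0.25.
	 *
	 * With the default rt limits (runtime 950000us per 1s period) the
	 * fraction reclaimable by GRUB is 0.95, so bw_ratio holds roughly
	 * 1/0.95 in fixed point (assuming those defaults).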
733 */ 734 u64 bw_ratio; 735 }; 736 737 #ifdef CONFIG_FAIR_GROUP_SCHED 738 /* An entity is a task if it doesn't "own" a runqueue */ 739 #define entity_is_task(se) (!se->my_q) 740 741 static inline void se_update_runnable(struct sched_entity *se) 742 { 743 if (!entity_is_task(se)) 744 se->runnable_weight = se->my_q->h_nr_running; 745 } 746 747 static inline long se_runnable(struct sched_entity *se) 748 { 749 if (entity_is_task(se)) 750 return !!se->on_rq; 751 else 752 return se->runnable_weight; 753 } 754 755 #else 756 #define entity_is_task(se) 1 757 758 static inline void se_update_runnable(struct sched_entity *se) {} 759 760 static inline long se_runnable(struct sched_entity *se) 761 { 762 return !!se->on_rq; 763 } 764 #endif 765 766 #ifdef CONFIG_SMP 767 /* 768 * XXX we want to get rid of these helpers and use the full load resolution. 769 */ 770 static inline long se_weight(struct sched_entity *se) 771 { 772 return scale_load_down(se->load.weight); 773 } 774 775 776 static inline bool sched_asym_prefer(int a, int b) 777 { 778 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); 779 } 780 781 struct perf_domain { 782 struct em_perf_domain *em_pd; 783 struct perf_domain *next; 784 struct rcu_head rcu; 785 }; 786 787 /* Scheduling group status flags */ 788 #define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ 789 #define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ 790 791 /* 792 * We add the notion of a root-domain which will be used to define per-domain 793 * variables. Each exclusive cpuset essentially defines an island domain by 794 * fully partitioning the member CPUs from any other cpuset. Whenever a new 795 * exclusive cpuset is created, we also create and attach a new root-domain 796 * object. 797 * 798 */ 799 struct root_domain { 800 atomic_t refcount; 801 atomic_t rto_count; 802 struct rcu_head rcu; 803 cpumask_var_t span; 804 cpumask_var_t online; 805 806 /* 807 * Indicate pullable load on at least one CPU, e.g: 808 * - More than one runnable task 809 * - Running task is misfit 810 */ 811 int overload; 812 813 /* Indicate one or more cpus over-utilized (tipping point) */ 814 int overutilized; 815 816 /* 817 * The bit corresponding to a CPU gets set here if such CPU has more 818 * than one runnable -deadline task (as it is below for RT tasks). 819 */ 820 cpumask_var_t dlo_mask; 821 atomic_t dlo_count; 822 struct dl_bw dl_bw; 823 struct cpudl cpudl; 824 825 /* 826 * Indicate whether a root_domain's dl_bw has been checked or 827 * updated. It's monotonously increasing value. 828 * 829 * Also, some corner cases, like 'wrap around' is dangerous, but given 830 * that u64 is 'big enough'. So that shouldn't be a concern. 831 */ 832 u64 visit_gen; 833 834 #ifdef HAVE_RT_PUSH_IPI 835 /* 836 * For IPI pull requests, loop across the rto_mask. 837 */ 838 struct irq_work rto_push_work; 839 raw_spinlock_t rto_lock; 840 /* These are only updated and read within rto_lock */ 841 int rto_loop; 842 int rto_cpu; 843 /* These atomics are updated outside of a lock */ 844 atomic_t rto_loop_next; 845 atomic_t rto_loop_start; 846 #endif 847 /* 848 * The "RT overload" flag: it gets set if a CPU has more than 849 * one runnable RT task. 850 */ 851 cpumask_var_t rto_mask; 852 struct cpupri cpupri; 853 854 unsigned long max_cpu_capacity; 855 856 /* 857 * NULL-terminated list of performance domains intersecting with the 858 * CPUs of the rd. Protected by RCU. 
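	 *
	 * A minimal traversal sketch (assumes the caller is inside an RCU
	 * read-side critical section, as the EAS wake-up path is;
	 * visit_pd() is a hypothetical callee):
	 *
	 *	struct perf_domain *pd = rcu_dereference(rd->pd);
	 *
	 *	for (; pd; pd = pd->next)
	 *		visit_pd(pd->em_pd);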
859 */ 860 struct perf_domain __rcu *pd; 861 }; 862 863 extern void init_defrootdomain(void); 864 extern int sched_init_domains(const struct cpumask *cpu_map); 865 extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 866 extern void sched_get_rd(struct root_domain *rd); 867 extern void sched_put_rd(struct root_domain *rd); 868 869 #ifdef HAVE_RT_PUSH_IPI 870 extern void rto_push_irq_work_func(struct irq_work *work); 871 #endif 872 #endif /* CONFIG_SMP */ 873 874 #ifdef CONFIG_UCLAMP_TASK 875 /* 876 * struct uclamp_bucket - Utilization clamp bucket 877 * @value: utilization clamp value for tasks on this clamp bucket 878 * @tasks: number of RUNNABLE tasks on this clamp bucket 879 * 880 * Keep track of how many tasks are RUNNABLE for a given utilization 881 * clamp value. 882 */ 883 struct uclamp_bucket { 884 unsigned long value : bits_per(SCHED_CAPACITY_SCALE); 885 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); 886 }; 887 888 /* 889 * struct uclamp_rq - rq's utilization clamp 890 * @value: currently active clamp values for a rq 891 * @bucket: utilization clamp buckets affecting a rq 892 * 893 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. 894 * A clamp value is affecting a rq when there is at least one task RUNNABLE 895 * (or actually running) with that value. 896 * 897 * There are up to UCLAMP_CNT possible different clamp values, currently there 898 * are only two: minimum utilization and maximum utilization. 899 * 900 * All utilization clamping values are MAX aggregated, since: 901 * - for util_min: we want to run the CPU at least at the max of the minimum 902 * utilization required by its currently RUNNABLE tasks. 903 * - for util_max: we want to allow the CPU to run up to the max of the 904 * maximum utilization allowed by its currently RUNNABLE tasks. 905 * 906 * Since on each system we expect only a limited number of different 907 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track 908 * the metrics required to compute all the per-rq utilization clamp values. 909 */ 910 struct uclamp_rq { 911 unsigned int value; 912 struct uclamp_bucket bucket[UCLAMP_BUCKETS]; 913 }; 914 915 DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); 916 #endif /* CONFIG_UCLAMP_TASK */ 917 918 /* 919 * This is the main, per-CPU runqueue data structure. 920 * 921 * Locking rule: those places that want to lock multiple runqueues 922 * (such as the load balancing or the thread migration code), lock 923 * acquire operations must be ordered by ascending &runqueue. 924 */ 925 struct rq { 926 /* runqueue lock: */ 927 raw_spinlock_t __lock; 928 929 /* 930 * nr_running and cpu_load should be in the same cacheline because 931 * remote CPUs use both these fields when doing load calculation. 
932 */ 933 unsigned int nr_running; 934 #ifdef CONFIG_NUMA_BALANCING 935 unsigned int nr_numa_running; 936 unsigned int nr_preferred_running; 937 unsigned int numa_migrate_on; 938 #endif 939 #ifdef CONFIG_NO_HZ_COMMON 940 #ifdef CONFIG_SMP 941 unsigned long last_blocked_load_update_tick; 942 unsigned int has_blocked_load; 943 call_single_data_t nohz_csd; 944 #endif /* CONFIG_SMP */ 945 unsigned int nohz_tick_stopped; 946 atomic_t nohz_flags; 947 #endif /* CONFIG_NO_HZ_COMMON */ 948 949 #ifdef CONFIG_SMP 950 unsigned int ttwu_pending; 951 #endif 952 u64 nr_switches; 953 954 #ifdef CONFIG_UCLAMP_TASK 955 /* Utilization clamp values based on CPU's RUNNABLE tasks */ 956 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; 957 unsigned int uclamp_flags; 958 #define UCLAMP_FLAG_IDLE 0x01 959 #endif 960 961 struct cfs_rq cfs; 962 struct rt_rq rt; 963 struct dl_rq dl; 964 965 #ifdef CONFIG_FAIR_GROUP_SCHED 966 /* list of leaf cfs_rq on this CPU: */ 967 struct list_head leaf_cfs_rq_list; 968 struct list_head *tmp_alone_branch; 969 #endif /* CONFIG_FAIR_GROUP_SCHED */ 970 971 /* 972 * This is part of a global counter where only the total sum 973 * over all CPUs matters. A task can increase this counter on 974 * one CPU and if it got migrated afterwards it may decrease 975 * it on another CPU. Always updated under the runqueue lock: 976 */ 977 unsigned int nr_uninterruptible; 978 979 struct task_struct __rcu *curr; 980 struct task_struct *idle; 981 struct task_struct *stop; 982 unsigned long next_balance; 983 struct mm_struct *prev_mm; 984 985 unsigned int clock_update_flags; 986 u64 clock; 987 /* Ensure that all clocks are in the same cache line */ 988 u64 clock_task ____cacheline_aligned; 989 u64 clock_pelt; 990 unsigned long lost_idle_time; 991 992 atomic_t nr_iowait; 993 994 #ifdef CONFIG_SCHED_DEBUG 995 u64 last_seen_need_resched_ns; 996 int ticks_without_resched; 997 #endif 998 999 #ifdef CONFIG_MEMBARRIER 1000 int membarrier_state; 1001 #endif 1002 1003 #ifdef CONFIG_SMP 1004 struct root_domain *rd; 1005 struct sched_domain __rcu *sd; 1006 1007 unsigned long cpu_capacity; 1008 unsigned long cpu_capacity_orig; 1009 1010 struct callback_head *balance_callback; 1011 1012 unsigned char nohz_idle_balance; 1013 unsigned char idle_balance; 1014 1015 unsigned long misfit_task_load; 1016 1017 /* For active balancing */ 1018 int active_balance; 1019 int push_cpu; 1020 struct cpu_stop_work active_balance_work; 1021 1022 /* CPU of this runqueue: */ 1023 int cpu; 1024 int online; 1025 1026 struct list_head cfs_tasks; 1027 1028 struct sched_avg avg_rt; 1029 struct sched_avg avg_dl; 1030 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 1031 struct sched_avg avg_irq; 1032 #endif 1033 #ifdef CONFIG_SCHED_THERMAL_PRESSURE 1034 struct sched_avg avg_thermal; 1035 #endif 1036 u64 idle_stamp; 1037 u64 avg_idle; 1038 1039 unsigned long wake_stamp; 1040 u64 wake_avg_idle; 1041 1042 /* This is used to determine avg_idle's max value */ 1043 u64 max_idle_balance_cost; 1044 1045 #ifdef CONFIG_HOTPLUG_CPU 1046 struct rcuwait hotplug_wait; 1047 #endif 1048 #endif /* CONFIG_SMP */ 1049 1050 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 1051 u64 prev_irq_time; 1052 #endif 1053 #ifdef CONFIG_PARAVIRT 1054 u64 prev_steal_time; 1055 #endif 1056 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 1057 u64 prev_steal_time_rq; 1058 #endif 1059 1060 /* calc_load related fields */ 1061 unsigned long calc_load_update; 1062 long calc_load_active; 1063 1064 #ifdef CONFIG_SCHED_HRTICK 1065 #ifdef CONFIG_SMP 1066 call_single_data_t hrtick_csd; 1067 #endif 1068 struct 
hrtimer hrtick_timer; 1069 ktime_t hrtick_time; 1070 #endif 1071 1072 #ifdef CONFIG_SCHEDSTATS 1073 /* latency stats */ 1074 struct sched_info rq_sched_info; 1075 unsigned long long rq_cpu_time; 1076 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 1077 1078 /* sys_sched_yield() stats */ 1079 unsigned int yld_count; 1080 1081 /* schedule() stats */ 1082 unsigned int sched_count; 1083 unsigned int sched_goidle; 1084 1085 /* try_to_wake_up() stats */ 1086 unsigned int ttwu_count; 1087 unsigned int ttwu_local; 1088 #endif 1089 1090 #ifdef CONFIG_CPU_IDLE 1091 /* Must be inspected within a rcu lock section */ 1092 struct cpuidle_state *idle_state; 1093 #endif 1094 1095 #ifdef CONFIG_SMP 1096 unsigned int nr_pinned; 1097 #endif 1098 unsigned int push_busy; 1099 struct cpu_stop_work push_work; 1100 1101 #ifdef CONFIG_SCHED_CORE 1102 /* per rq */ 1103 struct rq *core; 1104 struct task_struct *core_pick; 1105 unsigned int core_enabled; 1106 unsigned int core_sched_seq; 1107 struct rb_root core_tree; 1108 1109 /* shared state -- careful with sched_core_cpu_deactivate() */ 1110 unsigned int core_task_seq; 1111 unsigned int core_pick_seq; 1112 unsigned long core_cookie; 1113 unsigned char core_forceidle; 1114 unsigned int core_forceidle_seq; 1115 #endif 1116 }; 1117 1118 #ifdef CONFIG_FAIR_GROUP_SCHED 1119 1120 /* CPU runqueue to which this cfs_rq is attached */ 1121 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1122 { 1123 return cfs_rq->rq; 1124 } 1125 1126 #else 1127 1128 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1129 { 1130 return container_of(cfs_rq, struct rq, cfs); 1131 } 1132 #endif 1133 1134 static inline int cpu_of(struct rq *rq) 1135 { 1136 #ifdef CONFIG_SMP 1137 return rq->cpu; 1138 #else 1139 return 0; 1140 #endif 1141 } 1142 1143 #define MDF_PUSH 0x01 1144 1145 static inline bool is_migration_disabled(struct task_struct *p) 1146 { 1147 #ifdef CONFIG_SMP 1148 return p->migration_disabled; 1149 #else 1150 return false; 1151 #endif 1152 } 1153 1154 struct sched_group; 1155 #ifdef CONFIG_SCHED_CORE 1156 static inline struct cpumask *sched_group_span(struct sched_group *sg); 1157 1158 DECLARE_STATIC_KEY_FALSE(__sched_core_enabled); 1159 1160 static inline bool sched_core_enabled(struct rq *rq) 1161 { 1162 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; 1163 } 1164 1165 static inline bool sched_core_disabled(void) 1166 { 1167 return !static_branch_unlikely(&__sched_core_enabled); 1168 } 1169 1170 /* 1171 * Be careful with this function; not for general use. The return value isn't 1172 * stable unless you actually hold a relevant rq->__lock. 1173 */ 1174 static inline raw_spinlock_t *rq_lockp(struct rq *rq) 1175 { 1176 if (sched_core_enabled(rq)) 1177 return &rq->core->__lock; 1178 1179 return &rq->__lock; 1180 } 1181 1182 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) 1183 { 1184 if (rq->core_enabled) 1185 return &rq->core->__lock; 1186 1187 return &rq->__lock; 1188 } 1189 1190 bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi); 1191 1192 /* 1193 * Helpers to check if the CPU's core cookie matches with the task's cookie 1194 * when core scheduling is enabled. 1195 * A special case is that the task's cookie always matches with CPU's core 1196 * cookie if the CPU is in an idle core. 1197 */ 1198 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) 1199 { 1200 /* Ignore cookie match if core scheduler is not enabled on the CPU. 
*/ 1201 if (!sched_core_enabled(rq)) 1202 return true; 1203 1204 return rq->core->core_cookie == p->core_cookie; 1205 } 1206 1207 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) 1208 { 1209 bool idle_core = true; 1210 int cpu; 1211 1212 /* Ignore cookie match if core scheduler is not enabled on the CPU. */ 1213 if (!sched_core_enabled(rq)) 1214 return true; 1215 1216 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { 1217 if (!available_idle_cpu(cpu)) { 1218 idle_core = false; 1219 break; 1220 } 1221 } 1222 1223 /* 1224 * A CPU in an idle core is always the best choice for tasks with 1225 * cookies. 1226 */ 1227 return idle_core || rq->core->core_cookie == p->core_cookie; 1228 } 1229 1230 static inline bool sched_group_cookie_match(struct rq *rq, 1231 struct task_struct *p, 1232 struct sched_group *group) 1233 { 1234 int cpu; 1235 1236 /* Ignore cookie match if core scheduler is not enabled on the CPU. */ 1237 if (!sched_core_enabled(rq)) 1238 return true; 1239 1240 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { 1241 if (sched_core_cookie_match(rq, p)) 1242 return true; 1243 } 1244 return false; 1245 } 1246 1247 extern void queue_core_balance(struct rq *rq); 1248 1249 static inline bool sched_core_enqueued(struct task_struct *p) 1250 { 1251 return !RB_EMPTY_NODE(&p->core_node); 1252 } 1253 1254 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p); 1255 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p); 1256 1257 extern void sched_core_get(void); 1258 extern void sched_core_put(void); 1259 1260 #else /* !CONFIG_SCHED_CORE */ 1261 1262 static inline bool sched_core_enabled(struct rq *rq) 1263 { 1264 return false; 1265 } 1266 1267 static inline bool sched_core_disabled(void) 1268 { 1269 return true; 1270 } 1271 1272 static inline raw_spinlock_t *rq_lockp(struct rq *rq) 1273 { 1274 return &rq->__lock; 1275 } 1276 1277 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) 1278 { 1279 return &rq->__lock; 1280 } 1281 1282 static inline void queue_core_balance(struct rq *rq) 1283 { 1284 } 1285 1286 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) 1287 { 1288 return true; 1289 } 1290 1291 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) 1292 { 1293 return true; 1294 } 1295 1296 static inline bool sched_group_cookie_match(struct rq *rq, 1297 struct task_struct *p, 1298 struct sched_group *group) 1299 { 1300 return true; 1301 } 1302 #endif /* CONFIG_SCHED_CORE */ 1303 1304 static inline void lockdep_assert_rq_held(struct rq *rq) 1305 { 1306 lockdep_assert_held(__rq_lockp(rq)); 1307 } 1308 1309 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass); 1310 extern bool raw_spin_rq_trylock(struct rq *rq); 1311 extern void raw_spin_rq_unlock(struct rq *rq); 1312 1313 static inline void raw_spin_rq_lock(struct rq *rq) 1314 { 1315 raw_spin_rq_lock_nested(rq, 0); 1316 } 1317 1318 static inline void raw_spin_rq_lock_irq(struct rq *rq) 1319 { 1320 local_irq_disable(); 1321 raw_spin_rq_lock(rq); 1322 } 1323 1324 static inline void raw_spin_rq_unlock_irq(struct rq *rq) 1325 { 1326 raw_spin_rq_unlock(rq); 1327 local_irq_enable(); 1328 } 1329 1330 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) 1331 { 1332 unsigned long flags; 1333 local_irq_save(flags); 1334 raw_spin_rq_lock(rq); 1335 return flags; 1336 } 1337 1338 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) 1339 { 1340 raw_spin_rq_unlock(rq); 1341 
	local_irq_restore(flags);
}

#define raw_spin_rq_lock_irqsave(rq, flags)	\
do {						\
	flags = _raw_spin_rq_lock_irqsave(rq);	\
} while (0)

#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
{
	SCHED_WARN_ON(!entity_is_task(se));
	return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

#else

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}
#endif

extern void update_rq_clock(struct rq *rq);

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *  call to __schedule(). This is an optimisation to avoid
 *  neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *  in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *  made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
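	 *
	 * A sketch of the intended flow (no extra semantics implied):
	 *
	 *	rq_lock(rq, &rf);	// pinning clears RQCF_UPDATED
	 *	update_rq_clock(rq);	// sets RQCF_UPDATED
	 *	...
	 *	rq_clock(rq);		// passes this assertion
	 *
	 * Calling rq_clock() without the update_rq_clock() in between trips
	 * the warning below, unless one of the skip flags is set.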
1455 */ 1456 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1457 } 1458 1459 static inline u64 rq_clock(struct rq *rq) 1460 { 1461 lockdep_assert_rq_held(rq); 1462 assert_clock_updated(rq); 1463 1464 return rq->clock; 1465 } 1466 1467 static inline u64 rq_clock_task(struct rq *rq) 1468 { 1469 lockdep_assert_rq_held(rq); 1470 assert_clock_updated(rq); 1471 1472 return rq->clock_task; 1473 } 1474 1475 /** 1476 * By default the decay is the default pelt decay period. 1477 * The decay shift can change the decay period in 1478 * multiples of 32. 1479 * Decay shift Decay period(ms) 1480 * 0 32 1481 * 1 64 1482 * 2 128 1483 * 3 256 1484 * 4 512 1485 */ 1486 extern int sched_thermal_decay_shift; 1487 1488 static inline u64 rq_clock_thermal(struct rq *rq) 1489 { 1490 return rq_clock_task(rq) >> sched_thermal_decay_shift; 1491 } 1492 1493 static inline void rq_clock_skip_update(struct rq *rq) 1494 { 1495 lockdep_assert_rq_held(rq); 1496 rq->clock_update_flags |= RQCF_REQ_SKIP; 1497 } 1498 1499 /* 1500 * See rt task throttling, which is the only time a skip 1501 * request is canceled. 1502 */ 1503 static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1504 { 1505 lockdep_assert_rq_held(rq); 1506 rq->clock_update_flags &= ~RQCF_REQ_SKIP; 1507 } 1508 1509 struct rq_flags { 1510 unsigned long flags; 1511 struct pin_cookie cookie; 1512 #ifdef CONFIG_SCHED_DEBUG 1513 /* 1514 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1515 * current pin context is stashed here in case it needs to be 1516 * restored in rq_repin_lock(). 1517 */ 1518 unsigned int clock_update_flags; 1519 #endif 1520 }; 1521 1522 extern struct callback_head balance_push_callback; 1523 1524 /* 1525 * Lockdep annotation that avoids accidental unlocks; it's like a 1526 * sticky/continuous lockdep_assert_held(). 1527 * 1528 * This avoids code that has access to 'struct rq *rq' (basically everything in 1529 * the scheduler) from accidentally unlocking the rq if they do not also have a 1530 * copy of the (on-stack) 'struct rq_flags rf'. 1531 * 1532 * Also see Documentation/locking/lockdep-design.rst. 1533 */ 1534 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1535 { 1536 rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); 1537 1538 #ifdef CONFIG_SCHED_DEBUG 1539 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1540 rf->clock_update_flags = 0; 1541 #ifdef CONFIG_SMP 1542 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); 1543 #endif 1544 #endif 1545 } 1546 1547 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1548 { 1549 #ifdef CONFIG_SCHED_DEBUG 1550 if (rq->clock_update_flags > RQCF_ACT_SKIP) 1551 rf->clock_update_flags = RQCF_UPDATED; 1552 #endif 1553 1554 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); 1555 } 1556 1557 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1558 { 1559 lockdep_repin_lock(__rq_lockp(rq), rf->cookie); 1560 1561 #ifdef CONFIG_SCHED_DEBUG 1562 /* 1563 * Restore the value we stashed in @rf for this pin context. 
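	 *
	 * Typical pattern (a sketch): rq_unpin_lock() before a lock break,
	 * re-acquire, then rq_repin_lock() so the stashed RQCF_UPDATED
	 * state survives:
	 *
	 *	rq_unpin_lock(rq, &rf);
	 *	...			// rq->lock may be dropped here
	 *	rq_repin_lock(rq, &rf);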
1564 */ 1565 rq->clock_update_flags |= rf->clock_update_flags; 1566 #endif 1567 } 1568 1569 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1570 __acquires(rq->lock); 1571 1572 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1573 __acquires(p->pi_lock) 1574 __acquires(rq->lock); 1575 1576 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 1577 __releases(rq->lock) 1578 { 1579 rq_unpin_lock(rq, rf); 1580 raw_spin_rq_unlock(rq); 1581 } 1582 1583 static inline void 1584 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1585 __releases(rq->lock) 1586 __releases(p->pi_lock) 1587 { 1588 rq_unpin_lock(rq, rf); 1589 raw_spin_rq_unlock(rq); 1590 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 1591 } 1592 1593 static inline void 1594 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 1595 __acquires(rq->lock) 1596 { 1597 raw_spin_rq_lock_irqsave(rq, rf->flags); 1598 rq_pin_lock(rq, rf); 1599 } 1600 1601 static inline void 1602 rq_lock_irq(struct rq *rq, struct rq_flags *rf) 1603 __acquires(rq->lock) 1604 { 1605 raw_spin_rq_lock_irq(rq); 1606 rq_pin_lock(rq, rf); 1607 } 1608 1609 static inline void 1610 rq_lock(struct rq *rq, struct rq_flags *rf) 1611 __acquires(rq->lock) 1612 { 1613 raw_spin_rq_lock(rq); 1614 rq_pin_lock(rq, rf); 1615 } 1616 1617 static inline void 1618 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 1619 __releases(rq->lock) 1620 { 1621 rq_unpin_lock(rq, rf); 1622 raw_spin_rq_unlock_irqrestore(rq, rf->flags); 1623 } 1624 1625 static inline void 1626 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 1627 __releases(rq->lock) 1628 { 1629 rq_unpin_lock(rq, rf); 1630 raw_spin_rq_unlock_irq(rq); 1631 } 1632 1633 static inline void 1634 rq_unlock(struct rq *rq, struct rq_flags *rf) 1635 __releases(rq->lock) 1636 { 1637 rq_unpin_lock(rq, rf); 1638 raw_spin_rq_unlock(rq); 1639 } 1640 1641 static inline struct rq * 1642 this_rq_lock_irq(struct rq_flags *rf) 1643 __acquires(rq->lock) 1644 { 1645 struct rq *rq; 1646 1647 local_irq_disable(); 1648 rq = this_rq(); 1649 rq_lock(rq, rf); 1650 return rq; 1651 } 1652 1653 #ifdef CONFIG_NUMA 1654 enum numa_topology_type { 1655 NUMA_DIRECT, 1656 NUMA_GLUELESS_MESH, 1657 NUMA_BACKPLANE, 1658 }; 1659 extern enum numa_topology_type sched_numa_topology_type; 1660 extern int sched_max_numa_distance; 1661 extern bool find_numa_distance(int distance); 1662 extern void sched_init_numa(void); 1663 extern void sched_domains_numa_masks_set(unsigned int cpu); 1664 extern void sched_domains_numa_masks_clear(unsigned int cpu); 1665 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1666 #else 1667 static inline void sched_init_numa(void) { } 1668 static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1669 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1670 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1671 { 1672 return nr_cpu_ids; 1673 } 1674 #endif 1675 1676 #ifdef CONFIG_NUMA_BALANCING 1677 /* The regions in numa_faults array from task_struct */ 1678 enum numa_faults_stats { 1679 NUMA_MEM = 0, 1680 NUMA_CPU, 1681 NUMA_MEMBUF, 1682 NUMA_CPUBUF 1683 }; 1684 extern void sched_setnuma(struct task_struct *p, int node); 1685 extern int migrate_task_to(struct task_struct *p, int cpu); 1686 extern int migrate_swap(struct task_struct *p, struct task_struct *t, 1687 int cpu, int scpu); 1688 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 1689 
#else 1690 static inline void 1691 init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 1692 { 1693 } 1694 #endif /* CONFIG_NUMA_BALANCING */ 1695 1696 #ifdef CONFIG_SMP 1697 1698 static inline void 1699 queue_balance_callback(struct rq *rq, 1700 struct callback_head *head, 1701 void (*func)(struct rq *rq)) 1702 { 1703 lockdep_assert_rq_held(rq); 1704 1705 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) 1706 return; 1707 1708 head->func = (void (*)(struct callback_head *))func; 1709 head->next = rq->balance_callback; 1710 rq->balance_callback = head; 1711 } 1712 1713 #define rcu_dereference_check_sched_domain(p) \ 1714 rcu_dereference_check((p), \ 1715 lockdep_is_held(&sched_domains_mutex)) 1716 1717 /* 1718 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1719 * See destroy_sched_domains: call_rcu for details. 1720 * 1721 * The domain tree of any CPU may only be accessed from within 1722 * preempt-disabled sections. 1723 */ 1724 #define for_each_domain(cpu, __sd) \ 1725 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1726 __sd; __sd = __sd->parent) 1727 1728 /** 1729 * highest_flag_domain - Return highest sched_domain containing flag. 1730 * @cpu: The CPU whose highest level of sched domain is to 1731 * be returned. 1732 * @flag: The flag to check for the highest sched_domain 1733 * for the given CPU. 1734 * 1735 * Returns the highest sched_domain of a CPU which contains the given flag. 1736 */ 1737 static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1738 { 1739 struct sched_domain *sd, *hsd = NULL; 1740 1741 for_each_domain(cpu, sd) { 1742 if (!(sd->flags & flag)) 1743 break; 1744 hsd = sd; 1745 } 1746 1747 return hsd; 1748 } 1749 1750 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1751 { 1752 struct sched_domain *sd; 1753 1754 for_each_domain(cpu, sd) { 1755 if (sd->flags & flag) 1756 break; 1757 } 1758 1759 return sd; 1760 } 1761 1762 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 1763 DECLARE_PER_CPU(int, sd_llc_size); 1764 DECLARE_PER_CPU(int, sd_llc_id); 1765 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1766 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1767 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1768 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1769 extern struct static_key_false sched_asym_cpucapacity; 1770 1771 struct sched_group_capacity { 1772 atomic_t ref; 1773 /* 1774 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 1775 * for a single CPU. 1776 */ 1777 unsigned long capacity; 1778 unsigned long min_capacity; /* Min per-CPU capacity in group */ 1779 unsigned long max_capacity; /* Max per-CPU capacity in group */ 1780 unsigned long next_update; 1781 int imbalance; /* XXX unrelated to capacity but shared group state */ 1782 1783 #ifdef CONFIG_SCHED_DEBUG 1784 int id; 1785 #endif 1786 1787 unsigned long cpumask[]; /* Balance mask */ 1788 }; 1789 1790 struct sched_group { 1791 struct sched_group *next; /* Must be a circular list */ 1792 atomic_t ref; 1793 1794 unsigned int group_weight; 1795 struct sched_group_capacity *sgc; 1796 int asym_prefer_cpu; /* CPU of highest priority in group */ 1797 int flags; 1798 1799 /* 1800 * The CPUs this group covers. 1801 * 1802 * NOTE: this field is variable length. 
(Allocated dynamically 1803 * by attaching extra space to the end of the structure, 1804 * depending on how many CPUs the kernel has booted up with) 1805 */ 1806 unsigned long cpumask[]; 1807 }; 1808 1809 static inline struct cpumask *sched_group_span(struct sched_group *sg) 1810 { 1811 return to_cpumask(sg->cpumask); 1812 } 1813 1814 /* 1815 * See build_balance_mask(). 1816 */ 1817 static inline struct cpumask *group_balance_mask(struct sched_group *sg) 1818 { 1819 return to_cpumask(sg->sgc->cpumask); 1820 } 1821 1822 /** 1823 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. 1824 * @group: The group whose first CPU is to be returned. 1825 */ 1826 static inline unsigned int group_first_cpu(struct sched_group *group) 1827 { 1828 return cpumask_first(sched_group_span(group)); 1829 } 1830 1831 extern int group_balance_cpu(struct sched_group *sg); 1832 1833 #ifdef CONFIG_SCHED_DEBUG 1834 void update_sched_domain_debugfs(void); 1835 void dirty_sched_domain_sysctl(int cpu); 1836 #else 1837 static inline void update_sched_domain_debugfs(void) 1838 { 1839 } 1840 static inline void dirty_sched_domain_sysctl(int cpu) 1841 { 1842 } 1843 #endif 1844 1845 extern int sched_update_scaling(void); 1846 1847 extern void flush_smp_call_function_from_idle(void); 1848 1849 #else /* !CONFIG_SMP: */ 1850 static inline void flush_smp_call_function_from_idle(void) { } 1851 #endif 1852 1853 #include "stats.h" 1854 #include "autogroup.h" 1855 1856 #ifdef CONFIG_CGROUP_SCHED 1857 1858 /* 1859 * Return the group to which this tasks belongs. 1860 * 1861 * We cannot use task_css() and friends because the cgroup subsystem 1862 * changes that value before the cgroup_subsys::attach() method is called, 1863 * therefore we cannot pin it and might observe the wrong value. 1864 * 1865 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup 1866 * core changes this before calling sched_move_task(). 1867 * 1868 * Instead we use a 'copy' which is updated from sched_move_task() while 1869 * holding both task_struct::pi_lock and rq::lock. 1870 */ 1871 static inline struct task_group *task_group(struct task_struct *p) 1872 { 1873 return p->sched_task_group; 1874 } 1875 1876 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1877 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1878 { 1879 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1880 struct task_group *tg = task_group(p); 1881 #endif 1882 1883 #ifdef CONFIG_FAIR_GROUP_SCHED 1884 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1885 p->se.cfs_rq = tg->cfs_rq[cpu]; 1886 p->se.parent = tg->se[cpu]; 1887 #endif 1888 1889 #ifdef CONFIG_RT_GROUP_SCHED 1890 p->rt.rt_rq = tg->rt_rq[cpu]; 1891 p->rt.parent = tg->rt_se[cpu]; 1892 #endif 1893 } 1894 1895 #else /* CONFIG_CGROUP_SCHED */ 1896 1897 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 1898 static inline struct task_group *task_group(struct task_struct *p) 1899 { 1900 return NULL; 1901 } 1902 1903 #endif /* CONFIG_CGROUP_SCHED */ 1904 1905 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1906 { 1907 set_task_rq(p, cpu); 1908 #ifdef CONFIG_SMP 1909 /* 1910 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1911 * successfully executed on another CPU. We must ensure that updates of 1912 * per-task data have been completed by this moment. 
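	 *
	 * Pairing sketch (generic store/load ordering; that the reader side
	 * supplies a matching barrier or acquire, as the task_rq_lock()
	 * path effectively does, is an assumption here):
	 *
	 *	writer (__set_task_cpu)			reader
	 *	  set_task_rq(p, cpu);			  c = task_cpu(p);
	 *	  smp_wmb();				  matching rmb()/acquire
	 *	  WRITE_ONCE(task_thread_info(p)->cpu,	  ...then sees the
	 *		     cpu);			  set_task_rq() stores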
1913 */ 1914 smp_wmb(); 1915 WRITE_ONCE(task_thread_info(p)->cpu, cpu); 1916 p->wake_cpu = cpu; 1917 #endif 1918 } 1919 1920 /* 1921 * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1922 */ 1923 #ifdef CONFIG_SCHED_DEBUG 1924 # include <linux/static_key.h> 1925 # define const_debug __read_mostly 1926 #else 1927 # define const_debug const 1928 #endif 1929 1930 #define SCHED_FEAT(name, enabled) \ 1931 __SCHED_FEAT_##name , 1932 1933 enum { 1934 #include "features.h" 1935 __SCHED_FEAT_NR, 1936 }; 1937 1938 #undef SCHED_FEAT 1939 1940 #ifdef CONFIG_SCHED_DEBUG 1941 1942 /* 1943 * To support run-time toggling of sched features, all the translation units 1944 * (but core.c) reference the sysctl_sched_features defined in core.c. 1945 */ 1946 extern const_debug unsigned int sysctl_sched_features; 1947 1948 #ifdef CONFIG_JUMP_LABEL 1949 #define SCHED_FEAT(name, enabled) \ 1950 static __always_inline bool static_branch_##name(struct static_key *key) \ 1951 { \ 1952 return static_key_##enabled(key); \ 1953 } 1954 1955 #include "features.h" 1956 #undef SCHED_FEAT 1957 1958 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1959 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1960 1961 #else /* !CONFIG_JUMP_LABEL */ 1962 1963 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1964 1965 #endif /* CONFIG_JUMP_LABEL */ 1966 1967 #else /* !SCHED_DEBUG */ 1968 1969 /* 1970 * Each translation unit has its own copy of sysctl_sched_features to allow 1971 * constants propagation at compile time and compiler optimization based on 1972 * features default. 1973 */ 1974 #define SCHED_FEAT(name, enabled) \ 1975 (1UL << __SCHED_FEAT_##name) * enabled | 1976 static const_debug __maybe_unused unsigned int sysctl_sched_features = 1977 #include "features.h" 1978 0; 1979 #undef SCHED_FEAT 1980 1981 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1982 1983 #endif /* SCHED_DEBUG */ 1984 1985 extern struct static_key_false sched_numa_balancing; 1986 extern struct static_key_false sched_schedstats; 1987 1988 static inline u64 global_rt_period(void) 1989 { 1990 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1991 } 1992 1993 static inline u64 global_rt_runtime(void) 1994 { 1995 if (sysctl_sched_rt_runtime < 0) 1996 return RUNTIME_INF; 1997 1998 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 1999 } 2000 2001 static inline int task_current(struct rq *rq, struct task_struct *p) 2002 { 2003 return rq->curr == p; 2004 } 2005 2006 static inline int task_running(struct rq *rq, struct task_struct *p) 2007 { 2008 #ifdef CONFIG_SMP 2009 return p->on_cpu; 2010 #else 2011 return task_current(rq, p); 2012 #endif 2013 } 2014 2015 static inline int task_on_rq_queued(struct task_struct *p) 2016 { 2017 return p->on_rq == TASK_ON_RQ_QUEUED; 2018 } 2019 2020 static inline int task_on_rq_migrating(struct task_struct *p) 2021 { 2022 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 2023 } 2024 2025 /* Wake flags. 
The first three directly map to some SD flag value */ 2026 #define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */ 2027 #define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */ 2028 #define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */ 2029 2030 #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */ 2031 #define WF_MIGRATED 0x20 /* Internal use, task got migrated */ 2032 #define WF_ON_CPU 0x40 /* Wakee is on_cpu */ 2033 2034 #ifdef CONFIG_SMP 2035 static_assert(WF_EXEC == SD_BALANCE_EXEC); 2036 static_assert(WF_FORK == SD_BALANCE_FORK); 2037 static_assert(WF_TTWU == SD_BALANCE_WAKE); 2038 #endif 2039 2040 /* 2041 * To aid in avoiding the subversion of "niceness" due to uneven distribution 2042 * of tasks with abnormal "nice" values across CPUs the contribution that 2043 * each task makes to its run queue's load is weighted according to its 2044 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 2045 * scaled version of the new time slice allocation that they receive on time 2046 * slice expiry etc. 2047 */ 2048 2049 #define WEIGHT_IDLEPRIO 3 2050 #define WMULT_IDLEPRIO 1431655765 2051 2052 extern const int sched_prio_to_weight[40]; 2053 extern const u32 sched_prio_to_wmult[40]; 2054 2055 /* 2056 * {de,en}queue flags: 2057 * 2058 * DEQUEUE_SLEEP - task is no longer runnable 2059 * ENQUEUE_WAKEUP - task just became runnable 2060 * 2061 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 2062 * are in a known state which allows modification. Such pairs 2063 * should preserve as much state as possible. 2064 * 2065 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 2066 * in the runqueue. 2067 * 2068 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 2069 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 2070 * ENQUEUE_MIGRATED - the task was migrated during wakeup 2071 * 2072 */ 2073 2074 #define DEQUEUE_SLEEP 0x01 2075 #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 2076 #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 2077 #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 2078 2079 #define ENQUEUE_WAKEUP 0x01 2080 #define ENQUEUE_RESTORE 0x02 2081 #define ENQUEUE_MOVE 0x04 2082 #define ENQUEUE_NOCLOCK 0x08 2083 2084 #define ENQUEUE_HEAD 0x10 2085 #define ENQUEUE_REPLENISH 0x20 2086 #ifdef CONFIG_SMP 2087 #define ENQUEUE_MIGRATED 0x40 2088 #else 2089 #define ENQUEUE_MIGRATED 0x00 2090 #endif 2091 2092 #define RETRY_TASK ((void *)-1UL) 2093 2094 struct sched_class { 2095 2096 #ifdef CONFIG_UCLAMP_TASK 2097 int uclamp_enabled; 2098 #endif 2099 2100 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 2101 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 2102 void (*yield_task) (struct rq *rq); 2103 bool (*yield_to_task)(struct rq *rq, struct task_struct *p); 2104 2105 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 2106 2107 struct task_struct *(*pick_next_task)(struct rq *rq); 2108 2109 void (*put_prev_task)(struct rq *rq, struct task_struct *p); 2110 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 2111 2112 #ifdef CONFIG_SMP 2113 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2114 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); 2115 2116 struct task_struct * (*pick_task)(struct rq *rq); 2117 2118 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 2119 2120 void 
(*task_woken)(struct rq *this_rq, struct task_struct *task); 2121 2122 void (*set_cpus_allowed)(struct task_struct *p, 2123 const struct cpumask *newmask, 2124 u32 flags); 2125 2126 void (*rq_online)(struct rq *rq); 2127 void (*rq_offline)(struct rq *rq); 2128 2129 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); 2130 #endif 2131 2132 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 2133 void (*task_fork)(struct task_struct *p); 2134 void (*task_dead)(struct task_struct *p); 2135 2136 /* 2137 * The switched_from() call is allowed to drop rq->lock, therefore we 2138 * cannot assume the switched_from/switched_to pair is serialized by 2139 * rq->lock. They are however serialized by p->pi_lock. 2140 */ 2141 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 2142 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 2143 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 2144 int oldprio); 2145 2146 unsigned int (*get_rr_interval)(struct rq *rq, 2147 struct task_struct *task); 2148 2149 void (*update_curr)(struct rq *rq); 2150 2151 #define TASK_SET_GROUP 0 2152 #define TASK_MOVE_GROUP 1 2153 2154 #ifdef CONFIG_FAIR_GROUP_SCHED 2155 void (*task_change_group)(struct task_struct *p, int type); 2156 #endif 2157 }; 2158 2159 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 2160 { 2161 WARN_ON_ONCE(rq->curr != prev); 2162 prev->sched_class->put_prev_task(rq, prev); 2163 } 2164 2165 static inline void set_next_task(struct rq *rq, struct task_struct *next) 2166 { 2167 next->sched_class->set_next_task(rq, next, false); 2168 } 2169 2170 2171 /* 2172 * Helper to define a sched_class instance; each one is placed in a separate 2173 * section which is ordered by the linker script: 2174 * 2175 * include/asm-generic/vmlinux.lds.h 2176 * 2177 * Also enforce alignment on the instance, not the type, to guarantee layout. 
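 *
 * Illustrative usage (a sketch only; fair.c holds the actual definition):
 *
 *	DEFINE_SCHED_CLASS(fair) = {
 *		.enqueue_task	= enqueue_task_fair,
 *		.dequeue_task	= dequeue_task_fair,
 *		...
 *	};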
2178 */ 2179 #define DEFINE_SCHED_CLASS(name) \ 2180 const struct sched_class name##_sched_class \ 2181 __aligned(__alignof__(struct sched_class)) \ 2182 __section("__" #name "_sched_class") 2183 2184 /* Defined in include/asm-generic/vmlinux.lds.h */ 2185 extern struct sched_class __begin_sched_classes[]; 2186 extern struct sched_class __end_sched_classes[]; 2187 2188 #define sched_class_highest (__end_sched_classes - 1) 2189 #define sched_class_lowest (__begin_sched_classes - 1) 2190 2191 #define for_class_range(class, _from, _to) \ 2192 for (class = (_from); class != (_to); class--) 2193 2194 #define for_each_class(class) \ 2195 for_class_range(class, sched_class_highest, sched_class_lowest) 2196 2197 extern const struct sched_class stop_sched_class; 2198 extern const struct sched_class dl_sched_class; 2199 extern const struct sched_class rt_sched_class; 2200 extern const struct sched_class fair_sched_class; 2201 extern const struct sched_class idle_sched_class; 2202 2203 static inline bool sched_stop_runnable(struct rq *rq) 2204 { 2205 return rq->stop && task_on_rq_queued(rq->stop); 2206 } 2207 2208 static inline bool sched_dl_runnable(struct rq *rq) 2209 { 2210 return rq->dl.dl_nr_running > 0; 2211 } 2212 2213 static inline bool sched_rt_runnable(struct rq *rq) 2214 { 2215 return rq->rt.rt_queued > 0; 2216 } 2217 2218 static inline bool sched_fair_runnable(struct rq *rq) 2219 { 2220 return rq->cfs.nr_running > 0; 2221 } 2222 2223 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2224 extern struct task_struct *pick_next_task_idle(struct rq *rq); 2225 2226 #define SCA_CHECK 0x01 2227 #define SCA_MIGRATE_DISABLE 0x02 2228 #define SCA_MIGRATE_ENABLE 0x04 2229 #define SCA_USER 0x08 2230 2231 #ifdef CONFIG_SMP 2232 2233 extern void update_group_capacity(struct sched_domain *sd, int cpu); 2234 2235 extern void trigger_load_balance(struct rq *rq); 2236 2237 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags); 2238 2239 static inline struct task_struct *get_push_task(struct rq *rq) 2240 { 2241 struct task_struct *p = rq->curr; 2242 2243 lockdep_assert_rq_held(rq); 2244 2245 if (rq->push_busy) 2246 return NULL; 2247 2248 if (p->nr_cpus_allowed == 1) 2249 return NULL; 2250 2251 if (p->migration_disabled) 2252 return NULL; 2253 2254 rq->push_busy = true; 2255 return get_task_struct(p); 2256 } 2257 2258 extern int push_cpu_stop(void *arg); 2259 2260 #endif 2261 2262 #ifdef CONFIG_CPU_IDLE 2263 static inline void idle_set_state(struct rq *rq, 2264 struct cpuidle_state *idle_state) 2265 { 2266 rq->idle_state = idle_state; 2267 } 2268 2269 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2270 { 2271 SCHED_WARN_ON(!rcu_read_lock_held()); 2272 2273 return rq->idle_state; 2274 } 2275 #else 2276 static inline void idle_set_state(struct rq *rq, 2277 struct cpuidle_state *idle_state) 2278 { 2279 } 2280 2281 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2282 { 2283 return NULL; 2284 } 2285 #endif 2286 2287 extern void schedule_idle(void); 2288 2289 extern void sysrq_sched_debug_show(void); 2290 extern void sched_init_granularity(void); 2291 extern void update_max_interval(void); 2292 2293 extern void init_sched_dl_class(void); 2294 extern void init_sched_rt_class(void); 2295 extern void init_sched_fair_class(void); 2296 2297 extern void reweight_task(struct task_struct *p, int prio); 2298 2299 extern void resched_curr(struct rq *rq); 2300 extern void 
resched_cpu(int cpu); 2301 2302 extern struct rt_bandwidth def_rt_bandwidth; 2303 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 2304 2305 extern struct dl_bandwidth def_dl_bandwidth; 2306 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 2307 extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 2308 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 2309 2310 #define BW_SHIFT 20 2311 #define BW_UNIT (1 << BW_SHIFT) 2312 #define RATIO_SHIFT 8 2313 #define MAX_BW_BITS (64 - BW_SHIFT) 2314 #define MAX_BW ((1ULL << MAX_BW_BITS) - 1) 2315 unsigned long to_ratio(u64 period, u64 runtime); 2316 2317 extern void init_entity_runnable_average(struct sched_entity *se); 2318 extern void post_init_entity_util_avg(struct task_struct *p); 2319 2320 #ifdef CONFIG_NO_HZ_FULL 2321 extern bool sched_can_stop_tick(struct rq *rq); 2322 extern int __init sched_tick_offload_init(void); 2323 2324 /* 2325 * Tick may be needed by tasks in the runqueue depending on their policy and 2326 * requirements. If the tick is needed, let's send the target CPU an IPI to kick it out of 2327 * nohz mode if necessary. 2328 */ 2329 static inline void sched_update_tick_dependency(struct rq *rq) 2330 { 2331 int cpu = cpu_of(rq); 2332 2333 if (!tick_nohz_full_cpu(cpu)) 2334 return; 2335 2336 if (sched_can_stop_tick(rq)) 2337 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 2338 else 2339 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 2340 } 2341 #else 2342 static inline int sched_tick_offload_init(void) { return 0; } 2343 static inline void sched_update_tick_dependency(struct rq *rq) { } 2344 #endif 2345 2346 static inline void add_nr_running(struct rq *rq, unsigned count) 2347 { 2348 unsigned prev_nr = rq->nr_running; 2349 2350 rq->nr_running = prev_nr + count; 2351 if (trace_sched_update_nr_running_tp_enabled()) { 2352 call_trace_sched_update_nr_running(rq, count); 2353 } 2354 2355 #ifdef CONFIG_SMP 2356 if (prev_nr < 2 && rq->nr_running >= 2) { 2357 if (!READ_ONCE(rq->rd->overload)) 2358 WRITE_ONCE(rq->rd->overload, 1); 2359 } 2360 #endif 2361 2362 sched_update_tick_dependency(rq); 2363 } 2364 2365 static inline void sub_nr_running(struct rq *rq, unsigned count) 2366 { 2367 rq->nr_running -= count; 2368 if (trace_sched_update_nr_running_tp_enabled()) { 2369 call_trace_sched_update_nr_running(rq, -count); 2370 } 2371 2372 /* Check if we still need preemption */ 2373 sched_update_tick_dependency(rq); 2374 } 2375 2376 extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 2377 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 2378 2379 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 2380 2381 extern const_debug unsigned int sysctl_sched_nr_migrate; 2382 extern const_debug unsigned int sysctl_sched_migration_cost; 2383 2384 #ifdef CONFIG_SCHED_DEBUG 2385 extern unsigned int sysctl_sched_latency; 2386 extern unsigned int sysctl_sched_min_granularity; 2387 extern unsigned int sysctl_sched_idle_min_granularity; 2388 extern unsigned int sysctl_sched_wakeup_granularity; 2389 extern int sysctl_resched_latency_warn_ms; 2390 extern int sysctl_resched_latency_warn_once; 2391 2392 extern unsigned int sysctl_sched_tunable_scaling; 2393 2394 extern unsigned int sysctl_numa_balancing_scan_delay; 2395 extern unsigned int sysctl_numa_balancing_scan_period_min; 2396 extern unsigned int sysctl_numa_balancing_scan_period_max; 2397 extern unsigned int
sysctl_numa_balancing_scan_size; 2398 #endif 2399 2400 #ifdef CONFIG_SCHED_HRTICK 2401 2402 /* 2403 * Use hrtick when: 2404 * - enabled by features 2405 * - hrtimer is actually high res 2406 */ 2407 static inline int hrtick_enabled(struct rq *rq) 2408 { 2409 if (!cpu_active(cpu_of(rq))) 2410 return 0; 2411 return hrtimer_is_hres_active(&rq->hrtick_timer); 2412 } 2413 2414 static inline int hrtick_enabled_fair(struct rq *rq) 2415 { 2416 if (!sched_feat(HRTICK)) 2417 return 0; 2418 return hrtick_enabled(rq); 2419 } 2420 2421 static inline int hrtick_enabled_dl(struct rq *rq) 2422 { 2423 if (!sched_feat(HRTICK_DL)) 2424 return 0; 2425 return hrtick_enabled(rq); 2426 } 2427 2428 void hrtick_start(struct rq *rq, u64 delay); 2429 2430 #else 2431 2432 static inline int hrtick_enabled_fair(struct rq *rq) 2433 { 2434 return 0; 2435 } 2436 2437 static inline int hrtick_enabled_dl(struct rq *rq) 2438 { 2439 return 0; 2440 } 2441 2442 static inline int hrtick_enabled(struct rq *rq) 2443 { 2444 return 0; 2445 } 2446 2447 #endif /* CONFIG_SCHED_HRTICK */ 2448 2449 #ifndef arch_scale_freq_tick 2450 static __always_inline 2451 void arch_scale_freq_tick(void) 2452 { 2453 } 2454 #endif 2455 2456 #ifndef arch_scale_freq_capacity 2457 /** 2458 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. 2459 * @cpu: the CPU in question. 2460 * 2461 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. 2462 * 2463 * f_curr 2464 * ------ * SCHED_CAPACITY_SCALE 2465 * f_max 2466 */ 2467 static __always_inline 2468 unsigned long arch_scale_freq_capacity(int cpu) 2469 { 2470 return SCHED_CAPACITY_SCALE; 2471 } 2472 #endif 2473 2474 2475 #ifdef CONFIG_SMP 2476 2477 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) 2478 { 2479 #ifdef CONFIG_SCHED_CORE 2480 /* 2481 * In order to not have {0,2},{1,3} turn into an AB-BA, 2482 * order by core-id first and cpu-id second. 2483 * 2484 * Notably: 2485 * 2486 * double_rq_lock(0,3); will take core-0, core-1 lock 2487 * double_rq_lock(1,2); will take core-1, core-0 lock 2488 * 2489 * when only cpu-id is considered. 2490 */ 2491 if (rq1->core->cpu < rq2->core->cpu) 2492 return true; 2493 if (rq1->core->cpu > rq2->core->cpu) 2494 return false; 2495 2496 /* 2497 * __sched_core_flip() relies on SMT having cpu-id lock order. 2498 */ 2499 #endif 2500 return rq1->cpu < rq2->cpu; 2501 } 2502 2503 extern void double_rq_lock(struct rq *rq1, struct rq *rq2); 2504 2505 #ifdef CONFIG_PREEMPTION 2506 2507 /* 2508 * fair double_lock_balance: Safely acquires both rq->locks in a fair 2509 * way at the expense of forcing extra atomic operations in all 2510 * invocations. This assures that the double_lock is acquired using the 2511 * same underlying policy as the spinlock_t on this architecture, which 2512 * reduces latency compared to the unfair variant below. However, it 2513 * also adds more overhead and therefore may reduce throughput. 2514 */ 2515 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2516 __releases(this_rq->lock) 2517 __acquires(busiest->lock) 2518 __acquires(this_rq->lock) 2519 { 2520 raw_spin_rq_unlock(this_rq); 2521 double_rq_lock(this_rq, busiest); 2522 2523 return 1; 2524 } 2525 2526 #else 2527 /* 2528 * Unfair double_lock_balance: Optimizes throughput at the expense of 2529 * latency by eliminating extra atomic operations when the locks are 2530 * already in proper order on entry.
This favors lower CPU-ids and will 2531 * grant the double lock to lower CPUs over higher ids under contention, 2532 * regardless of entry order into the function. 2533 */ 2534 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2535 __releases(this_rq->lock) 2536 __acquires(busiest->lock) 2537 __acquires(this_rq->lock) 2538 { 2539 if (__rq_lockp(this_rq) == __rq_lockp(busiest)) 2540 return 0; 2541 2542 if (likely(raw_spin_rq_trylock(busiest))) 2543 return 0; 2544 2545 if (rq_order_less(this_rq, busiest)) { 2546 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); 2547 return 0; 2548 } 2549 2550 raw_spin_rq_unlock(this_rq); 2551 double_rq_lock(this_rq, busiest); 2552 2553 return 1; 2554 } 2555 2556 #endif /* CONFIG_PREEMPTION */ 2557 2558 /* 2559 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 2560 */ 2561 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2562 { 2563 lockdep_assert_irqs_disabled(); 2564 2565 return _double_lock_balance(this_rq, busiest); 2566 } 2567 2568 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2569 __releases(busiest->lock) 2570 { 2571 if (__rq_lockp(this_rq) != __rq_lockp(busiest)) 2572 raw_spin_rq_unlock(busiest); 2573 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); 2574 } 2575 2576 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 2577 { 2578 if (l1 > l2) 2579 swap(l1, l2); 2580 2581 spin_lock(l1); 2582 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2583 } 2584 2585 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 2586 { 2587 if (l1 > l2) 2588 swap(l1, l2); 2589 2590 spin_lock_irq(l1); 2591 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2592 } 2593 2594 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2595 { 2596 if (l1 > l2) 2597 swap(l1, l2); 2598 2599 raw_spin_lock(l1); 2600 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2601 } 2602 2603 /* 2604 * double_rq_unlock - safely unlock two runqueues 2605 * 2606 * Note this does not restore interrupts like task_rq_unlock, 2607 * you need to do so manually after calling. 2608 */ 2609 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2610 __releases(rq1->lock) 2611 __releases(rq2->lock) 2612 { 2613 if (__rq_lockp(rq1) != __rq_lockp(rq2)) 2614 raw_spin_rq_unlock(rq2); 2615 else 2616 __release(rq2->lock); 2617 raw_spin_rq_unlock(rq1); 2618 } 2619 2620 extern void set_rq_online (struct rq *rq); 2621 extern void set_rq_offline(struct rq *rq); 2622 extern bool sched_smp_initialized; 2623 2624 #else /* CONFIG_SMP */ 2625 2626 /* 2627 * double_rq_lock - safely lock two runqueues 2628 * 2629 * Note this does not disable interrupts like task_rq_lock, 2630 * you need to do so manually before calling. 2631 */ 2632 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2633 __acquires(rq1->lock) 2634 __acquires(rq2->lock) 2635 { 2636 BUG_ON(!irqs_disabled()); 2637 BUG_ON(rq1 != rq2); 2638 raw_spin_rq_lock(rq1); 2639 __acquire(rq2->lock); /* Fake it out ;) */ 2640 } 2641 2642 /* 2643 * double_rq_unlock - safely unlock two runqueues 2644 * 2645 * Note this does not restore interrupts like task_rq_unlock, 2646 * you need to do so manually after calling. 
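 *
 * (On !CONFIG_SMP there is only one runqueue, so rq1 and rq2 are required to
 * be the same and only a single lock is actually released here.)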
2647 */ 2648 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2649 __releases(rq1->lock) 2650 __releases(rq2->lock) 2651 { 2652 BUG_ON(rq1 != rq2); 2653 raw_spin_rq_unlock(rq1); 2654 __release(rq2->lock); 2655 } 2656 2657 #endif 2658 2659 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2660 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 2661 2662 #ifdef CONFIG_SCHED_DEBUG 2663 extern bool sched_debug_verbose; 2664 2665 extern void print_cfs_stats(struct seq_file *m, int cpu); 2666 extern void print_rt_stats(struct seq_file *m, int cpu); 2667 extern void print_dl_stats(struct seq_file *m, int cpu); 2668 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2669 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2670 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2671 2672 extern void resched_latency_warn(int cpu, u64 latency); 2673 #ifdef CONFIG_NUMA_BALANCING 2674 extern void 2675 show_numa_stats(struct task_struct *p, struct seq_file *m); 2676 extern void 2677 print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2678 unsigned long tpf, unsigned long gsf, unsigned long gpf); 2679 #endif /* CONFIG_NUMA_BALANCING */ 2680 #else 2681 static inline void resched_latency_warn(int cpu, u64 latency) {} 2682 #endif /* CONFIG_SCHED_DEBUG */ 2683 2684 extern void init_cfs_rq(struct cfs_rq *cfs_rq); 2685 extern void init_rt_rq(struct rt_rq *rt_rq); 2686 extern void init_dl_rq(struct dl_rq *dl_rq); 2687 2688 extern void cfs_bandwidth_usage_inc(void); 2689 extern void cfs_bandwidth_usage_dec(void); 2690 2691 #ifdef CONFIG_NO_HZ_COMMON 2692 #define NOHZ_BALANCE_KICK_BIT 0 2693 #define NOHZ_STATS_KICK_BIT 1 2694 #define NOHZ_NEWILB_KICK_BIT 2 2695 #define NOHZ_NEXT_KICK_BIT 3 2696 2697 /* Run rebalance_domains() */ 2698 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2699 /* Update blocked load */ 2700 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2701 /* Update blocked load when entering idle */ 2702 #define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT) 2703 /* Update nohz.next_balance */ 2704 #define NOHZ_NEXT_KICK BIT(NOHZ_NEXT_KICK_BIT) 2705 2706 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK) 2707 2708 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 2709 2710 extern void nohz_balance_exit_idle(struct rq *rq); 2711 #else 2712 static inline void nohz_balance_exit_idle(struct rq *rq) { } 2713 #endif 2714 2715 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 2716 extern void nohz_run_idle_balance(int cpu); 2717 #else 2718 static inline void nohz_run_idle_balance(int cpu) { } 2719 #endif 2720 2721 #ifdef CONFIG_SMP 2722 static inline 2723 void __dl_update(struct dl_bw *dl_b, s64 bw) 2724 { 2725 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 2726 int i; 2727 2728 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2729 "sched RCU must be held"); 2730 for_each_cpu_and(i, rd->span, cpu_active_mask) { 2731 struct rq *rq = cpu_rq(i); 2732 2733 rq->dl.extra_bw += bw; 2734 } 2735 } 2736 #else 2737 static inline 2738 void __dl_update(struct dl_bw *dl_b, s64 bw) 2739 { 2740 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 2741 2742 dl->extra_bw += bw; 2743 } 2744 #endif 2745 2746 2747 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 2748 struct irqtime { 2749 u64 total; 2750 u64 tick_delta; 2751 u64 irq_start_time; 2752 struct u64_stats_sync sync; 2753 }; 2754 2755 DECLARE_PER_CPU(struct irqtime, 
cpu_irqtime); 2756 2757 /* 2758 * Returns the irqtime minus the softirq time computed by ksoftirqd. 2759 * Otherwise, ksoftirqd's sum_exec_runtime would have its own runtime 2760 * subtracted from it and would never move forward. 2761 */ 2762 static inline u64 irq_time_read(int cpu) 2763 { 2764 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 2765 unsigned int seq; 2766 u64 total; 2767 2768 do { 2769 seq = __u64_stats_fetch_begin(&irqtime->sync); 2770 total = irqtime->total; 2771 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 2772 2773 return total; 2774 } 2775 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2776 2777 #ifdef CONFIG_CPU_FREQ 2778 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2779 2780 /** 2781 * cpufreq_update_util - Take a note about CPU utilization changes. 2782 * @rq: Runqueue to carry out the update for. 2783 * @flags: Update reason flags. 2784 * 2785 * This function is called by the scheduler on the CPU whose utilization is 2786 * being updated. 2787 * 2788 * It can only be called from RCU-sched read-side critical sections. 2789 * 2790 * The way cpufreq is currently arranged requires it to evaluate the CPU 2791 * performance state (frequency/voltage) on a regular basis to prevent it from 2792 * being stuck in a completely inadequate performance level for too long. 2793 * That is not guaranteed to happen if the updates are only triggered from CFS 2794 * and DL, though, because those updates may not be coming in if only RT tasks 2795 * are active all the time. 2796 * 2797 * As a workaround for that issue, this function is called periodically by the 2798 * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2799 * but that really is a band-aid. Going forward it should be replaced with 2800 * solutions targeted more specifically at RT tasks. 2801 */ 2802 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2803 { 2804 struct update_util_data *data; 2805 2806 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2807 cpu_of(rq))); 2808 if (data) 2809 data->func(data, rq_clock(rq), flags); 2810 } 2811 #else 2812 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2813 #endif /* CONFIG_CPU_FREQ */ 2814 2815 #ifdef CONFIG_UCLAMP_TASK 2816 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 2817 2818 /** 2819 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. 2820 * @rq: The rq to clamp against. Must not be NULL. 2821 * @util: The util value to clamp. 2822 * @p: The task to clamp against. Can be NULL if you want to clamp 2823 * against @rq only. 2824 * 2825 * Clamps the passed @util to the max(@rq, @p) effective uclamp values. 2826 * 2827 * If the sched_uclamp_used static key is disabled, then just return the util 2828 * without any clamping since uclamp aggregation at the rq level in the fast 2829 * path is disabled, rendering this operation a NOP. 2830 * 2831 * Use uclamp_eff_value() if you don't care about uclamp values at the rq level. It 2832 * will return the correct effective uclamp value of the task even if the 2833 * static key is disabled.
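 *
 * Return: the clamped utilization value, or @util unchanged when the
 * sched_uclamp_used static key is disabled.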
2834 */ 2835 static __always_inline 2836 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 2837 struct task_struct *p) 2838 { 2839 unsigned long min_util = 0; 2840 unsigned long max_util = 0; 2841 2842 if (!static_branch_likely(&sched_uclamp_used)) 2843 return util; 2844 2845 if (p) { 2846 min_util = uclamp_eff_value(p, UCLAMP_MIN); 2847 max_util = uclamp_eff_value(p, UCLAMP_MAX); 2848 2849 /* 2850 * Ignore last runnable task's max clamp, as this task will 2851 * reset it. Similarly, no need to read the rq's min clamp. 2852 */ 2853 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) 2854 goto out; 2855 } 2856 2857 min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value)); 2858 max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value)); 2859 out: 2860 /* 2861 * Since CPU's {min,max}_util clamps are MAX aggregated considering 2862 * RUNNABLE tasks with _different_ clamps, we can end up with an 2863 * inversion. Fix it now when the clamps are applied. 2864 */ 2865 if (unlikely(min_util >= max_util)) 2866 return min_util; 2867 2868 return clamp(util, min_util, max_util); 2869 } 2870 2871 /* 2872 * When uclamp is compiled in, the aggregation at rq level is 'turned off' 2873 * by default in the fast path and only gets turned on once userspace performs 2874 * an operation that requires it. 2875 * 2876 * Returns true if userspace opted-in to use uclamp and aggregation at rq level 2877 * hence is active. 2878 */ 2879 static inline bool uclamp_is_used(void) 2880 { 2881 return static_branch_likely(&sched_uclamp_used); 2882 } 2883 #else /* CONFIG_UCLAMP_TASK */ 2884 static inline 2885 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 2886 struct task_struct *p) 2887 { 2888 return util; 2889 } 2890 2891 static inline bool uclamp_is_used(void) 2892 { 2893 return false; 2894 } 2895 #endif /* CONFIG_UCLAMP_TASK */ 2896 2897 #ifdef arch_scale_freq_capacity 2898 # ifndef arch_scale_freq_invariant 2899 # define arch_scale_freq_invariant() true 2900 # endif 2901 #else 2902 # define arch_scale_freq_invariant() false 2903 #endif 2904 2905 #ifdef CONFIG_SMP 2906 static inline unsigned long capacity_orig_of(int cpu) 2907 { 2908 return cpu_rq(cpu)->cpu_capacity_orig; 2909 } 2910 2911 /** 2912 * enum cpu_util_type - CPU utilization type 2913 * @FREQUENCY_UTIL: Utilization used to select frequency 2914 * @ENERGY_UTIL: Utilization used during energy calculation 2915 * 2916 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 2917 * need to be aggregated differently depending on the usage made of them. This 2918 * enum is used within effective_cpu_util() to differentiate the types of 2919 * utilization expected by the callers, and adjust the aggregation accordingly. 
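 *
 * For instance, the schedutil cpufreq governor passes FREQUENCY_UTIL when
 * selecting a frequency, while the energy estimation code sums utilization
 * using ENERGY_UTIL.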
2920 */ 2921 enum cpu_util_type { 2922 FREQUENCY_UTIL, 2923 ENERGY_UTIL, 2924 }; 2925 2926 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 2927 unsigned long max, enum cpu_util_type type, 2928 struct task_struct *p); 2929 2930 static inline unsigned long cpu_bw_dl(struct rq *rq) 2931 { 2932 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2933 } 2934 2935 static inline unsigned long cpu_util_dl(struct rq *rq) 2936 { 2937 return READ_ONCE(rq->avg_dl.util_avg); 2938 } 2939 2940 static inline unsigned long cpu_util_cfs(struct rq *rq) 2941 { 2942 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); 2943 2944 if (sched_feat(UTIL_EST)) { 2945 util = max_t(unsigned long, util, 2946 READ_ONCE(rq->cfs.avg.util_est.enqueued)); 2947 } 2948 2949 return util; 2950 } 2951 2952 static inline unsigned long cpu_util_rt(struct rq *rq) 2953 { 2954 return READ_ONCE(rq->avg_rt.util_avg); 2955 } 2956 #endif 2957 2958 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 2959 static inline unsigned long cpu_util_irq(struct rq *rq) 2960 { 2961 return rq->avg_irq.util_avg; 2962 } 2963 2964 static inline 2965 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2966 { 2967 util *= (max - irq); 2968 util /= max; 2969 2970 return util; 2971 2972 } 2973 #else 2974 static inline unsigned long cpu_util_irq(struct rq *rq) 2975 { 2976 return 0; 2977 } 2978 2979 static inline 2980 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2981 { 2982 return util; 2983 } 2984 #endif 2985 2986 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 2987 2988 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 2989 2990 DECLARE_STATIC_KEY_FALSE(sched_energy_present); 2991 2992 static inline bool sched_energy_enabled(void) 2993 { 2994 return static_branch_unlikely(&sched_energy_present); 2995 } 2996 2997 #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 2998 2999 #define perf_domain_span(pd) NULL 3000 static inline bool sched_energy_enabled(void) { return false; } 3001 3002 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 3003 3004 #ifdef CONFIG_MEMBARRIER 3005 /* 3006 * The scheduler provides memory barriers required by membarrier between: 3007 * - prior user-space memory accesses and store to rq->membarrier_state, 3008 * - store to rq->membarrier_state and following user-space memory accesses. 3009 * In the same way it provides those guarantees around store to rq->curr. 
3010 */ 3011 static inline void membarrier_switch_mm(struct rq *rq, 3012 struct mm_struct *prev_mm, 3013 struct mm_struct *next_mm) 3014 { 3015 int membarrier_state; 3016 3017 if (prev_mm == next_mm) 3018 return; 3019 3020 membarrier_state = atomic_read(&next_mm->membarrier_state); 3021 if (READ_ONCE(rq->membarrier_state) == membarrier_state) 3022 return; 3023 3024 WRITE_ONCE(rq->membarrier_state, membarrier_state); 3025 } 3026 #else 3027 static inline void membarrier_switch_mm(struct rq *rq, 3028 struct mm_struct *prev_mm, 3029 struct mm_struct *next_mm) 3030 { 3031 } 3032 #endif 3033 3034 #ifdef CONFIG_SMP 3035 static inline bool is_per_cpu_kthread(struct task_struct *p) 3036 { 3037 if (!(p->flags & PF_KTHREAD)) 3038 return false; 3039 3040 if (p->nr_cpus_allowed != 1) 3041 return false; 3042 3043 return true; 3044 } 3045 #endif 3046 3047 extern void swake_up_all_locked(struct swait_queue_head *q); 3048 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 3049 3050 #ifdef CONFIG_PREEMPT_DYNAMIC 3051 extern int preempt_dynamic_mode; 3052 extern int sched_dynamic_mode(const char *str); 3053 extern void sched_dynamic_update(int mode); 3054 #endif 3055 3056