/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/energy_model.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#include <trace/events/sched.h>

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)      WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)      ({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED       1
#define TASK_ON_RQ_MIGRATING    2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

extern void call_trace_sched_update_nr_running(struct rq *rq, int count);

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
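/*
 * Worked example (illustrative, not part of the original header): with
 * HZ == 250, NSEC_PER_SEC / HZ == 4,000,000, so NS_TO_JIFFIES(10,000,000)
 * evaluates to 2 -- ten milliseconds of nanosecond-resolution time rounds
 * down to two 4ms jiffies.
 */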
/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT      (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)          ((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)                                             \
({                                                                      \
        unsigned long __w = (w);                                        \
        if (__w)                                                        \
                __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT);          \
        __w;                                                            \
})
#else
# define NICE_0_LOAD_SHIFT      (SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)          (w)
# define scale_load_down(w)     (w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *  scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD             (1L << NICE_0_LOAD_SHIFT)

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 *  9 -> just above 0.5us
 */
#define DL_SCALE                10

/*
 * Single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF             ((u64)~0ULL)

static inline int idle_policy(int policy)
{
        return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
        return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
        return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
        return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
        return idle_policy(policy) || fair_policy(policy) ||
                rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
        return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
        return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
        return dl_policy(p->policy);
}

#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

static inline void update_avg(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;
        *avg += diff / 8;
}

/*
 * Shifting a value by an exponent greater *or equal* to the size of said value
 * is UB; cap at size-1.
 */
#define shr_bound(val, shift)                                                   \
        (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
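/*
 * Worked example (illustrative, not part of the original header): update_avg()
 * is an exponentially weighted moving average that gives new samples a 1/8
 * weight. With *avg == 100 and sample == 180, diff == 80 and the average
 * moves to 110; eight identical samples in a row close roughly two thirds
 * of the gap to the new value.
 */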
/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching will be
 * available on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV        0x10000000

#define SCHED_DL_FLAGS          (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
        return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
        return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
        return dl_entity_is_special(a) ||
               dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
        /* nests inside the rq lock: */
        raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
        unsigned int            rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

struct dl_bandwidth {
        raw_spinlock_t          dl_runtime_lock;
        u64                     dl_runtime;
        u64                     dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
        return sysctl_sched_rt_runtime >= 0;
}

/*
 * To keep the bandwidth of -deadline tasks under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of each CPU;
 *  - cache the fraction of bandwidth that is currently allocated in
 *    each root domain;
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, bandwidth is given on a per root domain basis,
 * meaning that:
 *  - bw (< 100%) is the deadline bandwidth of each CPU;
 *  - total_bw is the currently allocated bandwidth in each root domain;
 */
struct dl_bw {
        raw_spinlock_t          lock;
        u64                     bw;
        u64                     total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
        dl_b->total_bw -= tsk_bw;
        __dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
        dl_b->total_bw += tsk_bw;
        __dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
                                 u64 old_bw, u64 new_bw)
{
        return dl_b->bw != -1 &&
               cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}
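/*
 * Worked example (illustrative, not part of the original header): with
 * cap == SCHED_CAPACITY_SCALE (1024), cap_scale(dl_b->bw, cap) is just
 * dl_b->bw, so __dl_overflow() reduces to checking whether the per-CPU
 * bandwidth limit would be exceeded after replacing old_bw with new_bw:
 *
 *      dl_b->bw < dl_b->total_bw - old_bw + new_bw
 *
 * On a half-capacity CPU (cap == 512) the admissible bandwidth is halved.
 */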
/*
 * Verify the fitness of task @p to run on @cpu taking into account the
 * CPU original capacity and the runtime/deadline ratio of the task.
 *
 * The function will return true if the CPU original capacity of the
 * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the
 * task and false otherwise.
 */
static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
{
        unsigned long cap = arch_scale_cpu_capacity(cpu);

        return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
}

extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);
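/*
 * Worked example (illustrative, not part of the original header): a task with
 * dl_runtime == 5ms and dl_deadline == 10ms has a runtime/deadline ratio of
 * 1/2, so it fits any CPU with capacity >= SCHED_CAPACITY_SCALE / 2 == 512:
 * cap_scale(10ms, 512) == 5ms, which still satisfies the >= test in
 * dl_task_fits_capacity() above.
 */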
#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>
#include <linux/psi.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
        raw_spinlock_t          lock;
        ktime_t                 period;
        u64                     quota;
        u64                     runtime;
        u64                     burst;
        u64                     runtime_snap;
        s64                     hierarchical_quota;

        u8                      idle;
        u8                      period_active;
        u8                      slack_started;
        struct hrtimer          period_timer;
        struct hrtimer          slack_timer;
        struct list_head        throttled_cfs_rq;

        /* Statistics: */
        int                     nr_periods;
        int                     nr_throttled;
        int                     nr_burst;
        u64                     throttled_time;
        u64                     burst_time;
#endif
};

/* Task group related information */
struct task_group {
        struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* schedulable entities of this group on each CPU */
        struct sched_entity     **se;
        /* runqueue "owned" by this group on each CPU */
        struct cfs_rq           **cfs_rq;
        unsigned long           shares;

        /* A positive value indicates that this is a SCHED_IDLE group. */
        int                     idle;

#ifdef  CONFIG_SMP
        /*
         * load_avg can be heavily contended at clock tick time, so put
         * it in its own cacheline separated from the fields above which
         * will also be accessed at each tick.
         */
        atomic_long_t           load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity  **rt_se;
        struct rt_rq            **rt_rq;

        struct rt_bandwidth     rt_bandwidth;
#endif

        struct rcu_head         rcu;
        struct list_head        list;

        struct task_group       *parent;
        struct list_head        siblings;
        struct list_head        children;

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup        *autogroup;
#endif

        struct cfs_bandwidth    cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
        /* The two decimal precision [%] value requested from user-space */
        unsigned int            uclamp_pct[UCLAMP_CNT];
        /* Clamp values requested for a task group */
        struct uclamp_se        uclamp_req[UCLAMP_CNT];
        /* Effective clamp values used for a task group */
        struct uclamp_se        uclamp[UCLAMP_CNT];
#endif

};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD    NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetics problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same holds for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES              (1UL << 1)
#define MAX_SHARES              (1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
                             tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
        return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                        struct sched_entity *se, int cpu,
                        struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
                               struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern int sched_group_set_idle(struct task_group *tg, long idle);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
                             struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
                             struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */
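/*
 * Worked example (illustrative, not part of the original header): CFS
 * bandwidth control in struct cfs_bandwidth above expresses the limit as
 * quota per period. With period == 100ms and quota == 25ms, the group's
 * tasks may consume at most 25ms of CPU time in each 100ms window (25% of
 * one CPU) before being throttled until the period timer refills runtime.
 */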
/* CFS-related fields in a runqueue */
struct cfs_rq {
        struct load_weight      load;
        unsigned int            nr_running;
        unsigned int            h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
        unsigned int            idle_nr_running;   /* SCHED_IDLE */
        unsigned int            idle_h_nr_running; /* SCHED_IDLE */

        u64                     exec_clock;
        u64                     min_vruntime;
#ifdef CONFIG_SCHED_CORE
        unsigned int            forceidle_seq;
        u64                     min_vruntime_fi;
#endif

#ifndef CONFIG_64BIT
        u64                     min_vruntime_copy;
#endif

        struct rb_root_cached   tasks_timeline;

        /*
         * 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e. when none are currently running).
         */
        struct sched_entity     *curr;
        struct sched_entity     *next;
        struct sched_entity     *last;
        struct sched_entity     *skip;

#ifdef  CONFIG_SCHED_DEBUG
        unsigned int            nr_spread_over;
#endif

#ifdef CONFIG_SMP
        /*
         * CFS load tracking
         */
        struct sched_avg        avg;
#ifndef CONFIG_64BIT
        u64                     load_last_update_time_copy;
#endif
        struct {
                raw_spinlock_t  lock ____cacheline_aligned;
                int             nr;
                unsigned long   load_avg;
                unsigned long   util_avg;
                unsigned long   runnable_avg;
        } removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
        unsigned long           tg_load_avg_contrib;
        long                    propagate;
        long                    prop_runnable_sum;

        /*
         *   h_load = weight * f(tg)
         *
         * Where f(tg) is the recursive weight fraction assigned to
         * this group.
         */
        unsigned long           h_load;
        u64                     last_h_load_update;
        struct sched_entity     *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
        struct rq               *rq;    /* CPU runqueue to which this cfs_rq is attached */

        /*
         * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
         * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
         * (like users, containers etc.)
         *
         * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
         * This list is used during load balance.
         */
        int                     on_list;
        struct list_head        leaf_cfs_rq_list;
        struct task_group       *tg;    /* group that "owns" this runqueue */

        /* Locally cached copy of our task_group's idle value */
        int                     idle;

#ifdef CONFIG_CFS_BANDWIDTH
        int                     runtime_enabled;
        s64                     runtime_remaining;

        u64                     throttled_clock;
        u64                     throttled_clock_task;
        u64                     throttled_clock_task_time;
        int                     throttled;
        int                     throttle_count;
        struct list_head        throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
        return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
        struct rt_prio_array    active;
        unsigned int            rt_nr_running;
        unsigned int            rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        struct {
                int             curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
                int             next; /* next highest */
#endif
        } highest_prio;
#endif
#ifdef CONFIG_SMP
        unsigned int            rt_nr_migratory;
        unsigned int            rt_nr_total;
        int                     overloaded;
        struct plist_head       pushable_tasks;

#endif /* CONFIG_SMP */
        int                     rt_queued;

        int                     rt_throttled;
        u64                     rt_time;
        u64                     rt_runtime;
        /* Nests inside the rq lock: */
        raw_spinlock_t          rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
        unsigned int            rt_nr_boosted;

        struct rq               *rq;
        struct task_group       *tg;
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
        return rt_rq->rt_queued && rt_rq->rt_nr_running;
}

/* Deadline class' related fields in a runqueue */
struct dl_rq {
        /* runqueue is an rbtree, ordered by deadline */
        struct rb_root_cached   root;

        unsigned int            dl_nr_running;

#ifdef CONFIG_SMP
        /*
         * Deadline values of the currently executing and the
         * earliest ready task on this rq. Caching these facilitates
         * the decision whether or not a ready but not running task
         * should migrate somewhere else.
         */
        struct {
                u64             curr;
                u64             next;
        } earliest_dl;

        unsigned int            dl_nr_migratory;
        int                     overloaded;

        /*
         * Tasks on this rq that can be pushed away. They are kept in
         * an rb-tree, ordered by tasks' deadlines, with caching
         * of the leftmost (earliest deadline) element.
         */
        struct rb_root_cached   pushable_dl_tasks_root;
#else
        struct dl_bw            dl_bw;
#endif
        /*
         * "Active utilization" for this runqueue: increased when a
         * task wakes up (becomes TASK_RUNNING) and decreased when a
         * task blocks
         */
        u64                     running_bw;

        /*
         * Utilization of the tasks "assigned" to this runqueue (including
         * the tasks that are in runqueue and the tasks that executed on this
         * CPU and blocked). Increased when a task moves to this runqueue, and
         * decreased when the task moves away (migrates, changes scheduling
         * policy, or terminates).
         * This is needed to compute the "inactive utilization" for the
         * runqueue (inactive utilization = this_bw - running_bw).
         */
        u64                     this_bw;
        u64                     extra_bw;

        /*
         * Inverse of the fraction of CPU utilization that can be reclaimed
         * by the GRUB algorithm.
         */
        u64                     bw_ratio;
};
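/*
 * Worked example (illustrative, not part of the original header): if three
 * deadline tasks with 10% bandwidth each are assigned to this runqueue but
 * only one is currently runnable (the other two are blocked), this_bw
 * reflects all three (30%) while running_bw reflects only the runnable one
 * (10%); the 20% difference is the "inactive utilization" that the GRUB
 * reclaiming math operates on.
 */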
#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline void se_update_runnable(struct sched_entity *se)
{
        if (!entity_is_task(se))
                se->runnable_weight = se->my_q->h_nr_running;
}

static inline long se_runnable(struct sched_entity *se)
{
        if (entity_is_task(se))
                return !!se->on_rq;
        else
                return se->runnable_weight;
}

#else
#define entity_is_task(se)      1

static inline void se_update_runnable(struct sched_entity *se) {}

static inline long se_runnable(struct sched_entity *se)
{
        return !!se->on_rq;
}
#endif

#ifdef CONFIG_SMP
/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
        return scale_load_down(se->load.weight);
}


static inline bool sched_asym_prefer(int a, int b)
{
        return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

struct perf_domain {
        struct em_perf_domain *em_pd;
        struct perf_domain *next;
        struct rcu_head rcu;
};

/* Scheduling group status flags */
#define SG_OVERLOAD             0x1 /* More than one runnable task on a CPU. */
#define SG_OVERUTILIZED         0x2 /* One or more CPUs are over-utilized. */

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
        atomic_t                refcount;
        atomic_t                rto_count;
        struct rcu_head         rcu;
        cpumask_var_t           span;
        cpumask_var_t           online;

        /*
         * Indicate pullable load on at least one CPU, e.g:
         *   - More than one runnable task
         *   - Running task is misfit
         */
        int                     overload;

        /* Indicate one or more cpus over-utilized (tipping point) */
        int                     overutilized;

        /*
         * The bit corresponding to a CPU gets set here if such CPU has more
         * than one runnable -deadline task (as it is below for RT tasks).
         */
        cpumask_var_t           dlo_mask;
        atomic_t                dlo_count;
        struct dl_bw            dl_bw;
        struct cpudl            cpudl;

        /*
         * Indicate whether a root_domain's dl_bw has been checked or
         * updated. It is a monotonically increasing value.
         *
         * Wrap-around is a theoretical corner case, but u64 is big
         * enough that it is not a practical concern.
         */
        u64                     visit_gen;

#ifdef HAVE_RT_PUSH_IPI
        /*
         * For IPI pull requests, loop across the rto_mask.
         */
        struct irq_work         rto_push_work;
        raw_spinlock_t          rto_lock;
        /* These are only updated and read within rto_lock */
        int                     rto_loop;
        int                     rto_cpu;
        /* These atomics are updated outside of a lock */
        atomic_t                rto_loop_next;
        atomic_t                rto_loop_start;
#endif
        /*
         * The "RT overload" flag: it gets set if a CPU has more than
         * one runnable RT task.
         */
        cpumask_var_t           rto_mask;
        struct cpupri           cpupri;

        unsigned long           max_cpu_capacity;

        /*
         * NULL-terminated list of performance domains intersecting with the
         * CPUs of the rd. Protected by RCU.
         */
        struct perf_domain __rcu *pd;
};
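/*
 * Example (illustrative, not part of the original header): partitioning an
 * 8-CPU machine into two exclusive cpusets {0-3} and {4-7} yields two
 * root_domain objects. Each rd->span covers only its partition's CPUs, so
 * state such as rd->dl_bw admission accounting and the rto_mask RT-overload
 * tracking is kept per partition rather than machine-wide.
 */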
extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
        unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
        unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
        unsigned int value;
        struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */
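/*
 * Worked example (illustrative, not part of the original header): with two
 * RUNNABLE tasks whose util_min clamps are 200 and 512, MAX aggregation
 * gives the rq a util_min clamp of 512 (run at least fast enough for the
 * needier task); util_max clamps of 300 and 800 aggregate to 800 (allow
 * up to the most permissive cap).
 */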
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must acquire the locks in
 * ascending runqueue-address order.
 */
struct rq {
        /* runqueue lock: */
        raw_spinlock_t          __lock;

        /*
         * nr_running and cpu_load should be in the same cacheline because
         * remote CPUs use both these fields when doing load calculation.
         */
        unsigned int            nr_running;
#ifdef CONFIG_NUMA_BALANCING
        unsigned int            nr_numa_running;
        unsigned int            nr_preferred_running;
        unsigned int            numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
        unsigned long           last_blocked_load_update_tick;
        unsigned int            has_blocked_load;
        call_single_data_t      nohz_csd;
#endif /* CONFIG_SMP */
        unsigned int            nohz_tick_stopped;
        atomic_t                nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_SMP
        unsigned int            ttwu_pending;
#endif
        u64                     nr_switches;

#ifdef CONFIG_UCLAMP_TASK
        /* Utilization clamp values based on CPU's RUNNABLE tasks */
        struct uclamp_rq        uclamp[UCLAMP_CNT] ____cacheline_aligned;
        unsigned int            uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endif

        struct cfs_rq           cfs;
        struct rt_rq            rt;
        struct dl_rq            dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* list of leaf cfs_rq on this CPU: */
        struct list_head        leaf_cfs_rq_list;
        struct list_head        *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

        /*
         * This is part of a global counter where only the total sum
         * over all CPUs matters. A task can increase this counter on
         * one CPU and if it got migrated afterwards it may decrease
         * it on another CPU. Always updated under the runqueue lock:
         */
        unsigned int            nr_uninterruptible;

        struct task_struct __rcu        *curr;
        struct task_struct      *idle;
        struct task_struct      *stop;
        unsigned long           next_balance;
        struct mm_struct        *prev_mm;

        unsigned int            clock_update_flags;
        u64                     clock;
        /* Ensure that all clocks are in the same cache line */
        u64                     clock_task ____cacheline_aligned;
        u64                     clock_pelt;
        unsigned long           lost_idle_time;

        atomic_t                nr_iowait;

#ifdef CONFIG_SCHED_DEBUG
        u64                     last_seen_need_resched_ns;
        int                     ticks_without_resched;
#endif

#ifdef CONFIG_MEMBARRIER
        int                     membarrier_state;
#endif

#ifdef CONFIG_SMP
        struct root_domain              *rd;
        struct sched_domain __rcu       *sd;

        unsigned long           cpu_capacity;
        unsigned long           cpu_capacity_orig;

        struct callback_head    *balance_callback;

        unsigned char           nohz_idle_balance;
        unsigned char           idle_balance;

        unsigned long           misfit_task_load;

        /* For active balancing */
        int                     active_balance;
        int                     push_cpu;
        struct cpu_stop_work    active_balance_work;

        /* CPU of this runqueue: */
        int                     cpu;
        int                     online;

        struct list_head        cfs_tasks;

        struct sched_avg        avg_rt;
        struct sched_avg        avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
        struct sched_avg        avg_irq;
#endif
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
        struct sched_avg        avg_thermal;
#endif
        u64                     idle_stamp;
        u64                     avg_idle;

        unsigned long           wake_stamp;
        u64                     wake_avg_idle;

        /* This is used to determine avg_idle's max value */
        u64                     max_idle_balance_cost;

#ifdef CONFIG_HOTPLUG_CPU
        struct rcuwait          hotplug_wait;
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        u64                     prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
        u64                     prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        u64                     prev_steal_time_rq;
#endif

        /* calc_load related fields */
        unsigned long           calc_load_update;
        long                    calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
        call_single_data_t      hrtick_csd;
#endif
        struct hrtimer          hrtick_timer;
        ktime_t                 hrtick_time;
#endif
#ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info       rq_sched_info;
        unsigned long long      rq_cpu_time;
        /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

        /* sys_sched_yield() stats */
        unsigned int            yld_count;

        /* schedule() stats */
        unsigned int            sched_count;
        unsigned int            sched_goidle;

        /* try_to_wake_up() stats */
        unsigned int            ttwu_count;
        unsigned int            ttwu_local;
#endif

#ifdef CONFIG_CPU_IDLE
        /* Must be inspected within a rcu lock section */
        struct cpuidle_state    *idle_state;
#endif

#ifdef CONFIG_SMP
        unsigned int            nr_pinned;
#endif
        unsigned int            push_busy;
        struct cpu_stop_work    push_work;

#ifdef CONFIG_SCHED_CORE
        /* per rq */
        struct rq               *core;
        struct task_struct      *core_pick;
        unsigned int            core_enabled;
        unsigned int            core_sched_seq;
        struct rb_root          core_tree;

        /* shared state -- careful with sched_core_cpu_deactivate() */
        unsigned int            core_task_seq;
        unsigned int            core_pick_seq;
        unsigned long           core_cookie;
        unsigned char           core_forceidle;
        unsigned int            core_forceidle_seq;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

#else

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}
#endif

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;
#else
        return 0;
#endif
}

#define MDF_PUSH        0x01

static inline bool is_migration_disabled(struct task_struct *p)
{
#ifdef CONFIG_SMP
        return p->migration_disabled;
#else
        return false;
#endif
}

struct sched_group;
#ifdef CONFIG_SCHED_CORE
static inline struct cpumask *sched_group_span(struct sched_group *sg);

DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);

static inline bool sched_core_enabled(struct rq *rq)
{
        return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
}

static inline bool sched_core_disabled(void)
{
        return !static_branch_unlikely(&__sched_core_enabled);
}

/*
 * Be careful with this function; not for general use. The return value isn't
 * stable unless you actually hold a relevant rq->__lock.
 */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
        if (sched_core_enabled(rq))
                return &rq->core->__lock;

        return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
        if (rq->core_enabled)
                return &rq->core->__lock;

        return &rq->__lock;
}

bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);

/*
 * Helpers to check if the CPU's core cookie matches with the task's cookie
 * when core scheduling is enabled.
 * A special case is that the task's cookie always matches with CPU's core
 * cookie if the CPU is in an idle core.
 */
static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
        /* Ignore cookie match if core scheduler is not enabled on the CPU. */
        if (!sched_core_enabled(rq))
                return true;

        return rq->core->core_cookie == p->core_cookie;
}
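/*
 * Example (illustrative, not part of the original header): with core
 * scheduling enabled, SMT siblings may only run tasks whose cookies match
 * the core's current cookie. An untagged task (cookie == 0) does not match
 * a core currently running a tagged task, so rather than co-schedule across
 * that trust boundary the sibling is forced idle (see core_forceidle above).
 */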
static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
        bool idle_core = true;
        int cpu;

        /* Ignore cookie match if core scheduler is not enabled on the CPU. */
        if (!sched_core_enabled(rq))
                return true;

        for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
                if (!available_idle_cpu(cpu)) {
                        idle_core = false;
                        break;
                }
        }

        /*
         * A CPU in an idle core is always the best choice for tasks with
         * cookies.
         */
        return idle_core || rq->core->core_cookie == p->core_cookie;
}

static inline bool sched_group_cookie_match(struct rq *rq,
                                            struct task_struct *p,
                                            struct sched_group *group)
{
        int cpu;

        /* Ignore cookie match if core scheduler is not enabled on the CPU. */
        if (!sched_core_enabled(rq))
                return true;

        for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
                if (sched_core_cookie_match(rq, p))
                        return true;
        }
        return false;
}

extern void queue_core_balance(struct rq *rq);

static inline bool sched_core_enqueued(struct task_struct *p)
{
        return !RB_EMPTY_NODE(&p->core_node);
}

extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
extern void sched_core_dequeue(struct rq *rq, struct task_struct *p);

extern void sched_core_get(void);
extern void sched_core_put(void);

extern unsigned long sched_core_alloc_cookie(void);
extern void sched_core_put_cookie(unsigned long cookie);
extern unsigned long sched_core_get_cookie(unsigned long cookie);
extern unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie);

#else /* !CONFIG_SCHED_CORE */

static inline bool sched_core_enabled(struct rq *rq)
{
        return false;
}

static inline bool sched_core_disabled(void)
{
        return true;
}

static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
        return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
        return &rq->__lock;
}

static inline void queue_core_balance(struct rq *rq)
{
}

static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
        return true;
}

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
        return true;
}

static inline bool sched_group_cookie_match(struct rq *rq,
                                            struct task_struct *p,
                                            struct sched_group *group)
{
        return true;
}
#endif /* CONFIG_SCHED_CORE */

static inline void lockdep_assert_rq_held(struct rq *rq)
{
        lockdep_assert_held(__rq_lockp(rq));
}

extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
extern bool raw_spin_rq_trylock(struct rq *rq);
extern void raw_spin_rq_unlock(struct rq *rq);

static inline void raw_spin_rq_lock(struct rq *rq)
{
        raw_spin_rq_lock_nested(rq, 0);
}

static inline void raw_spin_rq_lock_irq(struct rq *rq)
{
        local_irq_disable();
        raw_spin_rq_lock(rq);
}

static inline void raw_spin_rq_unlock_irq(struct rq *rq)
{
        raw_spin_rq_unlock(rq);
        local_irq_enable();
}
static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
{
        unsigned long flags;

        local_irq_save(flags);
        raw_spin_rq_lock(rq);

        return flags;
}

static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
{
        raw_spin_rq_unlock(rq);
        local_irq_restore(flags);
}

#define raw_spin_rq_lock_irqsave(rq, flags)     \
do {                                            \
        flags = _raw_spin_rq_lock_irqsave(rq);  \
} while (0)

#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
        if (static_branch_unlikely(&sched_smt_present))
                __update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
#define this_rq()               this_cpu_ptr(&runqueues)
#define task_rq(p)              cpu_rq(task_cpu(p))
#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
#define raw_rq()                raw_cpu_ptr(&runqueues)
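/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *      struct rq *rq   = cpu_rq(3);    // runqueue of CPU 3
 *      struct rq *here = this_rq();    // this CPU's rq (preemption disabled)
 *
 * task_rq(p) follows p's current task_cpu(), so it is only stable while
 * holding p->pi_lock and/or the resulting rq's lock (see task_rq_lock()
 * below).
 */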
#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
{
        SCHED_WARN_ON(!entity_is_task(se));
        return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

#else

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}
#endif

extern void update_rq_clock(struct rq *rq);

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *  call to __schedule(). This is an optimisation to avoid
 *  neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *  in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *  made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *      if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP           0x01
#define RQCF_ACT_SKIP           0x02
#define RQCF_UPDATED            0x04

static inline void assert_clock_updated(struct rq *rq)
{
        /*
         * The only reason for not seeing a clock update since the
         * last rq_pin_lock() is if we're currently skipping updates.
         */
        SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
        lockdep_assert_rq_held(rq);
        assert_clock_updated(rq);

        return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
        lockdep_assert_rq_held(rq);
        assert_clock_updated(rq);

        return rq->clock_task;
}

/**
 * By default the decay is the default PELT decay period.
 * The decay shift can change the decay period in
 * multiples of 32.
 *
 *  Decay shift         Decay period(ms)
 *      0                       32
 *      1                       64
 *      2                      128
 *      3                      256
 *      4                      512
 */
extern int sched_thermal_decay_shift;

static inline u64 rq_clock_thermal(struct rq *rq)
{
        return rq_clock_task(rq) >> sched_thermal_decay_shift;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
        lockdep_assert_rq_held(rq);
        rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is canceled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
        lockdep_assert_rq_held(rq);
        rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

struct rq_flags {
        unsigned long flags;
        struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
        /*
         * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
         * current pin context is stashed here in case it needs to be
         * restored in rq_repin_lock().
         */
        unsigned int clock_update_flags;
#endif
};

extern struct callback_head balance_push_callback;

/*
 * Lockdep annotation that avoids accidental unlocks; it's like a
 * sticky/continuous lockdep_assert_held().
 *
 * This avoids code that has access to 'struct rq *rq' (basically everything in
 * the scheduler) from accidentally unlocking the rq if they do not also have a
 * copy of the (on-stack) 'struct rq_flags rf'.
 *
 * Also see Documentation/locking/lockdep-design.rst.
 */
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
        rf->cookie = lockdep_pin_lock(__rq_lockp(rq));

#ifdef CONFIG_SCHED_DEBUG
        rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
        rf->clock_update_flags = 0;
#ifdef CONFIG_SMP
        SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
#endif
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
        if (rq->clock_update_flags > RQCF_ACT_SKIP)
                rf->clock_update_flags = RQCF_UPDATED;
#endif

        lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
        lockdep_repin_lock(__rq_lockp(rq), rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
        /*
         * Restore the value we stashed in @rf for this pin context.
         */
        rq->clock_update_flags |= rf->clock_update_flags;
#endif
}
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(p->pi_lock)
        __acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
        __releases(rq->lock)
{
        rq_unpin_lock(rq, rf);
        raw_spin_rq_unlock(rq);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
        __releases(rq->lock)
        __releases(p->pi_lock)
{
        rq_unpin_lock(rq, rf);
        raw_spin_rq_unlock(rq);
        raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
        __acquires(rq->lock)
{
        raw_spin_rq_lock_irqsave(rq, rf->flags);
        rq_pin_lock(rq, rf);
}

static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
        __acquires(rq->lock)
{
        raw_spin_rq_lock_irq(rq);
        rq_pin_lock(rq, rf);
}

static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
        __acquires(rq->lock)
{
        raw_spin_rq_lock(rq);
        rq_pin_lock(rq, rf);
}

static inline void
rq_relock(struct rq *rq, struct rq_flags *rf)
        __acquires(rq->lock)
{
        raw_spin_rq_lock(rq);
        rq_repin_lock(rq, rf);
}

static inline void
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
        __releases(rq->lock)
{
        rq_unpin_lock(rq, rf);
        raw_spin_rq_unlock_irqrestore(rq, rf->flags);
}

static inline void
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
        __releases(rq->lock)
{
        rq_unpin_lock(rq, rf);
        raw_spin_rq_unlock_irq(rq);
}

static inline void
rq_unlock(struct rq *rq, struct rq_flags *rf)
        __releases(rq->lock)
{
        rq_unpin_lock(rq, rf);
        raw_spin_rq_unlock(rq);
}

static inline struct rq *
this_rq_lock_irq(struct rq_flags *rf)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        rq_lock(rq, rf);

        return rq;
}
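/*
 * Usage sketch (illustrative, not part of the original header; the function
 * name is hypothetical): every rq_lock*() must be paired with the matching
 * rq_unlock*() on the same rq_flags, which carries both the saved IRQ state
 * and the lockdep pin cookie:
 */
static inline unsigned int sched_sketch_nr_running(struct rq *rq)
{
        struct rq_flags rf;
        unsigned int nr;

        rq_lock_irqsave(rq, &rf);       /* lock + pin, IRQ state in rf.flags */
        nr = rq->nr_running;            /* rq state is stable while locked */
        rq_unlock_irqrestore(rq, &rf);  /* unpin + unlock + restore IRQs */

        return nr;
}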
#ifdef CONFIG_NUMA
enum numa_topology_type {
        NUMA_DIRECT,
        NUMA_GLUELESS_MESH,
        NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
        return nr_cpu_ids;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
        NUMA_MEM = 0,
        NUMA_CPU,
        NUMA_MEMBUF,
        NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *p, struct task_struct *t,
                        int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void
init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
                       struct callback_head *head,
                       void (*func)(struct rq *rq))
{
        lockdep_assert_rq_held(rq);

        if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
                return;

        head->func = (void (*)(struct callback_head *))func;
        head->next = rq->balance_callback;
        rq->balance_callback = head;
}

#define rcu_dereference_check_sched_domain(p) \
        rcu_dereference_check((p), \
                              lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See destroy_sched_domains: call_rcu for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
        for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
                        __sd; __sd = __sd->parent)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:        The CPU whose highest level of sched domain is to
 *              be returned.
 * @flag:       The flag to check for the highest sched_domain
 *              for the given CPU.
 *
 * Returns the highest sched_domain of a CPU which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
        struct sched_domain *sd, *hsd = NULL;

        for_each_domain(cpu, sd) {
                if (!(sd->flags & flag))
                        break;
                hsd = sd;
        }

        return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
        struct sched_domain *sd;

        for_each_domain(cpu, sd) {
                if (sd->flags & flag)
                        break;
        }

        return sd;
}
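/*
 * Example (illustrative assumption, not stated in this header): the per-CPU
 * sd_llc pointer declared below is populated in kernel/sched/topology.c via
 * highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES), i.e. the widest domain
 * whose CPUs still share last-level-cache resources.
 */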
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
extern struct static_key_false sched_asym_cpucapacity;

struct sched_group_capacity {
        atomic_t                ref;
        /*
         * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
         * for a single CPU.
         */
        unsigned long           capacity;
        unsigned long           min_capacity;   /* Min per-CPU capacity in group */
        unsigned long           max_capacity;   /* Max per-CPU capacity in group */
        unsigned long           next_update;
        int                     imbalance;      /* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
        int                     id;
#endif

        unsigned long           cpumask[];      /* Balance mask */
};

struct sched_group {
        struct sched_group      *next;          /* Must be a circular list */
        atomic_t                ref;

        unsigned int            group_weight;
        struct sched_group_capacity *sgc;
        int                     asym_prefer_cpu; /* CPU of highest priority in group */

        /*
         * The CPUs this group covers.
         *
         * NOTE: this field is variable length. (Allocated dynamically
         * by attaching extra space to the end of the structure,
         * depending on how many CPUs the kernel has booted up with)
         */
        unsigned long           cpumask[];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
        return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
        return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
 * @group: The group whose first CPU is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
        return cpumask_first(sched_group_span(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#ifdef CONFIG_SCHED_DEBUG
void update_sched_domain_debugfs(void);
void dirty_sched_domain_sysctl(int cpu);
#else
static inline void update_sched_domain_debugfs(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
#endif

extern int sched_update_scaling(void);

extern void flush_smp_call_function_from_idle(void);

#else /* !CONFIG_SMP: */
static inline void flush_smp_call_function_from_idle(void) { }
#endif

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
        return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
        struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
        p->se.cfs_rq = tg->cfs_rq[cpu];
        p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        p->rt.rt_rq  = tg->rt_rq[cpu];
        p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
        return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
        set_task_rq(p, cpu);
#ifdef CONFIG_SMP
        /*
         * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
         * successfully executed on another CPU. We must ensure that updates of
         * per-task data have been completed by this moment.
         */
        smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
        WRITE_ONCE(p->cpu, cpu);
#else
        WRITE_ONCE(task_thread_info(p)->cpu, cpu);
#endif
        p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

#define SCHED_FEAT(name, enabled)       \
        __SCHED_FEAT_##name ,

enum {
#include "features.h"
        __SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG

/*
 * To support run-time toggling of sched features, all the translation units
 * (but core.c) reference the sysctl_sched_features defined in core.c.
 */
extern const_debug unsigned int sysctl_sched_features;

#ifdef CONFIG_JUMP_LABEL
#define SCHED_FEAT(name, enabled)                                       \
static __always_inline bool static_branch_##name(struct static_key *key) \
{                                                                       \
        return static_key_##enabled(key);                               \
}

#include "features.h"
#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))

#else /* !CONFIG_JUMP_LABEL */

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* CONFIG_JUMP_LABEL */

#else /* !SCHED_DEBUG */

/*
 * Each translation unit has its own copy of sysctl_sched_features to allow
 * constants propagation at compile time and compiler optimization based on
 * features default.
 */
#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |
static const_debug __maybe_unused unsigned int sysctl_sched_features =
#include "features.h"
        0;
#undef SCHED_FEAT

#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* SCHED_DEBUG */

extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
        return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
        if (sysctl_sched_rt_runtime < 0)
                return RUNTIME_INF;

        return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
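/*
 * Worked example (illustrative, not part of the original header): with the
 * usual defaults of sysctl_sched_rt_period == 1000000us and
 * sysctl_sched_rt_runtime == 950000us, global_rt_period() returns 10^9 ns
 * and global_rt_runtime() 9.5 * 10^8 ns: RT tasks may consume at most 95%
 * of each one-second period. Writing -1 to the runtime sysctl yields
 * RUNTIME_INF, i.e. no RT throttling.
 */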
/* Wake flags. The first three directly map to some SD flag value */
#define WF_EXEC		0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
#define WF_FORK		0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
#define WF_TTWU		0x08 /* Wakeup;            maps to SD_BALANCE_WAKE */

#define WF_SYNC		0x10 /* Waker goes to sleep after wakeup */
#define WF_MIGRATED	0x20 /* Internal use, task got migrated */
#define WF_ON_CPU	0x40 /* Wakee is on_cpu */

#ifdef CONFIG_SMP
static_assert(WF_EXEC == SD_BALANCE_EXEC);
static_assert(WF_FORK == SD_BALANCE_FORK);
static_assert(WF_TTWU == SD_BALANCE_WAKE);
#endif

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int		sched_prio_to_weight[40];
extern const u32		sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04
#define ENQUEUE_NOCLOCK		0x08

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {

#ifdef CONFIG_UCLAMP_TASK
	int uclamp_enabled;
#endif

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task)   (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p);

	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	struct task_struct *(*pick_next_task)(struct rq *rq);

	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

#ifdef CONFIG_SMP
	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
	int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);

	struct task_struct * (*pick_task)(struct rq *rq);

	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask,
				 u32 flags);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);

	struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
#endif

	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork)(struct task_struct *p);
	void (*task_dead)(struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval)(struct rq *rq,
					struct task_struct *task);

	void (*update_curr)(struct rq *rq);

#define TASK_SET_GROUP		0
#define TASK_MOVE_GROUP		1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group)(struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	WARN_ON_ONCE(rq->curr != prev);
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_next_task(struct rq *rq, struct task_struct *next)
{
	next->sched_class->set_next_task(rq, next, false);
}


/*
 * Helper to define a sched_class instance; each one is placed in a separate
 * section which is ordered by the linker script:
 *
 *   include/asm-generic/vmlinux.lds.h
 *
 * Also enforce alignment on the instance, not the type, to guarantee layout.
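 *
 * For example (illustrative expansion only), DEFINE_SCHED_CLASS(fair)
 * produces:
 *
 *	const struct sched_class fair_sched_class
 *		__aligned(__alignof__(struct sched_class))
 *		__section("__fair_sched_class")
 *
 * The resulting section order is what makes the highest-to-lowest class
 * iteration below work; the core pick loop is essentially (a sketch of the
 * pattern used by pick_next_task() in core.c):
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 *
 * (The idle class always has a runnable task, so the loop terminates
 * with one.)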
 */
#define DEFINE_SCHED_CLASS(name) \
const struct sched_class name##_sched_class \
	__aligned(__alignof__(struct sched_class)) \
	__section("__" #name "_sched_class")

/* Defined in include/asm-generic/vmlinux.lds.h */
extern struct sched_class __begin_sched_classes[];
extern struct sched_class __end_sched_classes[];

#define sched_class_highest (__end_sched_classes - 1)
#define sched_class_lowest  (__begin_sched_classes - 1)

#define for_class_range(class, _from, _to) \
	for (class = (_from); class != (_to); class--)

#define for_each_class(class) \
	for_class_range(class, sched_class_highest, sched_class_lowest)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

static inline bool sched_stop_runnable(struct rq *rq)
{
	return rq->stop && task_on_rq_queued(rq->stop);
}

static inline bool sched_dl_runnable(struct rq *rq)
{
	return rq->dl.dl_nr_running > 0;
}

static inline bool sched_rt_runnable(struct rq *rq)
{
	return rq->rt.rt_queued > 0;
}

static inline bool sched_fair_runnable(struct rq *rq)
{
	return rq->cfs.nr_running > 0;
}

extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
extern struct task_struct *pick_next_task_idle(struct rq *rq);

#define SCA_CHECK		0x01
#define SCA_MIGRATE_DISABLE	0x02
#define SCA_MIGRATE_ENABLE	0x04
#define SCA_USER		0x08

#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);

static inline struct task_struct *get_push_task(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	lockdep_assert_rq_held(rq);

	if (rq->push_busy)
		return NULL;

	if (p->nr_cpus_allowed == 1)
		return NULL;

	if (p->migration_disabled)
		return NULL;

	rq->push_busy = true;
	return get_task_struct(p);
}

extern int push_cpu_stop(void *arg);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());

	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void schedule_idle(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void reweight_task(struct task_struct *p, int prio);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);

#define BW_SHIFT		20
#define BW_UNIT			(1 << BW_SHIFT)
#define RATIO_SHIFT		8
#define MAX_BW_BITS		(64 - BW_SHIFT)
#define MAX_BW			((1ULL << MAX_BW_BITS) - 1)
unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct task_struct *p);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
extern int __init sched_tick_offload_init(void);

/*
 * The tick may be needed by tasks in the runqueue depending on their policy
 * and requirements. If the tick is needed, send the target an IPI to kick it
 * out of nohz mode.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline int sched_tick_offload_init(void) { return 0; }
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;
	if (trace_sched_update_nr_running_tp_enabled()) {
		call_trace_sched_update_nr_running(rq, count);
	}

#ifdef CONFIG_SMP
	if (prev_nr < 2 && rq->nr_running >= 2) {
		if (!READ_ONCE(rq->rd->overload))
			WRITE_ONCE(rq->rd->overload, 1);
	}
#endif

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	if (trace_sched_update_nr_running_tp_enabled()) {
		call_trace_sched_update_nr_running(rq, -count);
	}

	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_idle_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern int sysctl_resched_latency_warn_ms;
extern int sysctl_resched_latency_warn_once;

extern unsigned int sysctl_sched_tunable_scaling;

extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
#endif

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static inline int hrtick_enabled_fair(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	return hrtick_enabled(rq);
}

static inline int hrtick_enabled_dl(struct rq *rq)
{
	if (!sched_feat(HRTICK_DL))
		return 0;
	return hrtick_enabled(rq);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled_fair(struct rq *rq)
{
	return 0;
}

static inline int hrtick_enabled_dl(struct rq *rq)
{
	return 0;
}

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifndef arch_scale_freq_tick
static __always_inline
void arch_scale_freq_tick(void)
{
}
#endif

#ifndef arch_scale_freq_capacity
/**
 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *	f_curr
 *	------ * SCHED_CAPACITY_SCALE
 *	f_max
 */
static __always_inline
unsigned long arch_scale_freq_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif


#ifdef CONFIG_SMP

static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
{
#ifdef CONFIG_SCHED_CORE
	/*
	 * In order to not have {0,2},{1,3} turn into an AB-BA,
	 * order by core-id first and cpu-id second.
	 *
	 * Notably:
	 *
	 *	double_rq_lock(0,3); will take core-0, core-1 lock
	 *	double_rq_lock(1,2); will take core-1, core-0 lock
	 *
	 * when only cpu-id is considered.
	 */
	if (rq1->core->cpu < rq2->core->cpu)
		return true;
	if (rq1->core->cpu > rq2->core->cpu)
		return false;

	/*
	 * __sched_core_flip() relies on SMT having cpu-id lock order.
	 */
#endif
	return rq1->cpu < rq2->cpu;
}

extern void double_rq_lock(struct rq *rq1, struct rq *rq2);

#ifdef CONFIG_PREEMPTION

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_rq_unlock(this_rq);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.
 * This favors lower CPU-ids and will grant the double lock to lower CPUs
 * over higher ids under contention, regardless of entry order into the
 * function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	if (__rq_lockp(this_rq) == __rq_lockp(busiest))
		return 0;

	if (likely(raw_spin_rq_trylock(busiest)))
		return 0;

	if (rq_order_less(this_rq, busiest)) {
		raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
		return 0;
	}

	raw_spin_rq_unlock(this_rq);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#endif /* CONFIG_PREEMPTION */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	lockdep_assert_irqs_disabled();

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	if (__rq_lockp(this_rq) != __rq_lockp(busiest))
		raw_spin_rq_unlock(busiest);
	lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_unlock(rq2);
	else
		__release(rq2->lock);
	raw_spin_rq_unlock(rq1);
}

extern void set_rq_online (struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_rq_lock(rq1);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
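 *
 * A typical pairing, in either configuration, looks like this (sketch,
 * not a specific call site):
 *
 *	local_irq_save(flags);
 *	double_rq_lock(rq1, rq2);
 *	...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_restore(flags);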
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_rq_unlock(rq1);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern bool sched_debug_verbose;

extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);

extern void resched_latency_warn(int cpu, u64 latency);
#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#else
static inline void resched_latency_warn(int cpu, u64 latency) {}
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
#define NOHZ_BALANCE_KICK_BIT	0
#define NOHZ_STATS_KICK_BIT	1
#define NOHZ_NEWILB_KICK_BIT	2
#define NOHZ_NEXT_KICK_BIT	3

/* Run rebalance_domains() */
#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
/* Update blocked load */
#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
/* Update blocked load when entering idle */
#define NOHZ_NEWILB_KICK	BIT(NOHZ_NEWILB_KICK_BIT)
/* Update nohz.next_balance */
#define NOHZ_NEXT_KICK		BIT(NOHZ_NEXT_KICK_BIT)

#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(struct rq *rq);
#else
static inline void nohz_balance_exit_idle(struct rq *rq) { }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_run_idle_balance(int cpu);
#else
static inline void nohz_run_idle_balance(int cpu) { }
#endif

#ifdef CONFIG_SMP
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
	int i;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask) {
		struct rq *rq = cpu_rq(i);

		rq->dl.extra_bw += bw;
	}
}
#else
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

	dl->extra_bw += bw;
}
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64			total;
	u64			tick_delta;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
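/*
 * The write side (see irqtime_account_irq() in cputime.c) updates ->total
 * under u64_stats_update_begin()/u64_stats_update_end(); irq_time_read()
 * below is the paired lockless read loop.
 */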
/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted and would never move forward.
 */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because they may not be coming in if RT tasks are the only
 * ones active all the time.
 *
 * As a workaround for that issue, this function is called periodically by the
 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid. Going forward it should be replaced with
 * solutions targeted more specifically at RT tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */

#ifdef CONFIG_UCLAMP_TASK
unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);

/**
 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
 * @rq: The rq to clamp against. Must not be NULL.
 * @util: The util value to clamp.
 * @p: The task to clamp against. Can be NULL if you want to clamp
 *     against @rq only.
 *
 * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
 *
 * If sched_uclamp_used static key is disabled, then just return the util
 * without any clamping since uclamp aggregation at the rq level in the fast
 * path is disabled, rendering this operation a NOP.
 *
 * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
 * will return the correct effective uclamp value of the task even if the
 * static key is disabled.
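 *
 * A minimal usage sketch (e.g. clamping the CFS utilization of @rq before
 * a frequency decision, with no specific task in mind):
 *
 *	unsigned long util = cpu_util_cfs(rq);
 *
 *	util = uclamp_rq_util_with(rq, util, NULL);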
 */
static __always_inline
unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
				  struct task_struct *p)
{
	unsigned long min_util = 0;
	unsigned long max_util = 0;

	if (!static_branch_likely(&sched_uclamp_used))
		return util;

	if (p) {
		min_util = uclamp_eff_value(p, UCLAMP_MIN);
		max_util = uclamp_eff_value(p, UCLAMP_MAX);

		/*
		 * Ignore last runnable task's max clamp, as this task will
		 * reset it. Similarly, no need to read the rq's min clamp.
		 */
		if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
			goto out;
	}

	min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
	max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
out:
	/*
	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
	 * RUNNABLE tasks with _different_ clamps, we can end up with an
	 * inversion. Fix it now when the clamps are applied.
	 */
	if (unlikely(min_util >= max_util))
		return min_util;

	return clamp(util, min_util, max_util);
}

/*
 * When uclamp is compiled in, the aggregation at rq level is 'turned off'
 * by default in the fast path and only gets turned on once userspace performs
 * an operation that requires it.
 *
 * Returns true if userspace opted-in to use uclamp and aggregation at rq level
 * hence is active.
 */
static inline bool uclamp_is_used(void)
{
	return static_branch_likely(&sched_uclamp_used);
}
#else /* CONFIG_UCLAMP_TASK */
static inline
unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
				  struct task_struct *p)
{
	return util;
}

static inline bool uclamp_is_used(void)
{
	return false;
}
#endif /* CONFIG_UCLAMP_TASK */

#ifdef arch_scale_freq_capacity
# ifndef arch_scale_freq_invariant
#  define arch_scale_freq_invariant()	true
# endif
#else
# define arch_scale_freq_invariant()	false
#endif

#ifdef CONFIG_SMP
static inline unsigned long capacity_orig_of(int cpu)
{
	return cpu_rq(cpu)->cpu_capacity_orig;
}

/**
 * enum cpu_util_type - CPU utilization type
 * @FREQUENCY_UTIL:	Utilization used to select frequency
 * @ENERGY_UTIL:	Utilization used during energy calculation
 *
 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
 * need to be aggregated differently depending on the usage made of them. This
 * enum is used within effective_cpu_util() to differentiate the types of
 * utilization expected by the callers, and adjust the aggregation accordingly.
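 *
 * For example (sketch; assumes @max is the CPU's maximum capacity), a
 * frequency-selection caller would aggregate with:
 *
 *	util = effective_cpu_util(cpu, cpu_util_cfs(rq), max,
 *				  FREQUENCY_UTIL, NULL);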
 */
enum cpu_util_type {
	FREQUENCY_UTIL,
	ENERGY_UTIL,
};

unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum cpu_util_type type,
				 struct task_struct *p);

static inline unsigned long cpu_bw_dl(struct rq *rq)
{
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

static inline unsigned long cpu_util_dl(struct rq *rq)
{
	return READ_ONCE(rq->avg_dl.util_avg);
}

static inline unsigned long cpu_util_cfs(struct rq *rq)
{
	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

	if (sched_feat(UTIL_EST)) {
		util = max_t(unsigned long, util,
			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
	}

	return util;
}

static inline unsigned long cpu_util_rt(struct rq *rq)
{
	return READ_ONCE(rq->avg_rt.util_avg);
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return rq->avg_irq.util_avg;
}

static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	util *= (max - irq);
	util /= max;

	return util;
}
#else
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return 0;
}

static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	return util;
}
#endif

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)

#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))

DECLARE_STATIC_KEY_FALSE(sched_energy_present);

static inline bool sched_energy_enabled(void)
{
	return static_branch_unlikely(&sched_energy_present);
}

#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */

#define perf_domain_span(pd) NULL
static inline bool sched_energy_enabled(void) { return false; }

#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */

#ifdef CONFIG_MEMBARRIER
/*
 * The scheduler provides memory barriers required by membarrier between:
 * - prior user-space memory accesses and store to rq->membarrier_state,
 * - store to rq->membarrier_state and following user-space memory accesses.
 * In the same way it provides those guarantees around store to rq->curr.
 */
static inline void membarrier_switch_mm(struct rq *rq,
					struct mm_struct *prev_mm,
					struct mm_struct *next_mm)
{
	int membarrier_state;

	if (prev_mm == next_mm)
		return;

	membarrier_state = atomic_read(&next_mm->membarrier_state);
	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
		return;

	WRITE_ONCE(rq->membarrier_state, membarrier_state);
}
#else
static inline void membarrier_switch_mm(struct rq *rq,
					struct mm_struct *prev_mm,
					struct mm_struct *next_mm)
{
}
#endif

#ifdef CONFIG_SMP
static inline bool is_per_cpu_kthread(struct task_struct *p)
{
	if (!(p->flags & PF_KTHREAD))
		return false;

	if (p->nr_cpus_allowed != 1)
		return false;

	return true;
}
#endif

extern void swake_up_all_locked(struct swait_queue_head *q);
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);

#ifdef CONFIG_PREEMPT_DYNAMIC
extern int preempt_dynamic_mode;
extern int sched_dynamic_mode(const char *str);
extern void sched_dynamic_update(int mode);
#endif