/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/energy_model.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>
#include <asm-generic/vmlinux.lds.h>

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#include <trace/events/sched.h>

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

extern void call_trace_sched_update_nr_running(struct rq *rq, int count);

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup) and of deeper
 * task-group hierarchies, especially on larger systems. This is not a
 * user-visible change and does not change the user interface for setting
 * shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution on 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)					\
({								\
	unsigned long __w = (w);				\
	if (__w)						\
		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT);	\
	__w;							\
})
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
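/*
 * Worked example (illustrative only, assuming SCHED_FIXEDPOINT_SHIFT == 10):
 * the nice-0 weight visible to users is 1024, so on 64-bit
 *
 *	scale_load(1024)	 == 1024 << 10		  == 1048576 == NICE_0_LOAD
 *	scale_load_down(1048576) == max(2, 1048576 >> 10) == 1024
 *
 * i.e. the two helpers round-trip the user-visible weight, while the extra
 * 10 bits of internal resolution are what improves shares distribution for
 * low-weight groups.
 */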
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
	return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s)		((v)*(s) >> SCHED_CAPACITY_SHIFT)

static inline void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff / 8;
}
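/*
 * update_avg() keeps a simple exponentially weighted moving average with a
 * weight of 1/8 for the new sample. Illustrative numbers (not from the
 * source): with *avg == 800 and sample == 1600, diff == 800 and the new
 * average becomes 800 + 800/8 == 900, so a single outlier moves the average
 * by only an eighth of its distance.
 */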
/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is available
 * on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", neither do
 * we need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It, in turn, can be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
				 u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}

/*
 * Verify the fitness of task @p to run on @cpu taking into account the
 * CPU original capacity and the runtime/deadline ratio of the task.
 *
 * The function will return true if the original capacity of @cpu, scaled
 * by SCHED_CAPACITY_SCALE, is >= the runtime/deadline ratio of the task
 * and false otherwise.
 */
static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned long cap = arch_scale_cpu_capacity(cpu);

	return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
}
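/*
 * Worked example (illustrative values only): with SCHED_CAPACITY_SCALE ==
 * 1024, a task with dl_runtime == 4ms and dl_deadline == 16ms needs a
 * quarter of a full-capacity CPU, so it fits any CPU with
 * arch_scale_cpu_capacity() >= 256:
 *
 *	cap_scale(16ms, 256) == 16ms * 256 >> 10 == 4ms >= dl_runtime
 *
 * On a CPU of capacity 128 the same check yields 2ms < 4ms and the task
 * does not fit.
 */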
extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>
#include <linux/psi.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	s64			hierarchical_quota;

	u8			idle;
	u8			period_active;
	u8			slack_started;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

#ifdef	CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* The two-decimal-precision [%] value requested from user-space */
	unsigned int		uclamp_pct[UCLAMP_CNT];
	/* Clamp values requested for a task group */
	struct uclamp_se	uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a task group */
	struct uclamp_se	uclamp[UCLAMP_CNT];
#endif

};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL <<  1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
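/*
 * Minimal usage sketch (illustrative only; tg_count_one() is a hypothetical
 * visitor, not a kernel function): a pre-order visitor that counts the task
 * groups in the hierarchy, with tg_nop() as the no-op @up callback:
 *
 *	static int tg_count_one(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// a non-zero return would abort the walk
 *	}
 *
 *	int count = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count_one, tg_nop, &count);
 *	rcu_read_unlock();
 */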
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned int		nr_running;
	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
	unsigned int		idle_h_nr_running; /* SCHED_IDLE */

	u64			exec_clock;
	u64			min_vruntime;
#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif

	struct rb_root_cached	tasks_timeline;
	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;
	struct sched_entity	*last;
	struct sched_entity	*skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			load_last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_avg;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	s64			runtime_remaining;

	u64			throttled_clock;
	u64			throttled_clock_task;
	u64			throttled_clock_task_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int		curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int		next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long		rt_nr_migratory;
	unsigned long		rt_nr_total;
	int			overloaded;
	struct plist_head	pushable_tasks;

#endif /* CONFIG_SMP */
	int			rt_queued;

	int			rt_throttled;
	u64			rt_time;
	u64			rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long		rt_nr_boosted;

	struct rq		*rq;
	struct task_group	*tg;
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
	return rt_rq->rt_queued && rt_rq->rt_nr_running;
}
/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned long		dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	unsigned long		dl_nr_migratory;
	int			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;
#else
	struct dl_bw		dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline void se_update_runnable(struct sched_entity *se)
{
	if (!entity_is_task(se))
		se->runnable_weight = se->my_q->h_nr_running;
}

static inline long se_runnable(struct sched_entity *se)
{
	if (entity_is_task(se))
		return !!se->on_rq;
	else
		return se->runnable_weight;
}

#else
#define entity_is_task(se)	1

static inline void se_update_runnable(struct sched_entity *se) {}

static inline long se_runnable(struct sched_entity *se)
{
	return !!se->on_rq;
}
#endif
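/*
 * In other words (illustrative summary): for a task entity se_runnable() is
 * 0 or 1 depending on whether the task is queued, while for a group entity
 * it is the number of runnable tasks below the group, e.g. a group entity
 * whose cfs_rq has h_nr_running == 3 reports a runnable weight of 3.
 */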
#ifdef CONFIG_SMP
/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
	return scale_load_down(se->load.weight);
}


static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

struct perf_domain {
	struct em_perf_domain *em_pd;
	struct perf_domain *next;
	struct rcu_head rcu;
};

/* Scheduling group status flags */
#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
#define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/*
	 * Indicate pullable load on at least one CPU, e.g:
	 * - More than one runnable task
	 * - Running task is misfit
	 */
	int			overload;

	/* Indicate one or more cpus over-utilized (tipping point) */
	int			overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	unsigned long		max_cpu_capacity;

	/*
	 * NULL-terminated list of performance domains intersecting with the
	 * CPUs of the rd. Protected by RCU.
	 */
	struct perf_domain __rcu *pd;
};

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
	unsigned int value;
	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */
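/*
 * Sizing example (illustrative, assuming SCHED_CAPACITY_SCALE == 1024 and a
 * 64-bit kernel): bits_per(1024) == 11, so each bucket packs an 11-bit clamp
 * value and a 53-bit RUNNABLE-task counter into a single unsigned long, and
 * a struct uclamp_rq with the default UCLAMP_BUCKETS of 5 fits in a handful
 * of words.
 */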
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
	unsigned int		numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
	call_single_data_t	nohz_csd;
#endif /* CONFIG_SMP */
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_SMP
	unsigned int		ttwu_pending;
#endif
	u64			nr_switches;

#ifdef CONFIG_UCLAMP_TASK
	/* Utilization clamp values based on CPU's RUNNABLE tasks */
	struct uclamp_rq	uclamp[UCLAMP_CNT] ____cacheline_aligned;
	unsigned int		uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endif

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long		nr_uninterruptible;
	struct task_struct __rcu	*curr;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	/* Ensure that all clocks are in the same cache line */
	u64			clock_task ____cacheline_aligned;
	u64			clock_pelt;
	unsigned long		lost_idle_time;

	atomic_t		nr_iowait;

#ifdef CONFIG_MEMBARRIER
	int membarrier_state;
#endif

#ifdef CONFIG_SMP
	struct root_domain		*rd;
	struct sched_domain __rcu	*sd;

	unsigned long		cpu_capacity;
	unsigned long		cpu_capacity_orig;

	struct callback_head	*balance_callback;

	unsigned char		nohz_idle_balance;
	unsigned char		idle_balance;

	unsigned long		misfit_task_load;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head cfs_tasks;

	struct sched_avg	avg_rt;
	struct sched_avg	avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	struct sched_avg	avg_irq;
#endif
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
	struct sched_avg	avg_thermal;
#endif
	u64			idle_stamp;
	u64			avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;
#endif /* CONFIG_SMP */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	call_single_data_t	hrtick_csd;
#endif
	struct hrtimer		hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state	*idle_state;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

#else

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}


#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

extern void update_rq_clock(struct rq *rq);

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *  call to __schedule(). This is an optimisation to avoid
 *  neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *  in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *  made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04
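/*
 * Illustration of the shift trick (example values, not from the source): a
 * flags value of 0x01 (%RQCF_REQ_SKIP) becomes 0x02 (%RQCF_ACT_SKIP) after
 * the left shift in __schedule(), and 0x05 (%RQCF_REQ_SKIP | %RQCF_UPDATED)
 * becomes 0x0a; in both cases the ">= RQCF_UPDATED" test still answers
 * "was the clock updated?" correctly because %RQCF_UPDATED can sit in
 * either bit 2 or bit 3.
 */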
static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_task;
}

/**
 * By default the decay is the default pelt decay period.
 * The decay shift can change the decay period in
 * multiples of 32.
 *  Decay shift		Decay period(ms)
 *	0			32
 *	1			64
 *	2			128
 *	3			256
 *	4			512
 */
extern int sched_thermal_decay_shift;

static inline u64 rq_clock_thermal(struct rq *rq)
{
	return rq_clock_task(rq) >> sched_thermal_decay_shift;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is cancelled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

/*
 * Lockdep annotation that avoids accidental unlocks; it's like a
 * sticky/continuous lockdep_assert_held().
 *
 * This avoids code that has access to 'struct rq *rq' (basically everything in
 * the scheduler) from accidentally unlocking the rq if they do not also have a
 * copy of the (on-stack) 'struct rq_flags rf'.
 *
 * Also see Documentation/locking/lockdep-design.rst.
 */
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irqsave(&rq->lock, rf->flags);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irq(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_relock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_repin_lock(rq, rf);
}

static inline void
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
}

static inline void
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irq(&rq->lock);
}

static inline void
rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

static inline struct rq *
this_rq_lock_irq(struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	rq_lock(rq, rf);
	return rq;
}
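/*
 * Typical usage sketch for the helpers above (illustrative only): pin the
 * lock for the whole critical section so lockdep catches a stray unlock,
 * and update the clock before reading it:
 *
 *	struct rq_flags rf;
 *	struct rq *rq = task_rq_lock(p, &rf);	// takes p->pi_lock + rq->lock
 *	u64 now;
 *
 *	update_rq_clock(rq);
 *	now = rq_clock_task(rq);		// would warn if clock were stale
 *
 *	task_rq_unlock(rq, p, &rf);
 */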
#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
	return nr_cpu_ids;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *p, struct task_struct *t,
			int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void
init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See destroy_sched_domains: call_rcu for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The CPU whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given CPU.
 *
 * Returns the highest sched_domain of a CPU which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
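/*
 * For illustration (hypothetical topology): on a machine whose domain
 * hierarchy is SMT -> MC -> NUMA, with SD_SHARE_PKG_RESOURCES set on the SMT
 * and MC levels only, highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES)
 * returns the MC domain (this is how the sd_llc pointers below get
 * populated), while lowest_flag_domain() would return the SMT domain, the
 * first one carrying the flag when walking upward.
 */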
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
extern struct static_key_false sched_asym_cpucapacity;

struct sched_group_capacity {
	atomic_t		ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long		capacity;
	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
	unsigned long		max_capacity;		/* Max per-CPU capacity in group */
	unsigned long		next_update;
	int			imbalance;		/* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int			id;
#endif

	unsigned long		cpumask[0];		/* Balance mask */
};

struct sched_group {
	struct sched_group	*next;			/* Must be a circular list */
	atomic_t		ref;

	unsigned int		group_weight;
	struct sched_group_capacity *sgc;
	int			asym_prefer_cpu;	/* CPU of highest priority in group */

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long		cpumask[];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
 * @group: The group whose first CPU is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_span(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void dirty_sched_domain_sysctl(int cpu);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

extern void flush_smp_call_function_from_idle(void);

#else /* !CONFIG_SMP: */
static inline void flush_smp_call_function_from_idle(void) { }
#endif

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	WRITE_ONCE(p->cpu, cpu);
#else
	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)

/*
 * To support run-time toggling of sched features, all the translation units
 * (but core.c) reference the sysctl_sched_features defined in core.c.
 */
extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"
#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))

#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */

/*
 * Each translation unit has its own copy of sysctl_sched_features to allow
 * constants propagation at compile time and compiler optimization based on
 * features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
static const_debug __maybe_unused unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
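/*
 * Expansion sketch (illustrative): with features.h containing e.g.
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true), the enum above gains
 * __SCHED_FEAT_GENTLE_FAIR_SLEEPERS, and a test such as
 *
 *	if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *		...
 *
 * compiles either to a static-branch check (run-time toggleable) or, in the
 * !JUMP_LABEL case, to a compile-time constant the optimizer can fold away.
 */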
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
}

/*
 * wake flags
 */
#define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
#define WF_FORK			0x02		/* Child wakeup after fork */
#define WF_MIGRATED		0x04		/* Internal use, task got migrated */
#define WF_ON_CPU		0x08		/* Wakee is on_cpu */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int		sched_prio_to_weight[40];
extern const u32		sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04
#define ENQUEUE_NOCLOCK		0x08

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {

#ifdef CONFIG_UCLAMP_TASK
	int uclamp_enabled;
#endif

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task)   (struct rq *rq);
	bool (*yield_to_task)(struct rq *rq, struct task_struct *p);

	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);

	struct task_struct *(*pick_next_task)(struct rq *rq);

	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

#ifdef CONFIG_SMP
	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork)(struct task_struct *p);
	void (*task_dead)(struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval)(struct rq *rq,
					struct task_struct *task);

	void (*update_curr)(struct rq *rq);

#define TASK_SET_GROUP		0
#define TASK_MOVE_GROUP		1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group)(struct task_struct *p, int type);
#endif
} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	WARN_ON_ONCE(rq->curr != prev);
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_next_task(struct rq *rq, struct task_struct *next)
{
	WARN_ON_ONCE(rq->curr != next);
	next->sched_class->set_next_task(rq, next, false);
}

/* Defined in include/asm-generic/vmlinux.lds.h */
extern struct sched_class __begin_sched_classes[];
extern struct sched_class __end_sched_classes[];

#define sched_class_highest (__end_sched_classes - 1)
#define sched_class_lowest  (__begin_sched_classes - 1)

#define for_class_range(class, _from, _to) \
	for (class = (_from); class != (_to); class--)

#define for_each_class(class) \
	for_class_range(class, sched_class_highest, sched_class_lowest)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
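/*
 * Iteration sketch (illustrative only; this is the shape of the loop in
 * core.c's pick_next_task() slow path, not a new API): walk the classes
 * from highest (stop) to lowest (idle) priority and take the first runnable
 * task:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */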
extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

static inline bool sched_stop_runnable(struct rq *rq)
{
	return rq->stop && task_on_rq_queued(rq->stop);
}

static inline bool sched_dl_runnable(struct rq *rq)
{
	return rq->dl.dl_nr_running > 0;
}

static inline bool sched_rt_runnable(struct rq *rq)
{
	return rq->rt.rt_queued > 0;
}

static inline bool sched_fair_runnable(struct rq *rq)
{
	return rq->cfs.nr_running > 0;
}

extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
extern struct task_struct *pick_next_task_idle(struct rq *rq);

#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());

	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void schedule_idle(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void reweight_task(struct task_struct *p, int prio);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);

#define BW_SHIFT		20
#define BW_UNIT			(1 << BW_SHIFT)
#define RATIO_SHIFT		8
#define MAX_BW_BITS		(64 - BW_SHIFT)
#define MAX_BW			((1ULL << MAX_BW_BITS) - 1)
unsigned long to_ratio(u64 period, u64 runtime);
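/*
 * Example: bandwidth ratios are kept in BW_SHIFT-bit fixed point, so
 * to_ratio() effectively computes (runtime / period) * BW_UNIT. A worked
 * case, with a 10 ms runtime out of a 100 ms period (both in ns):
 *
 *	(10000000 << 20) / 100000000 = 104857 ~= 0.1 * BW_UNIT
 *
 * i.e. 10% of a CPU. An unlimited reservation (runtime == RUNTIME_INF)
 * maps to the full BW_UNIT.
 */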
extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct task_struct *p);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
extern int __init sched_tick_offload_init(void);

/*
 * The tick may be needed by tasks in the runqueue depending on their policy
 * and requirements. If the tick is needed, send the target CPU an IPI to kick
 * it out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline int sched_tick_offload_init(void) { return 0; }
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;
	if (trace_sched_update_nr_running_tp_enabled()) {
		call_trace_sched_update_nr_running(rq, count);
	}

#ifdef CONFIG_SMP
	if (prev_nr < 2 && rq->nr_running >= 2) {
		if (!READ_ONCE(rq->rd->overload))
			WRITE_ONCE(rq->rd->overload, 1);
	}
#endif

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	if (trace_sched_update_nr_running_tp_enabled()) {
		call_trace_sched_update_nr_running(rq, -count);
	}

	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifndef arch_scale_freq_tick
static __always_inline
void arch_scale_freq_tick(void)
{
}
#endif

#ifndef arch_scale_freq_capacity
/**
 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *     f_curr
 *     ------ * SCHED_CAPACITY_SCALE
 *     f_max
 */
static __always_inline
unsigned long arch_scale_freq_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
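/*
 * Example: with SCHED_CAPACITY_SCALE = 1024, a CPU currently clocked at
 * 1.2 GHz out of a 2.4 GHz maximum reports
 *
 *	(1200000 * 1024) / 2400000 = 512
 *
 * and cap_scale() applies such factors to utilization/capacity values,
 * e.g. cap_scale(800, 512) == 400, keeping all signals in the same
 * fixed-point range.
 */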
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPTION

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower CPU-ids and will
 * grant the double lock to lower CPUs over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					     SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					     SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPTION */

/*
 * double_lock_balance - lock the busiest runqueue; this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

extern void set_rq_online (struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif
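/*
 * Example: a hedged sketch of the kind of migration these helpers enable
 * (the real pull path lives in the load balancer in fair.c). Both rq locks
 * are taken in a stable address order, so two CPUs pulling from each other
 * cannot deadlock:
 *
 *	local_irq_save(flags);			// callers disable IRQs
 *	double_rq_lock(src_rq, dst_rq);		// lower-address rq first
 *	if (task_on_rq_queued(p)) {
 *		deactivate_task(src_rq, p, 0);
 *		set_task_cpu(p, cpu_of(dst_rq));
 *		activate_task(dst_rq, p, 0);
 *	}
 *	double_rq_unlock(src_rq, dst_rq);
 *	local_irq_restore(flags);
 */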
extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern bool sched_debug_enabled;

extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
#ifdef CONFIG_NUMA_BALANCING
extern void show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
#define NOHZ_BALANCE_KICK_BIT	0
#define NOHZ_STATS_KICK_BIT	1

#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)

#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(struct rq *rq);
#else
static inline void nohz_balance_exit_idle(struct rq *rq) { }
#endif

#ifdef CONFIG_SMP
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
	int i;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask) {
		struct rq *rq = cpu_rq(i);

		rq->dl.extra_bw += bw;
	}
}
#else
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

	dl->extra_bw += bw;
}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64			total;
	u64			tick_delta;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
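/*
 * Example: the writer side that pairs with the seqcount-protected reader
 * below; a sketch of what the IRQ accounting path in cputime.c does when
 * charging @delta ns of interrupt time on the local CPU:
 *
 *	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
 *
 *	u64_stats_update_begin(&irqtime->sync);
 *	irqtime->total += delta;
 *	irqtime->tick_delta += delta;
 *	u64_stats_update_end(&irqtime->sync);
 *
 * On 64-bit kernels the sync object compiles away; on 32-bit SMP it is a
 * seqcount that makes irq_time_read() retry rather than observe a torn
 * 64-bit update.
 */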
/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted and would never move forward.
 */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because they may not be coming in if only RT tasks are
 * active all the time.
 *
 * As a workaround for that issue, this function is called periodically by the
 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid. Going forward it should be replaced with
 * solutions targeted more specifically at RT tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */
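/*
 * Example: a minimal sketch of a call site. Utilization-changing events in
 * the scheduling classes pass a reason via @flags, and the governor
 * callback installed in cpufreq_update_util_data runs with the current rq
 * clock:
 *
 *	// plain utilization update, somewhere under rq->lock:
 *	cpufreq_update_util(rq, 0);
 *
 *	// wakeup from I/O wait, hinting the governor to boost:
 *	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 */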
#ifdef CONFIG_UCLAMP_TASK
unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);

/**
 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
 * @rq: The rq to clamp against. Must not be NULL.
 * @util: The util value to clamp.
 * @p: The task to clamp against. Can be NULL if you want to clamp
 *     against @rq only.
 *
 * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
 *
 * If the sched_uclamp_used static key is disabled, just return the util
 * without any clamping, since uclamp aggregation at the rq level in the fast
 * path is disabled, rendering this operation a NOP.
 *
 * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
 * will return the correct effective uclamp value of the task even if the
 * static key is disabled.
 */
static __always_inline
unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
				  struct task_struct *p)
{
	unsigned long min_util;
	unsigned long max_util;

	if (!static_branch_likely(&sched_uclamp_used))
		return util;

	min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
	max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);

	if (p) {
		min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
		max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
	}

	/*
	 * Since a CPU's {min,max}_util clamps are MAX aggregated over the
	 * RUNNABLE tasks, which may carry _different_ clamps, we can end up
	 * with an inversion. Fix it now, when the clamps are applied.
	 */
	if (unlikely(min_util >= max_util))
		return min_util;

	return clamp(util, min_util, max_util);
}

/*
 * When uclamp is compiled in, the aggregation at rq level is 'turned off'
 * by default in the fast path and only gets turned on once userspace performs
 * an operation that requires it.
 *
 * Returns true if userspace has opted in to using uclamp, and hence the
 * aggregation at rq level is active.
 */
static inline bool uclamp_is_used(void)
{
	return static_branch_likely(&sched_uclamp_used);
}
#else /* CONFIG_UCLAMP_TASK */
static inline
unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
				  struct task_struct *p)
{
	return util;
}

static inline bool uclamp_is_used(void)
{
	return false;
}
#endif /* CONFIG_UCLAMP_TASK */
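/*
 * Example: a hedged sketch of how a frequency-selection path applies the
 * clamps (cf. schedutil's use of uclamp_rq_util_with()). The CPU's CFS
 * utilization is clamped by the rq-wide aggregate, optionally including a
 * specific task that is about to run there:
 *
 *	unsigned long util = cpu_util_cfs(rq);
 *
 *	util = uclamp_rq_util_with(rq, util, NULL);
 *	// util now honours the UCLAMP_MIN/UCLAMP_MAX of the runnable tasks
 *
 * Keeping the static key check first means systems that never touch uclamp
 * pay only a patched-out branch in this fast path.
 */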
#ifdef arch_scale_freq_capacity
# ifndef arch_scale_freq_invariant
#  define arch_scale_freq_invariant()	true
# endif
#else
# define arch_scale_freq_invariant()	false
#endif

#ifdef CONFIG_SMP
static inline unsigned long capacity_orig_of(int cpu)
{
	return cpu_rq(cpu)->cpu_capacity_orig;
}
#endif

/**
 * enum schedutil_type - CPU utilization type
 * @FREQUENCY_UTIL:	Utilization used to select frequency
 * @ENERGY_UTIL:	Utilization used during energy calculation
 *
 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
 * need to be aggregated differently depending on the usage made of them. This
 * enum is used within schedutil_cpu_util() to differentiate the types of
 * utilization expected by the callers, and adjust the aggregation accordingly.
 */
enum schedutil_type {
	FREQUENCY_UTIL,
	ENERGY_UTIL,
};

#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL

unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum schedutil_type type,
				 struct task_struct *p);

static inline unsigned long cpu_bw_dl(struct rq *rq)
{
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

static inline unsigned long cpu_util_dl(struct rq *rq)
{
	return READ_ONCE(rq->avg_dl.util_avg);
}

static inline unsigned long cpu_util_cfs(struct rq *rq)
{
	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

	if (sched_feat(UTIL_EST)) {
		util = max_t(unsigned long, util,
			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
	}

	return util;
}

static inline unsigned long cpu_util_rt(struct rq *rq)
{
	return READ_ONCE(rq->avg_rt.util_avg);
}
#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
					       unsigned long max, enum schedutil_type type,
					       struct task_struct *p)
{
	return 0;
}
#endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return rq->avg_irq.util_avg;
}

static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	util *= (max - irq);
	util /= max;

	return util;
}
#else
static inline unsigned long cpu_util_irq(struct rq *rq)
{
	return 0;
}

static inline
unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
{
	return util;
}
#endif
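/*
 * Example: scale_irq_capacity() discounts capacity stolen by interrupt
 * handling, i.e. util * (max - irq) / max. With max = 1024 and an IRQ
 * utilization average of 256 (a quarter of the CPU spent in interrupts),
 * a utilization of 600 becomes
 *
 *	600 * (1024 - 256) / 1024 = 450
 *
 * so the signal is rescaled to the CPU time actually left over for tasks.
 */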
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)

#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))

DECLARE_STATIC_KEY_FALSE(sched_energy_present);

static inline bool sched_energy_enabled(void)
{
	return static_branch_unlikely(&sched_energy_present);
}

#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */

#define perf_domain_span(pd) NULL
static inline bool sched_energy_enabled(void) { return false; }

#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */

#ifdef CONFIG_MEMBARRIER
/*
 * The scheduler provides memory barriers required by membarrier between:
 * - prior user-space memory accesses and store to rq->membarrier_state,
 * - store to rq->membarrier_state and following user-space memory accesses.
 * In the same way it provides those guarantees around store to rq->curr.
 */
static inline void membarrier_switch_mm(struct rq *rq,
					struct mm_struct *prev_mm,
					struct mm_struct *next_mm)
{
	int membarrier_state;

	if (prev_mm == next_mm)
		return;

	membarrier_state = atomic_read(&next_mm->membarrier_state);
	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
		return;

	WRITE_ONCE(rq->membarrier_state, membarrier_state);
}
#else
static inline void membarrier_switch_mm(struct rq *rq,
					struct mm_struct *prev_mm,
					struct mm_struct *next_mm)
{
}
#endif

#ifdef CONFIG_SMP
static inline bool is_per_cpu_kthread(struct task_struct *p)
{
	if (!(p->flags & PF_KTHREAD))
		return false;

	if (p->nr_cpus_allowed != 1)
		return false;

	return true;
}
#endif

void swake_up_all_locked(struct swait_queue_head *q);
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);