1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Scheduler internal types and methods: 4 */ 5 #ifndef _KERNEL_SCHED_SCHED_H 6 #define _KERNEL_SCHED_SCHED_H 7 8 #include <linux/sched/affinity.h> 9 #include <linux/sched/autogroup.h> 10 #include <linux/sched/cpufreq.h> 11 #include <linux/sched/deadline.h> 12 #include <linux/sched.h> 13 #include <linux/sched/loadavg.h> 14 #include <linux/sched/mm.h> 15 #include <linux/sched/rseq_api.h> 16 #include <linux/sched/signal.h> 17 #include <linux/sched/smt.h> 18 #include <linux/sched/stat.h> 19 #include <linux/sched/sysctl.h> 20 #include <linux/sched/task_flags.h> 21 #include <linux/sched/task.h> 22 #include <linux/sched/topology.h> 23 24 #include <linux/atomic.h> 25 #include <linux/bitmap.h> 26 #include <linux/bug.h> 27 #include <linux/capability.h> 28 #include <linux/cgroup_api.h> 29 #include <linux/cgroup.h> 30 #include <linux/context_tracking.h> 31 #include <linux/cpufreq.h> 32 #include <linux/cpumask_api.h> 33 #include <linux/ctype.h> 34 #include <linux/file.h> 35 #include <linux/fs_api.h> 36 #include <linux/hrtimer_api.h> 37 #include <linux/interrupt.h> 38 #include <linux/irq_work.h> 39 #include <linux/jiffies.h> 40 #include <linux/kref_api.h> 41 #include <linux/kthread.h> 42 #include <linux/ktime_api.h> 43 #include <linux/lockdep_api.h> 44 #include <linux/lockdep.h> 45 #include <linux/minmax.h> 46 #include <linux/mm.h> 47 #include <linux/module.h> 48 #include <linux/mutex_api.h> 49 #include <linux/plist.h> 50 #include <linux/poll.h> 51 #include <linux/proc_fs.h> 52 #include <linux/profile.h> 53 #include <linux/psi.h> 54 #include <linux/rcupdate.h> 55 #include <linux/seq_file.h> 56 #include <linux/seqlock.h> 57 #include <linux/softirq.h> 58 #include <linux/spinlock_api.h> 59 #include <linux/static_key.h> 60 #include <linux/stop_machine.h> 61 #include <linux/syscalls_api.h> 62 #include <linux/syscalls.h> 63 #include <linux/tick.h> 64 #include <linux/topology.h> 65 #include <linux/types.h> 66 #include <linux/u64_stats_sync_api.h> 67 #include <linux/uaccess.h> 68 #include <linux/wait_api.h> 69 #include <linux/wait_bit.h> 70 #include <linux/workqueue_api.h> 71 72 #include <trace/events/power.h> 73 #include <trace/events/sched.h> 74 75 #include "../workqueue_internal.h" 76 77 #ifdef CONFIG_CGROUP_SCHED 78 #include <linux/cgroup.h> 79 #include <linux/psi.h> 80 #endif 81 82 #ifdef CONFIG_SCHED_DEBUG 83 # include <linux/static_key.h> 84 #endif 85 86 #ifdef CONFIG_PARAVIRT 87 # include <asm/paravirt.h> 88 # include <asm/paravirt_api_clock.h> 89 #endif 90 91 #include "cpupri.h" 92 #include "cpudeadline.h" 93 94 #ifdef CONFIG_SCHED_DEBUG 95 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) 96 #else 97 # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) 98 #endif 99 100 struct rq; 101 struct cpuidle_state; 102 103 /* task_struct::on_rq states: */ 104 #define TASK_ON_RQ_QUEUED 1 105 #define TASK_ON_RQ_MIGRATING 2 106 107 extern __read_mostly int scheduler_running; 108 109 extern unsigned long calc_load_update; 110 extern atomic_long_t calc_load_tasks; 111 112 extern unsigned int sysctl_sched_child_runs_first; 113 114 extern void calc_global_load_tick(struct rq *this_rq); 115 extern long calc_load_fold_active(struct rq *this_rq, long adjust); 116 117 extern void call_trace_sched_update_nr_running(struct rq *rq, int count); 118 119 extern unsigned int sysctl_sched_rt_period; 120 extern int sysctl_sched_rt_runtime; 121 extern int sched_rr_timeslice; 122 123 /* 124 * Helpers for converting nanosecond timing to jiffy resolution 125 */ 126 #define NS_TO_JIFFIES(TIME) 
((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) 127 128 /* 129 * Increase resolution of nice-level calculations for 64-bit architectures. 130 * The extra resolution improves shares distribution and load balancing of 131 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 132 * hierarchies, especially on larger systems. This is not a user-visible change 133 * and does not change the user-interface for setting shares/weights. 134 * 135 * We increase resolution only if we have enough bits to allow this increased 136 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit 137 * are pretty high and the returns do not justify the increased costs. 138 * 139 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to 140 * increase coverage and consistency always enable it on 64-bit platforms. 141 */ 142 #ifdef CONFIG_64BIT 143 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) 144 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) 145 # define scale_load_down(w) \ 146 ({ \ 147 unsigned long __w = (w); \ 148 if (__w) \ 149 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ 150 __w; \ 151 }) 152 #else 153 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) 154 # define scale_load(w) (w) 155 # define scale_load_down(w) (w) 156 #endif 157 158 /* 159 * Task weight (visible to users) and its load (invisible to users) have 160 * independent resolution, but they should be well calibrated. We use 161 * scale_load() and scale_load_down(w) to convert between them. The 162 * following must be true: 163 * 164 * scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD 165 * 166 */ 167 #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) 168 169 /* 170 * Single value that decides SCHED_DEADLINE internal math precision. 171 * 10 -> just above 1us 172 * 9 -> just above 0.5us 173 */ 174 #define DL_SCALE 10 175 176 /* 177 * Single value that denotes runtime == period, ie unlimited time. 178 */ 179 #define RUNTIME_INF ((u64)~0ULL) 180 181 static inline int idle_policy(int policy) 182 { 183 return policy == SCHED_IDLE; 184 } 185 static inline int fair_policy(int policy) 186 { 187 return policy == SCHED_NORMAL || policy == SCHED_BATCH; 188 } 189 190 static inline int rt_policy(int policy) 191 { 192 return policy == SCHED_FIFO || policy == SCHED_RR; 193 } 194 195 static inline int dl_policy(int policy) 196 { 197 return policy == SCHED_DEADLINE; 198 } 199 static inline bool valid_policy(int policy) 200 { 201 return idle_policy(policy) || fair_policy(policy) || 202 rt_policy(policy) || dl_policy(policy); 203 } 204 205 static inline int task_has_idle_policy(struct task_struct *p) 206 { 207 return idle_policy(p->policy); 208 } 209 210 static inline int task_has_rt_policy(struct task_struct *p) 211 { 212 return rt_policy(p->policy); 213 } 214 215 static inline int task_has_dl_policy(struct task_struct *p) 216 { 217 return dl_policy(p->policy); 218 } 219 220 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 221 222 static inline void update_avg(u64 *avg, u64 sample) 223 { 224 s64 diff = sample - *avg; 225 *avg += diff / 8; 226 } 227 228 /* 229 * Shifting a value by an exponent greater *or equal* to the size of said value 230 * is UB; cap at size-1. 231 */ 232 #define shr_bound(val, shift) \ 233 (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1)) 234 235 /* 236 * !! For sched_setattr_nocheck() (kernel) only !! 237 * 238 * This is actually gross. 
:(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is
 * available on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

#define SCHED_DL_FLAGS	(SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
				     const struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/*
 * To keep the bandwidth of -deadline tasks under control
 * we need some place to:
 *  - store the maximum -deadline bandwidth of each CPU;
 *  - cache the fraction of bandwidth that is currently allocated in
 *    each root domain;
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
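 *
 * As an illustration of what admission control ends up checking (a
 * simplified sketch; the real, capacity-aware test is done by
 * sched_dl_overflow()), a new -deadline task with bandwidth new_bw,
 * replacing a previous reservation old_bw, is admitted iff
 *
 *	total_bw - old_bw + new_bw <= nr_cpus * bw
 *
 * with bw and total_bw as described below for struct dl_bw.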
306 * 307 * With respect to SMP, bandwidth is given on a per root domain basis, 308 * meaning that: 309 * - bw (< 100%) is the deadline bandwidth of each CPU; 310 * - total_bw is the currently allocated bandwidth in each root domain; 311 */ 312 struct dl_bw { 313 raw_spinlock_t lock; 314 u64 bw; 315 u64 total_bw; 316 }; 317 318 extern void init_dl_bw(struct dl_bw *dl_b); 319 extern int sched_dl_global_validate(void); 320 extern void sched_dl_do_global(void); 321 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); 322 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); 323 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); 324 extern bool __checkparam_dl(const struct sched_attr *attr); 325 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); 326 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 327 extern int dl_bw_check_overflow(int cpu); 328 329 #ifdef CONFIG_CGROUP_SCHED 330 331 struct cfs_rq; 332 struct rt_rq; 333 334 extern struct list_head task_groups; 335 336 struct cfs_bandwidth { 337 #ifdef CONFIG_CFS_BANDWIDTH 338 raw_spinlock_t lock; 339 ktime_t period; 340 u64 quota; 341 u64 runtime; 342 u64 burst; 343 u64 runtime_snap; 344 s64 hierarchical_quota; 345 346 u8 idle; 347 u8 period_active; 348 u8 slack_started; 349 struct hrtimer period_timer; 350 struct hrtimer slack_timer; 351 struct list_head throttled_cfs_rq; 352 353 /* Statistics: */ 354 int nr_periods; 355 int nr_throttled; 356 int nr_burst; 357 u64 throttled_time; 358 u64 burst_time; 359 #endif 360 }; 361 362 /* Task group related information */ 363 struct task_group { 364 struct cgroup_subsys_state css; 365 366 #ifdef CONFIG_FAIR_GROUP_SCHED 367 /* schedulable entities of this group on each CPU */ 368 struct sched_entity **se; 369 /* runqueue "owned" by this group on each CPU */ 370 struct cfs_rq **cfs_rq; 371 unsigned long shares; 372 373 /* A positive value indicates that this is a SCHED_IDLE group. */ 374 int idle; 375 376 #ifdef CONFIG_SMP 377 /* 378 * load_avg can be heavily contended at clock tick time, so put 379 * it in its own cacheline separated from the fields above which 380 * will also be accessed at each tick. 381 */ 382 atomic_long_t load_avg ____cacheline_aligned; 383 #endif 384 #endif 385 386 #ifdef CONFIG_RT_GROUP_SCHED 387 struct sched_rt_entity **rt_se; 388 struct rt_rq **rt_rq; 389 390 struct rt_bandwidth rt_bandwidth; 391 #endif 392 393 struct rcu_head rcu; 394 struct list_head list; 395 396 struct task_group *parent; 397 struct list_head siblings; 398 struct list_head children; 399 400 #ifdef CONFIG_SCHED_AUTOGROUP 401 struct autogroup *autogroup; 402 #endif 403 404 struct cfs_bandwidth cfs_bandwidth; 405 406 #ifdef CONFIG_UCLAMP_TASK_GROUP 407 /* The two decimal precision [%] value requested from user-space */ 408 unsigned int uclamp_pct[UCLAMP_CNT]; 409 /* Clamp values requested for a task group */ 410 struct uclamp_se uclamp_req[UCLAMP_CNT]; 411 /* Effective clamp values used for a task group */ 412 struct uclamp_se uclamp[UCLAMP_CNT]; 413 #endif 414 415 }; 416 417 #ifdef CONFIG_FAIR_GROUP_SCHED 418 #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD 419 420 /* 421 * A weight of 0 or 1 can cause arithmetics problems. 422 * A weight of a cfs_rq is the sum of weights of which entities 423 * are queued on this cfs_rq, so a weight of a entity should not be 424 * too large, so as the shares value of a task group. 
425 * (The default weight is 1024 - so there's no practical 426 * limitation from this.) 427 */ 428 #define MIN_SHARES (1UL << 1) 429 #define MAX_SHARES (1UL << 18) 430 #endif 431 432 typedef int (*tg_visitor)(struct task_group *, void *); 433 434 extern int walk_tg_tree_from(struct task_group *from, 435 tg_visitor down, tg_visitor up, void *data); 436 437 /* 438 * Iterate the full tree, calling @down when first entering a node and @up when 439 * leaving it for the final time. 440 * 441 * Caller must hold rcu_lock or sufficient equivalent. 442 */ 443 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) 444 { 445 return walk_tg_tree_from(&root_task_group, down, up, data); 446 } 447 448 extern int tg_nop(struct task_group *tg, void *data); 449 450 extern void free_fair_sched_group(struct task_group *tg); 451 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); 452 extern void online_fair_sched_group(struct task_group *tg); 453 extern void unregister_fair_sched_group(struct task_group *tg); 454 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 455 struct sched_entity *se, int cpu, 456 struct sched_entity *parent); 457 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent); 458 459 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); 460 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 461 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); 462 extern bool cfs_task_bw_constrained(struct task_struct *p); 463 464 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, 465 struct sched_rt_entity *rt_se, int cpu, 466 struct sched_rt_entity *parent); 467 extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); 468 extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); 469 extern long sched_group_rt_runtime(struct task_group *tg); 470 extern long sched_group_rt_period(struct task_group *tg); 471 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 472 473 extern struct task_group *sched_create_group(struct task_group *parent); 474 extern void sched_online_group(struct task_group *tg, 475 struct task_group *parent); 476 extern void sched_destroy_group(struct task_group *tg); 477 extern void sched_release_group(struct task_group *tg); 478 479 extern void sched_move_task(struct task_struct *tsk); 480 481 #ifdef CONFIG_FAIR_GROUP_SCHED 482 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 483 484 extern int sched_group_set_idle(struct task_group *tg, long idle); 485 486 #ifdef CONFIG_SMP 487 extern void set_task_rq_fair(struct sched_entity *se, 488 struct cfs_rq *prev, struct cfs_rq *next); 489 #else /* !CONFIG_SMP */ 490 static inline void set_task_rq_fair(struct sched_entity *se, 491 struct cfs_rq *prev, struct cfs_rq *next) { } 492 #endif /* CONFIG_SMP */ 493 #endif /* CONFIG_FAIR_GROUP_SCHED */ 494 495 #else /* CONFIG_CGROUP_SCHED */ 496 497 struct cfs_bandwidth { }; 498 static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; } 499 500 #endif /* CONFIG_CGROUP_SCHED */ 501 502 extern void unregister_rt_sched_group(struct task_group *tg); 503 extern void free_rt_sched_group(struct task_group *tg); 504 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); 505 506 /* 507 * u64_u32_load/u64_u32_store 508 * 509 * Use a copy of a u64 value to protect against data race. 
This is only 510 * applicable for 32-bits architectures. 511 */ 512 #ifdef CONFIG_64BIT 513 # define u64_u32_load_copy(var, copy) var 514 # define u64_u32_store_copy(var, copy, val) (var = val) 515 #else 516 # define u64_u32_load_copy(var, copy) \ 517 ({ \ 518 u64 __val, __val_copy; \ 519 do { \ 520 __val_copy = copy; \ 521 /* \ 522 * paired with u64_u32_store_copy(), ordering access \ 523 * to var and copy. \ 524 */ \ 525 smp_rmb(); \ 526 __val = var; \ 527 } while (__val != __val_copy); \ 528 __val; \ 529 }) 530 # define u64_u32_store_copy(var, copy, val) \ 531 do { \ 532 typeof(val) __val = (val); \ 533 var = __val; \ 534 /* \ 535 * paired with u64_u32_load_copy(), ordering access to var and \ 536 * copy. \ 537 */ \ 538 smp_wmb(); \ 539 copy = __val; \ 540 } while (0) 541 #endif 542 # define u64_u32_load(var) u64_u32_load_copy(var, var##_copy) 543 # define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val) 544 545 /* CFS-related fields in a runqueue */ 546 struct cfs_rq { 547 struct load_weight load; 548 unsigned int nr_running; 549 unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ 550 unsigned int idle_nr_running; /* SCHED_IDLE */ 551 unsigned int idle_h_nr_running; /* SCHED_IDLE */ 552 553 s64 avg_vruntime; 554 u64 avg_load; 555 556 u64 exec_clock; 557 u64 min_vruntime; 558 #ifdef CONFIG_SCHED_CORE 559 unsigned int forceidle_seq; 560 u64 min_vruntime_fi; 561 #endif 562 563 #ifndef CONFIG_64BIT 564 u64 min_vruntime_copy; 565 #endif 566 567 struct rb_root_cached tasks_timeline; 568 569 /* 570 * 'curr' points to currently running entity on this cfs_rq. 571 * It is set to NULL otherwise (i.e when none are currently running). 572 */ 573 struct sched_entity *curr; 574 struct sched_entity *next; 575 576 #ifdef CONFIG_SCHED_DEBUG 577 unsigned int nr_spread_over; 578 #endif 579 580 #ifdef CONFIG_SMP 581 /* 582 * CFS load tracking 583 */ 584 struct sched_avg avg; 585 #ifndef CONFIG_64BIT 586 u64 last_update_time_copy; 587 #endif 588 struct { 589 raw_spinlock_t lock ____cacheline_aligned; 590 int nr; 591 unsigned long load_avg; 592 unsigned long util_avg; 593 unsigned long runnable_avg; 594 } removed; 595 596 #ifdef CONFIG_FAIR_GROUP_SCHED 597 unsigned long tg_load_avg_contrib; 598 long propagate; 599 long prop_runnable_sum; 600 601 /* 602 * h_load = weight * f(tg) 603 * 604 * Where f(tg) is the recursive weight fraction assigned to 605 * this group. 606 */ 607 unsigned long h_load; 608 u64 last_h_load_update; 609 struct sched_entity *h_load_next; 610 #endif /* CONFIG_FAIR_GROUP_SCHED */ 611 #endif /* CONFIG_SMP */ 612 613 #ifdef CONFIG_FAIR_GROUP_SCHED 614 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ 615 616 /* 617 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 618 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 619 * (like users, containers etc.) 620 * 621 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. 622 * This list is used during load balance. 
623 */ 624 int on_list; 625 struct list_head leaf_cfs_rq_list; 626 struct task_group *tg; /* group that "owns" this runqueue */ 627 628 /* Locally cached copy of our task_group's idle value */ 629 int idle; 630 631 #ifdef CONFIG_CFS_BANDWIDTH 632 int runtime_enabled; 633 s64 runtime_remaining; 634 635 u64 throttled_pelt_idle; 636 #ifndef CONFIG_64BIT 637 u64 throttled_pelt_idle_copy; 638 #endif 639 u64 throttled_clock; 640 u64 throttled_clock_pelt; 641 u64 throttled_clock_pelt_time; 642 u64 throttled_clock_self; 643 u64 throttled_clock_self_time; 644 int throttled; 645 int throttle_count; 646 struct list_head throttled_list; 647 #ifdef CONFIG_SMP 648 struct list_head throttled_csd_list; 649 #endif 650 #endif /* CONFIG_CFS_BANDWIDTH */ 651 #endif /* CONFIG_FAIR_GROUP_SCHED */ 652 }; 653 654 static inline int rt_bandwidth_enabled(void) 655 { 656 return sysctl_sched_rt_runtime >= 0; 657 } 658 659 /* RT IPI pull logic requires IRQ_WORK */ 660 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) 661 # define HAVE_RT_PUSH_IPI 662 #endif 663 664 /* Real-Time classes' related field in a runqueue: */ 665 struct rt_rq { 666 struct rt_prio_array active; 667 unsigned int rt_nr_running; 668 unsigned int rr_nr_running; 669 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 670 struct { 671 int curr; /* highest queued rt task prio */ 672 #ifdef CONFIG_SMP 673 int next; /* next highest */ 674 #endif 675 } highest_prio; 676 #endif 677 #ifdef CONFIG_SMP 678 unsigned int rt_nr_migratory; 679 unsigned int rt_nr_total; 680 int overloaded; 681 struct plist_head pushable_tasks; 682 683 #endif /* CONFIG_SMP */ 684 int rt_queued; 685 686 int rt_throttled; 687 u64 rt_time; 688 u64 rt_runtime; 689 /* Nests inside the rq lock: */ 690 raw_spinlock_t rt_runtime_lock; 691 692 #ifdef CONFIG_RT_GROUP_SCHED 693 unsigned int rt_nr_boosted; 694 695 struct rq *rq; 696 struct task_group *tg; 697 #endif 698 }; 699 700 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) 701 { 702 return rt_rq->rt_queued && rt_rq->rt_nr_running; 703 } 704 705 /* Deadline class' related fields in a runqueue */ 706 struct dl_rq { 707 /* runqueue is an rbtree, ordered by deadline */ 708 struct rb_root_cached root; 709 710 unsigned int dl_nr_running; 711 712 #ifdef CONFIG_SMP 713 /* 714 * Deadline values of the currently executing and the 715 * earliest ready task on this rq. Caching these facilitates 716 * the decision whether or not a ready but not running task 717 * should migrate somewhere else. 718 */ 719 struct { 720 u64 curr; 721 u64 next; 722 } earliest_dl; 723 724 unsigned int dl_nr_migratory; 725 int overloaded; 726 727 /* 728 * Tasks on this rq that can be pushed away. They are kept in 729 * an rb-tree, ordered by tasks' deadlines, with caching 730 * of the leftmost (earliest deadline) element. 731 */ 732 struct rb_root_cached pushable_dl_tasks_root; 733 #else 734 struct dl_bw dl_bw; 735 #endif 736 /* 737 * "Active utilization" for this runqueue: increased when a 738 * task wakes up (becomes TASK_RUNNING) and decreased when a 739 * task blocks 740 */ 741 u64 running_bw; 742 743 /* 744 * Utilization of the tasks "assigned" to this runqueue (including 745 * the tasks that are in runqueue and the tasks that executed on this 746 * CPU and blocked). Increased when a task moves to this runqueue, and 747 * decreased when the task moves away (migrates, changes scheduling 748 * policy, or terminates). 749 * This is needed to compute the "inactive utilization" for the 750 * runqueue (inactive utilization = this_bw - running_bw). 
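 *
 * A rough worked example: with three 30%-bandwidth tasks assigned to
 * this runqueue but only one of them currently runnable, this_bw is
 * ~0.9, running_bw is ~0.3 and the inactive utilization is therefore
 * ~0.6 (all of them stored as fixed-point fractions of the CPU).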
751 */ 752 u64 this_bw; 753 u64 extra_bw; 754 755 /* 756 * Maximum available bandwidth for reclaiming by SCHED_FLAG_RECLAIM 757 * tasks of this rq. Used in calculation of reclaimable bandwidth(GRUB). 758 */ 759 u64 max_bw; 760 761 /* 762 * Inverse of the fraction of CPU utilization that can be reclaimed 763 * by the GRUB algorithm. 764 */ 765 u64 bw_ratio; 766 }; 767 768 #ifdef CONFIG_FAIR_GROUP_SCHED 769 /* An entity is a task if it doesn't "own" a runqueue */ 770 #define entity_is_task(se) (!se->my_q) 771 772 static inline void se_update_runnable(struct sched_entity *se) 773 { 774 if (!entity_is_task(se)) 775 se->runnable_weight = se->my_q->h_nr_running; 776 } 777 778 static inline long se_runnable(struct sched_entity *se) 779 { 780 if (entity_is_task(se)) 781 return !!se->on_rq; 782 else 783 return se->runnable_weight; 784 } 785 786 #else 787 #define entity_is_task(se) 1 788 789 static inline void se_update_runnable(struct sched_entity *se) {} 790 791 static inline long se_runnable(struct sched_entity *se) 792 { 793 return !!se->on_rq; 794 } 795 #endif 796 797 #ifdef CONFIG_SMP 798 /* 799 * XXX we want to get rid of these helpers and use the full load resolution. 800 */ 801 static inline long se_weight(struct sched_entity *se) 802 { 803 return scale_load_down(se->load.weight); 804 } 805 806 807 static inline bool sched_asym_prefer(int a, int b) 808 { 809 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); 810 } 811 812 struct perf_domain { 813 struct em_perf_domain *em_pd; 814 struct perf_domain *next; 815 struct rcu_head rcu; 816 }; 817 818 /* Scheduling group status flags */ 819 #define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ 820 #define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ 821 822 /* 823 * We add the notion of a root-domain which will be used to define per-domain 824 * variables. Each exclusive cpuset essentially defines an island domain by 825 * fully partitioning the member CPUs from any other cpuset. Whenever a new 826 * exclusive cpuset is created, we also create and attach a new root-domain 827 * object. 828 * 829 */ 830 struct root_domain { 831 atomic_t refcount; 832 atomic_t rto_count; 833 struct rcu_head rcu; 834 cpumask_var_t span; 835 cpumask_var_t online; 836 837 /* 838 * Indicate pullable load on at least one CPU, e.g: 839 * - More than one runnable task 840 * - Running task is misfit 841 */ 842 int overload; 843 844 /* Indicate one or more cpus over-utilized (tipping point) */ 845 int overutilized; 846 847 /* 848 * The bit corresponding to a CPU gets set here if such CPU has more 849 * than one runnable -deadline task (as it is below for RT tasks). 850 */ 851 cpumask_var_t dlo_mask; 852 atomic_t dlo_count; 853 struct dl_bw dl_bw; 854 struct cpudl cpudl; 855 856 /* 857 * Indicate whether a root_domain's dl_bw has been checked or 858 * updated. It's monotonously increasing value. 859 * 860 * Also, some corner cases, like 'wrap around' is dangerous, but given 861 * that u64 is 'big enough'. So that shouldn't be a concern. 862 */ 863 u64 visit_gen; 864 865 #ifdef HAVE_RT_PUSH_IPI 866 /* 867 * For IPI pull requests, loop across the rto_mask. 868 */ 869 struct irq_work rto_push_work; 870 raw_spinlock_t rto_lock; 871 /* These are only updated and read within rto_lock */ 872 int rto_loop; 873 int rto_cpu; 874 /* These atomics are updated outside of a lock */ 875 atomic_t rto_loop_next; 876 atomic_t rto_loop_start; 877 #endif 878 /* 879 * The "RT overload" flag: it gets set if a CPU has more than 880 * one runnable RT task. 
881 */ 882 cpumask_var_t rto_mask; 883 struct cpupri cpupri; 884 885 unsigned long max_cpu_capacity; 886 887 /* 888 * NULL-terminated list of performance domains intersecting with the 889 * CPUs of the rd. Protected by RCU. 890 */ 891 struct perf_domain __rcu *pd; 892 }; 893 894 extern void init_defrootdomain(void); 895 extern int sched_init_domains(const struct cpumask *cpu_map); 896 extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 897 extern void sched_get_rd(struct root_domain *rd); 898 extern void sched_put_rd(struct root_domain *rd); 899 900 #ifdef HAVE_RT_PUSH_IPI 901 extern void rto_push_irq_work_func(struct irq_work *work); 902 #endif 903 #endif /* CONFIG_SMP */ 904 905 #ifdef CONFIG_UCLAMP_TASK 906 /* 907 * struct uclamp_bucket - Utilization clamp bucket 908 * @value: utilization clamp value for tasks on this clamp bucket 909 * @tasks: number of RUNNABLE tasks on this clamp bucket 910 * 911 * Keep track of how many tasks are RUNNABLE for a given utilization 912 * clamp value. 913 */ 914 struct uclamp_bucket { 915 unsigned long value : bits_per(SCHED_CAPACITY_SCALE); 916 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); 917 }; 918 919 /* 920 * struct uclamp_rq - rq's utilization clamp 921 * @value: currently active clamp values for a rq 922 * @bucket: utilization clamp buckets affecting a rq 923 * 924 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. 925 * A clamp value is affecting a rq when there is at least one task RUNNABLE 926 * (or actually running) with that value. 927 * 928 * There are up to UCLAMP_CNT possible different clamp values, currently there 929 * are only two: minimum utilization and maximum utilization. 930 * 931 * All utilization clamping values are MAX aggregated, since: 932 * - for util_min: we want to run the CPU at least at the max of the minimum 933 * utilization required by its currently RUNNABLE tasks. 934 * - for util_max: we want to allow the CPU to run up to the max of the 935 * maximum utilization allowed by its currently RUNNABLE tasks. 936 * 937 * Since on each system we expect only a limited number of different 938 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track 939 * the metrics required to compute all the per-rq utilization clamp values. 940 */ 941 struct uclamp_rq { 942 unsigned int value; 943 struct uclamp_bucket bucket[UCLAMP_BUCKETS]; 944 }; 945 946 DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); 947 #endif /* CONFIG_UCLAMP_TASK */ 948 949 struct rq; 950 struct balance_callback { 951 struct balance_callback *next; 952 void (*func)(struct rq *rq); 953 }; 954 955 /* 956 * This is the main, per-CPU runqueue data structure. 957 * 958 * Locking rule: those places that want to lock multiple runqueues 959 * (such as the load balancing or the thread migration code), lock 960 * acquire operations must be ordered by ascending &runqueue. 961 */ 962 struct rq { 963 /* runqueue lock: */ 964 raw_spinlock_t __lock; 965 966 /* 967 * nr_running and cpu_load should be in the same cacheline because 968 * remote CPUs use both these fields when doing load calculation. 
969 */ 970 unsigned int nr_running; 971 #ifdef CONFIG_NUMA_BALANCING 972 unsigned int nr_numa_running; 973 unsigned int nr_preferred_running; 974 unsigned int numa_migrate_on; 975 #endif 976 #ifdef CONFIG_NO_HZ_COMMON 977 #ifdef CONFIG_SMP 978 unsigned long last_blocked_load_update_tick; 979 unsigned int has_blocked_load; 980 call_single_data_t nohz_csd; 981 #endif /* CONFIG_SMP */ 982 unsigned int nohz_tick_stopped; 983 atomic_t nohz_flags; 984 #endif /* CONFIG_NO_HZ_COMMON */ 985 986 #ifdef CONFIG_SMP 987 unsigned int ttwu_pending; 988 #endif 989 u64 nr_switches; 990 991 #ifdef CONFIG_UCLAMP_TASK 992 /* Utilization clamp values based on CPU's RUNNABLE tasks */ 993 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; 994 unsigned int uclamp_flags; 995 #define UCLAMP_FLAG_IDLE 0x01 996 #endif 997 998 struct cfs_rq cfs; 999 struct rt_rq rt; 1000 struct dl_rq dl; 1001 1002 #ifdef CONFIG_FAIR_GROUP_SCHED 1003 /* list of leaf cfs_rq on this CPU: */ 1004 struct list_head leaf_cfs_rq_list; 1005 struct list_head *tmp_alone_branch; 1006 #endif /* CONFIG_FAIR_GROUP_SCHED */ 1007 1008 /* 1009 * This is part of a global counter where only the total sum 1010 * over all CPUs matters. A task can increase this counter on 1011 * one CPU and if it got migrated afterwards it may decrease 1012 * it on another CPU. Always updated under the runqueue lock: 1013 */ 1014 unsigned int nr_uninterruptible; 1015 1016 struct task_struct __rcu *curr; 1017 struct task_struct *idle; 1018 struct task_struct *stop; 1019 unsigned long next_balance; 1020 struct mm_struct *prev_mm; 1021 1022 unsigned int clock_update_flags; 1023 u64 clock; 1024 /* Ensure that all clocks are in the same cache line */ 1025 u64 clock_task ____cacheline_aligned; 1026 u64 clock_pelt; 1027 unsigned long lost_idle_time; 1028 u64 clock_pelt_idle; 1029 u64 clock_idle; 1030 #ifndef CONFIG_64BIT 1031 u64 clock_pelt_idle_copy; 1032 u64 clock_idle_copy; 1033 #endif 1034 1035 atomic_t nr_iowait; 1036 1037 #ifdef CONFIG_SCHED_DEBUG 1038 u64 last_seen_need_resched_ns; 1039 int ticks_without_resched; 1040 #endif 1041 1042 #ifdef CONFIG_MEMBARRIER 1043 int membarrier_state; 1044 #endif 1045 1046 #ifdef CONFIG_SMP 1047 struct root_domain *rd; 1048 struct sched_domain __rcu *sd; 1049 1050 unsigned long cpu_capacity; 1051 unsigned long cpu_capacity_orig; 1052 1053 struct balance_callback *balance_callback; 1054 1055 unsigned char nohz_idle_balance; 1056 unsigned char idle_balance; 1057 1058 unsigned long misfit_task_load; 1059 1060 /* For active balancing */ 1061 int active_balance; 1062 int push_cpu; 1063 struct cpu_stop_work active_balance_work; 1064 1065 /* CPU of this runqueue: */ 1066 int cpu; 1067 int online; 1068 1069 struct list_head cfs_tasks; 1070 1071 struct sched_avg avg_rt; 1072 struct sched_avg avg_dl; 1073 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 1074 struct sched_avg avg_irq; 1075 #endif 1076 #ifdef CONFIG_SCHED_THERMAL_PRESSURE 1077 struct sched_avg avg_thermal; 1078 #endif 1079 u64 idle_stamp; 1080 u64 avg_idle; 1081 1082 unsigned long wake_stamp; 1083 u64 wake_avg_idle; 1084 1085 /* This is used to determine avg_idle's max value */ 1086 u64 max_idle_balance_cost; 1087 1088 #ifdef CONFIG_HOTPLUG_CPU 1089 struct rcuwait hotplug_wait; 1090 #endif 1091 #endif /* CONFIG_SMP */ 1092 1093 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 1094 u64 prev_irq_time; 1095 #endif 1096 #ifdef CONFIG_PARAVIRT 1097 u64 prev_steal_time; 1098 #endif 1099 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 1100 u64 prev_steal_time_rq; 1101 #endif 1102 1103 /* calc_load related fields */ 1104 
unsigned long calc_load_update; 1105 long calc_load_active; 1106 1107 #ifdef CONFIG_SCHED_HRTICK 1108 #ifdef CONFIG_SMP 1109 call_single_data_t hrtick_csd; 1110 #endif 1111 struct hrtimer hrtick_timer; 1112 ktime_t hrtick_time; 1113 #endif 1114 1115 #ifdef CONFIG_SCHEDSTATS 1116 /* latency stats */ 1117 struct sched_info rq_sched_info; 1118 unsigned long long rq_cpu_time; 1119 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 1120 1121 /* sys_sched_yield() stats */ 1122 unsigned int yld_count; 1123 1124 /* schedule() stats */ 1125 unsigned int sched_count; 1126 unsigned int sched_goidle; 1127 1128 /* try_to_wake_up() stats */ 1129 unsigned int ttwu_count; 1130 unsigned int ttwu_local; 1131 #endif 1132 1133 #ifdef CONFIG_CPU_IDLE 1134 /* Must be inspected within a rcu lock section */ 1135 struct cpuidle_state *idle_state; 1136 #endif 1137 1138 #ifdef CONFIG_SMP 1139 unsigned int nr_pinned; 1140 #endif 1141 unsigned int push_busy; 1142 struct cpu_stop_work push_work; 1143 1144 #ifdef CONFIG_SCHED_CORE 1145 /* per rq */ 1146 struct rq *core; 1147 struct task_struct *core_pick; 1148 unsigned int core_enabled; 1149 unsigned int core_sched_seq; 1150 struct rb_root core_tree; 1151 1152 /* shared state -- careful with sched_core_cpu_deactivate() */ 1153 unsigned int core_task_seq; 1154 unsigned int core_pick_seq; 1155 unsigned long core_cookie; 1156 unsigned int core_forceidle_count; 1157 unsigned int core_forceidle_seq; 1158 unsigned int core_forceidle_occupation; 1159 u64 core_forceidle_start; 1160 #endif 1161 1162 /* Scratch cpumask to be temporarily used under rq_lock */ 1163 cpumask_var_t scratch_mask; 1164 1165 #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP) 1166 call_single_data_t cfsb_csd; 1167 struct list_head cfsb_csd_list; 1168 #endif 1169 }; 1170 1171 #ifdef CONFIG_FAIR_GROUP_SCHED 1172 1173 /* CPU runqueue to which this cfs_rq is attached */ 1174 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1175 { 1176 return cfs_rq->rq; 1177 } 1178 1179 #else 1180 1181 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1182 { 1183 return container_of(cfs_rq, struct rq, cfs); 1184 } 1185 #endif 1186 1187 static inline int cpu_of(struct rq *rq) 1188 { 1189 #ifdef CONFIG_SMP 1190 return rq->cpu; 1191 #else 1192 return 0; 1193 #endif 1194 } 1195 1196 #define MDF_PUSH 0x01 1197 1198 static inline bool is_migration_disabled(struct task_struct *p) 1199 { 1200 #ifdef CONFIG_SMP 1201 return p->migration_disabled; 1202 #else 1203 return false; 1204 #endif 1205 } 1206 1207 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 1208 1209 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 1210 #define this_rq() this_cpu_ptr(&runqueues) 1211 #define task_rq(p) cpu_rq(task_cpu(p)) 1212 #define cpu_curr(cpu) (cpu_rq(cpu)->curr) 1213 #define raw_rq() raw_cpu_ptr(&runqueues) 1214 1215 struct sched_group; 1216 #ifdef CONFIG_SCHED_CORE 1217 static inline struct cpumask *sched_group_span(struct sched_group *sg); 1218 1219 DECLARE_STATIC_KEY_FALSE(__sched_core_enabled); 1220 1221 static inline bool sched_core_enabled(struct rq *rq) 1222 { 1223 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; 1224 } 1225 1226 static inline bool sched_core_disabled(void) 1227 { 1228 return !static_branch_unlikely(&__sched_core_enabled); 1229 } 1230 1231 /* 1232 * Be careful with this function; not for general use. The return value isn't 1233 * stable unless you actually hold a relevant rq->__lock. 
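 *
 * A simplified sketch of how raw_spin_rq_lock_nested() in core.c copes
 * with that: re-check the lock pointer after acquiring it and retry if
 * core scheduling got toggled underneath us:
 *
 *	for (;;) {
 *		raw_spinlock_t *lock = rq_lockp(rq);
 *
 *		raw_spin_lock(lock);
 *		if (likely(lock == rq_lockp(rq)))
 *			break;
 *		raw_spin_unlock(lock);
 *	}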
1234 */ 1235 static inline raw_spinlock_t *rq_lockp(struct rq *rq) 1236 { 1237 if (sched_core_enabled(rq)) 1238 return &rq->core->__lock; 1239 1240 return &rq->__lock; 1241 } 1242 1243 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) 1244 { 1245 if (rq->core_enabled) 1246 return &rq->core->__lock; 1247 1248 return &rq->__lock; 1249 } 1250 1251 bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b, 1252 bool fi); 1253 1254 /* 1255 * Helpers to check if the CPU's core cookie matches with the task's cookie 1256 * when core scheduling is enabled. 1257 * A special case is that the task's cookie always matches with CPU's core 1258 * cookie if the CPU is in an idle core. 1259 */ 1260 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) 1261 { 1262 /* Ignore cookie match if core scheduler is not enabled on the CPU. */ 1263 if (!sched_core_enabled(rq)) 1264 return true; 1265 1266 return rq->core->core_cookie == p->core_cookie; 1267 } 1268 1269 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) 1270 { 1271 bool idle_core = true; 1272 int cpu; 1273 1274 /* Ignore cookie match if core scheduler is not enabled on the CPU. */ 1275 if (!sched_core_enabled(rq)) 1276 return true; 1277 1278 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { 1279 if (!available_idle_cpu(cpu)) { 1280 idle_core = false; 1281 break; 1282 } 1283 } 1284 1285 /* 1286 * A CPU in an idle core is always the best choice for tasks with 1287 * cookies. 1288 */ 1289 return idle_core || rq->core->core_cookie == p->core_cookie; 1290 } 1291 1292 static inline bool sched_group_cookie_match(struct rq *rq, 1293 struct task_struct *p, 1294 struct sched_group *group) 1295 { 1296 int cpu; 1297 1298 /* Ignore cookie match if core scheduler is not enabled on the CPU. 
*/ 1299 if (!sched_core_enabled(rq)) 1300 return true; 1301 1302 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { 1303 if (sched_core_cookie_match(cpu_rq(cpu), p)) 1304 return true; 1305 } 1306 return false; 1307 } 1308 1309 static inline bool sched_core_enqueued(struct task_struct *p) 1310 { 1311 return !RB_EMPTY_NODE(&p->core_node); 1312 } 1313 1314 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p); 1315 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags); 1316 1317 extern void sched_core_get(void); 1318 extern void sched_core_put(void); 1319 1320 #else /* !CONFIG_SCHED_CORE */ 1321 1322 static inline bool sched_core_enabled(struct rq *rq) 1323 { 1324 return false; 1325 } 1326 1327 static inline bool sched_core_disabled(void) 1328 { 1329 return true; 1330 } 1331 1332 static inline raw_spinlock_t *rq_lockp(struct rq *rq) 1333 { 1334 return &rq->__lock; 1335 } 1336 1337 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) 1338 { 1339 return &rq->__lock; 1340 } 1341 1342 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) 1343 { 1344 return true; 1345 } 1346 1347 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) 1348 { 1349 return true; 1350 } 1351 1352 static inline bool sched_group_cookie_match(struct rq *rq, 1353 struct task_struct *p, 1354 struct sched_group *group) 1355 { 1356 return true; 1357 } 1358 #endif /* CONFIG_SCHED_CORE */ 1359 1360 static inline void lockdep_assert_rq_held(struct rq *rq) 1361 { 1362 lockdep_assert_held(__rq_lockp(rq)); 1363 } 1364 1365 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass); 1366 extern bool raw_spin_rq_trylock(struct rq *rq); 1367 extern void raw_spin_rq_unlock(struct rq *rq); 1368 1369 static inline void raw_spin_rq_lock(struct rq *rq) 1370 { 1371 raw_spin_rq_lock_nested(rq, 0); 1372 } 1373 1374 static inline void raw_spin_rq_lock_irq(struct rq *rq) 1375 { 1376 local_irq_disable(); 1377 raw_spin_rq_lock(rq); 1378 } 1379 1380 static inline void raw_spin_rq_unlock_irq(struct rq *rq) 1381 { 1382 raw_spin_rq_unlock(rq); 1383 local_irq_enable(); 1384 } 1385 1386 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) 1387 { 1388 unsigned long flags; 1389 local_irq_save(flags); 1390 raw_spin_rq_lock(rq); 1391 return flags; 1392 } 1393 1394 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) 1395 { 1396 raw_spin_rq_unlock(rq); 1397 local_irq_restore(flags); 1398 } 1399 1400 #define raw_spin_rq_lock_irqsave(rq, flags) \ 1401 do { \ 1402 flags = _raw_spin_rq_lock_irqsave(rq); \ 1403 } while (0) 1404 1405 #ifdef CONFIG_SCHED_SMT 1406 extern void __update_idle_core(struct rq *rq); 1407 1408 static inline void update_idle_core(struct rq *rq) 1409 { 1410 if (static_branch_unlikely(&sched_smt_present)) 1411 __update_idle_core(rq); 1412 } 1413 1414 #else 1415 static inline void update_idle_core(struct rq *rq) { } 1416 #endif 1417 1418 #ifdef CONFIG_FAIR_GROUP_SCHED 1419 static inline struct task_struct *task_of(struct sched_entity *se) 1420 { 1421 SCHED_WARN_ON(!entity_is_task(se)); 1422 return container_of(se, struct task_struct, se); 1423 } 1424 1425 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) 1426 { 1427 return p->se.cfs_rq; 1428 } 1429 1430 /* runqueue on which this entity is (to be) queued */ 1431 static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se) 1432 { 1433 return se->cfs_rq; 1434 } 1435 1436 /* runqueue "owned" by this group */ 1437 
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

#else

#define task_of(_se)	container_of(_se, struct task_struct, se)

static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	const struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}
#endif

extern void update_rq_clock(struct rq *rq);

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_task;
}

/**
 * By default the decay is the default pelt decay period.
 * The decay shift can change the decay period in
 * multiples of 32.
 *  Decay shift		Decay period(ms)
 *	0			32
 *	1			64
 *	2			128
 *	3			256
 *	4			512
 */
extern int sched_thermal_decay_shift;

static inline u64 rq_clock_thermal(struct rq *rq)
{
	return rq_clock_task(rq) >> sched_thermal_decay_shift;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is canceled.
1547 */ 1548 static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1549 { 1550 lockdep_assert_rq_held(rq); 1551 rq->clock_update_flags &= ~RQCF_REQ_SKIP; 1552 } 1553 1554 /* 1555 * During cpu offlining and rq wide unthrottling, we can trigger 1556 * an update_rq_clock() for several cfs and rt runqueues (Typically 1557 * when using list_for_each_entry_*) 1558 * rq_clock_start_loop_update() can be called after updating the clock 1559 * once and before iterating over the list to prevent multiple update. 1560 * After the iterative traversal, we need to call rq_clock_stop_loop_update() 1561 * to clear RQCF_ACT_SKIP of rq->clock_update_flags. 1562 */ 1563 static inline void rq_clock_start_loop_update(struct rq *rq) 1564 { 1565 lockdep_assert_rq_held(rq); 1566 SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP); 1567 rq->clock_update_flags |= RQCF_ACT_SKIP; 1568 } 1569 1570 static inline void rq_clock_stop_loop_update(struct rq *rq) 1571 { 1572 lockdep_assert_rq_held(rq); 1573 rq->clock_update_flags &= ~RQCF_ACT_SKIP; 1574 } 1575 1576 struct rq_flags { 1577 unsigned long flags; 1578 struct pin_cookie cookie; 1579 #ifdef CONFIG_SCHED_DEBUG 1580 /* 1581 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1582 * current pin context is stashed here in case it needs to be 1583 * restored in rq_repin_lock(). 1584 */ 1585 unsigned int clock_update_flags; 1586 #endif 1587 }; 1588 1589 extern struct balance_callback balance_push_callback; 1590 1591 /* 1592 * Lockdep annotation that avoids accidental unlocks; it's like a 1593 * sticky/continuous lockdep_assert_held(). 1594 * 1595 * This avoids code that has access to 'struct rq *rq' (basically everything in 1596 * the scheduler) from accidentally unlocking the rq if they do not also have a 1597 * copy of the (on-stack) 'struct rq_flags rf'. 1598 * 1599 * Also see Documentation/locking/lockdep-design.rst. 1600 */ 1601 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1602 { 1603 rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); 1604 1605 #ifdef CONFIG_SCHED_DEBUG 1606 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1607 rf->clock_update_flags = 0; 1608 #ifdef CONFIG_SMP 1609 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); 1610 #endif 1611 #endif 1612 } 1613 1614 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1615 { 1616 #ifdef CONFIG_SCHED_DEBUG 1617 if (rq->clock_update_flags > RQCF_ACT_SKIP) 1618 rf->clock_update_flags = RQCF_UPDATED; 1619 #endif 1620 1621 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); 1622 } 1623 1624 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1625 { 1626 lockdep_repin_lock(__rq_lockp(rq), rf->cookie); 1627 1628 #ifdef CONFIG_SCHED_DEBUG 1629 /* 1630 * Restore the value we stashed in @rf for this pin context. 
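	 *
	 * A typical caller (sketch; see e.g. newidle_balance() in fair.c)
	 * drops and re-takes the rq lock around code that runs unlocked:
	 *
	 *	rq_unpin_lock(rq, rf);
	 *	raw_spin_rq_unlock(rq);
	 *	...
	 *	raw_spin_rq_lock(rq);
	 *	rq_repin_lock(rq, rf);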
1631 */ 1632 rq->clock_update_flags |= rf->clock_update_flags; 1633 #endif 1634 } 1635 1636 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1637 __acquires(rq->lock); 1638 1639 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1640 __acquires(p->pi_lock) 1641 __acquires(rq->lock); 1642 1643 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 1644 __releases(rq->lock) 1645 { 1646 rq_unpin_lock(rq, rf); 1647 raw_spin_rq_unlock(rq); 1648 } 1649 1650 static inline void 1651 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1652 __releases(rq->lock) 1653 __releases(p->pi_lock) 1654 { 1655 rq_unpin_lock(rq, rf); 1656 raw_spin_rq_unlock(rq); 1657 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 1658 } 1659 1660 static inline void 1661 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 1662 __acquires(rq->lock) 1663 { 1664 raw_spin_rq_lock_irqsave(rq, rf->flags); 1665 rq_pin_lock(rq, rf); 1666 } 1667 1668 static inline void 1669 rq_lock_irq(struct rq *rq, struct rq_flags *rf) 1670 __acquires(rq->lock) 1671 { 1672 raw_spin_rq_lock_irq(rq); 1673 rq_pin_lock(rq, rf); 1674 } 1675 1676 static inline void 1677 rq_lock(struct rq *rq, struct rq_flags *rf) 1678 __acquires(rq->lock) 1679 { 1680 raw_spin_rq_lock(rq); 1681 rq_pin_lock(rq, rf); 1682 } 1683 1684 static inline void 1685 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 1686 __releases(rq->lock) 1687 { 1688 rq_unpin_lock(rq, rf); 1689 raw_spin_rq_unlock_irqrestore(rq, rf->flags); 1690 } 1691 1692 static inline void 1693 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 1694 __releases(rq->lock) 1695 { 1696 rq_unpin_lock(rq, rf); 1697 raw_spin_rq_unlock_irq(rq); 1698 } 1699 1700 static inline void 1701 rq_unlock(struct rq *rq, struct rq_flags *rf) 1702 __releases(rq->lock) 1703 { 1704 rq_unpin_lock(rq, rf); 1705 raw_spin_rq_unlock(rq); 1706 } 1707 1708 DEFINE_LOCK_GUARD_1(rq_lock, struct rq, 1709 rq_lock(_T->lock, &_T->rf), 1710 rq_unlock(_T->lock, &_T->rf), 1711 struct rq_flags rf) 1712 1713 DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq, 1714 rq_lock_irq(_T->lock, &_T->rf), 1715 rq_unlock_irq(_T->lock, &_T->rf), 1716 struct rq_flags rf) 1717 1718 DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq, 1719 rq_lock_irqsave(_T->lock, &_T->rf), 1720 rq_unlock_irqrestore(_T->lock, &_T->rf), 1721 struct rq_flags rf) 1722 1723 static inline struct rq * 1724 this_rq_lock_irq(struct rq_flags *rf) 1725 __acquires(rq->lock) 1726 { 1727 struct rq *rq; 1728 1729 local_irq_disable(); 1730 rq = this_rq(); 1731 rq_lock(rq, rf); 1732 return rq; 1733 } 1734 1735 #ifdef CONFIG_NUMA 1736 enum numa_topology_type { 1737 NUMA_DIRECT, 1738 NUMA_GLUELESS_MESH, 1739 NUMA_BACKPLANE, 1740 }; 1741 extern enum numa_topology_type sched_numa_topology_type; 1742 extern int sched_max_numa_distance; 1743 extern bool find_numa_distance(int distance); 1744 extern void sched_init_numa(int offline_node); 1745 extern void sched_update_numa(int cpu, bool online); 1746 extern void sched_domains_numa_masks_set(unsigned int cpu); 1747 extern void sched_domains_numa_masks_clear(unsigned int cpu); 1748 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1749 #else 1750 static inline void sched_init_numa(int offline_node) { } 1751 static inline void sched_update_numa(int cpu, bool online) { } 1752 static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1753 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1754 static inline int 
sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1755 { 1756 return nr_cpu_ids; 1757 } 1758 #endif 1759 1760 #ifdef CONFIG_NUMA_BALANCING 1761 /* The regions in numa_faults array from task_struct */ 1762 enum numa_faults_stats { 1763 NUMA_MEM = 0, 1764 NUMA_CPU, 1765 NUMA_MEMBUF, 1766 NUMA_CPUBUF 1767 }; 1768 extern void sched_setnuma(struct task_struct *p, int node); 1769 extern int migrate_task_to(struct task_struct *p, int cpu); 1770 extern int migrate_swap(struct task_struct *p, struct task_struct *t, 1771 int cpu, int scpu); 1772 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 1773 #else 1774 static inline void 1775 init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 1776 { 1777 } 1778 #endif /* CONFIG_NUMA_BALANCING */ 1779 1780 #ifdef CONFIG_SMP 1781 1782 static inline void 1783 queue_balance_callback(struct rq *rq, 1784 struct balance_callback *head, 1785 void (*func)(struct rq *rq)) 1786 { 1787 lockdep_assert_rq_held(rq); 1788 1789 /* 1790 * Don't (re)queue an already queued item; nor queue anything when 1791 * balance_push() is active, see the comment with 1792 * balance_push_callback. 1793 */ 1794 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) 1795 return; 1796 1797 head->func = func; 1798 head->next = rq->balance_callback; 1799 rq->balance_callback = head; 1800 } 1801 1802 #define rcu_dereference_check_sched_domain(p) \ 1803 rcu_dereference_check((p), \ 1804 lockdep_is_held(&sched_domains_mutex)) 1805 1806 /* 1807 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1808 * See destroy_sched_domains: call_rcu for details. 1809 * 1810 * The domain tree of any CPU may only be accessed from within 1811 * preempt-disabled sections. 1812 */ 1813 #define for_each_domain(cpu, __sd) \ 1814 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1815 __sd; __sd = __sd->parent) 1816 1817 /* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */ 1818 #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) | 1819 static const unsigned int SD_SHARED_CHILD_MASK = 1820 #include <linux/sched/sd_flags.h> 1821 0; 1822 #undef SD_FLAG 1823 1824 /** 1825 * highest_flag_domain - Return highest sched_domain containing flag. 1826 * @cpu: The CPU whose highest level of sched domain is to 1827 * be returned. 1828 * @flag: The flag to check for the highest sched_domain 1829 * for the given CPU. 1830 * 1831 * Returns the highest sched_domain of a CPU which contains @flag. If @flag has 1832 * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag. 1833 */ 1834 static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1835 { 1836 struct sched_domain *sd, *hsd = NULL; 1837 1838 for_each_domain(cpu, sd) { 1839 if (sd->flags & flag) { 1840 hsd = sd; 1841 continue; 1842 } 1843 1844 /* 1845 * Stop the search if @flag is known to be shared at lower 1846 * levels. It will not be found further up. 
1847 */ 1848 if (flag & SD_SHARED_CHILD_MASK) 1849 break; 1850 } 1851 1852 return hsd; 1853 } 1854 1855 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1856 { 1857 struct sched_domain *sd; 1858 1859 for_each_domain(cpu, sd) { 1860 if (sd->flags & flag) 1861 break; 1862 } 1863 1864 return sd; 1865 } 1866 1867 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 1868 DECLARE_PER_CPU(int, sd_llc_size); 1869 DECLARE_PER_CPU(int, sd_llc_id); 1870 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1871 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1872 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1873 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1874 extern struct static_key_false sched_asym_cpucapacity; 1875 1876 static __always_inline bool sched_asym_cpucap_active(void) 1877 { 1878 return static_branch_unlikely(&sched_asym_cpucapacity); 1879 } 1880 1881 struct sched_group_capacity { 1882 atomic_t ref; 1883 /* 1884 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 1885 * for a single CPU. 1886 */ 1887 unsigned long capacity; 1888 unsigned long min_capacity; /* Min per-CPU capacity in group */ 1889 unsigned long max_capacity; /* Max per-CPU capacity in group */ 1890 unsigned long next_update; 1891 int imbalance; /* XXX unrelated to capacity but shared group state */ 1892 1893 #ifdef CONFIG_SCHED_DEBUG 1894 int id; 1895 #endif 1896 1897 unsigned long cpumask[]; /* Balance mask */ 1898 }; 1899 1900 struct sched_group { 1901 struct sched_group *next; /* Must be a circular list */ 1902 atomic_t ref; 1903 1904 unsigned int group_weight; 1905 unsigned int cores; 1906 struct sched_group_capacity *sgc; 1907 int asym_prefer_cpu; /* CPU of highest priority in group */ 1908 int flags; 1909 1910 /* 1911 * The CPUs this group covers. 1912 * 1913 * NOTE: this field is variable length. (Allocated dynamically 1914 * by attaching extra space to the end of the structure, 1915 * depending on how many CPUs the kernel has booted up with) 1916 */ 1917 unsigned long cpumask[]; 1918 }; 1919 1920 static inline struct cpumask *sched_group_span(struct sched_group *sg) 1921 { 1922 return to_cpumask(sg->cpumask); 1923 } 1924 1925 /* 1926 * See build_balance_mask(). 
1927 */ 1928 static inline struct cpumask *group_balance_mask(struct sched_group *sg) 1929 { 1930 return to_cpumask(sg->sgc->cpumask); 1931 } 1932 1933 extern int group_balance_cpu(struct sched_group *sg); 1934 1935 #ifdef CONFIG_SCHED_DEBUG 1936 void update_sched_domain_debugfs(void); 1937 void dirty_sched_domain_sysctl(int cpu); 1938 #else 1939 static inline void update_sched_domain_debugfs(void) 1940 { 1941 } 1942 static inline void dirty_sched_domain_sysctl(int cpu) 1943 { 1944 } 1945 #endif 1946 1947 extern int sched_update_scaling(void); 1948 1949 static inline const struct cpumask *task_user_cpus(struct task_struct *p) 1950 { 1951 if (!p->user_cpus_ptr) 1952 return cpu_possible_mask; /* &init_task.cpus_mask */ 1953 return p->user_cpus_ptr; 1954 } 1955 #endif /* CONFIG_SMP */ 1956 1957 #include "stats.h" 1958 1959 #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS) 1960 1961 extern void __sched_core_account_forceidle(struct rq *rq); 1962 1963 static inline void sched_core_account_forceidle(struct rq *rq) 1964 { 1965 if (schedstat_enabled()) 1966 __sched_core_account_forceidle(rq); 1967 } 1968 1969 extern void __sched_core_tick(struct rq *rq); 1970 1971 static inline void sched_core_tick(struct rq *rq) 1972 { 1973 if (sched_core_enabled(rq) && schedstat_enabled()) 1974 __sched_core_tick(rq); 1975 } 1976 1977 #else 1978 1979 static inline void sched_core_account_forceidle(struct rq *rq) {} 1980 1981 static inline void sched_core_tick(struct rq *rq) {} 1982 1983 #endif /* CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS */ 1984 1985 #ifdef CONFIG_CGROUP_SCHED 1986 1987 /* 1988 * Return the group to which this tasks belongs. 1989 * 1990 * We cannot use task_css() and friends because the cgroup subsystem 1991 * changes that value before the cgroup_subsys::attach() method is called, 1992 * therefore we cannot pin it and might observe the wrong value. 1993 * 1994 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup 1995 * core changes this before calling sched_move_task(). 1996 * 1997 * Instead we use a 'copy' which is updated from sched_move_task() while 1998 * holding both task_struct::pi_lock and rq::lock. 1999 */ 2000 static inline struct task_group *task_group(struct task_struct *p) 2001 { 2002 return p->sched_task_group; 2003 } 2004 2005 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 2006 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 2007 { 2008 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 2009 struct task_group *tg = task_group(p); 2010 #endif 2011 2012 #ifdef CONFIG_FAIR_GROUP_SCHED 2013 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 2014 p->se.cfs_rq = tg->cfs_rq[cpu]; 2015 p->se.parent = tg->se[cpu]; 2016 p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0; 2017 #endif 2018 2019 #ifdef CONFIG_RT_GROUP_SCHED 2020 p->rt.rt_rq = tg->rt_rq[cpu]; 2021 p->rt.parent = tg->rt_se[cpu]; 2022 #endif 2023 } 2024 2025 #else /* CONFIG_CGROUP_SCHED */ 2026 2027 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 2028 static inline struct task_group *task_group(struct task_struct *p) 2029 { 2030 return NULL; 2031 } 2032 2033 #endif /* CONFIG_CGROUP_SCHED */ 2034 2035 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 2036 { 2037 set_task_rq(p, cpu); 2038 #ifdef CONFIG_SMP 2039 /* 2040 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 2041 * successfully executed on another CPU. 
We must ensure that updates of 2042 * per-task data have been completed by this moment. 2043 */ 2044 smp_wmb(); 2045 WRITE_ONCE(task_thread_info(p)->cpu, cpu); 2046 p->wake_cpu = cpu; 2047 #endif 2048 } 2049 2050 /* 2051 * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 2052 */ 2053 #ifdef CONFIG_SCHED_DEBUG 2054 # define const_debug __read_mostly 2055 #else 2056 # define const_debug const 2057 #endif 2058 2059 #define SCHED_FEAT(name, enabled) \ 2060 __SCHED_FEAT_##name , 2061 2062 enum { 2063 #include "features.h" 2064 __SCHED_FEAT_NR, 2065 }; 2066 2067 #undef SCHED_FEAT 2068 2069 #ifdef CONFIG_SCHED_DEBUG 2070 2071 /* 2072 * To support run-time toggling of sched features, all the translation units 2073 * (but core.c) reference the sysctl_sched_features defined in core.c. 2074 */ 2075 extern const_debug unsigned int sysctl_sched_features; 2076 2077 #ifdef CONFIG_JUMP_LABEL 2078 #define SCHED_FEAT(name, enabled) \ 2079 static __always_inline bool static_branch_##name(struct static_key *key) \ 2080 { \ 2081 return static_key_##enabled(key); \ 2082 } 2083 2084 #include "features.h" 2085 #undef SCHED_FEAT 2086 2087 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 2088 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 2089 2090 #else /* !CONFIG_JUMP_LABEL */ 2091 2092 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 2093 2094 #endif /* CONFIG_JUMP_LABEL */ 2095 2096 #else /* !SCHED_DEBUG */ 2097 2098 /* 2099 * Each translation unit has its own copy of sysctl_sched_features to allow 2100 * constants propagation at compile time and compiler optimization based on 2101 * features default. 2102 */ 2103 #define SCHED_FEAT(name, enabled) \ 2104 (1UL << __SCHED_FEAT_##name) * enabled | 2105 static const_debug __maybe_unused unsigned int sysctl_sched_features = 2106 #include "features.h" 2107 0; 2108 #undef SCHED_FEAT 2109 2110 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 2111 2112 #endif /* SCHED_DEBUG */ 2113 2114 extern struct static_key_false sched_numa_balancing; 2115 extern struct static_key_false sched_schedstats; 2116 2117 static inline u64 global_rt_period(void) 2118 { 2119 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 2120 } 2121 2122 static inline u64 global_rt_runtime(void) 2123 { 2124 if (sysctl_sched_rt_runtime < 0) 2125 return RUNTIME_INF; 2126 2127 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 2128 } 2129 2130 static inline int task_current(struct rq *rq, struct task_struct *p) 2131 { 2132 return rq->curr == p; 2133 } 2134 2135 static inline int task_on_cpu(struct rq *rq, struct task_struct *p) 2136 { 2137 #ifdef CONFIG_SMP 2138 return p->on_cpu; 2139 #else 2140 return task_current(rq, p); 2141 #endif 2142 } 2143 2144 static inline int task_on_rq_queued(struct task_struct *p) 2145 { 2146 return p->on_rq == TASK_ON_RQ_QUEUED; 2147 } 2148 2149 static inline int task_on_rq_migrating(struct task_struct *p) 2150 { 2151 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 2152 } 2153 2154 /* Wake flags. 
The first three directly map to some SD flag value */ 2155 #define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */ 2156 #define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */ 2157 #define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */ 2158 2159 #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */ 2160 #define WF_MIGRATED 0x20 /* Internal use, task got migrated */ 2161 2162 #ifdef CONFIG_SMP 2163 static_assert(WF_EXEC == SD_BALANCE_EXEC); 2164 static_assert(WF_FORK == SD_BALANCE_FORK); 2165 static_assert(WF_TTWU == SD_BALANCE_WAKE); 2166 #endif 2167 2168 /* 2169 * To aid in avoiding the subversion of "niceness" due to uneven distribution 2170 * of tasks with abnormal "nice" values across CPUs the contribution that 2171 * each task makes to its run queue's load is weighted according to its 2172 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 2173 * scaled version of the new time slice allocation that they receive on time 2174 * slice expiry etc. 2175 */ 2176 2177 #define WEIGHT_IDLEPRIO 3 2178 #define WMULT_IDLEPRIO 1431655765 2179 2180 extern const int sched_prio_to_weight[40]; 2181 extern const u32 sched_prio_to_wmult[40]; 2182 2183 /* 2184 * {de,en}queue flags: 2185 * 2186 * DEQUEUE_SLEEP - task is no longer runnable 2187 * ENQUEUE_WAKEUP - task just became runnable 2188 * 2189 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 2190 * are in a known state which allows modification. Such pairs 2191 * should preserve as much state as possible. 2192 * 2193 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 2194 * in the runqueue. 2195 * 2196 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 2197 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 2198 * ENQUEUE_MIGRATED - the task was migrated during wakeup 2199 * 2200 */ 2201 2202 #define DEQUEUE_SLEEP 0x01 2203 #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 2204 #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 2205 #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 2206 2207 #define ENQUEUE_WAKEUP 0x01 2208 #define ENQUEUE_RESTORE 0x02 2209 #define ENQUEUE_MOVE 0x04 2210 #define ENQUEUE_NOCLOCK 0x08 2211 2212 #define ENQUEUE_HEAD 0x10 2213 #define ENQUEUE_REPLENISH 0x20 2214 #ifdef CONFIG_SMP 2215 #define ENQUEUE_MIGRATED 0x40 2216 #else 2217 #define ENQUEUE_MIGRATED 0x00 2218 #endif 2219 #define ENQUEUE_INITIAL 0x80 2220 2221 #define RETRY_TASK ((void *)-1UL) 2222 2223 struct affinity_context { 2224 const struct cpumask *new_mask; 2225 struct cpumask *user_mask; 2226 unsigned int flags; 2227 }; 2228 2229 struct sched_class { 2230 2231 #ifdef CONFIG_UCLAMP_TASK 2232 int uclamp_enabled; 2233 #endif 2234 2235 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 2236 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 2237 void (*yield_task) (struct rq *rq); 2238 bool (*yield_to_task)(struct rq *rq, struct task_struct *p); 2239 2240 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 2241 2242 struct task_struct *(*pick_next_task)(struct rq *rq); 2243 2244 void (*put_prev_task)(struct rq *rq, struct task_struct *p); 2245 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 2246 2247 #ifdef CONFIG_SMP 2248 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2249 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); 2250 2251 struct task_struct * 
(*pick_task)(struct rq *rq); 2252 2253 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 2254 2255 void (*task_woken)(struct rq *this_rq, struct task_struct *task); 2256 2257 void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx); 2258 2259 void (*rq_online)(struct rq *rq); 2260 void (*rq_offline)(struct rq *rq); 2261 2262 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); 2263 #endif 2264 2265 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 2266 void (*task_fork)(struct task_struct *p); 2267 void (*task_dead)(struct task_struct *p); 2268 2269 /* 2270 * The switched_from() call is allowed to drop rq->lock, therefore we 2271 * cannot assume the switched_from/switched_to pair is serialized by 2272 * rq->lock. They are however serialized by p->pi_lock. 2273 */ 2274 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 2275 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 2276 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 2277 int oldprio); 2278 2279 unsigned int (*get_rr_interval)(struct rq *rq, 2280 struct task_struct *task); 2281 2282 void (*update_curr)(struct rq *rq); 2283 2284 #ifdef CONFIG_FAIR_GROUP_SCHED 2285 void (*task_change_group)(struct task_struct *p); 2286 #endif 2287 2288 #ifdef CONFIG_SCHED_CORE 2289 int (*task_is_throttled)(struct task_struct *p, int cpu); 2290 #endif 2291 }; 2292 2293 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 2294 { 2295 WARN_ON_ONCE(rq->curr != prev); 2296 prev->sched_class->put_prev_task(rq, prev); 2297 } 2298 2299 static inline void set_next_task(struct rq *rq, struct task_struct *next) 2300 { 2301 next->sched_class->set_next_task(rq, next, false); 2302 } 2303 2304 2305 /* 2306 * Helper to define a sched_class instance; each one is placed in a separate 2307 * section which is ordered by the linker script: 2308 * 2309 * include/asm-generic/vmlinux.lds.h 2310 * 2311 * *CAREFUL* they are laid out in *REVERSE* order!!! 2312 * 2313 * Also enforce alignment on the instance, not the type, to guarantee layout. 
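 *
 * A usage sketch, for orientation only (the real instances live in the
 * per-class files; fair.c, for example, defines its class roughly as):
 *
 *	DEFINE_SCHED_CLASS(fair) = {
 *		.enqueue_task	= enqueue_task_fair,
 *		.dequeue_task	= dequeue_task_fair,
 *		...
 *	};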
2314 */ 2315 #define DEFINE_SCHED_CLASS(name) \ 2316 const struct sched_class name##_sched_class \ 2317 __aligned(__alignof__(struct sched_class)) \ 2318 __section("__" #name "_sched_class") 2319 2320 /* Defined in include/asm-generic/vmlinux.lds.h */ 2321 extern struct sched_class __sched_class_highest[]; 2322 extern struct sched_class __sched_class_lowest[]; 2323 2324 #define for_class_range(class, _from, _to) \ 2325 for (class = (_from); class < (_to); class++) 2326 2327 #define for_each_class(class) \ 2328 for_class_range(class, __sched_class_highest, __sched_class_lowest) 2329 2330 #define sched_class_above(_a, _b) ((_a) < (_b)) 2331 2332 extern const struct sched_class stop_sched_class; 2333 extern const struct sched_class dl_sched_class; 2334 extern const struct sched_class rt_sched_class; 2335 extern const struct sched_class fair_sched_class; 2336 extern const struct sched_class idle_sched_class; 2337 2338 static inline bool sched_stop_runnable(struct rq *rq) 2339 { 2340 return rq->stop && task_on_rq_queued(rq->stop); 2341 } 2342 2343 static inline bool sched_dl_runnable(struct rq *rq) 2344 { 2345 return rq->dl.dl_nr_running > 0; 2346 } 2347 2348 static inline bool sched_rt_runnable(struct rq *rq) 2349 { 2350 return rq->rt.rt_queued > 0; 2351 } 2352 2353 static inline bool sched_fair_runnable(struct rq *rq) 2354 { 2355 return rq->cfs.nr_running > 0; 2356 } 2357 2358 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2359 extern struct task_struct *pick_next_task_idle(struct rq *rq); 2360 2361 #define SCA_CHECK 0x01 2362 #define SCA_MIGRATE_DISABLE 0x02 2363 #define SCA_MIGRATE_ENABLE 0x04 2364 #define SCA_USER 0x08 2365 2366 #ifdef CONFIG_SMP 2367 2368 extern void update_group_capacity(struct sched_domain *sd, int cpu); 2369 2370 extern void trigger_load_balance(struct rq *rq); 2371 2372 extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx); 2373 2374 static inline struct task_struct *get_push_task(struct rq *rq) 2375 { 2376 struct task_struct *p = rq->curr; 2377 2378 lockdep_assert_rq_held(rq); 2379 2380 if (rq->push_busy) 2381 return NULL; 2382 2383 if (p->nr_cpus_allowed == 1) 2384 return NULL; 2385 2386 if (p->migration_disabled) 2387 return NULL; 2388 2389 rq->push_busy = true; 2390 return get_task_struct(p); 2391 } 2392 2393 extern int push_cpu_stop(void *arg); 2394 2395 #endif 2396 2397 #ifdef CONFIG_CPU_IDLE 2398 static inline void idle_set_state(struct rq *rq, 2399 struct cpuidle_state *idle_state) 2400 { 2401 rq->idle_state = idle_state; 2402 } 2403 2404 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2405 { 2406 SCHED_WARN_ON(!rcu_read_lock_held()); 2407 2408 return rq->idle_state; 2409 } 2410 #else 2411 static inline void idle_set_state(struct rq *rq, 2412 struct cpuidle_state *idle_state) 2413 { 2414 } 2415 2416 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2417 { 2418 return NULL; 2419 } 2420 #endif 2421 2422 extern void schedule_idle(void); 2423 2424 extern void sysrq_sched_debug_show(void); 2425 extern void sched_init_granularity(void); 2426 extern void update_max_interval(void); 2427 2428 extern void init_sched_dl_class(void); 2429 extern void init_sched_rt_class(void); 2430 extern void init_sched_fair_class(void); 2431 2432 extern void reweight_task(struct task_struct *p, int prio); 2433 2434 extern void resched_curr(struct rq *rq); 2435 extern void resched_cpu(int cpu); 2436 2437 extern struct rt_bandwidth def_rt_bandwidth; 2438 
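/*
 * Orientation sketch (not a new declaration): the default RT bandwidth object
 * above is initialized from the sysctl-derived helpers global_rt_period() and
 * global_rt_runtime(), roughly as sched_init() does in core.c:
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(), global_rt_runtime());
 *
 * which with the default sysctls allows 950ms of RT runtime per 1s period.
 */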
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 2439 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); 2440 2441 extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 2442 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 2443 2444 #define BW_SHIFT 20 2445 #define BW_UNIT (1 << BW_SHIFT) 2446 #define RATIO_SHIFT 8 2447 #define MAX_BW_BITS (64 - BW_SHIFT) 2448 #define MAX_BW ((1ULL << MAX_BW_BITS) - 1) 2449 unsigned long to_ratio(u64 period, u64 runtime); 2450 2451 extern void init_entity_runnable_average(struct sched_entity *se); 2452 extern void post_init_entity_util_avg(struct task_struct *p); 2453 2454 #ifdef CONFIG_NO_HZ_FULL 2455 extern bool sched_can_stop_tick(struct rq *rq); 2456 extern int __init sched_tick_offload_init(void); 2457 2458 /* 2459 * The tick may be needed by tasks in the runqueue depending on their policy and 2460 * requirements. If the tick is needed, send the target CPU an IPI to kick it out of 2461 * nohz mode if necessary. 2462 */ 2463 static inline void sched_update_tick_dependency(struct rq *rq) 2464 { 2465 int cpu = cpu_of(rq); 2466 2467 if (!tick_nohz_full_cpu(cpu)) 2468 return; 2469 2470 if (sched_can_stop_tick(rq)) 2471 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 2472 else 2473 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 2474 } 2475 #else 2476 static inline int sched_tick_offload_init(void) { return 0; } 2477 static inline void sched_update_tick_dependency(struct rq *rq) { } 2478 #endif 2479 2480 static inline void add_nr_running(struct rq *rq, unsigned count) 2481 { 2482 unsigned prev_nr = rq->nr_running; 2483 2484 rq->nr_running = prev_nr + count; 2485 if (trace_sched_update_nr_running_tp_enabled()) { 2486 call_trace_sched_update_nr_running(rq, count); 2487 } 2488 2489 #ifdef CONFIG_SMP 2490 if (prev_nr < 2 && rq->nr_running >= 2) { 2491 if (!READ_ONCE(rq->rd->overload)) 2492 WRITE_ONCE(rq->rd->overload, 1); 2493 } 2494 #endif 2495 2496 sched_update_tick_dependency(rq); 2497 } 2498 2499 static inline void sub_nr_running(struct rq *rq, unsigned count) 2500 { 2501 rq->nr_running -= count; 2502 if (trace_sched_update_nr_running_tp_enabled()) { 2503 call_trace_sched_update_nr_running(rq, -count); 2504 } 2505 2506 /* Check if we still need the tick on this CPU */ 2507 sched_update_tick_dependency(rq); 2508 } 2509 2510 extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 2511 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 2512 2513 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 2514 2515 #ifdef CONFIG_PREEMPT_RT 2516 #define SCHED_NR_MIGRATE_BREAK 8 2517 #else 2518 #define SCHED_NR_MIGRATE_BREAK 32 2519 #endif 2520 2521 extern const_debug unsigned int sysctl_sched_nr_migrate; 2522 extern const_debug unsigned int sysctl_sched_migration_cost; 2523 2524 extern unsigned int sysctl_sched_base_slice; 2525 2526 #ifdef CONFIG_SCHED_DEBUG 2527 extern int sysctl_resched_latency_warn_ms; 2528 extern int sysctl_resched_latency_warn_once; 2529 2530 extern unsigned int sysctl_sched_tunable_scaling; 2531 2532 extern unsigned int sysctl_numa_balancing_scan_delay; 2533 extern unsigned int sysctl_numa_balancing_scan_period_min; 2534 extern unsigned int sysctl_numa_balancing_scan_period_max; 2535 extern unsigned int sysctl_numa_balancing_scan_size; 2536 extern unsigned int sysctl_numa_balancing_hot_threshold; 2537 #endif 2538 2539 #ifdef CONFIG_SCHED_HRTICK 2540 2541 /* 2542 * Use hrtick when: 2543 * - enabled by features
2544 * - hrtimer is actually high res 2545 */ 2546 static inline int hrtick_enabled(struct rq *rq) 2547 { 2548 if (!cpu_active(cpu_of(rq))) 2549 return 0; 2550 return hrtimer_is_hres_active(&rq->hrtick_timer); 2551 } 2552 2553 static inline int hrtick_enabled_fair(struct rq *rq) 2554 { 2555 if (!sched_feat(HRTICK)) 2556 return 0; 2557 return hrtick_enabled(rq); 2558 } 2559 2560 static inline int hrtick_enabled_dl(struct rq *rq) 2561 { 2562 if (!sched_feat(HRTICK_DL)) 2563 return 0; 2564 return hrtick_enabled(rq); 2565 } 2566 2567 void hrtick_start(struct rq *rq, u64 delay); 2568 2569 #else 2570 2571 static inline int hrtick_enabled_fair(struct rq *rq) 2572 { 2573 return 0; 2574 } 2575 2576 static inline int hrtick_enabled_dl(struct rq *rq) 2577 { 2578 return 0; 2579 } 2580 2581 static inline int hrtick_enabled(struct rq *rq) 2582 { 2583 return 0; 2584 } 2585 2586 #endif /* CONFIG_SCHED_HRTICK */ 2587 2588 #ifndef arch_scale_freq_tick 2589 static __always_inline 2590 void arch_scale_freq_tick(void) 2591 { 2592 } 2593 #endif 2594 2595 #ifndef arch_scale_freq_capacity 2596 /** 2597 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. 2598 * @cpu: the CPU in question. 2599 * 2600 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. 2601 * 2602 * f_curr 2603 * ------ * SCHED_CAPACITY_SCALE 2604 * f_max 2605 */ 2606 static __always_inline 2607 unsigned long arch_scale_freq_capacity(int cpu) 2608 { 2609 return SCHED_CAPACITY_SCALE; 2610 } 2611 #endif 2612 2613 #ifdef CONFIG_SCHED_DEBUG 2614 /* 2615 * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to 2616 * acquire rq lock instead of rq_lock(). So at the end of these two functions 2617 * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of 2618 * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning. 2619 */ 2620 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) 2621 { 2622 rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 2623 /* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */ 2624 #ifdef CONFIG_SMP 2625 rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 2626 #endif 2627 } 2628 #else 2629 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {} 2630 #endif 2631 2632 #define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) \ 2633 __DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \ 2634 static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \ 2635 { class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \ 2636 _lock; return _t; } 2637 2638 #ifdef CONFIG_SMP 2639 2640 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) 2641 { 2642 #ifdef CONFIG_SCHED_CORE 2643 /* 2644 * In order to not have {0,2},{1,3} turn into into an AB-BA, 2645 * order by core-id first and cpu-id second. 2646 * 2647 * Notably: 2648 * 2649 * double_rq_lock(0,3); will take core-0, core-1 lock 2650 * double_rq_lock(1,2); will take core-1, core-0 lock 2651 * 2652 * when only cpu-id is considered. 2653 */ 2654 if (rq1->core->cpu < rq2->core->cpu) 2655 return true; 2656 if (rq1->core->cpu > rq2->core->cpu) 2657 return false; 2658 2659 /* 2660 * __sched_core_flip() relies on SMT having cpu-id lock order. 
2661 */ 2662 #endif 2663 return rq1->cpu < rq2->cpu; 2664 } 2665 2666 extern void double_rq_lock(struct rq *rq1, struct rq *rq2); 2667 2668 #ifdef CONFIG_PREEMPTION 2669 2670 /* 2671 * fair double_lock_balance: Safely acquires both rq->locks in a fair 2672 * way at the expense of forcing extra atomic operations in all 2673 * invocations. This assures that the double_lock is acquired using the 2674 * same underlying policy as the spinlock_t on this architecture, which 2675 * reduces latency compared to the unfair variant below. However, it 2676 * also adds more overhead and therefore may reduce throughput. 2677 */ 2678 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2679 __releases(this_rq->lock) 2680 __acquires(busiest->lock) 2681 __acquires(this_rq->lock) 2682 { 2683 raw_spin_rq_unlock(this_rq); 2684 double_rq_lock(this_rq, busiest); 2685 2686 return 1; 2687 } 2688 2689 #else 2690 /* 2691 * Unfair double_lock_balance: Optimizes throughput at the expense of 2692 * latency by eliminating extra atomic operations when the locks are 2693 * already in proper order on entry. This favors lower CPU-ids and will 2694 * grant the double lock to lower CPUs over higher ids under contention, 2695 * regardless of entry order into the function. 2696 */ 2697 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2698 __releases(this_rq->lock) 2699 __acquires(busiest->lock) 2700 __acquires(this_rq->lock) 2701 { 2702 if (__rq_lockp(this_rq) == __rq_lockp(busiest) || 2703 likely(raw_spin_rq_trylock(busiest))) { 2704 double_rq_clock_clear_update(this_rq, busiest); 2705 return 0; 2706 } 2707 2708 if (rq_order_less(this_rq, busiest)) { 2709 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); 2710 double_rq_clock_clear_update(this_rq, busiest); 2711 return 0; 2712 } 2713 2714 raw_spin_rq_unlock(this_rq); 2715 double_rq_lock(this_rq, busiest); 2716 2717 return 1; 2718 } 2719 2720 #endif /* CONFIG_PREEMPTION */ 2721 2722 /* 2723 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 
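 *
 * A nonzero return means this_rq->lock was dropped and re-acquired while
 * taking busiest->lock, so callers must re-validate anything they read under
 * it. Usage sketch (recheck_state() is a hypothetical stand-in for that
 * re-validation, not a real helper):
 *
 *	if (double_lock_balance(this_rq, busiest))
 *		recheck_state(this_rq);
 *	...
 *	double_unlock_balance(this_rq, busiest);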
2724 */ 2725 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2726 { 2727 lockdep_assert_irqs_disabled(); 2728 2729 return _double_lock_balance(this_rq, busiest); 2730 } 2731 2732 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2733 __releases(busiest->lock) 2734 { 2735 if (__rq_lockp(this_rq) != __rq_lockp(busiest)) 2736 raw_spin_rq_unlock(busiest); 2737 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); 2738 } 2739 2740 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 2741 { 2742 if (l1 > l2) 2743 swap(l1, l2); 2744 2745 spin_lock(l1); 2746 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2747 } 2748 2749 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 2750 { 2751 if (l1 > l2) 2752 swap(l1, l2); 2753 2754 spin_lock_irq(l1); 2755 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2756 } 2757 2758 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2759 { 2760 if (l1 > l2) 2761 swap(l1, l2); 2762 2763 raw_spin_lock(l1); 2764 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2765 } 2766 2767 static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2768 { 2769 raw_spin_unlock(l1); 2770 raw_spin_unlock(l2); 2771 } 2772 2773 DEFINE_LOCK_GUARD_2(double_raw_spinlock, raw_spinlock_t, 2774 double_raw_lock(_T->lock, _T->lock2), 2775 double_raw_unlock(_T->lock, _T->lock2)) 2776 2777 /* 2778 * double_rq_unlock - safely unlock two runqueues 2779 * 2780 * Note this does not restore interrupts like task_rq_unlock, 2781 * you need to do so manually after calling. 2782 */ 2783 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2784 __releases(rq1->lock) 2785 __releases(rq2->lock) 2786 { 2787 if (__rq_lockp(rq1) != __rq_lockp(rq2)) 2788 raw_spin_rq_unlock(rq2); 2789 else 2790 __release(rq2->lock); 2791 raw_spin_rq_unlock(rq1); 2792 } 2793 2794 extern void set_rq_online (struct rq *rq); 2795 extern void set_rq_offline(struct rq *rq); 2796 extern bool sched_smp_initialized; 2797 2798 #else /* CONFIG_SMP */ 2799 2800 /* 2801 * double_rq_lock - safely lock two runqueues 2802 * 2803 * Note this does not disable interrupts like task_rq_lock, 2804 * you need to do so manually before calling. 2805 */ 2806 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2807 __acquires(rq1->lock) 2808 __acquires(rq2->lock) 2809 { 2810 WARN_ON_ONCE(!irqs_disabled()); 2811 WARN_ON_ONCE(rq1 != rq2); 2812 raw_spin_rq_lock(rq1); 2813 __acquire(rq2->lock); /* Fake it out ;) */ 2814 double_rq_clock_clear_update(rq1, rq2); 2815 } 2816 2817 /* 2818 * double_rq_unlock - safely unlock two runqueues 2819 * 2820 * Note this does not restore interrupts like task_rq_unlock, 2821 * you need to do so manually after calling. 
2822 */ 2823 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2824 __releases(rq1->lock) 2825 __releases(rq2->lock) 2826 { 2827 WARN_ON_ONCE(rq1 != rq2); 2828 raw_spin_rq_unlock(rq1); 2829 __release(rq2->lock); 2830 } 2831 2832 #endif 2833 2834 DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq, 2835 double_rq_lock(_T->lock, _T->lock2), 2836 double_rq_unlock(_T->lock, _T->lock2)) 2837 2838 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2839 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 2840 2841 #ifdef CONFIG_SCHED_DEBUG 2842 extern bool sched_debug_verbose; 2843 2844 extern void print_cfs_stats(struct seq_file *m, int cpu); 2845 extern void print_rt_stats(struct seq_file *m, int cpu); 2846 extern void print_dl_stats(struct seq_file *m, int cpu); 2847 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2848 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2849 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2850 2851 extern void resched_latency_warn(int cpu, u64 latency); 2852 #ifdef CONFIG_NUMA_BALANCING 2853 extern void 2854 show_numa_stats(struct task_struct *p, struct seq_file *m); 2855 extern void 2856 print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2857 unsigned long tpf, unsigned long gsf, unsigned long gpf); 2858 #endif /* CONFIG_NUMA_BALANCING */ 2859 #else 2860 static inline void resched_latency_warn(int cpu, u64 latency) {} 2861 #endif /* CONFIG_SCHED_DEBUG */ 2862 2863 extern void init_cfs_rq(struct cfs_rq *cfs_rq); 2864 extern void init_rt_rq(struct rt_rq *rt_rq); 2865 extern void init_dl_rq(struct dl_rq *dl_rq); 2866 2867 extern void cfs_bandwidth_usage_inc(void); 2868 extern void cfs_bandwidth_usage_dec(void); 2869 2870 #ifdef CONFIG_NO_HZ_COMMON 2871 #define NOHZ_BALANCE_KICK_BIT 0 2872 #define NOHZ_STATS_KICK_BIT 1 2873 #define NOHZ_NEWILB_KICK_BIT 2 2874 #define NOHZ_NEXT_KICK_BIT 3 2875 2876 /* Run rebalance_domains() */ 2877 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2878 /* Update blocked load */ 2879 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2880 /* Update blocked load when entering idle */ 2881 #define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT) 2882 /* Update nohz.next_balance */ 2883 #define NOHZ_NEXT_KICK BIT(NOHZ_NEXT_KICK_BIT) 2884 2885 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK) 2886 2887 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 2888 2889 extern void nohz_balance_exit_idle(struct rq *rq); 2890 #else 2891 static inline void nohz_balance_exit_idle(struct rq *rq) { } 2892 #endif 2893 2894 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 2895 extern void nohz_run_idle_balance(int cpu); 2896 #else 2897 static inline void nohz_run_idle_balance(int cpu) { } 2898 #endif 2899 2900 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 2901 struct irqtime { 2902 u64 total; 2903 u64 tick_delta; 2904 u64 irq_start_time; 2905 struct u64_stats_sync sync; 2906 }; 2907 2908 DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 2909 2910 /* 2911 * Returns the irqtime minus the softirq time computed by ksoftirqd. 2912 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime subtracted 2913 * from it and never move forward.
2914 */ 2915 static inline u64 irq_time_read(int cpu) 2916 { 2917 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 2918 unsigned int seq; 2919 u64 total; 2920 2921 do { 2922 seq = __u64_stats_fetch_begin(&irqtime->sync); 2923 total = irqtime->total; 2924 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 2925 2926 return total; 2927 } 2928 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2929 2930 #ifdef CONFIG_CPU_FREQ 2931 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2932 2933 /** 2934 * cpufreq_update_util - Take a note about CPU utilization changes. 2935 * @rq: Runqueue to carry out the update for. 2936 * @flags: Update reason flags. 2937 * 2938 * This function is called by the scheduler on the CPU whose utilization is 2939 * being updated. 2940 * 2941 * It can only be called from RCU-sched read-side critical sections. 2942 * 2943 * The way cpufreq is currently arranged requires it to evaluate the CPU 2944 * performance state (frequency/voltage) on a regular basis to prevent it from 2945 * being stuck in a completely inadequate performance level for too long. 2946 * That is not guaranteed to happen if the updates are only triggered from CFS 2947 * and DL, though, because those updates may not be coming in if RT tasks are 2948 * the only tasks active all the time. 2949 * 2950 * As a workaround for that issue, this function is called periodically by the 2951 * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2952 * but that really is a band-aid. Going forward it should be replaced with 2953 * solutions targeted more specifically at RT tasks. 2954 */ 2955 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2956 { 2957 struct update_util_data *data; 2958 2959 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2960 cpu_of(rq))); 2961 if (data) 2962 data->func(data, rq_clock(rq), flags); 2963 } 2964 #else 2965 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2966 #endif /* CONFIG_CPU_FREQ */ 2967 2968 #ifdef arch_scale_freq_capacity 2969 # ifndef arch_scale_freq_invariant 2970 # define arch_scale_freq_invariant() true 2971 # endif 2972 #else 2973 # define arch_scale_freq_invariant() false 2974 #endif 2975 2976 #ifdef CONFIG_SMP 2977 static inline unsigned long capacity_orig_of(int cpu) 2978 { 2979 return cpu_rq(cpu)->cpu_capacity_orig; 2980 } 2981 2982 /** 2983 * enum cpu_util_type - CPU utilization type 2984 * @FREQUENCY_UTIL: Utilization used to select frequency 2985 * @ENERGY_UTIL: Utilization used during energy calculation 2986 * 2987 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 2988 * need to be aggregated differently depending on the usage made of them. This 2989 * enum is used within effective_cpu_util() to differentiate the types of 2990 * utilization expected by the callers, and adjust the aggregation accordingly. 2991 */ 2992 enum cpu_util_type { 2993 FREQUENCY_UTIL, 2994 ENERGY_UTIL, 2995 }; 2996 2997 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 2998 enum cpu_util_type type, 2999 struct task_struct *p); 3000 3001 /* 3002 * Verify the fitness of task @p to run on @cpu taking into account the 3003 * CPU's original capacity and the runtime/deadline ratio of the task. 3004 * 3005 * The function will return true if the original capacity of @cpu is 3006 * greater than or equal to the task's deadline density right shifted by 3007 * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise.
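 *
 * Worked example (illustrative numbers only): a task with 3ms runtime per
 * 10ms deadline has dl_density ~= (3 * BW_UNIT) / 10 ~= 314572; shifted
 * right by (BW_SHIFT - SCHED_CAPACITY_SHIFT) = 10 that is ~307, so any CPU
 * whose original capacity is at least ~307 (out of 1024) fits.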
3008 */ 3009 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) 3010 { 3011 unsigned long cap = arch_scale_cpu_capacity(cpu); 3012 3013 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); 3014 } 3015 3016 static inline unsigned long cpu_bw_dl(struct rq *rq) 3017 { 3018 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 3019 } 3020 3021 static inline unsigned long cpu_util_dl(struct rq *rq) 3022 { 3023 return READ_ONCE(rq->avg_dl.util_avg); 3024 } 3025 3026 3027 extern unsigned long cpu_util_cfs(int cpu); 3028 extern unsigned long cpu_util_cfs_boost(int cpu); 3029 3030 static inline unsigned long cpu_util_rt(struct rq *rq) 3031 { 3032 return READ_ONCE(rq->avg_rt.util_avg); 3033 } 3034 #endif 3035 3036 #ifdef CONFIG_UCLAMP_TASK 3037 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 3038 3039 static inline unsigned long uclamp_rq_get(struct rq *rq, 3040 enum uclamp_id clamp_id) 3041 { 3042 return READ_ONCE(rq->uclamp[clamp_id].value); 3043 } 3044 3045 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, 3046 unsigned int value) 3047 { 3048 WRITE_ONCE(rq->uclamp[clamp_id].value, value); 3049 } 3050 3051 static inline bool uclamp_rq_is_idle(struct rq *rq) 3052 { 3053 return rq->uclamp_flags & UCLAMP_FLAG_IDLE; 3054 } 3055 3056 /** 3057 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. 3058 * @rq: The rq to clamp against. Must not be NULL. 3059 * @util: The util value to clamp. 3060 * @p: The task to clamp against. Can be NULL if you want to clamp 3061 * against @rq only. 3062 * 3063 * Clamps the passed @util to the max(@rq, @p) effective uclamp values. 3064 * 3065 * If sched_uclamp_used static key is disabled, then just return the util 3066 * without any clamping since uclamp aggregation at the rq level in the fast 3067 * path is disabled, rendering this operation a NOP. 3068 * 3069 * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It 3070 * will return the correct effective uclamp value of the task even if the 3071 * static key is disabled. 3072 */ 3073 static __always_inline 3074 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 3075 struct task_struct *p) 3076 { 3077 unsigned long min_util = 0; 3078 unsigned long max_util = 0; 3079 3080 if (!static_branch_likely(&sched_uclamp_used)) 3081 return util; 3082 3083 if (p) { 3084 min_util = uclamp_eff_value(p, UCLAMP_MIN); 3085 max_util = uclamp_eff_value(p, UCLAMP_MAX); 3086 3087 /* 3088 * Ignore last runnable task's max clamp, as this task will 3089 * reset it. Similarly, no need to read the rq's min clamp. 3090 */ 3091 if (uclamp_rq_is_idle(rq)) 3092 goto out; 3093 } 3094 3095 min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN)); 3096 max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX)); 3097 out: 3098 /* 3099 * Since CPU's {min,max}_util clamps are MAX aggregated considering 3100 * RUNNABLE tasks with _different_ clamps, we can end up with an 3101 * inversion. Fix it now when the clamps are applied. 3102 */ 3103 if (unlikely(min_util >= max_util)) 3104 return min_util; 3105 3106 return clamp(util, min_util, max_util); 3107 } 3108 3109 /* Is the rq being capped/throttled by uclamp_max? 
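 * (That is: the rq's aggregated UCLAMP_MAX value is below SCHED_CAPACITY_SCALE
 * and the CPU's CFS+RT utilization has already reached it.)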
*/ 3110 static inline bool uclamp_rq_is_capped(struct rq *rq) 3111 { 3112 unsigned long rq_util; 3113 unsigned long max_util; 3114 3115 if (!static_branch_likely(&sched_uclamp_used)) 3116 return false; 3117 3118 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); 3119 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); 3120 3121 return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util; 3122 } 3123 3124 /* 3125 * When uclamp is compiled in, the aggregation at rq level is 'turned off' 3126 * by default in the fast path and only gets turned on once userspace performs 3127 * an operation that requires it. 3128 * 3129 * Returns true if userspace opted-in to use uclamp and aggregation at rq level 3130 * hence is active. 3131 */ 3132 static inline bool uclamp_is_used(void) 3133 { 3134 return static_branch_likely(&sched_uclamp_used); 3135 } 3136 #else /* CONFIG_UCLAMP_TASK */ 3137 static inline unsigned long uclamp_eff_value(struct task_struct *p, 3138 enum uclamp_id clamp_id) 3139 { 3140 if (clamp_id == UCLAMP_MIN) 3141 return 0; 3142 3143 return SCHED_CAPACITY_SCALE; 3144 } 3145 3146 static inline 3147 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 3148 struct task_struct *p) 3149 { 3150 return util; 3151 } 3152 3153 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } 3154 3155 static inline bool uclamp_is_used(void) 3156 { 3157 return false; 3158 } 3159 3160 static inline unsigned long uclamp_rq_get(struct rq *rq, 3161 enum uclamp_id clamp_id) 3162 { 3163 if (clamp_id == UCLAMP_MIN) 3164 return 0; 3165 3166 return SCHED_CAPACITY_SCALE; 3167 } 3168 3169 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, 3170 unsigned int value) 3171 { 3172 } 3173 3174 static inline bool uclamp_rq_is_idle(struct rq *rq) 3175 { 3176 return false; 3177 } 3178 #endif /* CONFIG_UCLAMP_TASK */ 3179 3180 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 3181 static inline unsigned long cpu_util_irq(struct rq *rq) 3182 { 3183 return rq->avg_irq.util_avg; 3184 } 3185 3186 static inline 3187 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 3188 { 3189 util *= (max - irq); 3190 util /= max; 3191 3192 return util; 3193 3194 } 3195 #else 3196 static inline unsigned long cpu_util_irq(struct rq *rq) 3197 { 3198 return 0; 3199 } 3200 3201 static inline 3202 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 3203 { 3204 return util; 3205 } 3206 #endif 3207 3208 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 3209 3210 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 3211 3212 DECLARE_STATIC_KEY_FALSE(sched_energy_present); 3213 3214 static inline bool sched_energy_enabled(void) 3215 { 3216 return static_branch_unlikely(&sched_energy_present); 3217 } 3218 3219 #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 3220 3221 #define perf_domain_span(pd) NULL 3222 static inline bool sched_energy_enabled(void) { return false; } 3223 3224 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 3225 3226 #ifdef CONFIG_MEMBARRIER 3227 /* 3228 * The scheduler provides memory barriers required by membarrier between: 3229 * - prior user-space memory accesses and store to rq->membarrier_state, 3230 * - store to rq->membarrier_state and following user-space memory accesses. 3231 * In the same way it provides those guarantees around store to rq->curr. 
3232 */ 3233 static inline void membarrier_switch_mm(struct rq *rq, 3234 struct mm_struct *prev_mm, 3235 struct mm_struct *next_mm) 3236 { 3237 int membarrier_state; 3238 3239 if (prev_mm == next_mm) 3240 return; 3241 3242 membarrier_state = atomic_read(&next_mm->membarrier_state); 3243 if (READ_ONCE(rq->membarrier_state) == membarrier_state) 3244 return; 3245 3246 WRITE_ONCE(rq->membarrier_state, membarrier_state); 3247 } 3248 #else 3249 static inline void membarrier_switch_mm(struct rq *rq, 3250 struct mm_struct *prev_mm, 3251 struct mm_struct *next_mm) 3252 { 3253 } 3254 #endif 3255 3256 #ifdef CONFIG_SMP 3257 static inline bool is_per_cpu_kthread(struct task_struct *p) 3258 { 3259 if (!(p->flags & PF_KTHREAD)) 3260 return false; 3261 3262 if (p->nr_cpus_allowed != 1) 3263 return false; 3264 3265 return true; 3266 } 3267 #endif 3268 3269 extern void swake_up_all_locked(struct swait_queue_head *q); 3270 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 3271 3272 #ifdef CONFIG_PREEMPT_DYNAMIC 3273 extern int preempt_dynamic_mode; 3274 extern int sched_dynamic_mode(const char *str); 3275 extern void sched_dynamic_update(int mode); 3276 #endif 3277 3278 static inline void update_current_exec_runtime(struct task_struct *curr, 3279 u64 now, u64 delta_exec) 3280 { 3281 curr->se.sum_exec_runtime += delta_exec; 3282 account_group_exec_runtime(curr, delta_exec); 3283 3284 curr->se.exec_start = now; 3285 cgroup_account_cputime(curr, delta_exec); 3286 } 3287 3288 #ifdef CONFIG_SCHED_MM_CID 3289 3290 #define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */ 3291 #define MM_CID_SCAN_DELAY 100 /* 100ms */ 3292 3293 extern raw_spinlock_t cid_lock; 3294 extern int use_cid_lock; 3295 3296 extern void sched_mm_cid_migrate_from(struct task_struct *t); 3297 extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t); 3298 extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr); 3299 extern void init_sched_mm_cid(struct task_struct *t); 3300 3301 static inline void __mm_cid_put(struct mm_struct *mm, int cid) 3302 { 3303 if (cid < 0) 3304 return; 3305 cpumask_clear_cpu(cid, mm_cidmask(mm)); 3306 } 3307 3308 /* 3309 * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to 3310 * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to 3311 * be held to transition to other states. 3312 * 3313 * State transitions synchronized with cmpxchg or try_cmpxchg need to be 3314 * consistent across cpus, which prevents use of this_cpu_cmpxchg. 3315 */ 3316 static inline void mm_cid_put_lazy(struct task_struct *t) 3317 { 3318 struct mm_struct *mm = t->mm; 3319 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3320 int cid; 3321 3322 lockdep_assert_irqs_disabled(); 3323 cid = __this_cpu_read(pcpu_cid->cid); 3324 if (!mm_cid_is_lazy_put(cid) || 3325 !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) 3326 return; 3327 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3328 } 3329 3330 static inline int mm_cid_pcpu_unset(struct mm_struct *mm) 3331 { 3332 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3333 int cid, res; 3334 3335 lockdep_assert_irqs_disabled(); 3336 cid = __this_cpu_read(pcpu_cid->cid); 3337 for (;;) { 3338 if (mm_cid_is_unset(cid)) 3339 return MM_CID_UNSET; 3340 /* 3341 * Attempt transition from valid or lazy-put to unset. 
3342 */ 3343 res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET); 3344 if (res == cid) 3345 break; 3346 cid = res; 3347 } 3348 return cid; 3349 } 3350 3351 static inline void mm_cid_put(struct mm_struct *mm) 3352 { 3353 int cid; 3354 3355 lockdep_assert_irqs_disabled(); 3356 cid = mm_cid_pcpu_unset(mm); 3357 if (cid == MM_CID_UNSET) 3358 return; 3359 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3360 } 3361 3362 static inline int __mm_cid_try_get(struct mm_struct *mm) 3363 { 3364 struct cpumask *cpumask; 3365 int cid; 3366 3367 cpumask = mm_cidmask(mm); 3368 /* 3369 * Retry finding first zero bit if the mask is temporarily 3370 * filled. This only happens during concurrent remote-clear 3371 * which owns a cid without holding a rq lock. 3372 */ 3373 for (;;) { 3374 cid = cpumask_first_zero(cpumask); 3375 if (cid < nr_cpu_ids) 3376 break; 3377 cpu_relax(); 3378 } 3379 if (cpumask_test_and_set_cpu(cid, cpumask)) 3380 return -1; 3381 return cid; 3382 } 3383 3384 /* 3385 * Save a snapshot of the current runqueue time of this cpu 3386 * with the per-cpu cid value, allowing to estimate how recently it was used. 3387 */ 3388 static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm) 3389 { 3390 struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq)); 3391 3392 lockdep_assert_rq_held(rq); 3393 WRITE_ONCE(pcpu_cid->time, rq->clock); 3394 } 3395 3396 static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm) 3397 { 3398 int cid; 3399 3400 /* 3401 * All allocations (even those using the cid_lock) are lock-free. If 3402 * use_cid_lock is set, hold the cid_lock to perform cid allocation to 3403 * guarantee forward progress. 3404 */ 3405 if (!READ_ONCE(use_cid_lock)) { 3406 cid = __mm_cid_try_get(mm); 3407 if (cid >= 0) 3408 goto end; 3409 raw_spin_lock(&cid_lock); 3410 } else { 3411 raw_spin_lock(&cid_lock); 3412 cid = __mm_cid_try_get(mm); 3413 if (cid >= 0) 3414 goto unlock; 3415 } 3416 3417 /* 3418 * cid concurrently allocated. Retry while forcing following 3419 * allocations to use the cid_lock to ensure forward progress. 3420 */ 3421 WRITE_ONCE(use_cid_lock, 1); 3422 /* 3423 * Set use_cid_lock before allocation. Only care about program order 3424 * because this is only required for forward progress. 3425 */ 3426 barrier(); 3427 /* 3428 * Retry until it succeeds. It is guaranteed to eventually succeed once 3429 * all newcoming allocations observe the use_cid_lock flag set. 3430 */ 3431 do { 3432 cid = __mm_cid_try_get(mm); 3433 cpu_relax(); 3434 } while (cid < 0); 3435 /* 3436 * Allocate before clearing use_cid_lock. Only care about 3437 * program order because this is for forward progress. 
3438 */ 3439 barrier(); 3440 WRITE_ONCE(use_cid_lock, 0); 3441 unlock: 3442 raw_spin_unlock(&cid_lock); 3443 end: 3444 mm_cid_snapshot_time(rq, mm); 3445 return cid; 3446 } 3447 3448 static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm) 3449 { 3450 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3451 struct cpumask *cpumask; 3452 int cid; 3453 3454 lockdep_assert_rq_held(rq); 3455 cpumask = mm_cidmask(mm); 3456 cid = __this_cpu_read(pcpu_cid->cid); 3457 if (mm_cid_is_valid(cid)) { 3458 mm_cid_snapshot_time(rq, mm); 3459 return cid; 3460 } 3461 if (mm_cid_is_lazy_put(cid)) { 3462 if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) 3463 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3464 } 3465 cid = __mm_cid_get(rq, mm); 3466 __this_cpu_write(pcpu_cid->cid, cid); 3467 return cid; 3468 } 3469 3470 static inline void switch_mm_cid(struct rq *rq, 3471 struct task_struct *prev, 3472 struct task_struct *next) 3473 { 3474 /* 3475 * Provide a memory barrier between rq->curr store and load of 3476 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition. 3477 * 3478 * Should be adapted if context_switch() is modified. 3479 */ 3480 if (!next->mm) { // to kernel 3481 /* 3482 * user -> kernel transition does not guarantee a barrier, but 3483 * we can use the fact that it performs an atomic operation in 3484 * mmgrab(). 3485 */ 3486 if (prev->mm) // from user 3487 smp_mb__after_mmgrab(); 3488 /* 3489 * kernel -> kernel transition does not change rq->curr->mm 3490 * state. It stays NULL. 3491 */ 3492 } else { // to user 3493 /* 3494 * kernel -> user transition does not provide a barrier 3495 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu]. 3496 * Provide it here. 3497 */ 3498 if (!prev->mm) // from kernel 3499 smp_mb(); 3500 /* 3501 * user -> user transition guarantees a memory barrier through 3502 * switch_mm() when current->mm changes. If current->mm is 3503 * unchanged, no barrier is needed. 3504 */ 3505 } 3506 if (prev->mm_cid_active) { 3507 mm_cid_snapshot_time(rq, prev->mm); 3508 mm_cid_put_lazy(prev); 3509 prev->mm_cid = -1; 3510 } 3511 if (next->mm_cid_active) 3512 next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm); 3513 } 3514 3515 #else 3516 static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { } 3517 static inline void sched_mm_cid_migrate_from(struct task_struct *t) { } 3518 static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { } 3519 static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { } 3520 static inline void init_sched_mm_cid(struct task_struct *t) { } 3521 #endif 3522 3523 extern u64 avg_vruntime(struct cfs_rq *cfs_rq); 3524 extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); 3525 3526 #endif /* _KERNEL_SCHED_SCHED_H */ 3527