/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#ifndef _KERNEL_SCHED_SCHED_H
#define _KERNEL_SCHED_SCHED_H

#include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/deadline.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task_flags.h>
#include <linux/sched/task.h>
#include <linux/sched/topology.h>

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cgroup_api.h>
#include <linux/cgroup.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpumask_api.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/fs_api.h>
#include <linux/hrtimer_api.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/jiffies.h>
#include <linux/kref_api.h>
#include <linux/kthread.h>
#include <linux/ktime_api.h>
#include <linux/lockdep_api.h>
#include <linux/lockdep.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex_api.h>
#include <linux/plist.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/seqlock.h>
#include <linux/softirq.h>
#include <linux/spinlock_api.h>
#include <linux/static_key.h>
#include <linux/stop_machine.h>
#include <linux/syscalls_api.h>
#include <linux/syscalls.h>
#include <linux/tick.h>
#include <linux/topology.h>
#include <linux/types.h>
#include <linux/u64_stats_sync_api.h>
#include <linux/uaccess.h>
#include <linux/wait_api.h>
#include <linux/wait_bit.h>
#include <linux/workqueue_api.h>

#include <trace/events/power.h>
#include <trace/events/sched.h>

#include "../workqueue_internal.h"

#ifdef CONFIG_CGROUP_SCHED
#include <linux/cgroup.h>
#include <linux/psi.h>
#endif

#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
#endif

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
# include <asm/paravirt_api_clock.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern unsigned int sysctl_sched_child_runs_first;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

extern void call_trace_sched_update_nr_running(struct rq *rq, int count);

extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
extern int sched_rr_timeslice;

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
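/*
 * Illustrative example (not part of the original source): with HZ == 1000,
 * NSEC_PER_SEC / HZ is 1000000, so NS_TO_JIFFIES(10 * NSEC_PER_MSEC)
 * evaluates to 10 -- i.e. 10ms of nanosecond-resolution time maps to 10
 * jiffies, with any sub-jiffy remainder truncated.
 */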
/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper task-group
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)					\
({								\
	unsigned long __w = (w);				\
								\
	if (__w)						\
		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT);	\
	__w;							\
})
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *  scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
	return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s)		((v)*(s) >> SCHED_CAPACITY_SHIFT)

static inline void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;

	*avg += diff / 8;
}

/*
 * Shifting a value by an exponent greater *or equal* to the size of said value
 * is UB; cap at size-1.
 */
#define shr_bound(val, shift)							\
	(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
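/*
 * Illustrative examples (not part of the original source):
 *
 * update_avg() keeps an exponentially weighted moving average with a 1/8
 * sample weight: starting from *avg == 0, a single sample of 800 moves the
 * average to 100; repeated identical samples converge the average towards
 * the sample value.
 *
 * shr_bound() clamps the shift amount, so e.g. shr_bound(x, 64) on a u64
 * shifts by 63 instead of invoking undefined behaviour.
 */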
/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is available
 * on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

#define SCHED_DL_FLAGS		(SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
				     const struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}
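/*
 * Illustrative example (not part of the original source): a "special"
 * SUGOV entity preempts any normal deadline entity regardless of deadlines;
 * between two normal entities the one with the earlier absolute deadline
 * wins, e.g. dl_entity_preempt(a, b) is true when a->deadline is 100us
 * and b->deadline is 200us (dl_time_before() also copes with wrap-around).
 */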
/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/*
 * To keep the bandwidth of -deadline tasks under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of each CPU;
 *  - cache the fraction of bandwidth that is currently allocated in
 *    each root domain;
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, bandwidth is given on a per root domain basis,
 * meaning that:
 *  - bw (< 100%) is the deadline bandwidth of each CPU;
 *  - total_bw is the currently allocated bandwidth in each root domain;
 */
struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int  dl_bw_check_overflow(int cpu);
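/*
 * Illustrative admission-control sketch (not the kernel's exact helper):
 * with dl_bw::bw holding the per-CPU bandwidth limit and dl_bw::total_bw
 * the bandwidth already allocated in the root domain, a new reservation
 * of new_bw over a root domain spanning "cpus" CPUs is, roughly,
 * admissible when:
 *
 *	total_bw + new_bw <= bw * cpus
 *
 * and is otherwise rejected so that the guarantees of already admitted
 * -deadline tasks are preserved.
 */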
#ifdef CONFIG_CGROUP_SCHED

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	u64			burst;
	u64			runtime_snap;
	s64			hierarchical_quota;

	u8			idle;
	u8			period_active;
	u8			slack_started;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	int			nr_burst;
	u64			throttled_time;
	u64			burst_time;
#endif
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

	/* A positive value indicates that this is a SCHED_IDLE group. */
	int			idle;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* The two decimal precision [%] value requested from user-space */
	unsigned int		uclamp_pct[UCLAMP_CNT];
	/* Clamp values requested for a task group */
	struct uclamp_se	uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a task group */
	struct uclamp_se	uclamp[UCLAMP_CNT];
#endif

};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL <<  1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
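/*
 * Illustrative visitor sketch (not part of the original source; names are
 * made up): a @down visitor that counts groups, paired with tg_nop for the
 * @up direction. A non-zero return from a visitor aborts the walk:
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &count);
 *	rcu_read_unlock();
 */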
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_release_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern int sched_group_set_idle(struct task_group *tg, long idle);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

extern void unregister_rt_sched_group(struct task_group *tg);
extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);

/*
 * u64_u32_load/u64_u32_store
 *
 * Use a copy of a u64 value to protect against data race. This is only
 * applicable for 32-bit architectures.
 */
#ifdef CONFIG_64BIT
# define u64_u32_load_copy(var, copy)		var
# define u64_u32_store_copy(var, copy, val)	(var = val)
#else
# define u64_u32_load_copy(var, copy)					\
({									\
	u64 __val, __val_copy;						\
	do {								\
		__val_copy = copy;					\
		/*							\
		 * paired with u64_u32_store_copy(), ordering access	\
		 * to var and copy.					\
		 */							\
		smp_rmb();						\
		__val = var;						\
	} while (__val != __val_copy);					\
	__val;								\
})
# define u64_u32_store_copy(var, copy, val)				\
do {									\
	typeof(val) __val = (val);					\
	var = __val;							\
	/*								\
	 * paired with u64_u32_load_copy(), ordering access to var and	\
	 * copy.							\
	 */								\
	smp_wmb();							\
	copy = __val;							\
} while (0)
#endif
#define u64_u32_load(var)	u64_u32_load_copy(var, var##_copy)
#define u64_u32_store(var, val)	u64_u32_store_copy(var, var##_copy, val)
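/*
 * Illustrative pairing (not part of the original source): on 32-bit, a
 * writer updates a value and its shadow copy with a write barrier in
 * between, e.g.:
 *
 *	u64_u32_store(cfs_rq->min_vruntime, new_value);
 *
 * while a lockless reader retries until value and shadow copy agree and
 * thus never observes a torn 64-bit value:
 *
 *	val = u64_u32_load(cfs_rq->min_vruntime);
 *
 * On 64-bit both macros degenerate to plain accesses.
 */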
/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned int		nr_running;
	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
	unsigned int		idle_nr_running;   /* SCHED_IDLE */
	unsigned int		idle_h_nr_running; /* SCHED_IDLE */

	u64			exec_clock;
	u64			min_vruntime;
#ifdef CONFIG_SCHED_CORE
	unsigned int		forceidle_seq;
	u64			min_vruntime_fi;
#endif

#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif

	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;
	struct sched_entity	*last;
	struct sched_entity	*skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_avg;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

	/* Locally cached copy of our task_group's idle value */
	int			idle;

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	s64			runtime_remaining;

	u64			throttled_pelt_idle;
#ifndef CONFIG_64BIT
	u64			throttled_pelt_idle_copy;
#endif
	u64			throttled_clock;
	u64			throttled_clock_pelt;
	u64			throttled_clock_pelt_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
#ifdef CONFIG_SMP
	struct list_head	throttled_csd_list;
#endif
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int		curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int		next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned int		rt_nr_migratory;
	unsigned int		rt_nr_total;
	int			overloaded;
	struct plist_head	pushable_tasks;

#endif /* CONFIG_SMP */
	int			rt_queued;

	int			rt_throttled;
	u64			rt_time;
	u64			rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned int		rt_nr_boosted;

	struct rq		*rq;
	struct task_group	*tg;
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
	return rt_rq->rt_queued && rt_rq->rt_nr_running;
}
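/*
 * Illustrative sketch (not part of the original source) of how the RT
 * priority array is typically used: enqueueing sets the priority bit and
 * appends to the matching list; picking scans for the first set bit,
 * i.e. the highest priority with queued tasks:
 *
 *	__set_bit(prio, array->bitmap);
 *	list_add_tail(&rt_se->run_list, array->queue + prio);
 *	...
 *	idx = sched_find_first_bit(array->bitmap);
 *	next = list_first_entry(array->queue + idx,
 *				struct sched_rt_entity, run_list);
 */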
/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned int		dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	unsigned int		dl_nr_migratory;
	int			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;
#else
	struct dl_bw		dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Maximum available bandwidth for reclaiming by SCHED_FLAG_RECLAIM
	 * tasks of this rq. Used in calculation of reclaimable bandwidth (GRUB).
	 */
	u64			max_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline void se_update_runnable(struct sched_entity *se)
{
	if (!entity_is_task(se))
		se->runnable_weight = se->my_q->h_nr_running;
}

static inline long se_runnable(struct sched_entity *se)
{
	if (entity_is_task(se))
		return !!se->on_rq;
	else
		return se->runnable_weight;
}

#else
#define entity_is_task(se)	1

static inline void se_update_runnable(struct sched_entity *se) {}

static inline long se_runnable(struct sched_entity *se)
{
	return !!se->on_rq;
}
#endif

#ifdef CONFIG_SMP
/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
	return scale_load_down(se->load.weight);
}


static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

struct perf_domain {
	struct em_perf_domain *em_pd;
	struct perf_domain *next;
	struct rcu_head rcu;
};
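/*
 * Illustrative traversal (not part of the original source): the perf
 * domain list hanging off a root domain is NULL-terminated and walked
 * under RCU, e.g.:
 *
 *	rcu_read_lock();
 *	for (pd = rcu_dereference(rd->pd); pd; pd = pd->next)
 *		...;
 *	rcu_read_unlock();
 */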
/* Scheduling group status flags */
#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
#define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/*
	 * Indicate pullable load on at least one CPU, e.g.:
	 * - More than one runnable task
	 * - Running task is misfit
	 */
	int			overload;

	/* Indicate one or more CPUs over-utilized (tipping point) */
	int			overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

	/*
	 * Indicate whether a root_domain's dl_bw has been checked or
	 * updated. It is a monotonically increasing value.
	 *
	 * Wrap-around is a theoretical corner case, but given that the
	 * counter is a u64 it is 'big enough' not to be a concern in
	 * practice.
	 */
	u64			visit_gen;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	unsigned long		max_cpu_capacity;

	/*
	 * NULL-terminated list of performance domains intersecting with the
	 * CPUs of the rd. Protected by RCU.
	 */
	struct perf_domain __rcu *pd;
};

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
	unsigned int value;
	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */

struct rq;
struct balance_callback {
	struct balance_callback *next;
	void (*func)(struct rq *rq);
};
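/*
 * Illustrative arithmetic (not part of the original source): with
 * SCHED_CAPACITY_SCALE == 1024, bits_per(1024) == 11, so the bucket value
 * fits in 11 bits and the RUNNABLE-task count gets the remaining
 * BITS_PER_LONG - 11 bits (53 on 64-bit), packing both fields into a
 * single unsigned long.
 */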
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the
 * locks in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		__lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
	unsigned int		numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
	call_single_data_t	nohz_csd;
#endif /* CONFIG_SMP */
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_SMP
	unsigned int		ttwu_pending;
#endif
	u64			nr_switches;

#ifdef CONFIG_UCLAMP_TASK
	/* Utilization clamp values based on CPU's RUNNABLE tasks */
	struct uclamp_rq	uclamp[UCLAMP_CNT] ____cacheline_aligned;
	unsigned int		uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endif

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned int		nr_uninterruptible;

	struct task_struct __rcu	*curr;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	/* Ensure that all clocks are in the same cache line */
	u64			clock_task ____cacheline_aligned;
	u64			clock_pelt;
	unsigned long		lost_idle_time;
	u64			clock_pelt_idle;
	u64			clock_idle;
#ifndef CONFIG_64BIT
	u64			clock_pelt_idle_copy;
	u64			clock_idle_copy;
#endif

	atomic_t		nr_iowait;

#ifdef CONFIG_SCHED_DEBUG
	u64			last_seen_need_resched_ns;
	int			ticks_without_resched;
#endif

#ifdef CONFIG_MEMBARRIER
	int			membarrier_state;
#endif

#ifdef CONFIG_SMP
	struct root_domain		*rd;
	struct sched_domain __rcu	*sd;

	unsigned long		cpu_capacity;
	unsigned long		cpu_capacity_orig;

	struct balance_callback *balance_callback;

	unsigned char		nohz_idle_balance;
	unsigned char		idle_balance;

	unsigned long		misfit_task_load;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head	cfs_tasks;

	struct sched_avg	avg_rt;
	struct sched_avg	avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	struct sched_avg	avg_irq;
#endif
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
	struct sched_avg	avg_thermal;
#endif
	u64			idle_stamp;
	u64			avg_idle;

	unsigned long		wake_stamp;
	u64			wake_avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;

#ifdef CONFIG_HOTPLUG_CPU
	struct rcuwait		hotplug_wait;
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	call_single_data_t	hrtick_csd;
#endif
	struct hrtimer		hrtick_timer;
	ktime_t			hrtick_time;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU lock section */
	struct cpuidle_state	*idle_state;
#endif

#ifdef CONFIG_SMP
	unsigned int		nr_pinned;
#endif
	unsigned int		push_busy;
	struct cpu_stop_work	push_work;

#ifdef CONFIG_SCHED_CORE
	/* per rq */
	struct rq		*core;
	struct task_struct	*core_pick;
	unsigned int		core_enabled;
	unsigned int		core_sched_seq;
	struct rb_root		core_tree;

	/* shared state -- careful with sched_core_cpu_deactivate() */
	unsigned int		core_task_seq;
	unsigned int		core_pick_seq;
	unsigned long		core_cookie;
	unsigned int		core_forceidle_count;
	unsigned int		core_forceidle_seq;
	unsigned int		core_forceidle_occupation;
	u64			core_forceidle_start;
#endif

	/* Scratch cpumask to be temporarily used under rq_lock */
	cpumask_var_t		scratch_mask;

#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
	call_single_data_t	cfsb_csd;
	struct list_head	cfsb_csd_list;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

#else

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#define MDF_PUSH		0x01

static inline bool is_migration_disabled(struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->migration_disabled;
#else
	return false;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)
struct sched_group;
#ifdef CONFIG_SCHED_CORE
static inline struct cpumask *sched_group_span(struct sched_group *sg);

DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);

static inline bool sched_core_enabled(struct rq *rq)
{
	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
}

static inline bool sched_core_disabled(void)
{
	return !static_branch_unlikely(&__sched_core_enabled);
}

/*
 * Be careful with this function; not for general use. The return value isn't
 * stable unless you actually hold a relevant rq->__lock.
 */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	if (sched_core_enabled(rq))
		return &rq->core->__lock;

	return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
	if (rq->core_enabled)
		return &rq->core->__lock;

	return &rq->__lock;
}

bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
			bool fi);
void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);

/*
 * Helpers to check if the CPU's core cookie matches with the task's cookie
 * when core scheduling is enabled.
 * A special case is that the task's cookie always matches with CPU's core
 * cookie if the CPU is in an idle core.
 */
static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	return rq->core->core_cookie == p->core_cookie;
}

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
	bool idle_core = true;
	int cpu;

	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
		if (!available_idle_cpu(cpu)) {
			idle_core = false;
			break;
		}
	}

	/*
	 * A CPU in an idle core is always the best choice for tasks with
	 * cookies.
	 */
	return idle_core || rq->core->core_cookie == p->core_cookie;
}
static inline bool sched_group_cookie_match(struct rq *rq,
					    struct task_struct *p,
					    struct sched_group *group)
{
	int cpu;

	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
		if (sched_core_cookie_match(cpu_rq(cpu), p))
			return true;
	}
	return false;
}

static inline bool sched_core_enqueued(struct task_struct *p)
{
	return !RB_EMPTY_NODE(&p->core_node);
}

extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);

extern void sched_core_get(void);
extern void sched_core_put(void);

#else /* !CONFIG_SCHED_CORE */

static inline bool sched_core_enabled(struct rq *rq)
{
	return false;
}

static inline bool sched_core_disabled(void)
{
	return true;
}

static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
	return &rq->__lock;
}

static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
	return true;
}

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
	return true;
}

static inline bool sched_group_cookie_match(struct rq *rq,
					    struct task_struct *p,
					    struct sched_group *group)
{
	return true;
}
#endif /* CONFIG_SCHED_CORE */

static inline void lockdep_assert_rq_held(struct rq *rq)
{
	lockdep_assert_held(__rq_lockp(rq));
}

extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
extern bool raw_spin_rq_trylock(struct rq *rq);
extern void raw_spin_rq_unlock(struct rq *rq);

static inline void raw_spin_rq_lock(struct rq *rq)
{
	raw_spin_rq_lock_nested(rq, 0);
}

static inline void raw_spin_rq_lock_irq(struct rq *rq)
{
	local_irq_disable();
	raw_spin_rq_lock(rq);
}

static inline void raw_spin_rq_unlock_irq(struct rq *rq)
{
	raw_spin_rq_unlock(rq);
	local_irq_enable();
}

static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
{
	unsigned long flags;

	local_irq_save(flags);
	raw_spin_rq_lock(rq);

	return flags;
}

static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
{
	raw_spin_rq_unlock(rq);
	local_irq_restore(flags);
}

#define raw_spin_rq_lock_irqsave(rq, flags)	\
do {						\
	flags = _raw_spin_rq_lock_irqsave(rq);	\
} while (0)
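/*
 * Illustrative usage (not part of the original source): the irqsave
 * variants pair like their raw_spinlock counterparts:
 *
 *	unsigned long flags;
 *
 *	raw_spin_rq_lock_irqsave(rq, flags);
 *	...
 *	raw_spin_rq_unlock_irqrestore(rq, flags);
 */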
#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
{
	SCHED_WARN_ON(!entity_is_task(se));
	return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

#else

#define task_of(_se)	container_of(_se, struct task_struct, se)

static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	const struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}
#endif

extern void update_rq_clock(struct rq *rq);

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *  call to __schedule(). This is an optimisation to avoid
 *  neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *  in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *  made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_task;
}
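/*
 * Illustrative pattern (not part of the original source): rq_clock() and
 * rq_clock_task() require the rq lock held and an updated clock, e.g.:
 *
 *	struct rq_flags rf;
 *	u64 now;
 *
 *	rq_lock(rq, &rf);
 *	update_rq_clock(rq);
 *	now = rq_clock_task(rq);
 *	...
 *	rq_unlock(rq, &rf);
 */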
/**
 * By default the decay is the default pelt decay period.
 * The decay shift can change the decay period in
 * multiples of 32.
 *  Decay shift		Decay period (ms)
 *	0			32
 *	1			64
 *	2			128
 *	3			256
 *	4			512
 */
extern int sched_thermal_decay_shift;

static inline u64 rq_clock_thermal(struct rq *rq)
{
	return rq_clock_task(rq) >> sched_thermal_decay_shift;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is canceled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

/*
 * During CPU offlining and rq-wide unthrottling, we can trigger an
 * update_rq_clock() for several cfs and rt runqueues (typically when
 * using list_for_each_entry_*).
 * rq_clock_start_loop_update() can be called after updating the clock
 * once and before iterating over the list to prevent multiple updates.
 * After the iterative traversal, we need to call rq_clock_stop_loop_update()
 * to clear RQCF_ACT_SKIP of rq->clock_update_flags.
 */
static inline void rq_clock_start_loop_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP);
	rq->clock_update_flags |= RQCF_ACT_SKIP;
}

static inline void rq_clock_stop_loop_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags &= ~RQCF_ACT_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

extern struct balance_callback balance_push_callback;

/*
 * Lockdep annotation that avoids accidental unlocks; it's like a
 * sticky/continuous lockdep_assert_held().
 *
 * This avoids code that has access to 'struct rq *rq' (basically everything in
 * the scheduler) from accidentally unlocking the rq if they do not also have a
 * copy of the (on-stack) 'struct rq_flags rf'.
 *
 * Also see Documentation/locking/lockdep-design.rst.
 */
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(__rq_lockp(rq));

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#ifdef CONFIG_SMP
	SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
#endif
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}
static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(__rq_lockp(rq), rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock_irqsave(rq, rf->flags);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock_irq(rq);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock(rq);
	rq_pin_lock(rq, rf);
}

static inline void
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock_irqrestore(rq, rf->flags);
}

static inline void
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock_irq(rq);
}

static inline void
rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
}

static inline struct rq *
this_rq_lock_irq(struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	rq_lock(rq, rf);

	return rq;
}
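/*
 * Illustrative pattern (not part of the original source): task_rq_lock()
 * takes p->pi_lock plus the task's rq lock and pins it; it must be paired
 * with task_rq_unlock():
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	...
 *	task_rq_unlock(rq, p, &rf);
 */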
#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
extern void sched_init_numa(int offline_node);
extern void sched_update_numa(int cpu, bool online);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
#else
static inline void sched_init_numa(int offline_node) { }
static inline void sched_update_numa(int cpu, bool online) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
	return nr_cpu_ids;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *p, struct task_struct *t,
			int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void
init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct balance_callback *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_rq_held(rq);

	/*
	 * Don't (re)queue an already queued item; nor queue anything when
	 * balance_push() is active, see the comment with
	 * balance_push_callback.
	 */
	if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
		return;

	head->func = func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
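/*
 * Illustrative sketch (not part of the original source; names are made
 * up): a callback is queued under the rq lock and invoked later by the
 * scheduler core once the lock is dropped:
 *
 *	static void my_balance_fn(struct rq *rq)
 *	{
 *		...
 *	}
 *
 *	static DEFINE_PER_CPU(struct balance_callback, my_cb);
 *
 *	// with rq's lock held:
 *	queue_balance_callback(rq, &per_cpu(my_cb, rq->cpu), my_balance_fn);
 */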
#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See destroy_sched_domains: call_rcu for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

/* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */
#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) |
static const unsigned int SD_SHARED_CHILD_MASK =
#include <linux/sched/sd_flags.h>
0;
#undef SD_FLAG

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The CPU whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given CPU.
 *
 * Returns the highest sched_domain of a CPU which contains @flag. If @flag has
 * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag) {
			hsd = sd;
			continue;
		}

		/*
		 * Stop the search if @flag is known to be shared at lower
		 * levels. It will not be found further up.
		 */
		if (flag & SD_SHARED_CHILD_MASK)
			break;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
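/*
 * Illustrative usage (not part of the original source): this is, for
 * example, roughly how the per-CPU LLC domain pointer below is derived
 * during topology setup -- the highest domain whose CPUs share cache
 * resources:
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 */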
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
extern struct static_key_false sched_asym_cpucapacity;

static __always_inline bool sched_asym_cpucap_active(void)
{
	return static_branch_unlikely(&sched_asym_cpucapacity);
}

struct sched_group_capacity {
	atomic_t		ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long		capacity;
	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
	unsigned long		max_capacity;		/* Max per-CPU capacity in group */
	unsigned long		next_update;
	int			imbalance;		/* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int			id;
#endif

	unsigned long		cpumask[];		/* Balance mask */
};

struct sched_group {
	struct sched_group	*next;			/* Must be a circular list */
	atomic_t		ref;

	unsigned int		group_weight;
	struct sched_group_capacity *sgc;
	int			asym_prefer_cpu;	/* CPU of highest priority in group */
	int			flags;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long		cpumask[];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

extern int group_balance_cpu(struct sched_group *sg);

#ifdef CONFIG_SCHED_DEBUG
void update_sched_domain_debugfs(void);
void dirty_sched_domain_sysctl(int cpu);
#else
static inline void update_sched_domain_debugfs(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
#endif

extern int sched_update_scaling(void);

static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
	if (!p->user_cpus_ptr)
		return cpu_possible_mask; /* &init_task.cpus_mask */
	return p->user_cpus_ptr;
}
#endif /* CONFIG_SMP */

#include "stats.h"

#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)

extern void __sched_core_account_forceidle(struct rq *rq);

static inline void sched_core_account_forceidle(struct rq *rq)
{
	if (schedstat_enabled())
		__sched_core_account_forceidle(rq);
}

extern void __sched_core_tick(struct rq *rq);

static inline void sched_core_tick(struct rq *rq)
{
	if (sched_core_enabled(rq) && schedstat_enabled())
		__sched_core_tick(rq);
}

#else

static inline void sched_core_account_forceidle(struct rq *rq) {}

static inline void sched_core_tick(struct rq *rq) {}

#endif /* CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS */

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
	p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */
We must ensure that updates of
2022 * per-task data have been completed by this moment.
2023 */
2024 smp_wmb();
2025 WRITE_ONCE(task_thread_info(p)->cpu, cpu);
2026 p->wake_cpu = cpu;
2027 #endif
2028 }
2029
2030 /*
2031 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
2032 */
2033 #ifdef CONFIG_SCHED_DEBUG
2034 # define const_debug __read_mostly
2035 #else
2036 # define const_debug const
2037 #endif
2038
2039 #define SCHED_FEAT(name, enabled) \
2040 __SCHED_FEAT_##name ,
2041
2042 enum {
2043 #include "features.h"
2044 __SCHED_FEAT_NR,
2045 };
2046
2047 #undef SCHED_FEAT
2048
2049 #ifdef CONFIG_SCHED_DEBUG
2050
2051 /*
2052 * To support run-time toggling of sched features, all the translation units
2053 * (except core.c) reference the sysctl_sched_features defined in core.c.
2054 */
2055 extern const_debug unsigned int sysctl_sched_features;
2056
2057 #ifdef CONFIG_JUMP_LABEL
2058 #define SCHED_FEAT(name, enabled) \
2059 static __always_inline bool static_branch_##name(struct static_key *key) \
2060 { \
2061 return static_key_##enabled(key); \
2062 }
2063
2064 #include "features.h"
2065 #undef SCHED_FEAT
2066
2067 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
2068 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
2069
2070 #else /* !CONFIG_JUMP_LABEL */
2071
2072 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
2073
2074 #endif /* CONFIG_JUMP_LABEL */
2075
2076 #else /* !SCHED_DEBUG */
2077
2078 /*
2079 * Each translation unit has its own copy of sysctl_sched_features to allow
2080 * constant propagation at compile time and compiler optimizations based on
2081 * the features' default values.
2082 */
2083 #define SCHED_FEAT(name, enabled) \
2084 (1UL << __SCHED_FEAT_##name) * enabled |
2085 static const_debug __maybe_unused unsigned int sysctl_sched_features =
2086 #include "features.h"
2087 0;
2088 #undef SCHED_FEAT
2089
2090 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
2091
2092 #endif /* SCHED_DEBUG */
2093
2094 extern struct static_key_false sched_numa_balancing;
2095 extern struct static_key_false sched_schedstats;
2096
2097 static inline u64 global_rt_period(void)
2098 {
2099 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
2100 }
2101
2102 static inline u64 global_rt_runtime(void)
2103 {
2104 if (sysctl_sched_rt_runtime < 0)
2105 return RUNTIME_INF;
2106
2107 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
2108 }
2109
2110 static inline int task_current(struct rq *rq, struct task_struct *p)
2111 {
2112 return rq->curr == p;
2113 }
2114
2115 static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
2116 {
2117 #ifdef CONFIG_SMP
2118 return p->on_cpu;
2119 #else
2120 return task_current(rq, p);
2121 #endif
2122 }
2123
2124 static inline int task_on_rq_queued(struct task_struct *p)
2125 {
2126 return p->on_rq == TASK_ON_RQ_QUEUED;
2127 }
2128
2129 static inline int task_on_rq_migrating(struct task_struct *p)
2130 {
2131 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
2132 }
2133
2134 /* Wake flags.
The first three directly map to some SD flag value */ 2135 #define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */ 2136 #define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */ 2137 #define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */ 2138 2139 #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */ 2140 #define WF_MIGRATED 0x20 /* Internal use, task got migrated */ 2141 2142 #ifdef CONFIG_SMP 2143 static_assert(WF_EXEC == SD_BALANCE_EXEC); 2144 static_assert(WF_FORK == SD_BALANCE_FORK); 2145 static_assert(WF_TTWU == SD_BALANCE_WAKE); 2146 #endif 2147 2148 /* 2149 * To aid in avoiding the subversion of "niceness" due to uneven distribution 2150 * of tasks with abnormal "nice" values across CPUs the contribution that 2151 * each task makes to its run queue's load is weighted according to its 2152 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 2153 * scaled version of the new time slice allocation that they receive on time 2154 * slice expiry etc. 2155 */ 2156 2157 #define WEIGHT_IDLEPRIO 3 2158 #define WMULT_IDLEPRIO 1431655765 2159 2160 extern const int sched_prio_to_weight[40]; 2161 extern const u32 sched_prio_to_wmult[40]; 2162 2163 /* 2164 * {de,en}queue flags: 2165 * 2166 * DEQUEUE_SLEEP - task is no longer runnable 2167 * ENQUEUE_WAKEUP - task just became runnable 2168 * 2169 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 2170 * are in a known state which allows modification. Such pairs 2171 * should preserve as much state as possible. 2172 * 2173 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 2174 * in the runqueue. 2175 * 2176 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 2177 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 2178 * ENQUEUE_MIGRATED - the task was migrated during wakeup 2179 * 2180 */ 2181 2182 #define DEQUEUE_SLEEP 0x01 2183 #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 2184 #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 2185 #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 2186 2187 #define ENQUEUE_WAKEUP 0x01 2188 #define ENQUEUE_RESTORE 0x02 2189 #define ENQUEUE_MOVE 0x04 2190 #define ENQUEUE_NOCLOCK 0x08 2191 2192 #define ENQUEUE_HEAD 0x10 2193 #define ENQUEUE_REPLENISH 0x20 2194 #ifdef CONFIG_SMP 2195 #define ENQUEUE_MIGRATED 0x40 2196 #else 2197 #define ENQUEUE_MIGRATED 0x00 2198 #endif 2199 2200 #define RETRY_TASK ((void *)-1UL) 2201 2202 struct affinity_context { 2203 const struct cpumask *new_mask; 2204 struct cpumask *user_mask; 2205 unsigned int flags; 2206 }; 2207 2208 struct sched_class { 2209 2210 #ifdef CONFIG_UCLAMP_TASK 2211 int uclamp_enabled; 2212 #endif 2213 2214 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 2215 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 2216 void (*yield_task) (struct rq *rq); 2217 bool (*yield_to_task)(struct rq *rq, struct task_struct *p); 2218 2219 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 2220 2221 struct task_struct *(*pick_next_task)(struct rq *rq); 2222 2223 void (*put_prev_task)(struct rq *rq, struct task_struct *p); 2224 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 2225 2226 #ifdef CONFIG_SMP 2227 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2228 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); 2229 2230 struct task_struct * (*pick_task)(struct rq *rq); 2231 
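/*
* Illustrative note (added for clarity; not in the original source):
* pick_task() is the side-effect-free variant of pick_next_task(),
* used by core scheduling to ask what this runqueue would run next
* without committing to it.
*/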
2232 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 2233 2234 void (*task_woken)(struct rq *this_rq, struct task_struct *task); 2235 2236 void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx); 2237 2238 void (*rq_online)(struct rq *rq); 2239 void (*rq_offline)(struct rq *rq); 2240 2241 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); 2242 #endif 2243 2244 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 2245 void (*task_fork)(struct task_struct *p); 2246 void (*task_dead)(struct task_struct *p); 2247 2248 /* 2249 * The switched_from() call is allowed to drop rq->lock, therefore we 2250 * cannot assume the switched_from/switched_to pair is serialized by 2251 * rq->lock. They are however serialized by p->pi_lock. 2252 */ 2253 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 2254 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 2255 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 2256 int oldprio); 2257 2258 unsigned int (*get_rr_interval)(struct rq *rq, 2259 struct task_struct *task); 2260 2261 void (*update_curr)(struct rq *rq); 2262 2263 #ifdef CONFIG_FAIR_GROUP_SCHED 2264 void (*task_change_group)(struct task_struct *p); 2265 #endif 2266 2267 #ifdef CONFIG_SCHED_CORE 2268 int (*task_is_throttled)(struct task_struct *p, int cpu); 2269 #endif 2270 }; 2271 2272 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 2273 { 2274 WARN_ON_ONCE(rq->curr != prev); 2275 prev->sched_class->put_prev_task(rq, prev); 2276 } 2277 2278 static inline void set_next_task(struct rq *rq, struct task_struct *next) 2279 { 2280 next->sched_class->set_next_task(rq, next, false); 2281 } 2282 2283 2284 /* 2285 * Helper to define a sched_class instance; each one is placed in a separate 2286 * section which is ordered by the linker script: 2287 * 2288 * include/asm-generic/vmlinux.lds.h 2289 * 2290 * *CAREFUL* they are laid out in *REVERSE* order!!! 2291 * 2292 * Also enforce alignment on the instance, not the type, to guarantee layout. 
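*
* For example (illustrative, mirroring fair.c):
*
* DEFINE_SCHED_CLASS(fair) = {
* .enqueue_task = enqueue_task_fair,
* ...
* };
*
* expands to a 'const struct sched_class fair_sched_class' placed in the
* "__fair_sched_class" section.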
2293 */ 2294 #define DEFINE_SCHED_CLASS(name) \ 2295 const struct sched_class name##_sched_class \ 2296 __aligned(__alignof__(struct sched_class)) \ 2297 __section("__" #name "_sched_class") 2298 2299 /* Defined in include/asm-generic/vmlinux.lds.h */ 2300 extern struct sched_class __sched_class_highest[]; 2301 extern struct sched_class __sched_class_lowest[]; 2302 2303 #define for_class_range(class, _from, _to) \ 2304 for (class = (_from); class < (_to); class++) 2305 2306 #define for_each_class(class) \ 2307 for_class_range(class, __sched_class_highest, __sched_class_lowest) 2308 2309 #define sched_class_above(_a, _b) ((_a) < (_b)) 2310 2311 extern const struct sched_class stop_sched_class; 2312 extern const struct sched_class dl_sched_class; 2313 extern const struct sched_class rt_sched_class; 2314 extern const struct sched_class fair_sched_class; 2315 extern const struct sched_class idle_sched_class; 2316 2317 static inline bool sched_stop_runnable(struct rq *rq) 2318 { 2319 return rq->stop && task_on_rq_queued(rq->stop); 2320 } 2321 2322 static inline bool sched_dl_runnable(struct rq *rq) 2323 { 2324 return rq->dl.dl_nr_running > 0; 2325 } 2326 2327 static inline bool sched_rt_runnable(struct rq *rq) 2328 { 2329 return rq->rt.rt_queued > 0; 2330 } 2331 2332 static inline bool sched_fair_runnable(struct rq *rq) 2333 { 2334 return rq->cfs.nr_running > 0; 2335 } 2336 2337 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2338 extern struct task_struct *pick_next_task_idle(struct rq *rq); 2339 2340 #define SCA_CHECK 0x01 2341 #define SCA_MIGRATE_DISABLE 0x02 2342 #define SCA_MIGRATE_ENABLE 0x04 2343 #define SCA_USER 0x08 2344 2345 #ifdef CONFIG_SMP 2346 2347 extern void update_group_capacity(struct sched_domain *sd, int cpu); 2348 2349 extern void trigger_load_balance(struct rq *rq); 2350 2351 extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx); 2352 2353 static inline struct task_struct *get_push_task(struct rq *rq) 2354 { 2355 struct task_struct *p = rq->curr; 2356 2357 lockdep_assert_rq_held(rq); 2358 2359 if (rq->push_busy) 2360 return NULL; 2361 2362 if (p->nr_cpus_allowed == 1) 2363 return NULL; 2364 2365 if (p->migration_disabled) 2366 return NULL; 2367 2368 rq->push_busy = true; 2369 return get_task_struct(p); 2370 } 2371 2372 extern int push_cpu_stop(void *arg); 2373 2374 #endif 2375 2376 #ifdef CONFIG_CPU_IDLE 2377 static inline void idle_set_state(struct rq *rq, 2378 struct cpuidle_state *idle_state) 2379 { 2380 rq->idle_state = idle_state; 2381 } 2382 2383 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2384 { 2385 SCHED_WARN_ON(!rcu_read_lock_held()); 2386 2387 return rq->idle_state; 2388 } 2389 #else 2390 static inline void idle_set_state(struct rq *rq, 2391 struct cpuidle_state *idle_state) 2392 { 2393 } 2394 2395 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2396 { 2397 return NULL; 2398 } 2399 #endif 2400 2401 extern void schedule_idle(void); 2402 asmlinkage void schedule_user(void); 2403 2404 extern void sysrq_sched_debug_show(void); 2405 extern void sched_init_granularity(void); 2406 extern void update_max_interval(void); 2407 2408 extern void init_sched_dl_class(void); 2409 extern void init_sched_rt_class(void); 2410 extern void init_sched_fair_class(void); 2411 2412 extern void reweight_task(struct task_struct *p, int prio); 2413 2414 extern void resched_curr(struct rq *rq); 2415 extern void resched_cpu(int cpu); 2416 2417 extern 
struct rt_bandwidth def_rt_bandwidth;
2418 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
2419 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
2420
2421 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
2422 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
2423
2424 #define BW_SHIFT 20
2425 #define BW_UNIT (1 << BW_SHIFT)
2426 #define RATIO_SHIFT 8
2427 #define MAX_BW_BITS (64 - BW_SHIFT)
2428 #define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
2429 unsigned long to_ratio(u64 period, u64 runtime);
2430
2431 extern void init_entity_runnable_average(struct sched_entity *se);
2432 extern void post_init_entity_util_avg(struct task_struct *p);
2433
2434 #ifdef CONFIG_NO_HZ_FULL
2435 extern bool sched_can_stop_tick(struct rq *rq);
2436 extern int __init sched_tick_offload_init(void);
2437
2438 /*
2439 * The tick may be needed by tasks in the runqueue, depending on their policy
2440 * and requirements. If the tick is needed, send the target CPU an IPI to kick
2441 * it out of nohz mode.
2442 */
2443 static inline void sched_update_tick_dependency(struct rq *rq)
2444 {
2445 int cpu = cpu_of(rq);
2446
2447 if (!tick_nohz_full_cpu(cpu))
2448 return;
2449
2450 if (sched_can_stop_tick(rq))
2451 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
2452 else
2453 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
2454 }
2455 #else
2456 static inline int sched_tick_offload_init(void) { return 0; }
2457 static inline void sched_update_tick_dependency(struct rq *rq) { }
2458 #endif
2459
2460 static inline void add_nr_running(struct rq *rq, unsigned count)
2461 {
2462 unsigned prev_nr = rq->nr_running;
2463
2464 rq->nr_running = prev_nr + count;
2465 if (trace_sched_update_nr_running_tp_enabled()) {
2466 call_trace_sched_update_nr_running(rq, count);
2467 }
2468
2469 #ifdef CONFIG_SMP
2470 if (prev_nr < 2 && rq->nr_running >= 2) {
2471 if (!READ_ONCE(rq->rd->overload))
2472 WRITE_ONCE(rq->rd->overload, 1);
2473 }
2474 #endif
2475
2476 sched_update_tick_dependency(rq);
2477 }
2478
2479 static inline void sub_nr_running(struct rq *rq, unsigned count)
2480 {
2481 rq->nr_running -= count;
2482 if (trace_sched_update_nr_running_tp_enabled()) {
2483 call_trace_sched_update_nr_running(rq, -count);
2484 }
2485
2486 /* Check if we still need the tick */
2487 sched_update_tick_dependency(rq);
2488 }
2489
2490 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2491 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2492
2493 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
2494
2495 #ifdef CONFIG_PREEMPT_RT
2496 #define SCHED_NR_MIGRATE_BREAK 8
2497 #else
2498 #define SCHED_NR_MIGRATE_BREAK 32
2499 #endif
2500
2501 extern const_debug unsigned int sysctl_sched_nr_migrate;
2502 extern const_debug unsigned int sysctl_sched_migration_cost;
2503
2504 #ifdef CONFIG_SCHED_DEBUG
2505 extern unsigned int sysctl_sched_latency;
2506 extern unsigned int sysctl_sched_min_granularity;
2507 extern unsigned int sysctl_sched_idle_min_granularity;
2508 extern unsigned int sysctl_sched_wakeup_granularity;
2509 extern int sysctl_resched_latency_warn_ms;
2510 extern int sysctl_resched_latency_warn_once;
2511
2512 extern unsigned int sysctl_sched_tunable_scaling;
2513
2514 extern unsigned int sysctl_numa_balancing_scan_delay;
2515 extern unsigned int sysctl_numa_balancing_scan_period_min;
2516 extern unsigned int sysctl_numa_balancing_scan_period_max;
2517 extern unsigned int
sysctl_numa_balancing_scan_size;
2518 extern unsigned int sysctl_numa_balancing_hot_threshold;
2519 #endif
2520
2521 #ifdef CONFIG_SCHED_HRTICK
2522
2523 /*
2524 * Use hrtick when:
2525 * - enabled by features
2526 * - hrtimer is actually high res
2527 */
2528 static inline int hrtick_enabled(struct rq *rq)
2529 {
2530 if (!cpu_active(cpu_of(rq)))
2531 return 0;
2532 return hrtimer_is_hres_active(&rq->hrtick_timer);
2533 }
2534
2535 static inline int hrtick_enabled_fair(struct rq *rq)
2536 {
2537 if (!sched_feat(HRTICK))
2538 return 0;
2539 return hrtick_enabled(rq);
2540 }
2541
2542 static inline int hrtick_enabled_dl(struct rq *rq)
2543 {
2544 if (!sched_feat(HRTICK_DL))
2545 return 0;
2546 return hrtick_enabled(rq);
2547 }
2548
2549 void hrtick_start(struct rq *rq, u64 delay);
2550
2551 #else
2552
2553 static inline int hrtick_enabled_fair(struct rq *rq)
2554 {
2555 return 0;
2556 }
2557
2558 static inline int hrtick_enabled_dl(struct rq *rq)
2559 {
2560 return 0;
2561 }
2562
2563 static inline int hrtick_enabled(struct rq *rq)
2564 {
2565 return 0;
2566 }
2567
2568 #endif /* CONFIG_SCHED_HRTICK */
2569
2570 #ifndef arch_scale_freq_tick
2571 static __always_inline
2572 void arch_scale_freq_tick(void)
2573 {
2574 }
2575 #endif
2576
2577 #ifndef arch_scale_freq_capacity
2578 /**
2579 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
2580 * @cpu: the CPU in question.
2581 *
2582 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
2583 *
2584 * f_curr
2585 * ------ * SCHED_CAPACITY_SCALE
2586 * f_max
2587 */
2588 static __always_inline
2589 unsigned long arch_scale_freq_capacity(int cpu)
2590 {
2591 return SCHED_CAPACITY_SCALE;
2592 }
2593 #endif
2594
2595 #ifdef CONFIG_SCHED_DEBUG
2596 /*
2597 * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to
2598 * acquire rq lock instead of rq_lock(). So at the end of these two functions
2599 * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of
2600 * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning.
2601 */
2602 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
2603 {
2604 rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
2605 /* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */
2606 #ifdef CONFIG_SMP
2607 rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
2608 #endif
2609 }
2610 #else
2611 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {}
2612 #endif
2613
2614 #ifdef CONFIG_SMP
2615
2616 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
2617 {
2618 #ifdef CONFIG_SCHED_CORE
2619 /*
2620 * In order to not have {0,2},{1,3} turn into an AB-BA,
2621 * order by core-id first and cpu-id second.
2622 *
2623 * Notably:
2624 *
2625 * double_rq_lock(0,3); will take core-0, core-1 lock
2626 * double_rq_lock(1,2); will take core-1, core-0 lock
2627 *
2628 * when only cpu-id is considered.
2629 */
2630 if (rq1->core->cpu < rq2->core->cpu)
2631 return true;
2632 if (rq1->core->cpu > rq2->core->cpu)
2633 return false;
2634
2635 /*
2636 * __sched_core_flip() relies on SMT having cpu-id lock order.
2637 */
2638 #endif
2639 return rq1->cpu < rq2->cpu;
2640 }
2641
2642 extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
2643
2644 #ifdef CONFIG_PREEMPTION
2645
2646 /*
2647 * fair double_lock_balance: Safely acquires both rq->locks in a fair
2648 * way at the expense of forcing extra atomic operations in all
2649 * invocations.
This assures that the double_lock is acquired using the 2650 * same underlying policy as the spinlock_t on this architecture, which 2651 * reduces latency compared to the unfair variant below. However, it 2652 * also adds more overhead and therefore may reduce throughput. 2653 */ 2654 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2655 __releases(this_rq->lock) 2656 __acquires(busiest->lock) 2657 __acquires(this_rq->lock) 2658 { 2659 raw_spin_rq_unlock(this_rq); 2660 double_rq_lock(this_rq, busiest); 2661 2662 return 1; 2663 } 2664 2665 #else 2666 /* 2667 * Unfair double_lock_balance: Optimizes throughput at the expense of 2668 * latency by eliminating extra atomic operations when the locks are 2669 * already in proper order on entry. This favors lower CPU-ids and will 2670 * grant the double lock to lower CPUs over higher ids under contention, 2671 * regardless of entry order into the function. 2672 */ 2673 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2674 __releases(this_rq->lock) 2675 __acquires(busiest->lock) 2676 __acquires(this_rq->lock) 2677 { 2678 if (__rq_lockp(this_rq) == __rq_lockp(busiest) || 2679 likely(raw_spin_rq_trylock(busiest))) { 2680 double_rq_clock_clear_update(this_rq, busiest); 2681 return 0; 2682 } 2683 2684 if (rq_order_less(this_rq, busiest)) { 2685 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); 2686 double_rq_clock_clear_update(this_rq, busiest); 2687 return 0; 2688 } 2689 2690 raw_spin_rq_unlock(this_rq); 2691 double_rq_lock(this_rq, busiest); 2692 2693 return 1; 2694 } 2695 2696 #endif /* CONFIG_PREEMPTION */ 2697 2698 /* 2699 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 2700 */ 2701 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2702 { 2703 lockdep_assert_irqs_disabled(); 2704 2705 return _double_lock_balance(this_rq, busiest); 2706 } 2707 2708 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2709 __releases(busiest->lock) 2710 { 2711 if (__rq_lockp(this_rq) != __rq_lockp(busiest)) 2712 raw_spin_rq_unlock(busiest); 2713 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); 2714 } 2715 2716 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 2717 { 2718 if (l1 > l2) 2719 swap(l1, l2); 2720 2721 spin_lock(l1); 2722 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2723 } 2724 2725 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 2726 { 2727 if (l1 > l2) 2728 swap(l1, l2); 2729 2730 spin_lock_irq(l1); 2731 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2732 } 2733 2734 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2735 { 2736 if (l1 > l2) 2737 swap(l1, l2); 2738 2739 raw_spin_lock(l1); 2740 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2741 } 2742 2743 /* 2744 * double_rq_unlock - safely unlock two runqueues 2745 * 2746 * Note this does not restore interrupts like task_rq_unlock, 2747 * you need to do so manually after calling. 
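*
* A minimal usage sketch (illustrative only):
*
* local_irq_save(flags);
* double_rq_lock(rq1, rq2);
* ... migrate state between rq1 and rq2 ...
* double_rq_unlock(rq1, rq2);
* local_irq_restore(flags);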
2748 */ 2749 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2750 __releases(rq1->lock) 2751 __releases(rq2->lock) 2752 { 2753 if (__rq_lockp(rq1) != __rq_lockp(rq2)) 2754 raw_spin_rq_unlock(rq2); 2755 else 2756 __release(rq2->lock); 2757 raw_spin_rq_unlock(rq1); 2758 } 2759 2760 extern void set_rq_online (struct rq *rq); 2761 extern void set_rq_offline(struct rq *rq); 2762 extern bool sched_smp_initialized; 2763 2764 #else /* CONFIG_SMP */ 2765 2766 /* 2767 * double_rq_lock - safely lock two runqueues 2768 * 2769 * Note this does not disable interrupts like task_rq_lock, 2770 * you need to do so manually before calling. 2771 */ 2772 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2773 __acquires(rq1->lock) 2774 __acquires(rq2->lock) 2775 { 2776 WARN_ON_ONCE(!irqs_disabled()); 2777 WARN_ON_ONCE(rq1 != rq2); 2778 raw_spin_rq_lock(rq1); 2779 __acquire(rq2->lock); /* Fake it out ;) */ 2780 double_rq_clock_clear_update(rq1, rq2); 2781 } 2782 2783 /* 2784 * double_rq_unlock - safely unlock two runqueues 2785 * 2786 * Note this does not restore interrupts like task_rq_unlock, 2787 * you need to do so manually after calling. 2788 */ 2789 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2790 __releases(rq1->lock) 2791 __releases(rq2->lock) 2792 { 2793 WARN_ON_ONCE(rq1 != rq2); 2794 raw_spin_rq_unlock(rq1); 2795 __release(rq2->lock); 2796 } 2797 2798 #endif 2799 2800 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2801 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 2802 2803 #ifdef CONFIG_SCHED_DEBUG 2804 extern bool sched_debug_verbose; 2805 2806 extern void print_cfs_stats(struct seq_file *m, int cpu); 2807 extern void print_rt_stats(struct seq_file *m, int cpu); 2808 extern void print_dl_stats(struct seq_file *m, int cpu); 2809 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2810 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2811 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2812 2813 extern void resched_latency_warn(int cpu, u64 latency); 2814 #ifdef CONFIG_NUMA_BALANCING 2815 extern void 2816 show_numa_stats(struct task_struct *p, struct seq_file *m); 2817 extern void 2818 print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2819 unsigned long tpf, unsigned long gsf, unsigned long gpf); 2820 #endif /* CONFIG_NUMA_BALANCING */ 2821 #else 2822 static inline void resched_latency_warn(int cpu, u64 latency) {} 2823 #endif /* CONFIG_SCHED_DEBUG */ 2824 2825 extern void init_cfs_rq(struct cfs_rq *cfs_rq); 2826 extern void init_rt_rq(struct rt_rq *rt_rq); 2827 extern void init_dl_rq(struct dl_rq *dl_rq); 2828 2829 extern void cfs_bandwidth_usage_inc(void); 2830 extern void cfs_bandwidth_usage_dec(void); 2831 2832 #ifdef CONFIG_NO_HZ_COMMON 2833 #define NOHZ_BALANCE_KICK_BIT 0 2834 #define NOHZ_STATS_KICK_BIT 1 2835 #define NOHZ_NEWILB_KICK_BIT 2 2836 #define NOHZ_NEXT_KICK_BIT 3 2837 2838 /* Run rebalance_domains() */ 2839 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2840 /* Update blocked load */ 2841 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2842 /* Update blocked load when entering idle */ 2843 #define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT) 2844 /* Update nohz.next_balance */ 2845 #define NOHZ_NEXT_KICK BIT(NOHZ_NEXT_KICK_BIT) 2846 2847 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK) 2848 2849 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 2850 
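/*
* Illustrative note (added for clarity): an idle CPU is kicked by OR-ing
* a subset of the NOHZ_*_KICK flags into nohz_flags(cpu) and then poking
* the CPU; see kick_ilb() in fair.c for the actual mechanics.
*/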
2851 extern void nohz_balance_exit_idle(struct rq *rq);
2852 #else
2853 static inline void nohz_balance_exit_idle(struct rq *rq) { }
2854 #endif
2855
2856 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
2857 extern void nohz_run_idle_balance(int cpu);
2858 #else
2859 static inline void nohz_run_idle_balance(int cpu) { }
2860 #endif
2861
2862 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2863 struct irqtime {
2864 u64 total;
2865 u64 tick_delta;
2866 u64 irq_start_time;
2867 struct u64_stats_sync sync;
2868 };
2869
2870 DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2871
2872 /*
2873 * Returns the irqtime minus the softirq time computed by ksoftirqd.
2874 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
2875 * subtracted from it and would never move forward.
2876 */
2877 static inline u64 irq_time_read(int cpu)
2878 {
2879 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2880 unsigned int seq;
2881 u64 total;
2882
2883 do {
2884 seq = __u64_stats_fetch_begin(&irqtime->sync);
2885 total = irqtime->total;
2886 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2887
2888 return total;
2889 }
2890 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2891
2892 #ifdef CONFIG_CPU_FREQ
2893 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
2894
2895 /**
2896 * cpufreq_update_util - Take a note about CPU utilization changes.
2897 * @rq: Runqueue to carry out the update for.
2898 * @flags: Update reason flags.
2899 *
2900 * This function is called by the scheduler on the CPU whose utilization is
2901 * being updated.
2902 *
2903 * It can only be called from RCU-sched read-side critical sections.
2904 *
2905 * The way cpufreq is currently arranged requires it to evaluate the CPU
2906 * performance state (frequency/voltage) on a regular basis to prevent it from
2907 * being stuck in a completely inadequate performance level for too long.
2908 * That is not guaranteed to happen if the updates are only triggered from CFS
2909 * and DL, though, because they may not be coming in if only RT tasks are
2910 * active all the time.
2911 *
2912 * As a workaround for that issue, this function is called periodically by the
2913 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
2914 * but that really is a band-aid. Going forward it should be replaced with
2915 * solutions targeted more specifically at RT tasks.
2916 */
2917 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2918 {
2919 struct update_util_data *data;
2920
2921 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2922 cpu_of(rq)));
2923 if (data)
2924 data->func(data, rq_clock(rq), flags);
2925 }
2926 #else
2927 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2928 #endif /* CONFIG_CPU_FREQ */
2929
2930 #ifdef arch_scale_freq_capacity
2931 # ifndef arch_scale_freq_invariant
2932 # define arch_scale_freq_invariant() true
2933 # endif
2934 #else
2935 # define arch_scale_freq_invariant() false
2936 #endif
2937
2938 #ifdef CONFIG_SMP
2939 static inline unsigned long capacity_orig_of(int cpu)
2940 {
2941 return cpu_rq(cpu)->cpu_capacity_orig;
2942 }
2943
2944 /**
2945 * enum cpu_util_type - CPU utilization type
2946 * @FREQUENCY_UTIL: Utilization used to select frequency
2947 * @ENERGY_UTIL: Utilization used during energy calculation
2948 *
2949 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
2950 * need to be aggregated differently depending on the usage made of them.
This 2951 * enum is used within effective_cpu_util() to differentiate the types of 2952 * utilization expected by the callers, and adjust the aggregation accordingly. 2953 */ 2954 enum cpu_util_type { 2955 FREQUENCY_UTIL, 2956 ENERGY_UTIL, 2957 }; 2958 2959 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 2960 enum cpu_util_type type, 2961 struct task_struct *p); 2962 2963 /* 2964 * Verify the fitness of task @p to run on @cpu taking into account the 2965 * CPU original capacity and the runtime/deadline ratio of the task. 2966 * 2967 * The function will return true if the original capacity of @cpu is 2968 * greater than or equal to task's deadline density right shifted by 2969 * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise. 2970 */ 2971 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) 2972 { 2973 unsigned long cap = arch_scale_cpu_capacity(cpu); 2974 2975 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); 2976 } 2977 2978 static inline unsigned long cpu_bw_dl(struct rq *rq) 2979 { 2980 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2981 } 2982 2983 static inline unsigned long cpu_util_dl(struct rq *rq) 2984 { 2985 return READ_ONCE(rq->avg_dl.util_avg); 2986 } 2987 2988 2989 extern unsigned long cpu_util_cfs(int cpu); 2990 extern unsigned long cpu_util_cfs_boost(int cpu); 2991 2992 static inline unsigned long cpu_util_rt(struct rq *rq) 2993 { 2994 return READ_ONCE(rq->avg_rt.util_avg); 2995 } 2996 #endif 2997 2998 #ifdef CONFIG_UCLAMP_TASK 2999 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 3000 3001 static inline unsigned long uclamp_rq_get(struct rq *rq, 3002 enum uclamp_id clamp_id) 3003 { 3004 return READ_ONCE(rq->uclamp[clamp_id].value); 3005 } 3006 3007 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, 3008 unsigned int value) 3009 { 3010 WRITE_ONCE(rq->uclamp[clamp_id].value, value); 3011 } 3012 3013 static inline bool uclamp_rq_is_idle(struct rq *rq) 3014 { 3015 return rq->uclamp_flags & UCLAMP_FLAG_IDLE; 3016 } 3017 3018 /** 3019 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. 3020 * @rq: The rq to clamp against. Must not be NULL. 3021 * @util: The util value to clamp. 3022 * @p: The task to clamp against. Can be NULL if you want to clamp 3023 * against @rq only. 3024 * 3025 * Clamps the passed @util to the max(@rq, @p) effective uclamp values. 3026 * 3027 * If sched_uclamp_used static key is disabled, then just return the util 3028 * without any clamping since uclamp aggregation at the rq level in the fast 3029 * path is disabled, rendering this operation a NOP. 3030 * 3031 * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It 3032 * will return the correct effective uclamp value of the task even if the 3033 * static key is disabled. 3034 */ 3035 static __always_inline 3036 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 3037 struct task_struct *p) 3038 { 3039 unsigned long min_util = 0; 3040 unsigned long max_util = 0; 3041 3042 if (!static_branch_likely(&sched_uclamp_used)) 3043 return util; 3044 3045 if (p) { 3046 min_util = uclamp_eff_value(p, UCLAMP_MIN); 3047 max_util = uclamp_eff_value(p, UCLAMP_MAX); 3048 3049 /* 3050 * Ignore last runnable task's max clamp, as this task will 3051 * reset it. Similarly, no need to read the rq's min clamp. 
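*
* E.g. (illustrative): on an idle rq, a waking task clamped to
* {min=300, max=400} yields clamp(util, 300, 400), regardless of
* whatever stale clamp values the rq still holds.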
3052 */ 3053 if (uclamp_rq_is_idle(rq)) 3054 goto out; 3055 } 3056 3057 min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN)); 3058 max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX)); 3059 out: 3060 /* 3061 * Since CPU's {min,max}_util clamps are MAX aggregated considering 3062 * RUNNABLE tasks with _different_ clamps, we can end up with an 3063 * inversion. Fix it now when the clamps are applied. 3064 */ 3065 if (unlikely(min_util >= max_util)) 3066 return min_util; 3067 3068 return clamp(util, min_util, max_util); 3069 } 3070 3071 /* Is the rq being capped/throttled by uclamp_max? */ 3072 static inline bool uclamp_rq_is_capped(struct rq *rq) 3073 { 3074 unsigned long rq_util; 3075 unsigned long max_util; 3076 3077 if (!static_branch_likely(&sched_uclamp_used)) 3078 return false; 3079 3080 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); 3081 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); 3082 3083 return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util; 3084 } 3085 3086 /* 3087 * When uclamp is compiled in, the aggregation at rq level is 'turned off' 3088 * by default in the fast path and only gets turned on once userspace performs 3089 * an operation that requires it. 3090 * 3091 * Returns true if userspace opted-in to use uclamp and aggregation at rq level 3092 * hence is active. 3093 */ 3094 static inline bool uclamp_is_used(void) 3095 { 3096 return static_branch_likely(&sched_uclamp_used); 3097 } 3098 #else /* CONFIG_UCLAMP_TASK */ 3099 static inline unsigned long uclamp_eff_value(struct task_struct *p, 3100 enum uclamp_id clamp_id) 3101 { 3102 if (clamp_id == UCLAMP_MIN) 3103 return 0; 3104 3105 return SCHED_CAPACITY_SCALE; 3106 } 3107 3108 static inline 3109 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 3110 struct task_struct *p) 3111 { 3112 return util; 3113 } 3114 3115 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } 3116 3117 static inline bool uclamp_is_used(void) 3118 { 3119 return false; 3120 } 3121 3122 static inline unsigned long uclamp_rq_get(struct rq *rq, 3123 enum uclamp_id clamp_id) 3124 { 3125 if (clamp_id == UCLAMP_MIN) 3126 return 0; 3127 3128 return SCHED_CAPACITY_SCALE; 3129 } 3130 3131 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, 3132 unsigned int value) 3133 { 3134 } 3135 3136 static inline bool uclamp_rq_is_idle(struct rq *rq) 3137 { 3138 return false; 3139 } 3140 #endif /* CONFIG_UCLAMP_TASK */ 3141 3142 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 3143 static inline unsigned long cpu_util_irq(struct rq *rq) 3144 { 3145 return rq->avg_irq.util_avg; 3146 } 3147 3148 static inline 3149 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 3150 { 3151 util *= (max - irq); 3152 util /= max; 3153 3154 return util; 3155 3156 } 3157 #else 3158 static inline unsigned long cpu_util_irq(struct rq *rq) 3159 { 3160 return 0; 3161 } 3162 3163 static inline 3164 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 3165 { 3166 return util; 3167 } 3168 #endif 3169 3170 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 3171 3172 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 3173 3174 DECLARE_STATIC_KEY_FALSE(sched_energy_present); 3175 3176 static inline bool sched_energy_enabled(void) 3177 { 3178 return static_branch_unlikely(&sched_energy_present); 3179 } 3180 3181 #else /* ! 
(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 3182 3183 #define perf_domain_span(pd) NULL 3184 static inline bool sched_energy_enabled(void) { return false; } 3185 3186 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 3187 3188 #ifdef CONFIG_MEMBARRIER 3189 /* 3190 * The scheduler provides memory barriers required by membarrier between: 3191 * - prior user-space memory accesses and store to rq->membarrier_state, 3192 * - store to rq->membarrier_state and following user-space memory accesses. 3193 * In the same way it provides those guarantees around store to rq->curr. 3194 */ 3195 static inline void membarrier_switch_mm(struct rq *rq, 3196 struct mm_struct *prev_mm, 3197 struct mm_struct *next_mm) 3198 { 3199 int membarrier_state; 3200 3201 if (prev_mm == next_mm) 3202 return; 3203 3204 membarrier_state = atomic_read(&next_mm->membarrier_state); 3205 if (READ_ONCE(rq->membarrier_state) == membarrier_state) 3206 return; 3207 3208 WRITE_ONCE(rq->membarrier_state, membarrier_state); 3209 } 3210 #else 3211 static inline void membarrier_switch_mm(struct rq *rq, 3212 struct mm_struct *prev_mm, 3213 struct mm_struct *next_mm) 3214 { 3215 } 3216 #endif 3217 3218 #ifdef CONFIG_SMP 3219 static inline bool is_per_cpu_kthread(struct task_struct *p) 3220 { 3221 if (!(p->flags & PF_KTHREAD)) 3222 return false; 3223 3224 if (p->nr_cpus_allowed != 1) 3225 return false; 3226 3227 return true; 3228 } 3229 #endif 3230 3231 extern void swake_up_all_locked(struct swait_queue_head *q); 3232 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 3233 3234 #ifdef CONFIG_PREEMPT_DYNAMIC 3235 extern int preempt_dynamic_mode; 3236 extern int sched_dynamic_mode(const char *str); 3237 extern void sched_dynamic_update(int mode); 3238 #endif 3239 3240 static inline void update_current_exec_runtime(struct task_struct *curr, 3241 u64 now, u64 delta_exec) 3242 { 3243 curr->se.sum_exec_runtime += delta_exec; 3244 account_group_exec_runtime(curr, delta_exec); 3245 3246 curr->se.exec_start = now; 3247 cgroup_account_cputime(curr, delta_exec); 3248 } 3249 3250 #ifdef CONFIG_SCHED_MM_CID 3251 3252 #define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */ 3253 #define MM_CID_SCAN_DELAY 100 /* 100ms */ 3254 3255 extern raw_spinlock_t cid_lock; 3256 extern int use_cid_lock; 3257 3258 extern void sched_mm_cid_migrate_from(struct task_struct *t); 3259 extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t); 3260 extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr); 3261 extern void init_sched_mm_cid(struct task_struct *t); 3262 3263 static inline void __mm_cid_put(struct mm_struct *mm, int cid) 3264 { 3265 if (cid < 0) 3266 return; 3267 cpumask_clear_cpu(cid, mm_cidmask(mm)); 3268 } 3269 3270 /* 3271 * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to 3272 * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to 3273 * be held to transition to other states. 3274 * 3275 * State transitions synchronized with cmpxchg or try_cmpxchg need to be 3276 * consistent across cpus, which prevents use of this_cpu_cmpxchg. 
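*
* Rough lifecycle sketch (illustrative):
*
* MM_CID_UNSET --(rq lock held)--------------> valid cid
* valid cid    --(no rq lock, cmpxchg)-------> cid | MM_CID_LAZY_PUT
* lazy-put cid --(no rq lock, try_cmpxchg)---> MM_CID_UNSET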
3277 */
3278 static inline void mm_cid_put_lazy(struct task_struct *t)
3279 {
3280 struct mm_struct *mm = t->mm;
3281 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
3282 int cid;
3283
3284 lockdep_assert_irqs_disabled();
3285 cid = __this_cpu_read(pcpu_cid->cid);
3286 if (!mm_cid_is_lazy_put(cid) ||
3287 !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
3288 return;
3289 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
3290 }
3291
3292 static inline int mm_cid_pcpu_unset(struct mm_struct *mm)
3293 {
3294 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
3295 int cid, res;
3296
3297 lockdep_assert_irqs_disabled();
3298 cid = __this_cpu_read(pcpu_cid->cid);
3299 for (;;) {
3300 if (mm_cid_is_unset(cid))
3301 return MM_CID_UNSET;
3302 /*
3303 * Attempt transition from valid or lazy-put to unset.
3304 */
3305 res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET);
3306 if (res == cid)
3307 break;
3308 cid = res;
3309 }
3310 return cid;
3311 }
3312
3313 static inline void mm_cid_put(struct mm_struct *mm)
3314 {
3315 int cid;
3316
3317 lockdep_assert_irqs_disabled();
3318 cid = mm_cid_pcpu_unset(mm);
3319 if (cid == MM_CID_UNSET)
3320 return;
3321 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
3322 }
3323
3324 static inline int __mm_cid_try_get(struct mm_struct *mm)
3325 {
3326 struct cpumask *cpumask;
3327 int cid;
3328
3329 cpumask = mm_cidmask(mm);
3330 /*
3331 * Retry finding first zero bit if the mask is temporarily
3332 * filled. This only happens during concurrent remote-clear
3333 * which owns a cid without holding a rq lock.
3334 */
3335 for (;;) {
3336 cid = cpumask_first_zero(cpumask);
3337 if (cid < nr_cpu_ids)
3338 break;
3339 cpu_relax();
3340 }
3341 if (cpumask_test_and_set_cpu(cid, cpumask))
3342 return -1;
3343 return cid;
3344 }
3345
3346 /*
3347 * Save a snapshot of the current runqueue time of this cpu
3348 * with the per-cpu cid value, allowing us to estimate how recently it was used.
3349 */
3350 static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
3351 {
3352 struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));
3353
3354 lockdep_assert_rq_held(rq);
3355 WRITE_ONCE(pcpu_cid->time, rq->clock);
3356 }
3357
3358 static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm)
3359 {
3360 int cid;
3361
3362 /*
3363 * All allocations (even those using the cid_lock) are lock-free. If
3364 * use_cid_lock is set, hold the cid_lock to perform cid allocation to
3365 * guarantee forward progress.
3366 */
3367 if (!READ_ONCE(use_cid_lock)) {
3368 cid = __mm_cid_try_get(mm);
3369 if (cid >= 0)
3370 goto end;
3371 raw_spin_lock(&cid_lock);
3372 } else {
3373 raw_spin_lock(&cid_lock);
3374 cid = __mm_cid_try_get(mm);
3375 if (cid >= 0)
3376 goto unlock;
3377 }
3378
3379 /*
3380 * cid concurrently allocated. Retry while forcing subsequent
3381 * allocations to use the cid_lock to ensure forward progress.
3382 */
3383 WRITE_ONCE(use_cid_lock, 1);
3384 /*
3385 * Set use_cid_lock before allocation. Only care about program order
3386 * because this is only required for forward progress.
3387 */
3388 barrier();
3389 /*
3390 * Retry until it succeeds. It is guaranteed to eventually succeed once
3391 * all incoming allocations observe the use_cid_lock flag set.
3392 */
3393 do {
3394 cid = __mm_cid_try_get(mm);
3395 cpu_relax();
3396 } while (cid < 0);
3397 /*
3398 * Allocate before clearing use_cid_lock. Only care about
3399 * program order because this is for forward progress.
3400 */ 3401 barrier(); 3402 WRITE_ONCE(use_cid_lock, 0); 3403 unlock: 3404 raw_spin_unlock(&cid_lock); 3405 end: 3406 mm_cid_snapshot_time(rq, mm); 3407 return cid; 3408 } 3409 3410 static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm) 3411 { 3412 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3413 struct cpumask *cpumask; 3414 int cid; 3415 3416 lockdep_assert_rq_held(rq); 3417 cpumask = mm_cidmask(mm); 3418 cid = __this_cpu_read(pcpu_cid->cid); 3419 if (mm_cid_is_valid(cid)) { 3420 mm_cid_snapshot_time(rq, mm); 3421 return cid; 3422 } 3423 if (mm_cid_is_lazy_put(cid)) { 3424 if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) 3425 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3426 } 3427 cid = __mm_cid_get(rq, mm); 3428 __this_cpu_write(pcpu_cid->cid, cid); 3429 return cid; 3430 } 3431 3432 static inline void switch_mm_cid(struct rq *rq, 3433 struct task_struct *prev, 3434 struct task_struct *next) 3435 { 3436 /* 3437 * Provide a memory barrier between rq->curr store and load of 3438 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition. 3439 * 3440 * Should be adapted if context_switch() is modified. 3441 */ 3442 if (!next->mm) { // to kernel 3443 /* 3444 * user -> kernel transition does not guarantee a barrier, but 3445 * we can use the fact that it performs an atomic operation in 3446 * mmgrab(). 3447 */ 3448 if (prev->mm) // from user 3449 smp_mb__after_mmgrab(); 3450 /* 3451 * kernel -> kernel transition does not change rq->curr->mm 3452 * state. It stays NULL. 3453 */ 3454 } else { // to user 3455 /* 3456 * kernel -> user transition does not provide a barrier 3457 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu]. 3458 * Provide it here. 3459 */ 3460 if (!prev->mm) // from kernel 3461 smp_mb(); 3462 /* 3463 * user -> user transition guarantees a memory barrier through 3464 * switch_mm() when current->mm changes. If current->mm is 3465 * unchanged, no barrier is needed. 3466 */ 3467 } 3468 if (prev->mm_cid_active) { 3469 mm_cid_snapshot_time(rq, prev->mm); 3470 mm_cid_put_lazy(prev); 3471 prev->mm_cid = -1; 3472 } 3473 if (next->mm_cid_active) 3474 next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm); 3475 } 3476 3477 #else 3478 static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { } 3479 static inline void sched_mm_cid_migrate_from(struct task_struct *t) { } 3480 static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { } 3481 static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { } 3482 static inline void init_sched_mm_cid(struct task_struct *t) { } 3483 #endif 3484 3485 #endif /* _KERNEL_SCHED_SCHED_H */ 3486