
#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/clock.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/mm.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/sched/init.h>

#include <linux/u64_stats_sync.h>
#include <linux/kernel_stat.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64bit). The costs for increasing resolution when 32bit are
 * pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
 * increase coverage and consistency always enable it on 64bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif
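
/*
 * Worked example (illustrative only, not part of the kernel interface):
 * with SCHED_FIXEDPOINT_SHIFT == 10, the nice-0 weight is
 * sched_prio_to_weight[20] == 1024. On 64-bit kernels:
 *
 *	scale_load(1024) == 1024 << 10 == 1048576 == NICE_0_LOAD
 *	scale_load_down(scale_load(w)) == w
 *
 * while on 32-bit both macros are identity operations and
 * NICE_0_LOAD == 1024.
 */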

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE	(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
	unsigned int rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls will
 * be read. It, in turn, can be changed by writing on its own
 * control.
 */
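
/*
 * Numerical sketch (illustrative only, assuming the default 95% RT/DL
 * limit and the BW_SHIFT (Q20) fixed point used by to_ratio() further
 * down in this file): a task with runtime 10ms every 100ms contributes
 *
 *	new_bw = to_ratio(100 * NSEC_PER_MSEC, 10 * NSEC_PER_MSEC)
 *	       ~= 0.1 * BW_UNIT
 *
 * and __dl_overflow() below admits it on a domain with @cpus CPUs only if
 *
 *	dl_b->bw * cpus >= dl_b->total_bw - old_bw + new_bw
 *
 * where dl_b->bw is the per-CPU limit (~0.95 * BW_UNIT by default).
 */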
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

void dl_change_utilization(struct task_struct *p, u64 new_bw);
extern void init_dl_bw(struct dl_bw *dl_b);
extern int sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int sched_dl_overflow(struct task_struct *p, int policy,
			     const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern void __dl_clear_params(struct task_struct *p);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_task_can_attach(struct task_struct *p,
			      const struct cpumask *cs_cpus_allowed);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
					const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on that cfs_rq, so the weight of an entity should not be
 * too large, and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root_cached tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
	unsigned long propagate_avg;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached root;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached pushable_dl_tasks_root;
#else
	struct dl_bw dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64 running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in the runqueue and the tasks that executed on
	 * this CPU and blocked). Increased when a task moves to this runqueue,
	 * and decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64 this_bw;
	u64 extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64 bw_ratio;
};

#ifdef CONFIG_SMP

static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	unsigned long max_cpu_capacity;
};

extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code) must
 * order their lock acquire operations by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif /* CONFIG_SMP */
	unsigned long nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
	struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_update_flags;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	call_single_data_t hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}


#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *  call to __schedule(). This is an optimisation to avoid
 *  neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *  in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *  made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02
#define RQCF_UPDATED	0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_update_flags |= RQCF_REQ_SKIP;
	else
		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA
extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long capacity;
	unsigned long min_capacity; /* Min per-CPU capacity in group */
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int id;
#endif

	unsigned long cpumask[0]; /* balance mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;
	int asym_prefer_cpu;		/* cpu of highest priority in group */

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * See build_balance_mask().
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_span(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void dirty_sched_domain_sysctl(int cpu);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void dirty_sched_domain_sysctl(int cpu)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "autogroup.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	p->cpu = cpu;
#else
	task_thread_info(p)->cpu = cpu;
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
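
/*
 * Example of how the SCHED_FEAT() X-macro above is consumed (illustrative
 * only): a line such as
 *
 *	SCHED_FEAT(HRTICK, false)
 *
 * in "features.h" produces the __SCHED_FEAT_HRTICK enum value and, with
 * jump labels, a static_branch_HRTICK() helper, so feature tests are
 * written as
 *
 *	if (sched_feat(HRTICK))
 *		...
 *
 * as hrtick_enabled() does further down in this file.
 */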

extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch() must
	 * happen before this.
	 *
	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int sched_prio_to_weight[40];
extern const u32 sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */
#define DEQUEUE_NOCLOCK		0x08 /* matches ENQUEUE_NOCLOCK */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04
#define ENQUEUE_NOCLOCK		0x08

#define ENQUEUE_HEAD		0x10
#define ENQUEUE_REPLENISH	0x20
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x40
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev,
						struct rq_flags *rf);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#define TASK_SET_GROUP	0
#define TASK_MOVE_GROUP	1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group) (struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
{
	curr->sched_class->set_curr_task(rq);
}

#ifdef CONFIG_SMP
#define sched_class_highest (&stop_sched_class)
#else
#define sched_class_highest (&dl_sched_class)
#endif
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;


#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void schedule_idle(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

#define BW_SHIFT	20
#define BW_UNIT		(1 << BW_SHIFT)
#define RATIO_SHIFT	8
unsigned long to_ratio(u64 period, u64 runtime);
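
/*
 * Illustrative note: to_ratio() converts a (period, runtime) pair into a
 * BW_SHIFT (Q20) fixed-point utilization, so a runtime equal to half the
 * period yields BW_UNIT / 2, and RUNTIME_INF yields a full BW_UNIT.
 * RATIO_SHIFT is the coarser fixed point used for dl_rq::bw_ratio by the
 * GRUB reclaiming code.
 */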

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If tick is needed, let's send the target an IPI to kick it out
 * of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu;

	if (!tick_nohz_full_enabled())
		return;

	cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif
	}

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irqsave(&rq->lock, rf->flags);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock_irq(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_pin_lock(rq, rf);
}

static inline void
rq_relock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_lock(&rq->lock);
	rq_repin_lock(rq, rf);
}

static inline void
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
}

static inline void
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock_irq(&rq->lock);
}

static inline void
rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_unlock(&rq->lock);
}
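
/*
 * Typical usage of the lock/pin helpers above (illustrative only, not a
 * new API):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	(takes p->pi_lock and rq->lock, pinned)
 *	update_rq_clock(rq);
 *	...
 *	task_rq_unlock(rq, p, &rf);
 *
 * or, when only the runqueue itself is needed:
 *
 *	rq_lock_irqsave(rq, &rf);
 *	...
 *	rq_unlock_irqrestore(rq, &rf);
 */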

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

extern void set_rq_online (struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern bool sched_debug_enabled;

extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
	unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(unsigned int cpu);
#else
static inline void nohz_balance_exit_idle(unsigned int cpu) { }
#endif


#ifdef CONFIG_SMP
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
	int i;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask) {
		struct rq *rq = cpu_rq(i);

		rq->dl.extra_bw += bw;
	}
}
#else
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

	dl->extra_bw += bw;
}
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64 total;
	u64 tick_delta;
	u64 irq_start_time;
	struct u64_stats_sync sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted and would never move forward.
 */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS,
 * though, because they may not be coming in if RT or deadline tasks are active
 * all the time (or there are RT and DL tasks only).
 *
 * As a workaround for that issue, this function is called by the RT and DL
 * sched classes to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid. Going forward it should be replaced with
 * solutions targeted more specifically at RT and DL tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
						  cpu_of(rq)));
	if (data)
		data->func(data, rq_clock(rq), flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */

#ifdef arch_scale_freq_capacity
#ifndef arch_scale_freq_invariant
#define arch_scale_freq_invariant()	(true)
#endif
#else /* arch_scale_freq_capacity */
#define arch_scale_freq_invariant()	(false)
#endif