xref: /openbmc/linux/kernel/sched/sched.h (revision b9df3997)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Scheduler internal types and methods:
4  */
5 #include <linux/sched.h>
6 
7 #include <linux/sched/autogroup.h>
8 #include <linux/sched/clock.h>
9 #include <linux/sched/coredump.h>
10 #include <linux/sched/cpufreq.h>
11 #include <linux/sched/cputime.h>
12 #include <linux/sched/deadline.h>
13 #include <linux/sched/debug.h>
14 #include <linux/sched/hotplug.h>
15 #include <linux/sched/idle.h>
16 #include <linux/sched/init.h>
17 #include <linux/sched/isolation.h>
18 #include <linux/sched/jobctl.h>
19 #include <linux/sched/loadavg.h>
20 #include <linux/sched/mm.h>
21 #include <linux/sched/nohz.h>
22 #include <linux/sched/numa_balancing.h>
23 #include <linux/sched/prio.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/smt.h>
27 #include <linux/sched/stat.h>
28 #include <linux/sched/sysctl.h>
29 #include <linux/sched/task.h>
30 #include <linux/sched/task_stack.h>
31 #include <linux/sched/topology.h>
32 #include <linux/sched/user.h>
33 #include <linux/sched/wake_q.h>
34 #include <linux/sched/xacct.h>
35 
36 #include <uapi/linux/sched/types.h>
37 
38 #include <linux/binfmts.h>
39 #include <linux/blkdev.h>
40 #include <linux/compat.h>
41 #include <linux/context_tracking.h>
42 #include <linux/cpufreq.h>
43 #include <linux/cpuidle.h>
44 #include <linux/cpuset.h>
45 #include <linux/ctype.h>
46 #include <linux/debugfs.h>
47 #include <linux/delayacct.h>
48 #include <linux/energy_model.h>
49 #include <linux/init_task.h>
50 #include <linux/kprobes.h>
51 #include <linux/kthread.h>
52 #include <linux/membarrier.h>
53 #include <linux/migrate.h>
54 #include <linux/mmu_context.h>
55 #include <linux/nmi.h>
56 #include <linux/proc_fs.h>
57 #include <linux/prefetch.h>
58 #include <linux/profile.h>
59 #include <linux/psi.h>
60 #include <linux/rcupdate_wait.h>
61 #include <linux/security.h>
62 #include <linux/stop_machine.h>
63 #include <linux/suspend.h>
64 #include <linux/swait.h>
65 #include <linux/syscalls.h>
66 #include <linux/task_work.h>
67 #include <linux/tsacct_kern.h>
68 
69 #include <asm/tlb.h>
70 
71 #ifdef CONFIG_PARAVIRT
72 # include <asm/paravirt.h>
73 #endif
74 
75 #include "cpupri.h"
76 #include "cpudeadline.h"
77 
78 #ifdef CONFIG_SCHED_DEBUG
79 # define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
80 #else
81 # define SCHED_WARN_ON(x)	({ (void)(x), 0; })
82 #endif
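
/*
 * Usage sketch (illustrative, mirroring how the macro is used later in this
 * file): SCHED_WARN_ON() behaves like WARN_ONCE() on CONFIG_SCHED_DEBUG
 * kernels and degrades to a plain, warning-free evaluation of its argument
 * otherwise, e.g.:
 *
 *	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
 */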
83 
84 struct rq;
85 struct cpuidle_state;
86 
87 /* task_struct::on_rq states: */
88 #define TASK_ON_RQ_QUEUED	1
89 #define TASK_ON_RQ_MIGRATING	2
90 
91 extern __read_mostly int scheduler_running;
92 
93 extern unsigned long calc_load_update;
94 extern atomic_long_t calc_load_tasks;
95 
96 extern void calc_global_load_tick(struct rq *this_rq);
97 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
98 
99 /*
100  * Helpers for converting nanosecond timing to jiffy resolution
101  */
102 #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
103 
104 /*
105  * Increase resolution of nice-level calculations for 64-bit architectures.
106  * The extra resolution improves shares distribution and load balancing of
107  * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
108  * hierarchies, especially on larger systems. This is not a user-visible change
109  * and does not change the user-interface for setting shares/weights.
110  *
111  * We increase resolution only if we have enough bits to allow this increased
112  * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
113  * are pretty high and the returns do not justify the increased costs.
114  *
115  * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
116  * increase coverage and consistency always enable it on 64-bit platforms.
117  */
118 #ifdef CONFIG_64BIT
119 # define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
120 # define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
121 # define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
122 #else
123 # define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
124 # define scale_load(w)		(w)
125 # define scale_load_down(w)	(w)
126 #endif
127 
128 /*
129  * Task weight (visible to users) and its load (invisible to users) have
130  * independent resolution, but they should be well calibrated. We use
131  * scale_load() and scale_load_down() to convert between them. The
132  * following must be true:
133  *
134  *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
135  *
136  */
137 #define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
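
/*
 * Worked example (sketch): a nice-0 task has a user-visible weight of 1024,
 * i.e. 1 << SCHED_FIXEDPOINT_SHIFT. On 64-bit, scale_load() lifts that into
 * the higher-resolution load domain and scale_load_down() recovers it:
 *
 *	scale_load(1024)             == 1024 << 10 == 1048576 == NICE_0_LOAD
 *	scale_load_down(NICE_0_LOAD) == 1024
 *
 * On 32-bit both helpers are the identity and NICE_0_LOAD is simply 1024.
 */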
138 
139 /*
140  * Single value that decides SCHED_DEADLINE internal math precision.
141  * 10 -> just above 1us
142  * 9  -> just above 0.5us
143  */
144 #define DL_SCALE		10
145 
146 /*
147  * Single value that denotes runtime == period, i.e. unlimited time.
148  */
149 #define RUNTIME_INF		((u64)~0ULL)
150 
151 static inline int idle_policy(int policy)
152 {
153 	return policy == SCHED_IDLE;
154 }
155 static inline int fair_policy(int policy)
156 {
157 	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
158 }
159 
160 static inline int rt_policy(int policy)
161 {
162 	return policy == SCHED_FIFO || policy == SCHED_RR;
163 }
164 
165 static inline int dl_policy(int policy)
166 {
167 	return policy == SCHED_DEADLINE;
168 }
169 static inline bool valid_policy(int policy)
170 {
171 	return idle_policy(policy) || fair_policy(policy) ||
172 		rt_policy(policy) || dl_policy(policy);
173 }
174 
175 static inline int task_has_idle_policy(struct task_struct *p)
176 {
177 	return idle_policy(p->policy);
178 }
179 
180 static inline int task_has_rt_policy(struct task_struct *p)
181 {
182 	return rt_policy(p->policy);
183 }
184 
185 static inline int task_has_dl_policy(struct task_struct *p)
186 {
187 	return dl_policy(p->policy);
188 }
189 
190 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
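
/*
 * Worked example (sketch): cap_scale() scales @v by a capacity @s given in
 * [0..SCHED_CAPACITY_SCALE] (1024). Scaling a utilization of 512 by a CPU
 * capacity of 768 gives:
 *
 *	cap_scale(512, 768) == (512 * 768) >> 10 == 384
 */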
191 
192 /*
193  * !! For sched_setattr_nocheck() (kernel) only !!
194  *
195  * This is actually gross. :(
196  *
197  * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
198  * tasks, but still be able to sleep. We need this on platforms that cannot
199  * atomically change clock frequency. Remove once fast switching will be
200  * available on such platforms.
201  *
202  * SUGOV stands for SchedUtil GOVernor.
203  */
204 #define SCHED_FLAG_SUGOV	0x10000000
205 
206 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
207 {
208 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
209 	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
210 #else
211 	return false;
212 #endif
213 }
214 
215 /*
216  * Tells if entity @a should preempt entity @b.
217  */
218 static inline bool
219 dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
220 {
221 	return dl_entity_is_special(a) ||
222 	       dl_time_before(a->deadline, b->deadline);
223 }
224 
225 /*
226  * This is the priority-queue data structure of the RT scheduling class:
227  */
228 struct rt_prio_array {
229 	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
230 	struct list_head queue[MAX_RT_PRIO];
231 };
232 
233 struct rt_bandwidth {
234 	/* nests inside the rq lock: */
235 	raw_spinlock_t		rt_runtime_lock;
236 	ktime_t			rt_period;
237 	u64			rt_runtime;
238 	struct hrtimer		rt_period_timer;
239 	unsigned int		rt_period_active;
240 };
241 
242 void __dl_clear_params(struct task_struct *p);
243 
244 /*
245  * To keep the bandwidth of -deadline tasks and groups under control
246  * we need some place where:
247  *  - store the maximum -deadline bandwidth of the system (the group);
248  *  - cache the fraction of that bandwidth that is currently allocated.
249  *
250  * This is all done in the data structure below. It is similar to the
251  * one used for RT-throttling (rt_bandwidth), with the main difference
252  * that, since here we are only interested in admission control, we
253  * do not decrease any runtime while the group "executes", nor do we
254  * need a timer to replenish it.
255  *
256  * With respect to SMP, the bandwidth is given on a per-CPU basis,
257  * meaning that:
258  *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
259  *  - dl_total_bw array contains, in the i-th element, the currently
260  *    allocated bandwidth on the i-th CPU.
261  * Moreover, groups consume bandwidth on each CPU, while tasks only
262  * consume bandwidth on the CPU they're running on.
263  * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
264  * that will be shown the next time the proc or cgroup controls are
265  * read. It can, in turn, be changed by writing to its own
266  * control.
267  */
268 struct dl_bandwidth {
269 	raw_spinlock_t		dl_runtime_lock;
270 	u64			dl_runtime;
271 	u64			dl_period;
272 };
273 
274 static inline int dl_bandwidth_enabled(void)
275 {
276 	return sysctl_sched_rt_runtime >= 0;
277 }
278 
279 struct dl_bw {
280 	raw_spinlock_t		lock;
281 	u64			bw;
282 	u64			total_bw;
283 };
284 
285 static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
286 
287 static inline
288 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
289 {
290 	dl_b->total_bw -= tsk_bw;
291 	__dl_update(dl_b, (s32)tsk_bw / cpus);
292 }
293 
294 static inline
295 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
296 {
297 	dl_b->total_bw += tsk_bw;
298 	__dl_update(dl_b, -((s32)tsk_bw / cpus));
299 }
300 
301 static inline
302 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
303 {
304 	return dl_b->bw != -1 &&
305 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
306 }
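
/*
 * Admission-control sketch (illustrative, not a verbatim kernel path): a new
 * -deadline task with bandwidth new_bw is accepted on a root domain with
 * 'cpus' CPUs only if it does not overflow the per-CPU limit dl_b->bw, and
 * on success its bandwidth is accounted:
 *
 *	if (!__dl_overflow(dl_b, cpus, 0, new_bw))
 *		__dl_add(dl_b, new_bw, cpus);
 *
 * __dl_sub() undoes the accounting when the task leaves SCHED_DEADLINE.
 */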
307 
308 extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
309 extern void init_dl_bw(struct dl_bw *dl_b);
310 extern int  sched_dl_global_validate(void);
311 extern void sched_dl_do_global(void);
312 extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
313 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
314 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
315 extern bool __checkparam_dl(const struct sched_attr *attr);
316 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
317 extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
318 extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
319 extern bool dl_cpu_busy(unsigned int cpu);
320 
321 #ifdef CONFIG_CGROUP_SCHED
322 
323 #include <linux/cgroup.h>
324 #include <linux/psi.h>
325 
326 struct cfs_rq;
327 struct rt_rq;
328 
329 extern struct list_head task_groups;
330 
331 struct cfs_bandwidth {
332 #ifdef CONFIG_CFS_BANDWIDTH
333 	raw_spinlock_t		lock;
334 	ktime_t			period;
335 	u64			quota;
336 	u64			runtime;
337 	s64			hierarchical_quota;
338 
339 	u8			idle;
340 	u8			period_active;
341 	u8			distribute_running;
342 	u8			slack_started;
343 	struct hrtimer		period_timer;
344 	struct hrtimer		slack_timer;
345 	struct list_head	throttled_cfs_rq;
346 
347 	/* Statistics: */
348 	int			nr_periods;
349 	int			nr_throttled;
350 	u64			throttled_time;
351 #endif
352 };
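
/*
 * Configuration example (sketch): a cgroup set up with cpu.cfs_period_us =
 * 100000 and cpu.cfs_quota_us = 50000 ends up with period == 100ms and
 * quota == 50ms here, i.e. at most half a CPU of runtime per period;
 * 'runtime' tracks what is left of the quota in the current period and
 * throttled cfs_rqs sit on 'throttled_cfs_rq' until period_timer refills it.
 */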
353 
354 /* Task group related information */
355 struct task_group {
356 	struct cgroup_subsys_state css;
357 
358 #ifdef CONFIG_FAIR_GROUP_SCHED
359 	/* schedulable entities of this group on each CPU */
360 	struct sched_entity	**se;
361 	/* runqueue "owned" by this group on each CPU */
362 	struct cfs_rq		**cfs_rq;
363 	unsigned long		shares;
364 
365 #ifdef	CONFIG_SMP
366 	/*
367 	 * load_avg can be heavily contended at clock tick time, so put
368 	 * it in its own cacheline separated from the fields above which
369 	 * will also be accessed at each tick.
370 	 */
371 	atomic_long_t		load_avg ____cacheline_aligned;
372 #endif
373 #endif
374 
375 #ifdef CONFIG_RT_GROUP_SCHED
376 	struct sched_rt_entity	**rt_se;
377 	struct rt_rq		**rt_rq;
378 
379 	struct rt_bandwidth	rt_bandwidth;
380 #endif
381 
382 	struct rcu_head		rcu;
383 	struct list_head	list;
384 
385 	struct task_group	*parent;
386 	struct list_head	siblings;
387 	struct list_head	children;
388 
389 #ifdef CONFIG_SCHED_AUTOGROUP
390 	struct autogroup	*autogroup;
391 #endif
392 
393 	struct cfs_bandwidth	cfs_bandwidth;
394 
395 #ifdef CONFIG_UCLAMP_TASK_GROUP
396 	/* The two decimal precision [%] value requested from user-space */
397 	unsigned int		uclamp_pct[UCLAMP_CNT];
398 	/* Clamp values requested for a task group */
399 	struct uclamp_se	uclamp_req[UCLAMP_CNT];
400 	/* Effective clamp values used for a task group */
401 	struct uclamp_se	uclamp[UCLAMP_CNT];
402 #endif
403 
404 };
405 
406 #ifdef CONFIG_FAIR_GROUP_SCHED
407 #define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
408 
409 /*
410  * A weight of 0 or 1 can cause arithmetic problems.
411  * The weight of a cfs_rq is the sum of the weights of the entities
412  * queued on that cfs_rq, so the weight of an entity should not be
413  * too large; the same goes for the shares value of a task group.
414  * (The default weight is 1024 - so there's no practical
415  *  limitation from this.)
416  */
417 #define MIN_SHARES		(1UL <<  1)
418 #define MAX_SHARES		(1UL << 18)
419 #endif
420 
421 typedef int (*tg_visitor)(struct task_group *, void *);
422 
423 extern int walk_tg_tree_from(struct task_group *from,
424 			     tg_visitor down, tg_visitor up, void *data);
425 
426 /*
427  * Iterate the full tree, calling @down when first entering a node and @up when
428  * leaving it for the final time.
429  *
430  * Caller must hold the RCU read lock or a sufficient equivalent.
431  */
432 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
433 {
434 	return walk_tg_tree_from(&root_task_group, down, up, data);
435 }
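
/*
 * Usage sketch (count_tg is a hypothetical visitor, not a kernel function):
 * a visitor returns 0 to continue the walk and non-zero to abort it, and the
 * caller must be in an RCU read-side critical section:
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &count);
 *	rcu_read_unlock();
 */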
436 
437 extern int tg_nop(struct task_group *tg, void *data);
438 
439 extern void free_fair_sched_group(struct task_group *tg);
440 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
441 extern void online_fair_sched_group(struct task_group *tg);
442 extern void unregister_fair_sched_group(struct task_group *tg);
443 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
444 			struct sched_entity *se, int cpu,
445 			struct sched_entity *parent);
446 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
447 
448 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
449 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
450 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
451 
452 extern void free_rt_sched_group(struct task_group *tg);
453 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
454 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
455 		struct sched_rt_entity *rt_se, int cpu,
456 		struct sched_rt_entity *parent);
457 extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
458 extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
459 extern long sched_group_rt_runtime(struct task_group *tg);
460 extern long sched_group_rt_period(struct task_group *tg);
461 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
462 
463 extern struct task_group *sched_create_group(struct task_group *parent);
464 extern void sched_online_group(struct task_group *tg,
465 			       struct task_group *parent);
466 extern void sched_destroy_group(struct task_group *tg);
467 extern void sched_offline_group(struct task_group *tg);
468 
469 extern void sched_move_task(struct task_struct *tsk);
470 
471 #ifdef CONFIG_FAIR_GROUP_SCHED
472 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
473 
474 #ifdef CONFIG_SMP
475 extern void set_task_rq_fair(struct sched_entity *se,
476 			     struct cfs_rq *prev, struct cfs_rq *next);
477 #else /* !CONFIG_SMP */
478 static inline void set_task_rq_fair(struct sched_entity *se,
479 			     struct cfs_rq *prev, struct cfs_rq *next) { }
480 #endif /* CONFIG_SMP */
481 #endif /* CONFIG_FAIR_GROUP_SCHED */
482 
483 #else /* CONFIG_CGROUP_SCHED */
484 
485 struct cfs_bandwidth { };
486 
487 #endif	/* CONFIG_CGROUP_SCHED */
488 
489 /* CFS-related fields in a runqueue */
490 struct cfs_rq {
491 	struct load_weight	load;
492 	unsigned long		runnable_weight;
493 	unsigned int		nr_running;
494 	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
495 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
496 
497 	u64			exec_clock;
498 	u64			min_vruntime;
499 #ifndef CONFIG_64BIT
500 	u64			min_vruntime_copy;
501 #endif
502 
503 	struct rb_root_cached	tasks_timeline;
504 
505 	/*
506 	 * 'curr' points to currently running entity on this cfs_rq.
507 	 * It is set to NULL otherwise (i.e when none are currently running).
508 	 * It is set to NULL otherwise (i.e. when none are currently running).
509 	struct sched_entity	*curr;
510 	struct sched_entity	*next;
511 	struct sched_entity	*last;
512 	struct sched_entity	*skip;
513 
514 #ifdef	CONFIG_SCHED_DEBUG
515 	unsigned int		nr_spread_over;
516 #endif
517 
518 #ifdef CONFIG_SMP
519 	/*
520 	 * CFS load tracking
521 	 */
522 	struct sched_avg	avg;
523 #ifndef CONFIG_64BIT
524 	u64			load_last_update_time_copy;
525 #endif
526 	struct {
527 		raw_spinlock_t	lock ____cacheline_aligned;
528 		int		nr;
529 		unsigned long	load_avg;
530 		unsigned long	util_avg;
531 		unsigned long	runnable_sum;
532 	} removed;
533 
534 #ifdef CONFIG_FAIR_GROUP_SCHED
535 	unsigned long		tg_load_avg_contrib;
536 	long			propagate;
537 	long			prop_runnable_sum;
538 
539 	/*
540 	 *   h_load = weight * f(tg)
541 	 *
542 	 * Where f(tg) is the recursive weight fraction assigned to
543 	 * this group.
544 	 */
545 	unsigned long		h_load;
546 	u64			last_h_load_update;
547 	struct sched_entity	*h_load_next;
548 #endif /* CONFIG_FAIR_GROUP_SCHED */
549 #endif /* CONFIG_SMP */
550 
551 #ifdef CONFIG_FAIR_GROUP_SCHED
552 	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */
553 
554 	/*
555 	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
556 	 * a hierarchy). Non-leaf cfs_rqs hold other higher schedulable entities
557 	 * (like users, containers etc.)
558 	 *
559 	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
560 	 * This list is used during load balance.
561 	 */
562 	int			on_list;
563 	struct list_head	leaf_cfs_rq_list;
564 	struct task_group	*tg;	/* group that "owns" this runqueue */
565 
566 #ifdef CONFIG_CFS_BANDWIDTH
567 	int			runtime_enabled;
568 	s64			runtime_remaining;
569 
570 	u64			throttled_clock;
571 	u64			throttled_clock_task;
572 	u64			throttled_clock_task_time;
573 	int			throttled;
574 	int			throttle_count;
575 	struct list_head	throttled_list;
576 #endif /* CONFIG_CFS_BANDWIDTH */
577 #endif /* CONFIG_FAIR_GROUP_SCHED */
578 };
579 
580 static inline int rt_bandwidth_enabled(void)
581 {
582 	return sysctl_sched_rt_runtime >= 0;
583 }
584 
585 /* RT IPI pull logic requires IRQ_WORK */
586 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
587 # define HAVE_RT_PUSH_IPI
588 #endif
589 
590 /* Real-Time classes' related field in a runqueue: */
591 struct rt_rq {
592 	struct rt_prio_array	active;
593 	unsigned int		rt_nr_running;
594 	unsigned int		rr_nr_running;
595 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
596 	struct {
597 		int		curr; /* highest queued rt task prio */
598 #ifdef CONFIG_SMP
599 		int		next; /* next highest */
600 #endif
601 	} highest_prio;
602 #endif
603 #ifdef CONFIG_SMP
604 	unsigned long		rt_nr_migratory;
605 	unsigned long		rt_nr_total;
606 	int			overloaded;
607 	struct plist_head	pushable_tasks;
608 
609 #endif /* CONFIG_SMP */
610 	int			rt_queued;
611 
612 	int			rt_throttled;
613 	u64			rt_time;
614 	u64			rt_runtime;
615 	/* Nests inside the rq lock: */
616 	raw_spinlock_t		rt_runtime_lock;
617 
618 #ifdef CONFIG_RT_GROUP_SCHED
619 	unsigned long		rt_nr_boosted;
620 
621 	struct rq		*rq;
622 	struct task_group	*tg;
623 #endif
624 };
625 
626 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
627 {
628 	return rt_rq->rt_queued && rt_rq->rt_nr_running;
629 }
630 
631 /* Deadline class' related fields in a runqueue */
632 struct dl_rq {
633 	/* runqueue is an rbtree, ordered by deadline */
634 	struct rb_root_cached	root;
635 
636 	unsigned long		dl_nr_running;
637 
638 #ifdef CONFIG_SMP
639 	/*
640 	 * Deadline values of the currently executing and the
641 	 * earliest ready task on this rq. Caching these facilitates
642 	 * the decision whether or not a ready but not running task
643 	 * should migrate somewhere else.
644 	 */
645 	struct {
646 		u64		curr;
647 		u64		next;
648 	} earliest_dl;
649 
650 	unsigned long		dl_nr_migratory;
651 	int			overloaded;
652 
653 	/*
654 	 * Tasks on this rq that can be pushed away. They are kept in
655 	 * an rb-tree, ordered by tasks' deadlines, with caching
656 	 * of the leftmost (earliest deadline) element.
657 	 */
658 	struct rb_root_cached	pushable_dl_tasks_root;
659 #else
660 	struct dl_bw		dl_bw;
661 #endif
662 	/*
663 	 * "Active utilization" for this runqueue: increased when a
664 	 * task wakes up (becomes TASK_RUNNING) and decreased when a
665 	 * task blocks
666 	 */
667 	u64			running_bw;
668 
669 	/*
670 	 * Utilization of the tasks "assigned" to this runqueue (including
671 	 * the tasks that are in runqueue and the tasks that executed on this
672 	 * CPU and blocked). Increased when a task moves to this runqueue, and
673 	 * decreased when the task moves away (migrates, changes scheduling
674 	 * policy, or terminates).
675 	 * This is needed to compute the "inactive utilization" for the
676 	 * runqueue (inactive utilization = this_bw - running_bw).
677 	 */
678 	u64			this_bw;
679 	u64			extra_bw;
680 
681 	/*
682 	 * Inverse of the fraction of CPU utilization that can be reclaimed
683 	 * by the GRUB algorithm.
684 	 */
685 	u64			bw_ratio;
686 };
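
/*
 * Worked example (sketch): with two -deadline tasks of 25% bandwidth each
 * assigned to this runqueue, this_bw is 0.5 in BW_UNIT fixed point. If only
 * one of them is currently enqueued, running_bw is 0.25 and the inactive
 * utilization available for GRUB reclaiming is this_bw - running_bw = 0.25.
 */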
687 
688 #ifdef CONFIG_FAIR_GROUP_SCHED
689 /* An entity is a task if it doesn't "own" a runqueue */
690 #define entity_is_task(se)	(!se->my_q)
691 #else
692 #define entity_is_task(se)	1
693 #endif
694 
695 #ifdef CONFIG_SMP
696 /*
697  * XXX we want to get rid of these helpers and use the full load resolution.
698  */
699 static inline long se_weight(struct sched_entity *se)
700 {
701 	return scale_load_down(se->load.weight);
702 }
703 
704 static inline long se_runnable(struct sched_entity *se)
705 {
706 	return scale_load_down(se->runnable_weight);
707 }
708 
709 static inline bool sched_asym_prefer(int a, int b)
710 {
711 	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
712 }
713 
714 struct perf_domain {
715 	struct em_perf_domain *em_pd;
716 	struct perf_domain *next;
717 	struct rcu_head rcu;
718 };
719 
720 /* Scheduling group status flags */
721 #define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
722 #define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */
723 
724 /*
725  * We add the notion of a root-domain which will be used to define per-domain
726  * variables. Each exclusive cpuset essentially defines an island domain by
727  * fully partitioning the member CPUs from any other cpuset. Whenever a new
728  * exclusive cpuset is created, we also create and attach a new root-domain
729  * object.
730  *
731  */
732 struct root_domain {
733 	atomic_t		refcount;
734 	atomic_t		rto_count;
735 	struct rcu_head		rcu;
736 	cpumask_var_t		span;
737 	cpumask_var_t		online;
738 
739 	/*
740 	 * Indicate pullable load on at least one CPU, e.g.:
741 	 * - More than one runnable task
742 	 * - Running task is misfit
743 	 */
744 	int			overload;
745 
746 	/* Indicate one or more CPUs over-utilized (tipping point) */
747 	int			overutilized;
748 
749 	/*
750 	 * The bit corresponding to a CPU gets set here if such CPU has more
751 	 * than one runnable -deadline task (as it is below for RT tasks).
752 	 */
753 	cpumask_var_t		dlo_mask;
754 	atomic_t		dlo_count;
755 	struct dl_bw		dl_bw;
756 	struct cpudl		cpudl;
757 
758 #ifdef HAVE_RT_PUSH_IPI
759 	/*
760 	 * For IPI pull requests, loop across the rto_mask.
761 	 */
762 	struct irq_work		rto_push_work;
763 	raw_spinlock_t		rto_lock;
764 	/* These are only updated and read within rto_lock */
765 	int			rto_loop;
766 	int			rto_cpu;
767 	/* These atomics are updated outside of a lock */
768 	atomic_t		rto_loop_next;
769 	atomic_t		rto_loop_start;
770 #endif
771 	/*
772 	 * The "RT overload" flag: it gets set if a CPU has more than
773 	 * one runnable RT task.
774 	 */
775 	cpumask_var_t		rto_mask;
776 	struct cpupri		cpupri;
777 
778 	unsigned long		max_cpu_capacity;
779 
780 	/*
781 	 * NULL-terminated list of performance domains intersecting with the
782 	 * CPUs of the rd. Protected by RCU.
783 	 */
784 	struct perf_domain __rcu *pd;
785 };
786 
787 extern void init_defrootdomain(void);
788 extern int sched_init_domains(const struct cpumask *cpu_map);
789 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
790 extern void sched_get_rd(struct root_domain *rd);
791 extern void sched_put_rd(struct root_domain *rd);
792 
793 #ifdef HAVE_RT_PUSH_IPI
794 extern void rto_push_irq_work_func(struct irq_work *work);
795 #endif
796 #endif /* CONFIG_SMP */
797 
798 #ifdef CONFIG_UCLAMP_TASK
799 /*
800  * struct uclamp_bucket - Utilization clamp bucket
801  * @value: utilization clamp value for tasks on this clamp bucket
802  * @tasks: number of RUNNABLE tasks on this clamp bucket
803  *
804  * Keep track of how many tasks are RUNNABLE for a given utilization
805  * clamp value.
806  */
807 struct uclamp_bucket {
808 	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
809 	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
810 };
811 
812 /*
813  * struct uclamp_rq - rq's utilization clamp
814  * @value: currently active clamp values for a rq
815  * @bucket: utilization clamp buckets affecting a rq
816  *
817  * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
818  * A clamp value is affecting a rq when there is at least one task RUNNABLE
819  * (or actually running) with that value.
820  *
821  * There are up to UCLAMP_CNT possible different clamp values, currently there
822  * are only two: minimum utilization and maximum utilization.
823  *
824  * All utilization clamping values are MAX aggregated, since:
825  * - for util_min: we want to run the CPU at least at the max of the minimum
826  *   utilization required by its currently RUNNABLE tasks.
827  * - for util_max: we want to allow the CPU to run up to the max of the
828  *   maximum utilization allowed by its currently RUNNABLE tasks.
829  *
830  * Since on each system we expect only a limited number of different
831  * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
832  * the metrics required to compute all the per-rq utilization clamp values.
833  */
834 struct uclamp_rq {
835 	unsigned int value;
836 	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
837 };
838 #endif /* CONFIG_UCLAMP_TASK */
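
/*
 * Aggregation example (sketch): with three RUNNABLE tasks whose effective
 * UCLAMP_MIN values are 0, 200 and 512, the rq's uclamp[UCLAMP_MIN].value is
 * max(0, 200, 512) == 512; it can only drop once the last task counted in
 * the 512 bucket dequeues.
 */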
839 
840 /*
841  * This is the main, per-CPU runqueue data structure.
842  *
843  * Locking rule: code paths that need to lock multiple runqueues
844  * (such as the load balancing or the thread migration code) must
845  * acquire the locks in ascending runqueue-address order.
846  */
847 struct rq {
848 	/* runqueue lock: */
849 	raw_spinlock_t		lock;
850 
851 	/*
852 	 * nr_running and cpu_load should be in the same cacheline because
853 	 * remote CPUs use both these fields when doing load calculation.
854 	 */
855 	unsigned int		nr_running;
856 #ifdef CONFIG_NUMA_BALANCING
857 	unsigned int		nr_numa_running;
858 	unsigned int		nr_preferred_running;
859 	unsigned int		numa_migrate_on;
860 #endif
861 #ifdef CONFIG_NO_HZ_COMMON
862 #ifdef CONFIG_SMP
863 	unsigned long		last_load_update_tick;
864 	unsigned long		last_blocked_load_update_tick;
865 	unsigned int		has_blocked_load;
866 #endif /* CONFIG_SMP */
867 	unsigned int		nohz_tick_stopped;
868 	atomic_t		nohz_flags;
869 #endif /* CONFIG_NO_HZ_COMMON */
870 
871 	unsigned long		nr_load_updates;
872 	u64			nr_switches;
873 
874 #ifdef CONFIG_UCLAMP_TASK
875 	/* Utilization clamp values based on CPU's RUNNABLE tasks */
876 	struct uclamp_rq	uclamp[UCLAMP_CNT] ____cacheline_aligned;
877 	unsigned int		uclamp_flags;
878 #define UCLAMP_FLAG_IDLE 0x01
879 #endif
880 
881 	struct cfs_rq		cfs;
882 	struct rt_rq		rt;
883 	struct dl_rq		dl;
884 
885 #ifdef CONFIG_FAIR_GROUP_SCHED
886 	/* list of leaf cfs_rq on this CPU: */
887 	struct list_head	leaf_cfs_rq_list;
888 	struct list_head	*tmp_alone_branch;
889 #endif /* CONFIG_FAIR_GROUP_SCHED */
890 
891 	/*
892 	 * This is part of a global counter where only the total sum
893 	 * over all CPUs matters. A task can increase this counter on
894 	 * one CPU and if it got migrated afterwards it may decrease
895 	 * it on another CPU. Always updated under the runqueue lock:
896 	 */
897 	unsigned long		nr_uninterruptible;
898 
899 	struct task_struct	*curr;
900 	struct task_struct	*idle;
901 	struct task_struct	*stop;
902 	unsigned long		next_balance;
903 	struct mm_struct	*prev_mm;
904 
905 	unsigned int		clock_update_flags;
906 	u64			clock;
907 	/* Ensure that all clocks are in the same cache line */
908 	u64			clock_task ____cacheline_aligned;
909 	u64			clock_pelt;
910 	unsigned long		lost_idle_time;
911 
912 	atomic_t		nr_iowait;
913 
914 #ifdef CONFIG_MEMBARRIER
915 	int			membarrier_state;
916 #endif
917 
918 #ifdef CONFIG_SMP
919 	struct root_domain		*rd;
920 	struct sched_domain __rcu	*sd;
921 
922 	unsigned long		cpu_capacity;
923 	unsigned long		cpu_capacity_orig;
924 
925 	struct callback_head	*balance_callback;
926 
927 	unsigned char		idle_balance;
928 
929 	unsigned long		misfit_task_load;
930 
931 	/* For active balancing */
932 	int			active_balance;
933 	int			push_cpu;
934 	struct cpu_stop_work	active_balance_work;
935 
936 	/* CPU of this runqueue: */
937 	int			cpu;
938 	int			online;
939 
940 	struct list_head cfs_tasks;
941 
942 	struct sched_avg	avg_rt;
943 	struct sched_avg	avg_dl;
944 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
945 	struct sched_avg	avg_irq;
946 #endif
947 	u64			idle_stamp;
948 	u64			avg_idle;
949 
950 	/* This is used to determine avg_idle's max value */
951 	u64			max_idle_balance_cost;
952 #endif
953 
954 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
955 	u64			prev_irq_time;
956 #endif
957 #ifdef CONFIG_PARAVIRT
958 	u64			prev_steal_time;
959 #endif
960 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
961 	u64			prev_steal_time_rq;
962 #endif
963 
964 	/* calc_load related fields */
965 	unsigned long		calc_load_update;
966 	long			calc_load_active;
967 
968 #ifdef CONFIG_SCHED_HRTICK
969 #ifdef CONFIG_SMP
970 	int			hrtick_csd_pending;
971 	call_single_data_t	hrtick_csd;
972 #endif
973 	struct hrtimer		hrtick_timer;
974 #endif
975 
976 #ifdef CONFIG_SCHEDSTATS
977 	/* latency stats */
978 	struct sched_info	rq_sched_info;
979 	unsigned long long	rq_cpu_time;
980 	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
981 
982 	/* sys_sched_yield() stats */
983 	unsigned int		yld_count;
984 
985 	/* schedule() stats */
986 	unsigned int		sched_count;
987 	unsigned int		sched_goidle;
988 
989 	/* try_to_wake_up() stats */
990 	unsigned int		ttwu_count;
991 	unsigned int		ttwu_local;
992 #endif
993 
994 #ifdef CONFIG_SMP
995 	struct llist_head	wake_list;
996 #endif
997 
998 #ifdef CONFIG_CPU_IDLE
999 	/* Must be inspected within an RCU lock section */
1000 	struct cpuidle_state	*idle_state;
1001 #endif
1002 };
1003 
1004 #ifdef CONFIG_FAIR_GROUP_SCHED
1005 
1006 /* CPU runqueue to which this cfs_rq is attached */
1007 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1008 {
1009 	return cfs_rq->rq;
1010 }
1011 
1012 #else
1013 
1014 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1015 {
1016 	return container_of(cfs_rq, struct rq, cfs);
1017 }
1018 #endif
1019 
1020 static inline int cpu_of(struct rq *rq)
1021 {
1022 #ifdef CONFIG_SMP
1023 	return rq->cpu;
1024 #else
1025 	return 0;
1026 #endif
1027 }
1028 
1029 
1030 #ifdef CONFIG_SCHED_SMT
1031 extern void __update_idle_core(struct rq *rq);
1032 
1033 static inline void update_idle_core(struct rq *rq)
1034 {
1035 	if (static_branch_unlikely(&sched_smt_present))
1036 		__update_idle_core(rq);
1037 }
1038 
1039 #else
1040 static inline void update_idle_core(struct rq *rq) { }
1041 #endif
1042 
1043 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1044 
1045 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
1046 #define this_rq()		this_cpu_ptr(&runqueues)
1047 #define task_rq(p)		cpu_rq(task_cpu(p))
1048 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
1049 #define raw_rq()		raw_cpu_ptr(&runqueues)
1050 
1051 extern void update_rq_clock(struct rq *rq);
1052 
1053 static inline u64 __rq_clock_broken(struct rq *rq)
1054 {
1055 	return READ_ONCE(rq->clock);
1056 }
1057 
1058 /*
1059  * rq::clock_update_flags bits
1060  *
1061  * %RQCF_REQ_SKIP - will request skipping of clock update on the next
1062  *  call to __schedule(). This is an optimisation to avoid
1063  *  neighbouring rq clock updates.
1064  *
1065  * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
1066  *  in effect and calls to update_rq_clock() are being ignored.
1067  *
1068  * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
1069  *  made to update_rq_clock() since the last time rq::lock was pinned.
1070  *
1071  * If inside of __schedule(), clock_update_flags will have been
1072  * shifted left (a left shift is a cheap operation for the fast path
1073  * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
1074  *
1075  *	if (rq->clock_update_flags >= RQCF_UPDATED)
1076  *
1077  * to check if %RQCF_UPDATED is set. It'll never be shifted more than
1078  * one position though, because the next rq_unpin_lock() will shift it
1079  * back.
1080  */
1081 #define RQCF_REQ_SKIP		0x01
1082 #define RQCF_ACT_SKIP		0x02
1083 #define RQCF_UPDATED		0x04
1084 
1085 static inline void assert_clock_updated(struct rq *rq)
1086 {
1087 	/*
1088 	 * The only reason for not seeing a clock update since the
1089 	 * last rq_pin_lock() is if we're currently skipping updates.
1090 	 */
1091 	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
1092 }
1093 
1094 static inline u64 rq_clock(struct rq *rq)
1095 {
1096 	lockdep_assert_held(&rq->lock);
1097 	assert_clock_updated(rq);
1098 
1099 	return rq->clock;
1100 }
1101 
1102 static inline u64 rq_clock_task(struct rq *rq)
1103 {
1104 	lockdep_assert_held(&rq->lock);
1105 	assert_clock_updated(rq);
1106 
1107 	return rq->clock_task;
1108 }
1109 
1110 static inline void rq_clock_skip_update(struct rq *rq)
1111 {
1112 	lockdep_assert_held(&rq->lock);
1113 	rq->clock_update_flags |= RQCF_REQ_SKIP;
1114 }
1115 
1116 /*
1117  * See rt task throttling, which is the only time a skip
1118  * request is cancelled.
1119  */
1120 static inline void rq_clock_cancel_skipupdate(struct rq *rq)
1121 {
1122 	lockdep_assert_held(&rq->lock);
1123 	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
1124 }
1125 
1126 struct rq_flags {
1127 	unsigned long flags;
1128 	struct pin_cookie cookie;
1129 #ifdef CONFIG_SCHED_DEBUG
1130 	/*
1131 	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
1132 	 * current pin context is stashed here in case it needs to be
1133 	 * restored in rq_repin_lock().
1134 	 */
1135 	unsigned int clock_update_flags;
1136 #endif
1137 };
1138 
1139 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1140 {
1141 	rf->cookie = lockdep_pin_lock(&rq->lock);
1142 
1143 #ifdef CONFIG_SCHED_DEBUG
1144 	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1145 	rf->clock_update_flags = 0;
1146 #endif
1147 }
1148 
1149 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1150 {
1151 #ifdef CONFIG_SCHED_DEBUG
1152 	if (rq->clock_update_flags > RQCF_ACT_SKIP)
1153 		rf->clock_update_flags = RQCF_UPDATED;
1154 #endif
1155 
1156 	lockdep_unpin_lock(&rq->lock, rf->cookie);
1157 }
1158 
1159 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1160 {
1161 	lockdep_repin_lock(&rq->lock, rf->cookie);
1162 
1163 #ifdef CONFIG_SCHED_DEBUG
1164 	/*
1165 	 * Restore the value we stashed in @rf for this pin context.
1166 	 */
1167 	rq->clock_update_flags |= rf->clock_update_flags;
1168 #endif
1169 }
1170 
1171 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1172 	__acquires(rq->lock);
1173 
1174 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1175 	__acquires(p->pi_lock)
1176 	__acquires(rq->lock);
1177 
1178 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1179 	__releases(rq->lock)
1180 {
1181 	rq_unpin_lock(rq, rf);
1182 	raw_spin_unlock(&rq->lock);
1183 }
1184 
1185 static inline void
1186 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1187 	__releases(rq->lock)
1188 	__releases(p->pi_lock)
1189 {
1190 	rq_unpin_lock(rq, rf);
1191 	raw_spin_unlock(&rq->lock);
1192 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1193 }
1194 
1195 static inline void
1196 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1197 	__acquires(rq->lock)
1198 {
1199 	raw_spin_lock_irqsave(&rq->lock, rf->flags);
1200 	rq_pin_lock(rq, rf);
1201 }
1202 
1203 static inline void
1204 rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1205 	__acquires(rq->lock)
1206 {
1207 	raw_spin_lock_irq(&rq->lock);
1208 	rq_pin_lock(rq, rf);
1209 }
1210 
1211 static inline void
1212 rq_lock(struct rq *rq, struct rq_flags *rf)
1213 	__acquires(rq->lock)
1214 {
1215 	raw_spin_lock(&rq->lock);
1216 	rq_pin_lock(rq, rf);
1217 }
1218 
1219 static inline void
1220 rq_relock(struct rq *rq, struct rq_flags *rf)
1221 	__acquires(rq->lock)
1222 {
1223 	raw_spin_lock(&rq->lock);
1224 	rq_repin_lock(rq, rf);
1225 }
1226 
1227 static inline void
1228 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1229 	__releases(rq->lock)
1230 {
1231 	rq_unpin_lock(rq, rf);
1232 	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1233 }
1234 
1235 static inline void
1236 rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1237 	__releases(rq->lock)
1238 {
1239 	rq_unpin_lock(rq, rf);
1240 	raw_spin_unlock_irq(&rq->lock);
1241 }
1242 
1243 static inline void
1244 rq_unlock(struct rq *rq, struct rq_flags *rf)
1245 	__releases(rq->lock)
1246 {
1247 	rq_unpin_lock(rq, rf);
1248 	raw_spin_unlock(&rq->lock);
1249 }
1250 
1251 static inline struct rq *
1252 this_rq_lock_irq(struct rq_flags *rf)
1253 	__acquires(rq->lock)
1254 {
1255 	struct rq *rq;
1256 
1257 	local_irq_disable();
1258 	rq = this_rq();
1259 	rq_lock(rq, rf);
1260 	return rq;
1261 }
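
/*
 * Typical usage sketch for the rq locking helpers above (illustrative):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	update_rq_clock(rq);
 *	...
 *	task_rq_unlock(rq, p, &rf);
 *
 * The rq_flags cookie carries the saved IRQ flags and the lockdep pin state,
 * so the lock and unlock calls must use the same rq_flags instance.
 */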
1262 
1263 #ifdef CONFIG_NUMA
1264 enum numa_topology_type {
1265 	NUMA_DIRECT,
1266 	NUMA_GLUELESS_MESH,
1267 	NUMA_BACKPLANE,
1268 };
1269 extern enum numa_topology_type sched_numa_topology_type;
1270 extern int sched_max_numa_distance;
1271 extern bool find_numa_distance(int distance);
1272 extern void sched_init_numa(void);
1273 extern void sched_domains_numa_masks_set(unsigned int cpu);
1274 extern void sched_domains_numa_masks_clear(unsigned int cpu);
1275 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
1276 #else
1277 static inline void sched_init_numa(void) { }
1278 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1279 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1280 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1281 {
1282 	return nr_cpu_ids;
1283 }
1284 #endif
1285 
1286 #ifdef CONFIG_NUMA_BALANCING
1287 /* The regions in numa_faults array from task_struct */
1288 enum numa_faults_stats {
1289 	NUMA_MEM = 0,
1290 	NUMA_CPU,
1291 	NUMA_MEMBUF,
1292 	NUMA_CPUBUF
1293 };
1294 extern void sched_setnuma(struct task_struct *p, int node);
1295 extern int migrate_task_to(struct task_struct *p, int cpu);
1296 extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1297 			int cpu, int scpu);
1298 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
1299 #else
1300 static inline void
1301 init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
1302 {
1303 }
1304 #endif /* CONFIG_NUMA_BALANCING */
1305 
1306 #ifdef CONFIG_SMP
1307 
1308 static inline void
1309 queue_balance_callback(struct rq *rq,
1310 		       struct callback_head *head,
1311 		       void (*func)(struct rq *rq))
1312 {
1313 	lockdep_assert_held(&rq->lock);
1314 
1315 	if (unlikely(head->next))
1316 		return;
1317 
1318 	head->func = (void (*)(struct callback_head *))func;
1319 	head->next = rq->balance_callback;
1320 	rq->balance_callback = head;
1321 }
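
/*
 * Usage sketch (push_head and push_fn are illustrative stand-ins for a
 * class' own per-CPU callback head and handler, as used by rt.c/deadline.c):
 *
 *	queue_balance_callback(rq, &per_cpu(push_head, rq->cpu), push_fn);
 *
 * queued while holding rq->lock; the callback runs when the lock is about to
 * be released, and re-queueing an already-queued head is a no-op.
 */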
1322 
1323 extern void sched_ttwu_pending(void);
1324 
1325 #define rcu_dereference_check_sched_domain(p) \
1326 	rcu_dereference_check((p), \
1327 			      lockdep_is_held(&sched_domains_mutex))
1328 
1329 /*
1330  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1331  * See destroy_sched_domains: call_rcu for details.
1332  *
1333  * The domain tree of any CPU may only be accessed from within
1334  * preempt-disabled sections.
1335  */
1336 #define for_each_domain(cpu, __sd) \
1337 	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1338 			__sd; __sd = __sd->parent)
1339 
1340 #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
1341 
1342 /**
1343  * highest_flag_domain - Return highest sched_domain containing flag.
1344  * @cpu:	The CPU whose highest level of sched domain is to
1345  *		be returned.
1346  * @flag:	The flag to check for the highest sched_domain
1347  *		for the given CPU.
1348  *
1349  * Returns the highest sched_domain of a CPU which contains the given flag.
1350  */
1351 static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1352 {
1353 	struct sched_domain *sd, *hsd = NULL;
1354 
1355 	for_each_domain(cpu, sd) {
1356 		if (!(sd->flags & flag))
1357 			break;
1358 		hsd = sd;
1359 	}
1360 
1361 	return hsd;
1362 }
1363 
1364 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1365 {
1366 	struct sched_domain *sd;
1367 
1368 	for_each_domain(cpu, sd) {
1369 		if (sd->flags & flag)
1370 			break;
1371 	}
1372 
1373 	return sd;
1374 }
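
/*
 * Example (sketch): topology code uses highest_flag_domain() to derive the
 * per-CPU sd_llc pointer below (the widest domain whose CPUs still share a
 * last-level cache). Readers look it up under RCU, e.g.:
 *
 *	struct sched_domain *sd = rcu_dereference(per_cpu(sd_llc, cpu));
 *
 * since sched domains are freed via call_rcu().
 */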
1375 
1376 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
1377 DECLARE_PER_CPU(int, sd_llc_size);
1378 DECLARE_PER_CPU(int, sd_llc_id);
1379 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
1380 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
1381 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
1382 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
1383 extern struct static_key_false sched_asym_cpucapacity;
1384 
1385 struct sched_group_capacity {
1386 	atomic_t		ref;
1387 	/*
1388 	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
1389 	 * for a single CPU.
1390 	 */
1391 	unsigned long		capacity;
1392 	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
1393 	unsigned long		max_capacity;		/* Max per-CPU capacity in group */
1394 	unsigned long		next_update;
1395 	int			imbalance;		/* XXX unrelated to capacity but shared group state */
1396 
1397 #ifdef CONFIG_SCHED_DEBUG
1398 	int			id;
1399 #endif
1400 
1401 	unsigned long		cpumask[0];		/* Balance mask */
1402 };
1403 
1404 struct sched_group {
1405 	struct sched_group	*next;			/* Must be a circular list */
1406 	atomic_t		ref;
1407 
1408 	unsigned int		group_weight;
1409 	struct sched_group_capacity *sgc;
1410 	int			asym_prefer_cpu;	/* CPU of highest priority in group */
1411 
1412 	/*
1413 	 * The CPUs this group covers.
1414 	 *
1415 	 * NOTE: this field is variable length. (Allocated dynamically
1416 	 * by attaching extra space to the end of the structure,
1417 	 * depending on how many CPUs the kernel has booted up with)
1418 	 */
1419 	unsigned long		cpumask[0];
1420 };
1421 
1422 static inline struct cpumask *sched_group_span(struct sched_group *sg)
1423 {
1424 	return to_cpumask(sg->cpumask);
1425 }
1426 
1427 /*
1428  * See build_balance_mask().
1429  */
1430 static inline struct cpumask *group_balance_mask(struct sched_group *sg)
1431 {
1432 	return to_cpumask(sg->sgc->cpumask);
1433 }
1434 
1435 /**
1436  * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
1437  * @group: The group whose first CPU is to be returned.
1438  */
1439 static inline unsigned int group_first_cpu(struct sched_group *group)
1440 {
1441 	return cpumask_first(sched_group_span(group));
1442 }
1443 
1444 extern int group_balance_cpu(struct sched_group *sg);
1445 
1446 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
1447 void register_sched_domain_sysctl(void);
1448 void dirty_sched_domain_sysctl(int cpu);
1449 void unregister_sched_domain_sysctl(void);
1450 #else
1451 static inline void register_sched_domain_sysctl(void)
1452 {
1453 }
1454 static inline void dirty_sched_domain_sysctl(int cpu)
1455 {
1456 }
1457 static inline void unregister_sched_domain_sysctl(void)
1458 {
1459 }
1460 #endif
1461 
1462 extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
1463 
1464 #else
1465 
1466 static inline void sched_ttwu_pending(void) { }
1467 
1468 static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }
1469 
1470 #endif /* CONFIG_SMP */
1471 
1472 #include "stats.h"
1473 #include "autogroup.h"
1474 
1475 #ifdef CONFIG_CGROUP_SCHED
1476 
1477 /*
1478  * Return the group to which this task belongs.
1479  *
1480  * We cannot use task_css() and friends because the cgroup subsystem
1481  * changes that value before the cgroup_subsys::attach() method is called,
1482  * therefore we cannot pin it and might observe the wrong value.
1483  *
1484  * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
1485  * core changes this before calling sched_move_task().
1486  *
1487  * Instead we use a 'copy' which is updated from sched_move_task() while
1488  * holding both task_struct::pi_lock and rq::lock.
1489  */
1490 static inline struct task_group *task_group(struct task_struct *p)
1491 {
1492 	return p->sched_task_group;
1493 }
1494 
1495 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
1496 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1497 {
1498 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1499 	struct task_group *tg = task_group(p);
1500 #endif
1501 
1502 #ifdef CONFIG_FAIR_GROUP_SCHED
1503 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1504 	p->se.cfs_rq = tg->cfs_rq[cpu];
1505 	p->se.parent = tg->se[cpu];
1506 #endif
1507 
1508 #ifdef CONFIG_RT_GROUP_SCHED
1509 	p->rt.rt_rq  = tg->rt_rq[cpu];
1510 	p->rt.parent = tg->rt_se[cpu];
1511 #endif
1512 }
1513 
1514 #else /* CONFIG_CGROUP_SCHED */
1515 
1516 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1517 static inline struct task_group *task_group(struct task_struct *p)
1518 {
1519 	return NULL;
1520 }
1521 
1522 #endif /* CONFIG_CGROUP_SCHED */
1523 
1524 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1525 {
1526 	set_task_rq(p, cpu);
1527 #ifdef CONFIG_SMP
1528 	/*
1529 	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1530 	 * successfully executed on another CPU. We must ensure that updates of
1531 	 * per-task data have been completed by this moment.
1532 	 */
1533 	smp_wmb();
1534 #ifdef CONFIG_THREAD_INFO_IN_TASK
1535 	WRITE_ONCE(p->cpu, cpu);
1536 #else
1537 	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
1538 #endif
1539 	p->wake_cpu = cpu;
1540 #endif
1541 }
1542 
1543 /*
1544  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1545  */
1546 #ifdef CONFIG_SCHED_DEBUG
1547 # include <linux/static_key.h>
1548 # define const_debug __read_mostly
1549 #else
1550 # define const_debug const
1551 #endif
1552 
1553 #define SCHED_FEAT(name, enabled)	\
1554 	__SCHED_FEAT_##name ,
1555 
1556 enum {
1557 #include "features.h"
1558 	__SCHED_FEAT_NR,
1559 };
1560 
1561 #undef SCHED_FEAT
1562 
1563 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
1564 
1565 /*
1566  * To support run-time toggling of sched features, all the translation units
1567  * (except core.c) reference the sysctl_sched_features defined in core.c.
1568  */
1569 extern const_debug unsigned int sysctl_sched_features;
1570 
1571 #define SCHED_FEAT(name, enabled)					\
1572 static __always_inline bool static_branch_##name(struct static_key *key) \
1573 {									\
1574 	return static_key_##enabled(key);				\
1575 }
1576 
1577 #include "features.h"
1578 #undef SCHED_FEAT
1579 
1580 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1581 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1582 
1583 #else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
1584 
1585 /*
1586  * Each translation unit has its own copy of sysctl_sched_features to allow
1587  * constant propagation at compile time and compiler optimization based on
1588  * the feature defaults.
1589  */
1590 #define SCHED_FEAT(name, enabled)	\
1591 	(1UL << __SCHED_FEAT_##name) * enabled |
1592 static const_debug __maybe_unused unsigned int sysctl_sched_features =
1593 #include "features.h"
1594 	0;
1595 #undef SCHED_FEAT
1596 
1597 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1598 
1599 #endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
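
/*
 * Usage sketch: sched_feat() tests a feature bit declared in features.h; on
 * SCHED_DEBUG + jump-label kernels the test is a runtime-patchable branch,
 * otherwise it folds to a compile-time constant, e.g.:
 *
 *	if (sched_feat(TTWU_QUEUE))
 *		... queue the wakeup on the remote CPU ...
 */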
1600 
1601 extern struct static_key_false sched_numa_balancing;
1602 extern struct static_key_false sched_schedstats;
1603 
1604 static inline u64 global_rt_period(void)
1605 {
1606 	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1607 }
1608 
1609 static inline u64 global_rt_runtime(void)
1610 {
1611 	if (sysctl_sched_rt_runtime < 0)
1612 		return RUNTIME_INF;
1613 
1614 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1615 }
1616 
1617 static inline int task_current(struct rq *rq, struct task_struct *p)
1618 {
1619 	return rq->curr == p;
1620 }
1621 
1622 static inline int task_running(struct rq *rq, struct task_struct *p)
1623 {
1624 #ifdef CONFIG_SMP
1625 	return p->on_cpu;
1626 #else
1627 	return task_current(rq, p);
1628 #endif
1629 }
1630 
1631 static inline int task_on_rq_queued(struct task_struct *p)
1632 {
1633 	return p->on_rq == TASK_ON_RQ_QUEUED;
1634 }
1635 
1636 static inline int task_on_rq_migrating(struct task_struct *p)
1637 {
1638 	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
1639 }
1640 
1641 /*
1642  * wake flags
1643  */
1644 #define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
1645 #define WF_FORK			0x02		/* Child wakeup after fork */
1646 #define WF_MIGRATED		0x4		/* Internal use, task got migrated */
1647 
1648 /*
1649  * To aid in avoiding the subversion of "niceness" due to uneven distribution
1650  * of tasks with abnormal "nice" values across CPUs the contribution that
1651  * of tasks with abnormal "nice" values across CPUs, the contribution that
1652  * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1653  * scaled version of the new time slice allocation that they receive on time
1654  * slice expiry etc.
1655  */
1656 
1657 #define WEIGHT_IDLEPRIO		3
1658 #define WMULT_IDLEPRIO		1431655765
1659 
1660 extern const int		sched_prio_to_weight[40];
1661 extern const u32		sched_prio_to_wmult[40];
1662 
1663 /*
1664  * {de,en}queue flags:
1665  *
1666  * DEQUEUE_SLEEP  - task is no longer runnable
1667  * ENQUEUE_WAKEUP - task just became runnable
1668  *
1669  * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1670  *                are in a known state which allows modification. Such pairs
1671  *                should preserve as much state as possible.
1672  *
1673  * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1674  *        in the runqueue.
1675  *
1676  * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
1677  * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
1678  * ENQUEUE_MIGRATED  - the task was migrated during wakeup
1679  *
1680  */
1681 
1682 #define DEQUEUE_SLEEP		0x01
1683 #define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
1684 #define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
1685 #define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */
1686 
1687 #define ENQUEUE_WAKEUP		0x01
1688 #define ENQUEUE_RESTORE		0x02
1689 #define ENQUEUE_MOVE		0x04
1690 #define ENQUEUE_NOCLOCK		0x08
1691 
1692 #define ENQUEUE_HEAD		0x10
1693 #define ENQUEUE_REPLENISH	0x20
1694 #ifdef CONFIG_SMP
1695 #define ENQUEUE_MIGRATED	0x40
1696 #else
1697 #define ENQUEUE_MIGRATED	0x00
1698 #endif
1699 
1700 #define RETRY_TASK		((void *)-1UL)
1701 
1702 struct sched_class {
1703 	const struct sched_class *next;
1704 
1705 #ifdef CONFIG_UCLAMP_TASK
1706 	int uclamp_enabled;
1707 #endif
1708 
1709 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1710 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1711 	void (*yield_task)   (struct rq *rq);
1712 	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1713 
1714 	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1715 
1716 	/*
1717 	 * Both @prev and @rf are optional and may be NULL, in which case the
1718 	 * caller must already have invoked put_prev_task(rq, prev, rf).
1719 	 *
1720 	 * Otherwise it is the responsibility of the pick_next_task() to call
1721 	 * put_prev_task() on the @prev task or something equivalent, IFF it
1722 	 * returns a next task.
1723 	 *
1724 	 * In that case (@rf != NULL) it may return RETRY_TASK when it finds a
1725 	 * higher prio class has runnable tasks.
1726 	 */
1727 	struct task_struct * (*pick_next_task)(struct rq *rq,
1728 					       struct task_struct *prev,
1729 					       struct rq_flags *rf);
1730 	void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
1731 	void (*set_next_task)(struct rq *rq, struct task_struct *p);
1732 
1733 #ifdef CONFIG_SMP
1734 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1735 	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
1736 
1737 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1738 
1739 	void (*set_cpus_allowed)(struct task_struct *p,
1740 				 const struct cpumask *newmask);
1741 
1742 	void (*rq_online)(struct rq *rq);
1743 	void (*rq_offline)(struct rq *rq);
1744 #endif
1745 
1746 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1747 	void (*task_fork)(struct task_struct *p);
1748 	void (*task_dead)(struct task_struct *p);
1749 
1750 	/*
1751 	 * The switched_from() call is allowed to drop rq->lock, therefore we
1752 	 * cannot assume the switched_from/switched_to pair is serialized by
1753 	 * rq->lock. They are however serialized by p->pi_lock.
1754 	 */
1755 	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1756 	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
1757 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1758 			      int oldprio);
1759 
1760 	unsigned int (*get_rr_interval)(struct rq *rq,
1761 					struct task_struct *task);
1762 
1763 	void (*update_curr)(struct rq *rq);
1764 
1765 #define TASK_SET_GROUP		0
1766 #define TASK_MOVE_GROUP		1
1767 
1768 #ifdef CONFIG_FAIR_GROUP_SCHED
1769 	void (*task_change_group)(struct task_struct *p, int type);
1770 #endif
1771 };
1772 
1773 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1774 {
1775 	WARN_ON_ONCE(rq->curr != prev);
1776 	prev->sched_class->put_prev_task(rq, prev, NULL);
1777 }
1778 
1779 static inline void set_next_task(struct rq *rq, struct task_struct *next)
1780 {
1781 	WARN_ON_ONCE(rq->curr != next);
1782 	next->sched_class->set_next_task(rq, next);
1783 }
1784 
1785 #ifdef CONFIG_SMP
1786 #define sched_class_highest (&stop_sched_class)
1787 #else
1788 #define sched_class_highest (&dl_sched_class)
1789 #endif
1790 #define for_each_class(class) \
1791    for (class = sched_class_highest; class; class = class->next)
1792 
1793 extern const struct sched_class stop_sched_class;
1794 extern const struct sched_class dl_sched_class;
1795 extern const struct sched_class rt_sched_class;
1796 extern const struct sched_class fair_sched_class;
1797 extern const struct sched_class idle_sched_class;
1798 
1799 
1800 #ifdef CONFIG_SMP
1801 
1802 extern void update_group_capacity(struct sched_domain *sd, int cpu);
1803 
1804 extern void trigger_load_balance(struct rq *rq);
1805 
1806 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1807 
1808 #endif
1809 
1810 #ifdef CONFIG_CPU_IDLE
1811 static inline void idle_set_state(struct rq *rq,
1812 				  struct cpuidle_state *idle_state)
1813 {
1814 	rq->idle_state = idle_state;
1815 }
1816 
1817 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1818 {
1819 	SCHED_WARN_ON(!rcu_read_lock_held());
1820 
1821 	return rq->idle_state;
1822 }
1823 #else
1824 static inline void idle_set_state(struct rq *rq,
1825 				  struct cpuidle_state *idle_state)
1826 {
1827 }
1828 
1829 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1830 {
1831 	return NULL;
1832 }
1833 #endif
1834 
1835 extern void schedule_idle(void);
1836 
1837 extern void sysrq_sched_debug_show(void);
1838 extern void sched_init_granularity(void);
1839 extern void update_max_interval(void);
1840 
1841 extern void init_sched_dl_class(void);
1842 extern void init_sched_rt_class(void);
1843 extern void init_sched_fair_class(void);
1844 
1845 extern void reweight_task(struct task_struct *p, int prio);
1846 
1847 extern void resched_curr(struct rq *rq);
1848 extern void resched_cpu(int cpu);
1849 
1850 extern struct rt_bandwidth def_rt_bandwidth;
1851 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1852 
1853 extern struct dl_bandwidth def_dl_bandwidth;
1854 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1855 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1856 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
1857 extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
1858 
1859 #define BW_SHIFT		20
1860 #define BW_UNIT			(1 << BW_SHIFT)
1861 #define RATIO_SHIFT		8
1862 unsigned long to_ratio(u64 period, u64 runtime);
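
/*
 * Illustrative note: to_ratio() returns runtime/period in BW_SHIFT fixed
 * point, with BW_UNIT representing 1.0. For example, runtime = 10ms over
 * period = 100ms gives (10 << BW_SHIFT) / 100 ~= 104857, i.e. about
 * BW_UNIT / 10.
 */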
1863 
1864 extern void init_entity_runnable_average(struct sched_entity *se);
1865 extern void post_init_entity_util_avg(struct task_struct *p);
1866 
1867 #ifdef CONFIG_NO_HZ_FULL
1868 extern bool sched_can_stop_tick(struct rq *rq);
1869 extern int __init sched_tick_offload_init(void);
1870 
1871 /*
1872  * The tick may be needed by tasks in the runqueue depending on their policy
1873  * and requirements. If it is, send the target CPU an IPI to kick it out of
1874  * nohz mode if necessary.
1875  */
1876 static inline void sched_update_tick_dependency(struct rq *rq)
1877 {
1878 	int cpu;
1879 
1880 	if (!tick_nohz_full_enabled())
1881 		return;
1882 
1883 	cpu = cpu_of(rq);
1884 
1885 	if (!tick_nohz_full_cpu(cpu))
1886 		return;
1887 
1888 	if (sched_can_stop_tick(rq))
1889 		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1890 	else
1891 		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1892 }
1893 #else
1894 static inline int sched_tick_offload_init(void) { return 0; }
1895 static inline void sched_update_tick_dependency(struct rq *rq) { }
1896 #endif
1897 
1898 static inline void add_nr_running(struct rq *rq, unsigned count)
1899 {
1900 	unsigned prev_nr = rq->nr_running;
1901 
1902 	rq->nr_running = prev_nr + count;
1903 
1904 #ifdef CONFIG_SMP
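	/*
	 * Going from one runnable task to two or more marks the root domain
	 * overloaded, hinting to idle CPUs that there may be work to pull.
	 * Test before writing to avoid needlessly dirtying the shared
	 * cacheline when the flag is already set.
	 */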
1905 	if (prev_nr < 2 && rq->nr_running >= 2) {
1906 		if (!READ_ONCE(rq->rd->overload))
1907 			WRITE_ONCE(rq->rd->overload, 1);
1908 	}
1909 #endif
1910 
1911 	sched_update_tick_dependency(rq);
1912 }
1913 
1914 static inline void sub_nr_running(struct rq *rq, unsigned count)
1915 {
1916 	rq->nr_running -= count;
1917 	/* Check if the tick is still needed, e.g. for preemption between tasks */
1918 	sched_update_tick_dependency(rq);
1919 }
1920 
1921 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1922 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1923 
1924 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1925 
1926 extern const_debug unsigned int sysctl_sched_nr_migrate;
1927 extern const_debug unsigned int sysctl_sched_migration_cost;
1928 
1929 #ifdef CONFIG_SCHED_HRTICK
1930 
1931 /*
1932  * Use hrtick when:
1933  *  - enabled by features
1934  *  - hrtimer is actually high res
1935  */
1936 static inline int hrtick_enabled(struct rq *rq)
1937 {
1938 	if (!sched_feat(HRTICK))
1939 		return 0;
1940 	if (!cpu_active(cpu_of(rq)))
1941 		return 0;
1942 	return hrtimer_is_hres_active(&rq->hrtick_timer);
1943 }
1944 
1945 void hrtick_start(struct rq *rq, u64 delay);
1946 
1947 #else
1948 
1949 static inline int hrtick_enabled(struct rq *rq)
1950 {
1951 	return 0;
1952 }
1953 
1954 #endif /* CONFIG_SCHED_HRTICK */
1955 
1956 #ifndef arch_scale_freq_capacity
1957 static __always_inline
1958 unsigned long arch_scale_freq_capacity(int cpu)
1959 {
1960 	return SCHED_CAPACITY_SCALE;
1961 }
1962 #endif
1963 
1964 #ifdef CONFIG_SMP
1965 #ifdef CONFIG_PREEMPTION
1966 
1967 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1968 
1969 /*
1970  * fair double_lock_balance: Safely acquires both rq->locks in a fair
1971  * way at the expense of forcing extra atomic operations in all
1972  * invocations.  This assures that the double_lock is acquired using the
1973  * same underlying policy as the spinlock_t on this architecture, which
1974  * reduces latency compared to the unfair variant below.  However, it
1975  * also adds more overhead and therefore may reduce throughput.
1976  */
1977 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1978 	__releases(this_rq->lock)
1979 	__acquires(busiest->lock)
1980 	__acquires(this_rq->lock)
1981 {
1982 	raw_spin_unlock(&this_rq->lock);
1983 	double_rq_lock(this_rq, busiest);
1984 
1985 	return 1;
1986 }
1987 
1988 #else
1989 /*
1990  * Unfair double_lock_balance: Optimizes throughput at the expense of
1991  * latency by eliminating extra atomic operations when the locks are
1992  * already in proper order on entry.  This favors lower CPU-ids and will
1993  * grant the double lock to lower CPUs over higher ids under contention,
1994  * regardless of entry order into the function.
1995  */
1996 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1997 	__releases(this_rq->lock)
1998 	__acquires(busiest->lock)
1999 	__acquires(this_rq->lock)
2000 {
2001 	int ret = 0;
2002 
2003 	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
2004 		if (busiest < this_rq) {
2005 			raw_spin_unlock(&this_rq->lock);
2006 			raw_spin_lock(&busiest->lock);
2007 			raw_spin_lock_nested(&this_rq->lock,
2008 					      SINGLE_DEPTH_NESTING);
2009 			ret = 1;
2010 		} else
2011 			raw_spin_lock_nested(&busiest->lock,
2012 					      SINGLE_DEPTH_NESTING);
2013 	}
2014 	return ret;
2015 }
2016 
2017 #endif /* CONFIG_PREEMPTION */
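
/*
 * Illustrative only: the address/CPU-id ordering used above is what avoids
 * an ABBA deadlock. Without it, two CPUs doing
 *
 *	CPU0: holds rq0->lock, wants rq1->lock
 *	CPU1: holds rq1->lock, wants rq0->lock
 *
 * could block each other forever; always acquiring the lower-address lock
 * first makes both sides agree on a single acquisition order.
 */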
2018 
2019 /*
2020  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2021  */
2022 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2023 {
2024 	if (unlikely(!irqs_disabled())) {
2025 		/* printk() doesn't work well under rq->lock */
2026 		raw_spin_unlock(&this_rq->lock);
2027 		BUG_ON(1);
2028 	}
2029 
2030 	return _double_lock_balance(this_rq, busiest);
2031 }
2032 
2033 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2034 	__releases(busiest->lock)
2035 {
2036 	raw_spin_unlock(&busiest->lock);
2037 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
2038 }
2039 
2040 static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
2041 {
2042 	if (l1 > l2)
2043 		swap(l1, l2);
2044 
2045 	spin_lock(l1);
2046 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2047 }
2048 
2049 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
2050 {
2051 	if (l1 > l2)
2052 		swap(l1, l2);
2053 
2054 	spin_lock_irq(l1);
2055 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2056 }
2057 
2058 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
2059 {
2060 	if (l1 > l2)
2061 		swap(l1, l2);
2062 
2063 	raw_spin_lock(l1);
2064 	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2065 }
2066 
2067 /*
2068  * double_rq_lock - safely lock two runqueues
2069  *
2070  * Note this does not disable interrupts like task_rq_lock;
2071  * you need to do so manually before calling.
2072  */
2073 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2074 	__acquires(rq1->lock)
2075 	__acquires(rq2->lock)
2076 {
2077 	BUG_ON(!irqs_disabled());
2078 	if (rq1 == rq2) {
2079 		raw_spin_lock(&rq1->lock);
2080 		__acquire(rq2->lock);	/* Fake it out ;) */
2081 	} else {
2082 		if (rq1 < rq2) {
2083 			raw_spin_lock(&rq1->lock);
2084 			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
2085 		} else {
2086 			raw_spin_lock(&rq2->lock);
2087 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
2088 		}
2089 	}
2090 }
2091 
2092 /*
2093  * double_rq_unlock - safely unlock two runqueues
2094  *
2095  * Note this does not restore interrupts like task_rq_unlock;
2096  * you need to do so manually after calling.
2097  */
2098 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2099 	__releases(rq1->lock)
2100 	__releases(rq2->lock)
2101 {
2102 	raw_spin_unlock(&rq1->lock);
2103 	if (rq1 != rq2)
2104 		raw_spin_unlock(&rq2->lock);
2105 	else
2106 		__release(rq2->lock);
2107 }
2108 
2109 extern void set_rq_online (struct rq *rq);
2110 extern void set_rq_offline(struct rq *rq);
2111 extern bool sched_smp_initialized;
2112 
2113 #else /* CONFIG_SMP */
2114 
2115 /*
2116  * double_rq_lock - safely lock two runqueues
2117  *
2118  * Note this does not disable interrupts like task_rq_lock;
2119  * you need to do so manually before calling.
2120  */
2121 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2122 	__acquires(rq1->lock)
2123 	__acquires(rq2->lock)
2124 {
2125 	BUG_ON(!irqs_disabled());
2126 	BUG_ON(rq1 != rq2);
2127 	raw_spin_lock(&rq1->lock);
2128 	__acquire(rq2->lock);	/* Fake it out ;) */
2129 }
2130 
2131 /*
2132  * double_rq_unlock - safely unlock two runqueues
2133  *
2134  * Note this does not restore interrupts like task_rq_unlock;
2135  * you need to do so manually after calling.
2136  */
2137 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2138 	__releases(rq1->lock)
2139 	__releases(rq2->lock)
2140 {
2141 	BUG_ON(rq1 != rq2);
2142 	raw_spin_unlock(&rq1->lock);
2143 	__release(rq2->lock);
2144 }
2145 
2146 #endif
2147 
2148 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2149 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2150 
2151 #ifdef	CONFIG_SCHED_DEBUG
2152 extern bool sched_debug_enabled;
2153 
2154 extern void print_cfs_stats(struct seq_file *m, int cpu);
2155 extern void print_rt_stats(struct seq_file *m, int cpu);
2156 extern void print_dl_stats(struct seq_file *m, int cpu);
2157 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2158 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2159 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2160 #ifdef CONFIG_NUMA_BALANCING
2161 extern void
2162 show_numa_stats(struct task_struct *p, struct seq_file *m);
2163 extern void
2164 print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2165 	unsigned long tpf, unsigned long gsf, unsigned long gpf);
2166 #endif /* CONFIG_NUMA_BALANCING */
2167 #endif /* CONFIG_SCHED_DEBUG */
2168 
2169 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2170 extern void init_rt_rq(struct rt_rq *rt_rq);
2171 extern void init_dl_rq(struct dl_rq *dl_rq);
2172 
2173 extern void cfs_bandwidth_usage_inc(void);
2174 extern void cfs_bandwidth_usage_dec(void);
2175 
2176 #ifdef CONFIG_NO_HZ_COMMON
2177 #define NOHZ_BALANCE_KICK_BIT	0
2178 #define NOHZ_STATS_KICK_BIT	1
2179 
2180 #define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
2181 #define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
2182 
2183 #define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
2184 
2185 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
2186 
2187 extern void nohz_balance_exit_idle(struct rq *rq);
2188 #else
2189 static inline void nohz_balance_exit_idle(struct rq *rq) { }
2190 #endif
2191 
2192 
2193 #ifdef CONFIG_SMP
2194 static inline
2195 void __dl_update(struct dl_bw *dl_b, s64 bw)
2196 {
2197 	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
2198 	int i;
2199 
2200 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2201 			 "sched RCU must be held");
2202 	for_each_cpu_and(i, rd->span, cpu_active_mask) {
2203 		struct rq *rq = cpu_rq(i);
2204 
2205 		rq->dl.extra_bw += bw;
2206 	}
2207 }
2208 #else
2209 static inline
2210 void __dl_update(struct dl_bw *dl_b, s64 bw)
2211 {
2212 	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2213 
2214 	dl->extra_bw += bw;
2215 }
2216 #endif
2217 
2218 
2219 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2220 struct irqtime {
2221 	u64			total;
2222 	u64			tick_delta;
2223 	u64			irq_start_time;
2224 	struct u64_stats_sync	sync;
2225 };
2226 
2227 DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2228 
2229 /*
2230  * Returns the irqtime minus the softirq time computed by ksoftirqd.
2231  * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
2232  * subtracted from it and would never move forward.
2233  */
2234 static inline u64 irq_time_read(int cpu)
2235 {
2236 	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2237 	unsigned int seq;
2238 	u64 total;
2239 
2240 	do {
2241 		seq = __u64_stats_fetch_begin(&irqtime->sync);
2242 		total = irqtime->total;
2243 	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2244 
2245 	return total;
2246 }
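
/*
 * Sketch of the writer side (the real code lives in kernel/sched/cputime.c):
 * updates are published under the same u64_stats seqcount, so the retry
 * loop above observes a consistent total even on 32-bit:
 *
 *	u64_stats_update_begin(&irqtime->sync);
 *	irqtime->total += delta;
 *	irqtime->tick_delta += delta;
 *	u64_stats_update_end(&irqtime->sync);
 */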
2247 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2248 
2249 #ifdef CONFIG_CPU_FREQ
2250 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
2251 
2252 /**
2253  * cpufreq_update_util - Take a note about CPU utilization changes.
2254  * @rq: Runqueue to carry out the update for.
2255  * @flags: Update reason flags.
2256  *
2257  * This function is called by the scheduler on the CPU whose utilization is
2258  * being updated.
2259  *
2260  * It can only be called from RCU-sched read-side critical sections.
2261  *
2262  * The way cpufreq is currently arranged requires it to evaluate the CPU
2263  * performance state (frequency/voltage) on a regular basis to prevent it from
2264  * being stuck in a completely inadequate performance level for too long.
2265  * That is not guaranteed to happen if the updates are only triggered from CFS
2266  * and DL, though, because they may not be coming in if only RT tasks are
2267  * active all the time.
2268  *
2269  * As a workaround for that issue, this function is called periodically by the
2270  * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
2271  * but that really is a band-aid.  Going forward it should be replaced with
2272  * solutions targeted more specifically at RT tasks.
2273  */
2274 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2275 {
2276 	struct update_util_data *data;
2277 
2278 	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2279 						  cpu_of(rq)));
2280 	if (data)
2281 		data->func(data, rq_clock(rq), flags);
2282 }
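
/*
 * Illustrative sketch, not part of this header: a governor such as schedutil
 * registers the callback invoked above with cpufreq_add_update_util_hook();
 * the names my_hook and my_data below are hypothetical:
 *
 *	static void my_hook(struct update_util_data *data, u64 time,
 *			    unsigned int flags)
 *	{
 *		... evaluate utilization and adjust the CPU frequency ...
 *	}
 *
 *	cpufreq_add_update_util_hook(cpu, &per_cpu(my_data, cpu), my_hook);
 */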
2283 #else
2284 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2285 #endif /* CONFIG_CPU_FREQ */
2286 
2287 #ifdef CONFIG_UCLAMP_TASK
2288 enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
2289 
2290 static __always_inline
2291 unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
2292 			      struct task_struct *p)
2293 {
2294 	unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
2295 	unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
2296 
2297 	if (p) {
2298 		min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
2299 		max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
2300 	}
2301 
2302 	/*
2303 	 * Since a CPU's {min,max}_util clamps are MAX-aggregated across
2304 	 * RUNNABLE tasks with _different_ clamps, we can end up with an
2305 	 * inversion. Fix it now, when the clamps are applied.
2306 	 */
2307 	if (unlikely(min_util >= max_util))
2308 		return min_util;
2309 
2310 	return clamp(util, min_util, max_util);
2311 }
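
/*
 * Purely illustrative: if the aggregated values came out as min_util == 800
 * and max_util == 512, uclamp_util_with() would return 800 - on such an
 * inversion the boost (min) takes precedence over the cap (max).
 */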
2312 
2313 static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
2314 {
2315 	return uclamp_util_with(rq, util, NULL);
2316 }
2317 #else /* CONFIG_UCLAMP_TASK */
2318 static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
2319 					    struct task_struct *p)
2320 {
2321 	return util;
2322 }
2323 static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
2324 {
2325 	return util;
2326 }
2327 #endif /* CONFIG_UCLAMP_TASK */
2328 
2329 #ifdef arch_scale_freq_capacity
2330 # ifndef arch_scale_freq_invariant
2331 #  define arch_scale_freq_invariant()	true
2332 # endif
2333 #else
2334 # define arch_scale_freq_invariant()	false
2335 #endif
2336 
2337 #ifdef CONFIG_SMP
2338 static inline unsigned long capacity_orig_of(int cpu)
2339 {
2340 	return cpu_rq(cpu)->cpu_capacity_orig;
2341 }
2342 #endif
2343 
2344 /**
2345  * enum schedutil_type - CPU utilization type
2346  * @FREQUENCY_UTIL:	Utilization used to select frequency
2347  * @ENERGY_UTIL:	Utilization used during energy calculation
2348  *
2349  * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
2350  * need to be aggregated differently depending on the usage made of them. This
2351  * enum is used within schedutil_freq_util() to differentiate the types of
2352  * utilization expected by the callers, and adjust the aggregation accordingly.
2353  */
2354 enum schedutil_type {
2355 	FREQUENCY_UTIL,
2356 	ENERGY_UTIL,
2357 };
2358 
2359 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
2360 
2361 unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
2362 				 unsigned long max, enum schedutil_type type,
2363 				 struct task_struct *p);
2364 
2365 static inline unsigned long cpu_bw_dl(struct rq *rq)
2366 {
2367 	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2368 }
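
/*
 * Illustrative note: rq->dl.running_bw is a BW_SHIFT fixed-point fraction
 * of the CPU, so e.g. running_bw == BW_UNIT / 2 yields
 * (524288 * 1024) >> 20 == 512, i.e. half of SCHED_CAPACITY_SCALE.
 */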
2369 
2370 static inline unsigned long cpu_util_dl(struct rq *rq)
2371 {
2372 	return READ_ONCE(rq->avg_dl.util_avg);
2373 }
2374 
2375 static inline unsigned long cpu_util_cfs(struct rq *rq)
2376 {
2377 	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2378 
2379 	if (sched_feat(UTIL_EST)) {
2380 		util = max_t(unsigned long, util,
2381 			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
2382 	}
2383 
2384 	return util;
2385 }
2386 
2387 static inline unsigned long cpu_util_rt(struct rq *rq)
2388 {
2389 	return READ_ONCE(rq->avg_rt.util_avg);
2390 }
2391 #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
2392 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
2393 				 unsigned long max, enum schedutil_type type,
2394 				 struct task_struct *p)
2395 {
2396 	return 0;
2397 }
2398 #endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
2399 
2400 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
2401 static inline unsigned long cpu_util_irq(struct rq *rq)
2402 {
2403 	return rq->avg_irq.util_avg;
2404 }
2405 
2406 static inline
2407 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2408 {
2409 	util *= (max - irq);
2410 	util /= max;
2411 
2412 	return util;
2414 }
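
/*
 * Illustrative note: with max == SCHED_CAPACITY_SCALE (1024) and irq == 256,
 * util is scaled by (1024 - 256) / 1024, so e.g. a util of 600 becomes 450.
 */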
2415 #else
2416 static inline unsigned long cpu_util_irq(struct rq *rq)
2417 {
2418 	return 0;
2419 }
2420 
2421 static inline
2422 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2423 {
2424 	return util;
2425 }
2426 #endif
2427 
2428 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2429 
2430 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
2431 
2432 DECLARE_STATIC_KEY_FALSE(sched_energy_present);
2433 
2434 static inline bool sched_energy_enabled(void)
2435 {
2436 	return static_branch_unlikely(&sched_energy_present);
2437 }
2438 
2439 #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
2440 
2441 #define perf_domain_span(pd) NULL
2442 static inline bool sched_energy_enabled(void) { return false; }
2443 
2444 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
2445 
2446 #ifdef CONFIG_MEMBARRIER
2447 /*
2448  * The scheduler provides memory barriers required by membarrier between:
2449  * - prior user-space memory accesses and store to rq->membarrier_state,
2450  * - store to rq->membarrier_state and following user-space memory accesses.
2451  * In the same way it provides those guarantees around store to rq->curr.
2452  */
2453 static inline void membarrier_switch_mm(struct rq *rq,
2454 					struct mm_struct *prev_mm,
2455 					struct mm_struct *next_mm)
2456 {
2457 	int membarrier_state;
2458 
2459 	if (prev_mm == next_mm)
2460 		return;
2461 
2462 	membarrier_state = atomic_read(&next_mm->membarrier_state);
2463 	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
2464 		return;
2465 
2466 	WRITE_ONCE(rq->membarrier_state, membarrier_state);
2467 }
2468 #else
2469 static inline void membarrier_switch_mm(struct rq *rq,
2470 					struct mm_struct *prev_mm,
2471 					struct mm_struct *next_mm)
2472 {
2473 }
2474 #endif
2475