xref: /openbmc/linux/kernel/sched/sched.h (revision e2f1cf25)
1 
2 #include <linux/sched.h>
3 #include <linux/sched/sysctl.h>
4 #include <linux/sched/rt.h>
5 #include <linux/sched/deadline.h>
6 #include <linux/mutex.h>
7 #include <linux/spinlock.h>
8 #include <linux/stop_machine.h>
9 #include <linux/irq_work.h>
10 #include <linux/tick.h>
11 #include <linux/slab.h>
12 
13 #include "cpupri.h"
14 #include "cpudeadline.h"
15 #include "cpuacct.h"
16 
17 struct rq;
18 struct cpuidle_state;
19 
20 /* task_struct::on_rq states: */
21 #define TASK_ON_RQ_QUEUED	1
22 #define TASK_ON_RQ_MIGRATING	2
23 
24 extern __read_mostly int scheduler_running;
25 
26 extern unsigned long calc_load_update;
27 extern atomic_long_t calc_load_tasks;
28 
29 extern void calc_global_load_tick(struct rq *this_rq);
30 extern long calc_load_fold_active(struct rq *this_rq);
31 
32 #ifdef CONFIG_SMP
33 extern void update_cpu_load_active(struct rq *this_rq);
34 #else
35 static inline void update_cpu_load_active(struct rq *this_rq) { }
36 #endif
37 
38 /*
39  * Helpers for converting nanosecond timing to jiffy resolution
40  */
41 #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
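/*
 * Worked example (illustrative, assuming the common HZ=1000): NSEC_PER_SEC/HZ
 * is 1,000,000, so NS_TO_JIFFIES(4000000) evaluates to 4 jiffies.
 */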
42 
43 /*
44  * Increase resolution of nice-level calculations for 64-bit architectures.
45  * The extra resolution improves shares distribution and load balancing of
46  * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
47  * hierarchies, especially on larger systems. This is not a user-visible change
48  * and does not change the user-interface for setting shares/weights.
49  *
50  * We increase resolution only if we have enough bits to allow this increased
51  * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
52  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
53  * increased costs.
54  */
55 #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
56 # define SCHED_LOAD_RESOLUTION	10
57 # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
58 # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
59 #else
60 # define SCHED_LOAD_RESOLUTION	0
61 # define scale_load(w)		(w)
62 # define scale_load_down(w)	(w)
63 #endif
64 
65 #define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
66 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
67 
68 #define NICE_0_LOAD		SCHED_LOAD_SCALE
69 #define NICE_0_SHIFT		SCHED_LOAD_SHIFT
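/*
 * Illustrative values with the defaults above (SCHED_LOAD_RESOLUTION == 0):
 * SCHED_LOAD_SHIFT is 10, so SCHED_LOAD_SCALE and NICE_0_LOAD are both
 * 1 << 10 == 1024, the weight of a nice-0 task, and scale_load()/
 * scale_load_down() are no-ops. Had the 64-bit branch above been enabled,
 * scale_load(1024) would instead be 1024 << 10 == 1048576.
 */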
70 
71 /*
72  * Single value that decides SCHED_DEADLINE internal math precision.
73  * 10 -> just above 1us
74  * 9  -> just above 0.5us
75  */
76 #define DL_SCALE (10)
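/*
 * Example (illustrative): 1 << DL_SCALE == 1024ns, i.e. just above 1us, and
 * 1 << 9 == 512ns, just above 0.5us, matching the comment above.
 */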
77 
78 /*
79  * These are the 'tuning knobs' of the scheduler:
80  */
81 
82 /*
83  * Single value that denotes runtime == period, i.e. unlimited time.
84  */
85 #define RUNTIME_INF	((u64)~0ULL)
86 
87 static inline int fair_policy(int policy)
88 {
89 	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
90 }
91 
92 static inline int rt_policy(int policy)
93 {
94 	return policy == SCHED_FIFO || policy == SCHED_RR;
95 }
96 
97 static inline int dl_policy(int policy)
98 {
99 	return policy == SCHED_DEADLINE;
100 }
101 
102 static inline int task_has_rt_policy(struct task_struct *p)
103 {
104 	return rt_policy(p->policy);
105 }
106 
107 static inline int task_has_dl_policy(struct task_struct *p)
108 {
109 	return dl_policy(p->policy);
110 }
111 
112 static inline bool dl_time_before(u64 a, u64 b)
113 {
114 	return (s64)(a - b) < 0;
115 }
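/*
 * Illustrative note: the signed subtraction makes the comparison safe across
 * u64 wrap-around. For example dl_time_before(ULLONG_MAX - 5, 10) is true,
 * because (s64)((ULLONG_MAX - 5) - 10) is a small negative value, so a
 * deadline just before the wrap still sorts earlier than one just after it.
 */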
116 
117 /*
118  * Tells if entity @a should preempt entity @b.
119  */
120 static inline bool
121 dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
122 {
123 	return dl_time_before(a->deadline, b->deadline);
124 }
125 
126 /*
127  * This is the priority-queue data structure of the RT scheduling class:
128  */
129 struct rt_prio_array {
130 	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
131 	struct list_head queue[MAX_RT_PRIO];
132 };
133 
134 struct rt_bandwidth {
135 	/* nests inside the rq lock: */
136 	raw_spinlock_t		rt_runtime_lock;
137 	ktime_t			rt_period;
138 	u64			rt_runtime;
139 	struct hrtimer		rt_period_timer;
140 	unsigned int		rt_period_active;
141 };
142 
143 void __dl_clear_params(struct task_struct *p);
144 
145 /*
146  * To keep the bandwidth of -deadline tasks and groups under control
147  * we need some place where we:
148  *  - store the maximum -deadline bandwidth of the system (the group);
149  *  - cache the fraction of that bandwidth that is currently allocated.
150  *
151  * This is all done in the data structure below. It is similar to the
152  * one used for RT-throttling (rt_bandwidth), with the main difference
153  * that, since here we are only interested in admission control, we
154  * do not decrease any runtime while the group "executes", nor do we
155  * need a timer to replenish it.
156  *
157  * With respect to SMP, the bandwidth is given on a per-CPU basis,
158  * meaning that:
159  *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
160  *  - the dl_total_bw array contains, in the i-th element, the currently
161  *    allocated bandwidth on the i-th CPU.
162  * Moreover, groups consume bandwidth on each CPU, while tasks only
163  * consume bandwidth on the CPU they're running on.
164  * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
165  * that will be shown the next time the proc or cgroup controls are
166  * read. It, in turn, can be changed by writing to its own
167  * control.
168  */
169 struct dl_bandwidth {
170 	raw_spinlock_t dl_runtime_lock;
171 	u64 dl_runtime;
172 	u64 dl_period;
173 };
174 
175 static inline int dl_bandwidth_enabled(void)
176 {
177 	return sysctl_sched_rt_runtime >= 0;
178 }
179 
180 extern struct dl_bw *dl_bw_of(int i);
181 
182 struct dl_bw {
183 	raw_spinlock_t lock;
184 	u64 bw, total_bw;
185 };
186 
187 static inline
188 void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
189 {
190 	dl_b->total_bw -= tsk_bw;
191 }
192 
193 static inline
194 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
195 {
196 	dl_b->total_bw += tsk_bw;
197 }
198 
199 static inline
200 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
201 {
202 	return dl_b->bw != -1 &&
203 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
204 }
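/*
 * Worked example (illustrative): dl_b->bw and the *_bw values are kept in
 * the fixed-point ratio units produced by to_ratio() (declared later in this
 * file). With, say, a 95% per-CPU cap on a 4-CPU domain, the test above
 * admits a change only while total_bw - old_bw + new_bw stays within bw * 4;
 * bw == -1 means the check is disabled and nothing ever "overflows".
 */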
205 
206 extern struct mutex sched_domains_mutex;
207 
208 #ifdef CONFIG_CGROUP_SCHED
209 
210 #include <linux/cgroup.h>
211 
212 struct cfs_rq;
213 struct rt_rq;
214 
215 extern struct list_head task_groups;
216 
217 struct cfs_bandwidth {
218 #ifdef CONFIG_CFS_BANDWIDTH
219 	raw_spinlock_t lock;
220 	ktime_t period;
221 	u64 quota, runtime;
222 	s64 hierarchical_quota;
223 	u64 runtime_expires;
224 
225 	int idle, period_active;
226 	struct hrtimer period_timer, slack_timer;
227 	struct list_head throttled_cfs_rq;
228 
229 	/* statistics */
230 	int nr_periods, nr_throttled;
231 	u64 throttled_time;
232 #endif
233 };
234 
235 /* task group related information */
236 struct task_group {
237 	struct cgroup_subsys_state css;
238 
239 #ifdef CONFIG_FAIR_GROUP_SCHED
240 	/* schedulable entities of this group on each cpu */
241 	struct sched_entity **se;
242 	/* runqueue "owned" by this group on each cpu */
243 	struct cfs_rq **cfs_rq;
244 	unsigned long shares;
245 
246 #ifdef	CONFIG_SMP
247 	atomic_long_t load_avg;
248 	atomic_t runnable_avg;
249 #endif
250 #endif
251 
252 #ifdef CONFIG_RT_GROUP_SCHED
253 	struct sched_rt_entity **rt_se;
254 	struct rt_rq **rt_rq;
255 
256 	struct rt_bandwidth rt_bandwidth;
257 #endif
258 
259 	struct rcu_head rcu;
260 	struct list_head list;
261 
262 	struct task_group *parent;
263 	struct list_head siblings;
264 	struct list_head children;
265 
266 #ifdef CONFIG_SCHED_AUTOGROUP
267 	struct autogroup *autogroup;
268 #endif
269 
270 	struct cfs_bandwidth cfs_bandwidth;
271 };
272 
273 #ifdef CONFIG_FAIR_GROUP_SCHED
274 #define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
275 
276 /*
277  * A weight of 0 or 1 can cause arithmetic problems.
278  * The weight of a cfs_rq is the sum of the weights of the entities
279  * queued on it, so the weight of an entity should not be too large,
280  * and neither should the shares value of a task group.
281  * (The default weight is 1024 - so there's no practical
282  *  limitation from this.)
283  */
284 #define MIN_SHARES	(1UL <<  1)
285 #define MAX_SHARES	(1UL << 18)
286 #endif
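/*
 * Illustrative example (cgroup interface): a group's cpu.shares defaults to
 * NICE_0_LOAD (1024); writing 2048 roughly doubles its weight relative to a
 * default sibling. Values are clamped to the MIN_SHARES..MAX_SHARES range
 * above (after scale_load(), which is a no-op here).
 */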
287 
288 typedef int (*tg_visitor)(struct task_group *, void *);
289 
290 extern int walk_tg_tree_from(struct task_group *from,
291 			     tg_visitor down, tg_visitor up, void *data);
292 
293 /*
294  * Iterate the full tree, calling @down when first entering a node and @up when
295  * leaving it for the final time.
296  *
297  * Caller must hold rcu_lock or sufficient equivalent.
298  */
299 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
300 {
301 	return walk_tg_tree_from(&root_task_group, down, up, data);
302 }
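/*
 * Usage sketch (the visitor below is hypothetical): counting all groups
 * could look like
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &n);
 *	rcu_read_unlock();
 */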
303 
304 extern int tg_nop(struct task_group *tg, void *data);
305 
306 extern void free_fair_sched_group(struct task_group *tg);
307 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
308 extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
309 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
310 			struct sched_entity *se, int cpu,
311 			struct sched_entity *parent);
312 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
313 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
314 
315 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
316 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
317 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
318 
319 extern void free_rt_sched_group(struct task_group *tg);
320 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
321 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
322 		struct sched_rt_entity *rt_se, int cpu,
323 		struct sched_rt_entity *parent);
324 
325 extern struct task_group *sched_create_group(struct task_group *parent);
326 extern void sched_online_group(struct task_group *tg,
327 			       struct task_group *parent);
328 extern void sched_destroy_group(struct task_group *tg);
329 extern void sched_offline_group(struct task_group *tg);
330 
331 extern void sched_move_task(struct task_struct *tsk);
332 
333 #ifdef CONFIG_FAIR_GROUP_SCHED
334 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
335 #endif
336 
337 #else /* CONFIG_CGROUP_SCHED */
338 
339 struct cfs_bandwidth { };
340 
341 #endif	/* CONFIG_CGROUP_SCHED */
342 
343 /* CFS-related fields in a runqueue */
344 struct cfs_rq {
345 	struct load_weight load;
346 	unsigned int nr_running, h_nr_running;
347 
348 	u64 exec_clock;
349 	u64 min_vruntime;
350 #ifndef CONFIG_64BIT
351 	u64 min_vruntime_copy;
352 #endif
353 
354 	struct rb_root tasks_timeline;
355 	struct rb_node *rb_leftmost;
356 
357 	/*
358 	 * 'curr' points to the currently running entity on this cfs_rq.
359 	 * It is set to NULL otherwise (i.e. when none are currently running).
360 	 */
361 	struct sched_entity *curr, *next, *last, *skip;
362 
363 #ifdef	CONFIG_SCHED_DEBUG
364 	unsigned int nr_spread_over;
365 #endif
366 
367 #ifdef CONFIG_SMP
368 	/*
369 	 * CFS Load tracking
370 	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
371 	 * This allows for the description of both thread and group usage (in
372 	 * the FAIR_GROUP_SCHED case).
373 	 * runnable_load_avg is the sum of the load_avg_contrib of the
374 	 * sched_entities on the rq.
375 	 * blocked_load_avg is similar to runnable_load_avg except that it
376 	 * sums the blocked sched_entities on the rq.
377 	 * utilization_load_avg is the sum of the average running time of the
378 	 * sched_entities on the rq.
379 	 */
380 	unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
381 	atomic64_t decay_counter;
382 	u64 last_decay;
383 	atomic_long_t removed_load;
384 
385 #ifdef CONFIG_FAIR_GROUP_SCHED
386 	/* Required to track per-cpu representation of a task_group */
387 	u32 tg_runnable_contrib;
388 	unsigned long tg_load_contrib;
389 
390 	/*
391 	 *   h_load = weight * f(tg)
392 	 *
393 	 * Where f(tg) is the recursive weight fraction assigned to
394 	 * this group.
395 	 */
396 	unsigned long h_load;
397 	u64 last_h_load_update;
398 	struct sched_entity *h_load_next;
399 #endif /* CONFIG_FAIR_GROUP_SCHED */
400 #endif /* CONFIG_SMP */
401 
402 #ifdef CONFIG_FAIR_GROUP_SCHED
403 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
404 
405 	/*
406 	 * leaf cfs_rqs are those that hold tasks (the lowest schedulable
407 	 * entities in a hierarchy). Non-leaf cfs_rqs hold other, higher
408 	 * schedulable entities (like users, containers etc.)
409 	 *
410 	 * leaf_cfs_rq_list ties together the list of leaf cfs_rqs on a cpu.
411 	 * This list is used during load balancing.
412 	 */
413 	int on_list;
414 	struct list_head leaf_cfs_rq_list;
415 	struct task_group *tg;	/* group that "owns" this runqueue */
416 
417 #ifdef CONFIG_CFS_BANDWIDTH
418 	int runtime_enabled;
419 	u64 runtime_expires;
420 	s64 runtime_remaining;
421 
422 	u64 throttled_clock, throttled_clock_task;
423 	u64 throttled_clock_task_time;
424 	int throttled, throttle_count;
425 	struct list_head throttled_list;
426 #endif /* CONFIG_CFS_BANDWIDTH */
427 #endif /* CONFIG_FAIR_GROUP_SCHED */
428 };
429 
430 static inline int rt_bandwidth_enabled(void)
431 {
432 	return sysctl_sched_rt_runtime >= 0;
433 }
434 
435 /* RT IPI pull logic requires IRQ_WORK */
436 #ifdef CONFIG_IRQ_WORK
437 # define HAVE_RT_PUSH_IPI
438 #endif
439 
440 /* Real-Time classes' related field in a runqueue: */
441 struct rt_rq {
442 	struct rt_prio_array active;
443 	unsigned int rt_nr_running;
444 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
445 	struct {
446 		int curr; /* highest queued rt task prio */
447 #ifdef CONFIG_SMP
448 		int next; /* next highest */
449 #endif
450 	} highest_prio;
451 #endif
452 #ifdef CONFIG_SMP
453 	unsigned long rt_nr_migratory;
454 	unsigned long rt_nr_total;
455 	int overloaded;
456 	struct plist_head pushable_tasks;
457 #ifdef HAVE_RT_PUSH_IPI
458 	int push_flags;
459 	int push_cpu;
460 	struct irq_work push_work;
461 	raw_spinlock_t push_lock;
462 #endif
463 #endif /* CONFIG_SMP */
464 	int rt_queued;
465 
466 	int rt_throttled;
467 	u64 rt_time;
468 	u64 rt_runtime;
469 	/* Nests inside the rq lock: */
470 	raw_spinlock_t rt_runtime_lock;
471 
472 #ifdef CONFIG_RT_GROUP_SCHED
473 	unsigned long rt_nr_boosted;
474 
475 	struct rq *rq;
476 	struct task_group *tg;
477 #endif
478 };
479 
480 /* Deadline class' related fields in a runqueue */
481 struct dl_rq {
482 	/* runqueue is an rbtree, ordered by deadline */
483 	struct rb_root rb_root;
484 	struct rb_node *rb_leftmost;
485 
486 	unsigned long dl_nr_running;
487 
488 #ifdef CONFIG_SMP
489 	/*
490 	 * Deadline values of the currently executing and the
491 	 * earliest ready task on this rq. Caching these facilitates
492 	 * the decision whether or not a ready but not running task
493 	 * should migrate somewhere else.
494 	 */
495 	struct {
496 		u64 curr;
497 		u64 next;
498 	} earliest_dl;
499 
500 	unsigned long dl_nr_migratory;
501 	int overloaded;
502 
503 	/*
504 	 * Tasks on this rq that can be pushed away. They are kept in
505 	 * an rb-tree, ordered by tasks' deadlines, with caching
506 	 * of the leftmost (earliest deadline) element.
507 	 */
508 	struct rb_root pushable_dl_tasks_root;
509 	struct rb_node *pushable_dl_tasks_leftmost;
510 #else
511 	struct dl_bw dl_bw;
512 #endif
513 };
514 
515 #ifdef CONFIG_SMP
516 
517 /*
518  * We add the notion of a root-domain which will be used to define per-domain
519  * variables. Each exclusive cpuset essentially defines an island domain by
520  * fully partitioning the member cpus from any other cpuset. Whenever a new
521  * exclusive cpuset is created, we also create and attach a new root-domain
522  * object.
523  *
524  */
525 struct root_domain {
526 	atomic_t refcount;
527 	atomic_t rto_count;
528 	struct rcu_head rcu;
529 	cpumask_var_t span;
530 	cpumask_var_t online;
531 
532 	/* Indicate more than one runnable task for any CPU */
533 	bool overload;
534 
535 	/*
536 	 * The bit corresponding to a CPU gets set here if that CPU has more
537 	 * than one runnable -deadline task (as it is below for RT tasks).
538 	 */
539 	cpumask_var_t dlo_mask;
540 	atomic_t dlo_count;
541 	struct dl_bw dl_bw;
542 	struct cpudl cpudl;
543 
544 	/*
545 	 * The "RT overload" flag: it gets set if a CPU has more than
546 	 * one runnable RT task.
547 	 */
548 	cpumask_var_t rto_mask;
549 	struct cpupri cpupri;
550 };
551 
552 extern struct root_domain def_root_domain;
553 
554 #endif /* CONFIG_SMP */
555 
556 /*
557  * This is the main, per-CPU runqueue data structure.
558  *
559  * Locking rule: code that wants to lock multiple runqueues (such as
560  * the load balancing or the thread migration code) must order its
561  * lock acquisitions by ascending runqueue address.
562  */
563 struct rq {
564 	/* runqueue lock: */
565 	raw_spinlock_t lock;
566 
567 	/*
568 	 * nr_running and cpu_load should be in the same cacheline because
569 	 * remote CPUs use both these fields when doing load calculation.
570 	 */
571 	unsigned int nr_running;
572 #ifdef CONFIG_NUMA_BALANCING
573 	unsigned int nr_numa_running;
574 	unsigned int nr_preferred_running;
575 #endif
576 	#define CPU_LOAD_IDX_MAX 5
577 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
578 	unsigned long last_load_update_tick;
579 #ifdef CONFIG_NO_HZ_COMMON
580 	u64 nohz_stamp;
581 	unsigned long nohz_flags;
582 #endif
583 #ifdef CONFIG_NO_HZ_FULL
584 	unsigned long last_sched_tick;
585 #endif
586 	/* capture load from *all* tasks on this cpu: */
587 	struct load_weight load;
588 	unsigned long nr_load_updates;
589 	u64 nr_switches;
590 
591 	struct cfs_rq cfs;
592 	struct rt_rq rt;
593 	struct dl_rq dl;
594 
595 #ifdef CONFIG_FAIR_GROUP_SCHED
596 	/* list of leaf cfs_rq on this cpu: */
597 	struct list_head leaf_cfs_rq_list;
598 
599 	struct sched_avg avg;
600 #endif /* CONFIG_FAIR_GROUP_SCHED */
601 
602 	/*
603 	 * This is part of a global counter where only the total sum
604 	 * over all CPUs matters. A task can increase this counter on
605 	 * one CPU and, if it got migrated afterwards, it may decrease
606 	 * it on another CPU. Always updated under the runqueue lock:
607 	 */
608 	unsigned long nr_uninterruptible;
609 
610 	struct task_struct *curr, *idle, *stop;
611 	unsigned long next_balance;
612 	struct mm_struct *prev_mm;
613 
614 	unsigned int clock_skip_update;
615 	u64 clock;
616 	u64 clock_task;
617 
618 	atomic_t nr_iowait;
619 
620 #ifdef CONFIG_SMP
621 	struct root_domain *rd;
622 	struct sched_domain *sd;
623 
624 	unsigned long cpu_capacity;
625 	unsigned long cpu_capacity_orig;
626 
627 	struct callback_head *balance_callback;
628 
629 	unsigned char idle_balance;
630 	/* For active balancing */
631 	int active_balance;
632 	int push_cpu;
633 	struct cpu_stop_work active_balance_work;
634 	/* cpu of this runqueue: */
635 	int cpu;
636 	int online;
637 
638 	struct list_head cfs_tasks;
639 
640 	u64 rt_avg;
641 	u64 age_stamp;
642 	u64 idle_stamp;
643 	u64 avg_idle;
644 
645 	/* This is used to determine avg_idle's max value */
646 	u64 max_idle_balance_cost;
647 #endif
648 
649 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
650 	u64 prev_irq_time;
651 #endif
652 #ifdef CONFIG_PARAVIRT
653 	u64 prev_steal_time;
654 #endif
655 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
656 	u64 prev_steal_time_rq;
657 #endif
658 
659 	/* calc_load related fields */
660 	unsigned long calc_load_update;
661 	long calc_load_active;
662 
663 #ifdef CONFIG_SCHED_HRTICK
664 #ifdef CONFIG_SMP
665 	int hrtick_csd_pending;
666 	struct call_single_data hrtick_csd;
667 #endif
668 	struct hrtimer hrtick_timer;
669 #endif
670 
671 #ifdef CONFIG_SCHEDSTATS
672 	/* latency stats */
673 	struct sched_info rq_sched_info;
674 	unsigned long long rq_cpu_time;
675 	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
676 
677 	/* sys_sched_yield() stats */
678 	unsigned int yld_count;
679 
680 	/* schedule() stats */
681 	unsigned int sched_count;
682 	unsigned int sched_goidle;
683 
684 	/* try_to_wake_up() stats */
685 	unsigned int ttwu_count;
686 	unsigned int ttwu_local;
687 #endif
688 
689 #ifdef CONFIG_SMP
690 	struct llist_head wake_list;
691 #endif
692 
693 #ifdef CONFIG_CPU_IDLE
694 	/* Must be inspected within an RCU lock section */
695 	struct cpuidle_state *idle_state;
696 #endif
697 };
698 
699 static inline int cpu_of(struct rq *rq)
700 {
701 #ifdef CONFIG_SMP
702 	return rq->cpu;
703 #else
704 	return 0;
705 #endif
706 }
707 
708 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
709 
710 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
711 #define this_rq()		this_cpu_ptr(&runqueues)
712 #define task_rq(p)		cpu_rq(task_cpu(p))
713 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
714 #define raw_rq()		raw_cpu_ptr(&runqueues)
715 
716 static inline u64 __rq_clock_broken(struct rq *rq)
717 {
718 	return READ_ONCE(rq->clock);
719 }
720 
721 static inline u64 rq_clock(struct rq *rq)
722 {
723 	lockdep_assert_held(&rq->lock);
724 	return rq->clock;
725 }
726 
727 static inline u64 rq_clock_task(struct rq *rq)
728 {
729 	lockdep_assert_held(&rq->lock);
730 	return rq->clock_task;
731 }
732 
733 #define RQCF_REQ_SKIP	0x01
734 #define RQCF_ACT_SKIP	0x02
735 
736 static inline void rq_clock_skip_update(struct rq *rq, bool skip)
737 {
738 	lockdep_assert_held(&rq->lock);
739 	if (skip)
740 		rq->clock_skip_update |= RQCF_REQ_SKIP;
741 	else
742 		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
743 }
744 
745 #ifdef CONFIG_NUMA
746 enum numa_topology_type {
747 	NUMA_DIRECT,
748 	NUMA_GLUELESS_MESH,
749 	NUMA_BACKPLANE,
750 };
751 extern enum numa_topology_type sched_numa_topology_type;
752 extern int sched_max_numa_distance;
753 extern bool find_numa_distance(int distance);
754 #endif
755 
756 #ifdef CONFIG_NUMA_BALANCING
757 /* The regions in numa_faults array from task_struct */
758 enum numa_faults_stats {
759 	NUMA_MEM = 0,
760 	NUMA_CPU,
761 	NUMA_MEMBUF,
762 	NUMA_CPUBUF
763 };
764 extern void sched_setnuma(struct task_struct *p, int node);
765 extern int migrate_task_to(struct task_struct *p, int cpu);
766 extern int migrate_swap(struct task_struct *, struct task_struct *);
767 #endif /* CONFIG_NUMA_BALANCING */
768 
769 #ifdef CONFIG_SMP
770 
771 static inline void
772 queue_balance_callback(struct rq *rq,
773 		       struct callback_head *head,
774 		       void (*func)(struct rq *rq))
775 {
776 	lockdep_assert_held(&rq->lock);
777 
778 	if (unlikely(head->next))
779 		return;
780 
781 	head->func = (void (*)(struct callback_head *))func;
782 	head->next = rq->balance_callback;
783 	rq->balance_callback = head;
784 }
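/*
 * Usage sketch (names hypothetical): a scheduling class typically keeps a
 * per-CPU callback head and, with rq->lock held, queues work that must run
 * once the lock is about to be released:
 *
 *	static DEFINE_PER_CPU(struct callback_head, my_push_head);
 *	...
 *	queue_balance_callback(rq, &per_cpu(my_push_head, rq->cpu), my_push_fn);
 */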
785 
786 extern void sched_ttwu_pending(void);
787 
788 #define rcu_dereference_check_sched_domain(p) \
789 	rcu_dereference_check((p), \
790 			      lockdep_is_held(&sched_domains_mutex))
791 
792 /*
793  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
794  * See detach_destroy_domains: synchronize_sched for details.
795  *
796  * The domain tree of any CPU may only be accessed from within
797  * preempt-disabled sections.
798  */
799 #define for_each_domain(cpu, __sd) \
800 	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
801 			__sd; __sd = __sd->parent)
802 
803 #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
804 
805 /**
806  * highest_flag_domain - Return highest sched_domain containing flag.
807  * @cpu:	The cpu whose highest level of sched domain is to
808  *		be returned.
809  * @flag:	The flag to check for the highest sched_domain
810  *		for the given cpu.
811  *
812  * Returns the highest sched_domain of a cpu which contains the given flag.
813  */
814 static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
815 {
816 	struct sched_domain *sd, *hsd = NULL;
817 
818 	for_each_domain(cpu, sd) {
819 		if (!(sd->flags & flag))
820 			break;
821 		hsd = sd;
822 	}
823 
824 	return hsd;
825 }
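/*
 * Illustrative use: the per-CPU sd_llc pointer below is typically derived at
 * domain build time from something like
 * highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES), i.e. the widest domain
 * whose CPUs still share a last-level cache.
 */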
826 
827 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
828 {
829 	struct sched_domain *sd;
830 
831 	for_each_domain(cpu, sd) {
832 		if (sd->flags & flag)
833 			break;
834 	}
835 
836 	return sd;
837 }
838 
839 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
840 DECLARE_PER_CPU(int, sd_llc_size);
841 DECLARE_PER_CPU(int, sd_llc_id);
842 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
843 DECLARE_PER_CPU(struct sched_domain *, sd_busy);
844 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
845 
846 struct sched_group_capacity {
847 	atomic_t ref;
848 	/*
849 	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
850 	 * for a single CPU.
851 	 */
852 	unsigned int capacity;
853 	unsigned long next_update;
854 	int imbalance; /* XXX unrelated to capacity but shared group state */
855 	/*
856 	 * Number of busy cpus in this group.
857 	 */
858 	atomic_t nr_busy_cpus;
859 
860 	unsigned long cpumask[0]; /* iteration mask */
861 };
862 
863 struct sched_group {
864 	struct sched_group *next;	/* Must be a circular list */
865 	atomic_t ref;
866 
867 	unsigned int group_weight;
868 	struct sched_group_capacity *sgc;
869 
870 	/*
871 	 * The CPUs this group covers.
872 	 *
873 	 * NOTE: this field is variable length. (Allocated dynamically
874 	 * by attaching extra space to the end of the structure,
875 	 * depending on how many CPUs the kernel has booted up with)
876 	 */
877 	unsigned long cpumask[0];
878 };
879 
880 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
881 {
882 	return to_cpumask(sg->cpumask);
883 }
884 
885 /*
886  * cpumask masking which cpus in the group are allowed to iterate up the domain
887  * tree.
888  */
889 static inline struct cpumask *sched_group_mask(struct sched_group *sg)
890 {
891 	return to_cpumask(sg->sgc->cpumask);
892 }
893 
894 /**
895  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
896  * @group: The group whose first cpu is to be returned.
897  */
898 static inline unsigned int group_first_cpu(struct sched_group *group)
899 {
900 	return cpumask_first(sched_group_cpus(group));
901 }
902 
903 extern int group_balance_cpu(struct sched_group *sg);
904 
905 #else
906 
907 static inline void sched_ttwu_pending(void) { }
908 
909 #endif /* CONFIG_SMP */
910 
911 #include "stats.h"
912 #include "auto_group.h"
913 
914 #ifdef CONFIG_CGROUP_SCHED
915 
916 /*
917  * Return the group to which this task belongs.
918  *
919  * We cannot use task_css() and friends because the cgroup subsystem
920  * changes that value before the cgroup_subsys::attach() method is called;
921  * therefore we cannot pin it and might observe the wrong value.
922  *
923  * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
924  * core changes this before calling sched_move_task().
925  *
926  * Instead we use a 'copy' which is updated from sched_move_task() while
927  * holding both task_struct::pi_lock and rq::lock.
928  */
929 static inline struct task_group *task_group(struct task_struct *p)
930 {
931 	return p->sched_task_group;
932 }
933 
934 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
935 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
936 {
937 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
938 	struct task_group *tg = task_group(p);
939 #endif
940 
941 #ifdef CONFIG_FAIR_GROUP_SCHED
942 	p->se.cfs_rq = tg->cfs_rq[cpu];
943 	p->se.parent = tg->se[cpu];
944 #endif
945 
946 #ifdef CONFIG_RT_GROUP_SCHED
947 	p->rt.rt_rq  = tg->rt_rq[cpu];
948 	p->rt.parent = tg->rt_se[cpu];
949 #endif
950 }
951 
952 #else /* CONFIG_CGROUP_SCHED */
953 
954 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
955 static inline struct task_group *task_group(struct task_struct *p)
956 {
957 	return NULL;
958 }
959 
960 #endif /* CONFIG_CGROUP_SCHED */
961 
962 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
963 {
964 	set_task_rq(p, cpu);
965 #ifdef CONFIG_SMP
966 	/*
967 	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
968 	 * successfully executed on another CPU. We must ensure that updates of
969 	 * per-task data have been completed by this moment.
970 	 */
971 	smp_wmb();
972 	task_thread_info(p)->cpu = cpu;
973 	p->wake_cpu = cpu;
974 #endif
975 }
976 
977 /*
978  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
979  */
980 #ifdef CONFIG_SCHED_DEBUG
981 # include <linux/static_key.h>
982 # define const_debug __read_mostly
983 #else
984 # define const_debug const
985 #endif
986 
987 extern const_debug unsigned int sysctl_sched_features;
988 
989 #define SCHED_FEAT(name, enabled)	\
990 	__SCHED_FEAT_##name ,
991 
992 enum {
993 #include "features.h"
994 	__SCHED_FEAT_NR,
995 };
996 
997 #undef SCHED_FEAT
998 
999 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
1000 #define SCHED_FEAT(name, enabled)					\
1001 static __always_inline bool static_branch_##name(struct static_key *key) \
1002 {									\
1003 	return static_key_##enabled(key);				\
1004 }
1005 
1006 #include "features.h"
1007 
1008 #undef SCHED_FEAT
1009 
1010 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1011 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1012 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
1013 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1014 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
1015 
1016 #ifdef CONFIG_NUMA_BALANCING
1017 #define sched_feat_numa(x) sched_feat(x)
1018 #ifdef CONFIG_SCHED_DEBUG
1019 #define numabalancing_enabled sched_feat_numa(NUMA)
1020 #else
1021 extern bool numabalancing_enabled;
1022 #endif /* CONFIG_SCHED_DEBUG */
1023 #else
1024 #define sched_feat_numa(x) (0)
1025 #define numabalancing_enabled (0)
1026 #endif /* CONFIG_NUMA_BALANCING */
1027 
1028 static inline u64 global_rt_period(void)
1029 {
1030 	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1031 }
1032 
1033 static inline u64 global_rt_runtime(void)
1034 {
1035 	if (sysctl_sched_rt_runtime < 0)
1036 		return RUNTIME_INF;
1037 
1038 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1039 }
1040 
1041 static inline int task_current(struct rq *rq, struct task_struct *p)
1042 {
1043 	return rq->curr == p;
1044 }
1045 
1046 static inline int task_running(struct rq *rq, struct task_struct *p)
1047 {
1048 #ifdef CONFIG_SMP
1049 	return p->on_cpu;
1050 #else
1051 	return task_current(rq, p);
1052 #endif
1053 }
1054 
1055 static inline int task_on_rq_queued(struct task_struct *p)
1056 {
1057 	return p->on_rq == TASK_ON_RQ_QUEUED;
1058 }
1059 
1060 static inline int task_on_rq_migrating(struct task_struct *p)
1061 {
1062 	return p->on_rq == TASK_ON_RQ_MIGRATING;
1063 }
1064 
1065 #ifndef prepare_arch_switch
1066 # define prepare_arch_switch(next)	do { } while (0)
1067 #endif
1068 #ifndef finish_arch_switch
1069 # define finish_arch_switch(prev)	do { } while (0)
1070 #endif
1071 #ifndef finish_arch_post_lock_switch
1072 # define finish_arch_post_lock_switch()	do { } while (0)
1073 #endif
1074 
1075 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
1076 {
1077 #ifdef CONFIG_SMP
1078 	/*
1079 	 * We can optimise this out completely for !SMP, because the
1080 	 * SMP rebalancing from interrupt is the only thing that cares
1081 	 * here.
1082 	 */
1083 	next->on_cpu = 1;
1084 #endif
1085 }
1086 
1087 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1088 {
1089 #ifdef CONFIG_SMP
1090 	/*
1091 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
1092 	 * We must ensure this doesn't happen until the switch is completely
1093 	 * finished.
1094 	 */
1095 	smp_wmb();
1096 	prev->on_cpu = 0;
1097 #endif
1098 #ifdef CONFIG_DEBUG_SPINLOCK
1099 	/* this is a valid case when another task releases the spinlock */
1100 	rq->lock.owner = current;
1101 #endif
1102 	/*
1103 	 * If we are tracking spinlock dependencies then we have to
1104 	 * fix up the runqueue lock - which gets 'carried over' from
1105 	 * prev into current:
1106 	 */
1107 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
1108 
1109 	raw_spin_unlock_irq(&rq->lock);
1110 }
1111 
1112 /*
1113  * wake flags
1114  */
1115 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1116 #define WF_FORK		0x02		/* child wakeup after fork */
1117 #define WF_MIGRATED	0x4		/* internal use, task got migrated */
1118 
1119 /*
1120  * To aid in avoiding the subversion of "niceness" due to uneven distribution
1121  * of tasks with abnormal "nice" values across CPUs, the contribution that
1122  * each task makes to its run queue's load is weighted according to its
1123  * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1124  * scaled version of the new time slice allocation that they receive on time
1125  * slice expiry etc.
1126  */
1127 
1128 #define WEIGHT_IDLEPRIO                3
1129 #define WMULT_IDLEPRIO         1431655765
1130 
1131 /*
1132  * Nice levels are multiplicative, with a gentle 10% change for every
1133  * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1134  * nice 1, it will get ~10% less CPU time than another CPU-bound task
1135  * that remained on nice 0.
1136  *
1137  * The "10% effect" is relative and cumulative: from _any_ nice level,
1138  * if you go up 1 level, it's -10% CPU usage; if you go down 1 level
1139  * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25.
1140  * If a task goes up by ~10% and another task goes down by ~10% then
1141  * the relative distance between them is ~25%.)
1142  */
1143 static const int prio_to_weight[40] = {
1144  /* -20 */     88761,     71755,     56483,     46273,     36291,
1145  /* -15 */     29154,     23254,     18705,     14949,     11916,
1146  /* -10 */      9548,      7620,      6100,      4904,      3906,
1147  /*  -5 */      3121,      2501,      1991,      1586,      1277,
1148  /*   0 */      1024,       820,       655,       526,       423,
1149  /*   5 */       335,       272,       215,       172,       137,
1150  /*  10 */       110,        87,        70,        56,        45,
1151  /*  15 */        36,        29,        23,        18,        15,
1152 };
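/*
 * Worked example (illustrative): the weights for nice 0 and nice 1 are 1024
 * and 820, a ratio of ~1.25. Two CPU-bound tasks at those levels therefore
 * get about 1024/1844 ~= 55.5% and 820/1844 ~= 44.5% of the CPU, i.e.
 * roughly the 10% shift per nice level described above.
 */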
1153 
1154 /*
1155  * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1156  *
1157  * In cases where the weight does not change often, we can use the
1158  * precalculated inverse to speed up arithmetic by turning divisions
1159  * into multiplications:
1160  */
1161 static const u32 prio_to_wmult[40] = {
1162  /* -20 */     48388,     59856,     76040,     92818,    118348,
1163  /* -15 */    147320,    184698,    229616,    287308,    360437,
1164  /* -10 */    449829,    563644,    704093,    875809,   1099582,
1165  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
1166  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
1167  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
1168  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
1169  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
1170 };
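/*
 * Worked example (illustrative): wmult ~= 2^32 / weight, so a division by
 * the weight can be replaced by a multiply and a shift,
 * x / weight ~= (x * wmult) >> 32. For nice 0: 2^32 / 1024 == 4194304,
 * which is exactly the entry above.
 */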
1171 
1172 #define ENQUEUE_WAKEUP		1
1173 #define ENQUEUE_HEAD		2
1174 #ifdef CONFIG_SMP
1175 #define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
1176 #else
1177 #define ENQUEUE_WAKING		0
1178 #endif
1179 #define ENQUEUE_REPLENISH	8
1180 
1181 #define DEQUEUE_SLEEP		1
1182 
1183 #define RETRY_TASK		((void *)-1UL)
1184 
1185 struct sched_class {
1186 	const struct sched_class *next;
1187 
1188 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1189 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1190 	void (*yield_task) (struct rq *rq);
1191 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1192 
1193 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1194 
1195 	/*
1196 	 * It is the responsibility of the pick_next_task() method, which
1197 	 * returns the next task, to call put_prev_task() on the @prev task
1198 	 * (or something equivalent).
1199 	 *
1200 	 * May return RETRY_TASK when it finds a higher prio class has runnable
1201 	 * tasks.
1202 	 */
1203 	struct task_struct * (*pick_next_task) (struct rq *rq,
1204 						struct task_struct *prev);
1205 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1206 
1207 #ifdef CONFIG_SMP
1208 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1209 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
1210 
1211 	void (*task_waking) (struct task_struct *task);
1212 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1213 
1214 	void (*set_cpus_allowed)(struct task_struct *p,
1215 				 const struct cpumask *newmask);
1216 
1217 	void (*rq_online)(struct rq *rq);
1218 	void (*rq_offline)(struct rq *rq);
1219 #endif
1220 
1221 	void (*set_curr_task) (struct rq *rq);
1222 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1223 	void (*task_fork) (struct task_struct *p);
1224 	void (*task_dead) (struct task_struct *p);
1225 
1226 	/*
1227 	 * The switched_from() call is allowed to drop rq->lock, therefore we
1228 	 * cannot assume the switched_from/switched_to pair is serialized by
1229 	 * rq->lock. They are however serialized by p->pi_lock.
1230 	 */
1231 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1232 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1233 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1234 			     int oldprio);
1235 
1236 	unsigned int (*get_rr_interval) (struct rq *rq,
1237 					 struct task_struct *task);
1238 
1239 	void (*update_curr) (struct rq *rq);
1240 
1241 #ifdef CONFIG_FAIR_GROUP_SCHED
1242 	void (*task_move_group) (struct task_struct *p, int on_rq);
1243 #endif
1244 };
1245 
1246 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1247 {
1248 	prev->sched_class->put_prev_task(rq, prev);
1249 }
1250 
1251 #define sched_class_highest (&stop_sched_class)
1252 #define for_each_class(class) \
1253    for (class = sched_class_highest; class; class = class->next)
1254 
1255 extern const struct sched_class stop_sched_class;
1256 extern const struct sched_class dl_sched_class;
1257 extern const struct sched_class rt_sched_class;
1258 extern const struct sched_class fair_sched_class;
1259 extern const struct sched_class idle_sched_class;
1260 
1261 
1262 #ifdef CONFIG_SMP
1263 
1264 extern void update_group_capacity(struct sched_domain *sd, int cpu);
1265 
1266 extern void trigger_load_balance(struct rq *rq);
1267 
1268 extern void idle_enter_fair(struct rq *this_rq);
1269 extern void idle_exit_fair(struct rq *this_rq);
1270 
1271 #else
1272 
1273 static inline void idle_enter_fair(struct rq *rq) { }
1274 static inline void idle_exit_fair(struct rq *rq) { }
1275 
1276 #endif
1277 
1278 #ifdef CONFIG_CPU_IDLE
1279 static inline void idle_set_state(struct rq *rq,
1280 				  struct cpuidle_state *idle_state)
1281 {
1282 	rq->idle_state = idle_state;
1283 }
1284 
1285 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1286 {
1287 	WARN_ON(!rcu_read_lock_held());
1288 	return rq->idle_state;
1289 }
1290 #else
1291 static inline void idle_set_state(struct rq *rq,
1292 				  struct cpuidle_state *idle_state)
1293 {
1294 }
1295 
1296 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1297 {
1298 	return NULL;
1299 }
1300 #endif
1301 
1302 extern void sysrq_sched_debug_show(void);
1303 extern void sched_init_granularity(void);
1304 extern void update_max_interval(void);
1305 
1306 extern void init_sched_dl_class(void);
1307 extern void init_sched_rt_class(void);
1308 extern void init_sched_fair_class(void);
1309 
1310 extern void resched_curr(struct rq *rq);
1311 extern void resched_cpu(int cpu);
1312 
1313 extern struct rt_bandwidth def_rt_bandwidth;
1314 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1315 
1316 extern struct dl_bandwidth def_dl_bandwidth;
1317 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1318 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1319 
1320 unsigned long to_ratio(u64 period, u64 runtime);
1321 
1322 extern void init_task_runnable_average(struct task_struct *p);
1323 
1324 static inline void add_nr_running(struct rq *rq, unsigned count)
1325 {
1326 	unsigned prev_nr = rq->nr_running;
1327 
1328 	rq->nr_running = prev_nr + count;
1329 
1330 	if (prev_nr < 2 && rq->nr_running >= 2) {
1331 #ifdef CONFIG_SMP
1332 		if (!rq->rd->overload)
1333 			rq->rd->overload = true;
1334 #endif
1335 
1336 #ifdef CONFIG_NO_HZ_FULL
1337 		if (tick_nohz_full_cpu(rq->cpu)) {
1338 			/*
1339 			 * Tick is needed if more than one task runs on a CPU.
1340 			 * Send the target an IPI to kick it out of nohz mode.
1341 			 *
1342 			 * We assume that an IPI implies a full memory barrier and the
1343 			 * new value of rq->nr_running is visible on reception
1344 			 * from the target.
1345 			 */
1346 			tick_nohz_full_kick_cpu(rq->cpu);
1347 		}
1348 #endif
1349 	}
1350 }
1351 
1352 static inline void sub_nr_running(struct rq *rq, unsigned count)
1353 {
1354 	rq->nr_running -= count;
1355 }
1356 
1357 static inline void rq_last_tick_reset(struct rq *rq)
1358 {
1359 #ifdef CONFIG_NO_HZ_FULL
1360 	rq->last_sched_tick = jiffies;
1361 #endif
1362 }
1363 
1364 extern void update_rq_clock(struct rq *rq);
1365 
1366 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1367 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1368 
1369 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1370 
1371 extern const_debug unsigned int sysctl_sched_time_avg;
1372 extern const_debug unsigned int sysctl_sched_nr_migrate;
1373 extern const_debug unsigned int sysctl_sched_migration_cost;
1374 
1375 static inline u64 sched_avg_period(void)
1376 {
1377 	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1378 }
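/*
 * Example (illustrative, assuming the usual sysctl_sched_time_avg default of
 * 1000ms): sched_avg_period() returns 500ms worth of nanoseconds, which
 * sched_avg_update() uses as the period over which rq->rt_avg is aged.
 */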
1379 
1380 #ifdef CONFIG_SCHED_HRTICK
1381 
1382 /*
1383  * Use hrtick when:
1384  *  - enabled by features
1385  *  - hrtimer is actually high res
1386  */
1387 static inline int hrtick_enabled(struct rq *rq)
1388 {
1389 	if (!sched_feat(HRTICK))
1390 		return 0;
1391 	if (!cpu_active(cpu_of(rq)))
1392 		return 0;
1393 	return hrtimer_is_hres_active(&rq->hrtick_timer);
1394 }
1395 
1396 void hrtick_start(struct rq *rq, u64 delay);
1397 
1398 #else
1399 
1400 static inline int hrtick_enabled(struct rq *rq)
1401 {
1402 	return 0;
1403 }
1404 
1405 #endif /* CONFIG_SCHED_HRTICK */
1406 
1407 #ifdef CONFIG_SMP
1408 extern void sched_avg_update(struct rq *rq);
1409 
1410 #ifndef arch_scale_freq_capacity
1411 static __always_inline
1412 unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1413 {
1414 	return SCHED_CAPACITY_SCALE;
1415 }
1416 #endif
1417 
1418 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1419 {
1420 	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
1421 	sched_avg_update(rq);
1422 }
1423 #else
1424 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1425 static inline void sched_avg_update(struct rq *rq) { }
1426 #endif
1427 
1428 /*
1429  * __task_rq_lock - lock the rq @p resides on.
1430  */
1431 static inline struct rq *__task_rq_lock(struct task_struct *p)
1432 	__acquires(rq->lock)
1433 {
1434 	struct rq *rq;
1435 
1436 	lockdep_assert_held(&p->pi_lock);
1437 
1438 	for (;;) {
1439 		rq = task_rq(p);
1440 		raw_spin_lock(&rq->lock);
1441 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
1442 			lockdep_pin_lock(&rq->lock);
1443 			return rq;
1444 		}
1445 		raw_spin_unlock(&rq->lock);
1446 
1447 		while (unlikely(task_on_rq_migrating(p)))
1448 			cpu_relax();
1449 	}
1450 }
1451 
1452 /*
1453  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
1454  */
1455 static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
1456 	__acquires(p->pi_lock)
1457 	__acquires(rq->lock)
1458 {
1459 	struct rq *rq;
1460 
1461 	for (;;) {
1462 		raw_spin_lock_irqsave(&p->pi_lock, *flags);
1463 		rq = task_rq(p);
1464 		raw_spin_lock(&rq->lock);
1465 		/*
1466 		 *	move_queued_task()		task_rq_lock()
1467 		 *
1468 		 *	ACQUIRE (rq->lock)
1469 		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
1470 		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
1471 		 *	[S] ->cpu = new_cpu		[L] task_rq()
1472 		 *					[L] ->on_rq
1473 		 *	RELEASE (rq->lock)
1474 		 *
1475 		 * If we observe the old cpu in task_rq_lock, the acquire of
1476 		 * the old rq->lock will fully serialize against the stores.
1477 		 *
1478 		 * If we observe the new cpu in task_rq_lock, the acquire will
1479 		 * pair with the WMB to ensure we must then also see migrating.
1480 		 */
1481 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
1482 			lockdep_pin_lock(&rq->lock);
1483 			return rq;
1484 		}
1485 		raw_spin_unlock(&rq->lock);
1486 		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
1487 
1488 		while (unlikely(task_on_rq_migrating(p)))
1489 			cpu_relax();
1490 	}
1491 }
1492 
1493 static inline void __task_rq_unlock(struct rq *rq)
1494 	__releases(rq->lock)
1495 {
1496 	lockdep_unpin_lock(&rq->lock);
1497 	raw_spin_unlock(&rq->lock);
1498 }
1499 
1500 static inline void
1501 task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
1502 	__releases(rq->lock)
1503 	__releases(p->pi_lock)
1504 {
1505 	lockdep_unpin_lock(&rq->lock);
1506 	raw_spin_unlock(&rq->lock);
1507 	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
1508 }
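/*
 * Typical pairing (sketch): either take both locks explicitly,
 *
 *	raw_spin_lock_irqsave(&p->pi_lock, flags);
 *	rq = __task_rq_lock(p);
 *	...
 *	__task_rq_unlock(rq);
 *	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 *
 * or use task_rq_lock()/task_rq_unlock(), which handle p->pi_lock as well.
 */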
1509 
1510 #ifdef CONFIG_SMP
1511 #ifdef CONFIG_PREEMPT
1512 
1513 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1514 
1515 /*
1516  * fair double_lock_balance: Safely acquires both rq->locks in a fair
1517  * way at the expense of forcing extra atomic operations in all
1518  * invocations.  This assures that the double_lock is acquired using the
1519  * same underlying policy as the spinlock_t on this architecture, which
1520  * reduces latency compared to the unfair variant below.  However, it
1521  * also adds more overhead and therefore may reduce throughput.
1522  */
1523 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1524 	__releases(this_rq->lock)
1525 	__acquires(busiest->lock)
1526 	__acquires(this_rq->lock)
1527 {
1528 	raw_spin_unlock(&this_rq->lock);
1529 	double_rq_lock(this_rq, busiest);
1530 
1531 	return 1;
1532 }
1533 
1534 #else
1535 /*
1536  * Unfair double_lock_balance: Optimizes throughput at the expense of
1537  * latency by eliminating extra atomic operations when the locks are
1538  * already in proper order on entry.  This favors lower cpu-ids and will
1539  * grant the double lock to lower cpus over higher ids under contention,
1540  * regardless of entry order into the function.
1541  */
1542 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1543 	__releases(this_rq->lock)
1544 	__acquires(busiest->lock)
1545 	__acquires(this_rq->lock)
1546 {
1547 	int ret = 0;
1548 
1549 	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1550 		if (busiest < this_rq) {
1551 			raw_spin_unlock(&this_rq->lock);
1552 			raw_spin_lock(&busiest->lock);
1553 			raw_spin_lock_nested(&this_rq->lock,
1554 					      SINGLE_DEPTH_NESTING);
1555 			ret = 1;
1556 		} else
1557 			raw_spin_lock_nested(&busiest->lock,
1558 					      SINGLE_DEPTH_NESTING);
1559 	}
1560 	return ret;
1561 }
1562 
1563 #endif /* CONFIG_PREEMPT */
1564 
1565 /*
1566  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1567  */
1568 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1569 {
1570 	if (unlikely(!irqs_disabled())) {
1571 		/* printk() doesn't work well under rq->lock */
1572 		raw_spin_unlock(&this_rq->lock);
1573 		BUG_ON(1);
1574 	}
1575 
1576 	return _double_lock_balance(this_rq, busiest);
1577 }
1578 
1579 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1580 	__releases(busiest->lock)
1581 {
1582 	raw_spin_unlock(&busiest->lock);
1583 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1584 }
1585 
1586 static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
1587 {
1588 	if (l1 > l2)
1589 		swap(l1, l2);
1590 
1591 	spin_lock(l1);
1592 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1593 }
1594 
1595 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
1596 {
1597 	if (l1 > l2)
1598 		swap(l1, l2);
1599 
1600 	spin_lock_irq(l1);
1601 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1602 }
1603 
1604 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
1605 {
1606 	if (l1 > l2)
1607 		swap(l1, l2);
1608 
1609 	raw_spin_lock(l1);
1610 	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1611 }
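/*
 * Illustrative note: ordering the two locks by address means that callers
 * racing on the same pair, e.g. double_lock(a, b) vs double_lock(b, a),
 * always take the lower-address lock first and so cannot ABBA-deadlock.
 */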
1612 
1613 /*
1614  * double_rq_lock - safely lock two runqueues
1615  *
1616  * Note this does not disable interrupts like task_rq_lock;
1617  * you need to do so manually before calling.
1618  */
1619 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1620 	__acquires(rq1->lock)
1621 	__acquires(rq2->lock)
1622 {
1623 	BUG_ON(!irqs_disabled());
1624 	if (rq1 == rq2) {
1625 		raw_spin_lock(&rq1->lock);
1626 		__acquire(rq2->lock);	/* Fake it out ;) */
1627 	} else {
1628 		if (rq1 < rq2) {
1629 			raw_spin_lock(&rq1->lock);
1630 			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1631 		} else {
1632 			raw_spin_lock(&rq2->lock);
1633 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1634 		}
1635 	}
1636 }
1637 
1638 /*
1639  * double_rq_unlock - safely unlock two runqueues
1640  *
1641  * Note this does not restore interrupts like task_rq_unlock;
1642  * you need to do so manually after calling.
1643  */
1644 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1645 	__releases(rq1->lock)
1646 	__releases(rq2->lock)
1647 {
1648 	raw_spin_unlock(&rq1->lock);
1649 	if (rq1 != rq2)
1650 		raw_spin_unlock(&rq2->lock);
1651 	else
1652 		__release(rq2->lock);
1653 }
1654 
1655 #else /* CONFIG_SMP */
1656 
1657 /*
1658  * double_rq_lock - safely lock two runqueues
1659  *
1660  * Note this does not disable interrupts like task_rq_lock;
1661  * you need to do so manually before calling.
1662  */
1663 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1664 	__acquires(rq1->lock)
1665 	__acquires(rq2->lock)
1666 {
1667 	BUG_ON(!irqs_disabled());
1668 	BUG_ON(rq1 != rq2);
1669 	raw_spin_lock(&rq1->lock);
1670 	__acquire(rq2->lock);	/* Fake it out ;) */
1671 }
1672 
1673 /*
1674  * double_rq_unlock - safely unlock two runqueues
1675  *
1676  * Note this does not restore interrupts like task_rq_unlock;
1677  * you need to do so manually after calling.
1678  */
1679 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1680 	__releases(rq1->lock)
1681 	__releases(rq2->lock)
1682 {
1683 	BUG_ON(rq1 != rq2);
1684 	raw_spin_unlock(&rq1->lock);
1685 	__release(rq2->lock);
1686 }
1687 
1688 #endif
1689 
1690 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1691 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1692 
1693 #ifdef	CONFIG_SCHED_DEBUG
1694 extern void print_cfs_stats(struct seq_file *m, int cpu);
1695 extern void print_rt_stats(struct seq_file *m, int cpu);
1696 extern void print_dl_stats(struct seq_file *m, int cpu);
1697 extern void
1698 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
1699 
1700 #ifdef CONFIG_NUMA_BALANCING
1701 extern void
1702 show_numa_stats(struct task_struct *p, struct seq_file *m);
1703 extern void
1704 print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
1705 	unsigned long tpf, unsigned long gsf, unsigned long gpf);
1706 #endif /* CONFIG_NUMA_BALANCING */
1707 #endif /* CONFIG_SCHED_DEBUG */
1708 
1709 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1710 extern void init_rt_rq(struct rt_rq *rt_rq);
1711 extern void init_dl_rq(struct dl_rq *dl_rq);
1712 
1713 extern void cfs_bandwidth_usage_inc(void);
1714 extern void cfs_bandwidth_usage_dec(void);
1715 
1716 #ifdef CONFIG_NO_HZ_COMMON
1717 enum rq_nohz_flag_bits {
1718 	NOHZ_TICK_STOPPED,
1719 	NOHZ_BALANCE_KICK,
1720 };
1721 
1722 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
1723 #endif
1724 
1725 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1726 
1727 DECLARE_PER_CPU(u64, cpu_hardirq_time);
1728 DECLARE_PER_CPU(u64, cpu_softirq_time);
1729 
1730 #ifndef CONFIG_64BIT
1731 DECLARE_PER_CPU(seqcount_t, irq_time_seq);
1732 
1733 static inline void irq_time_write_begin(void)
1734 {
1735 	__this_cpu_inc(irq_time_seq.sequence);
1736 	smp_wmb();
1737 }
1738 
1739 static inline void irq_time_write_end(void)
1740 {
1741 	smp_wmb();
1742 	__this_cpu_inc(irq_time_seq.sequence);
1743 }
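/*
 * Illustrative note: the IRQ time accounting path brackets its updates with
 * irq_time_write_begin()/irq_time_write_end(), so a 32-bit reader in
 * irq_time_read() either observes a consistent hardirq/softirq pair or
 * retries via the seqcount.
 */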
1744 
1745 static inline u64 irq_time_read(int cpu)
1746 {
1747 	u64 irq_time;
1748 	unsigned seq;
1749 
1750 	do {
1751 		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1752 		irq_time = per_cpu(cpu_softirq_time, cpu) +
1753 			   per_cpu(cpu_hardirq_time, cpu);
1754 	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1755 
1756 	return irq_time;
1757 }
1758 #else /* CONFIG_64BIT */
1759 static inline void irq_time_write_begin(void)
1760 {
1761 }
1762 
1763 static inline void irq_time_write_end(void)
1764 {
1765 }
1766 
1767 static inline u64 irq_time_read(int cpu)
1768 {
1769 	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1770 }
1771 #endif /* CONFIG_64BIT */
1772 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1773