xref: /openbmc/linux/kernel/sched/sched.h (revision 13784475)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
297fb7a0aSIngo Molnar /*
397fb7a0aSIngo Molnar  * Scheduler internal types and methods:
497fb7a0aSIngo Molnar  */
5391e43daSPeter Zijlstra #include <linux/sched.h>
6325ea10cSIngo Molnar 
7dfc3401aSIngo Molnar #include <linux/sched/autogroup.h>
8e6017571SIngo Molnar #include <linux/sched/clock.h>
9325ea10cSIngo Molnar #include <linux/sched/coredump.h>
1055687da1SIngo Molnar #include <linux/sched/cpufreq.h>
11325ea10cSIngo Molnar #include <linux/sched/cputime.h>
12325ea10cSIngo Molnar #include <linux/sched/deadline.h>
13b17b0153SIngo Molnar #include <linux/sched/debug.h>
14ef8bd77fSIngo Molnar #include <linux/sched/hotplug.h>
15325ea10cSIngo Molnar #include <linux/sched/idle.h>
16325ea10cSIngo Molnar #include <linux/sched/init.h>
17325ea10cSIngo Molnar #include <linux/sched/isolation.h>
18325ea10cSIngo Molnar #include <linux/sched/jobctl.h>
19325ea10cSIngo Molnar #include <linux/sched/loadavg.h>
20325ea10cSIngo Molnar #include <linux/sched/mm.h>
21325ea10cSIngo Molnar #include <linux/sched/nohz.h>
22325ea10cSIngo Molnar #include <linux/sched/numa_balancing.h>
23325ea10cSIngo Molnar #include <linux/sched/prio.h>
24325ea10cSIngo Molnar #include <linux/sched/rt.h>
25325ea10cSIngo Molnar #include <linux/sched/signal.h>
26325ea10cSIngo Molnar #include <linux/sched/stat.h>
27325ea10cSIngo Molnar #include <linux/sched/sysctl.h>
2829930025SIngo Molnar #include <linux/sched/task.h>
2968db0cf1SIngo Molnar #include <linux/sched/task_stack.h>
30325ea10cSIngo Molnar #include <linux/sched/topology.h>
31325ea10cSIngo Molnar #include <linux/sched/user.h>
32325ea10cSIngo Molnar #include <linux/sched/wake_q.h>
33325ea10cSIngo Molnar #include <linux/sched/xacct.h>
34ef8bd77fSIngo Molnar 
35325ea10cSIngo Molnar #include <uapi/linux/sched/types.h>
36325ea10cSIngo Molnar 
373866e845SSteven Rostedt (Red Hat) #include <linux/binfmts.h>
38325ea10cSIngo Molnar #include <linux/blkdev.h>
39325ea10cSIngo Molnar #include <linux/compat.h>
40325ea10cSIngo Molnar #include <linux/context_tracking.h>
41325ea10cSIngo Molnar #include <linux/cpufreq.h>
42325ea10cSIngo Molnar #include <linux/cpuidle.h>
43325ea10cSIngo Molnar #include <linux/cpuset.h>
44325ea10cSIngo Molnar #include <linux/ctype.h>
45325ea10cSIngo Molnar #include <linux/debugfs.h>
46325ea10cSIngo Molnar #include <linux/delayacct.h>
47325ea10cSIngo Molnar #include <linux/init_task.h>
48325ea10cSIngo Molnar #include <linux/kprobes.h>
49325ea10cSIngo Molnar #include <linux/kthread.h>
50325ea10cSIngo Molnar #include <linux/membarrier.h>
51325ea10cSIngo Molnar #include <linux/migrate.h>
52325ea10cSIngo Molnar #include <linux/mmu_context.h>
53325ea10cSIngo Molnar #include <linux/nmi.h>
54325ea10cSIngo Molnar #include <linux/proc_fs.h>
55325ea10cSIngo Molnar #include <linux/prefetch.h>
56325ea10cSIngo Molnar #include <linux/profile.h>
57325ea10cSIngo Molnar #include <linux/rcupdate_wait.h>
58325ea10cSIngo Molnar #include <linux/security.h>
59325ea10cSIngo Molnar #include <linux/stackprotector.h>
60391e43daSPeter Zijlstra #include <linux/stop_machine.h>
61325ea10cSIngo Molnar #include <linux/suspend.h>
62325ea10cSIngo Molnar #include <linux/swait.h>
63325ea10cSIngo Molnar #include <linux/syscalls.h>
64325ea10cSIngo Molnar #include <linux/task_work.h>
65325ea10cSIngo Molnar #include <linux/tsacct_kern.h>
66325ea10cSIngo Molnar 
67325ea10cSIngo Molnar #include <asm/tlb.h>
68391e43daSPeter Zijlstra 
697fce777cSIngo Molnar #ifdef CONFIG_PARAVIRT
707fce777cSIngo Molnar # include <asm/paravirt.h>
717fce777cSIngo Molnar #endif
727fce777cSIngo Molnar 
73391e43daSPeter Zijlstra #include "cpupri.h"
746bfd6d72SJuri Lelli #include "cpudeadline.h"
75391e43daSPeter Zijlstra 
769148a3a1SPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
779148a3a1SPeter Zijlstra # define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
789148a3a1SPeter Zijlstra #else
796d3aed3dSIngo Molnar # define SCHED_WARN_ON(x)	({ (void)(x), 0; })
809148a3a1SPeter Zijlstra #endif
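
/*
 * Illustrative sketch: SCHED_WARN_ON() warns once per condition when
 * CONFIG_SCHED_DEBUG is set and degrades to a plain evaluation of the
 * condition otherwise, so it is safe in hot paths.
 * sched_check_runtime() is a hypothetical helper, not a kernel symbol:
 */
static inline void sched_check_runtime(s64 runtime)
{
	SCHED_WARN_ON(runtime < 0);	/* fires at most once when debugging */
}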
819148a3a1SPeter Zijlstra 
8245ceebf7SPaul Gortmaker struct rq;
83442bf3aaSDaniel Lezcano struct cpuidle_state;
8445ceebf7SPaul Gortmaker 
85da0c1e65SKirill Tkhai /* task_struct::on_rq states: */
86da0c1e65SKirill Tkhai #define TASK_ON_RQ_QUEUED	1
87cca26e80SKirill Tkhai #define TASK_ON_RQ_MIGRATING	2
88da0c1e65SKirill Tkhai 
89391e43daSPeter Zijlstra extern __read_mostly int scheduler_running;
90391e43daSPeter Zijlstra 
9145ceebf7SPaul Gortmaker extern unsigned long calc_load_update;
9245ceebf7SPaul Gortmaker extern atomic_long_t calc_load_tasks;
9345ceebf7SPaul Gortmaker 
943289bdb4SPeter Zijlstra extern void calc_global_load_tick(struct rq *this_rq);
95d60585c5SThomas Gleixner extern long calc_load_fold_active(struct rq *this_rq, long adjust);
963289bdb4SPeter Zijlstra 
973289bdb4SPeter Zijlstra #ifdef CONFIG_SMP
98cee1afceSFrederic Weisbecker extern void cpu_load_update_active(struct rq *this_rq);
993289bdb4SPeter Zijlstra #else
100cee1afceSFrederic Weisbecker static inline void cpu_load_update_active(struct rq *this_rq) { }
1013289bdb4SPeter Zijlstra #endif
10245ceebf7SPaul Gortmaker 
103391e43daSPeter Zijlstra /*
104391e43daSPeter Zijlstra  * Helpers for converting nanosecond timing to jiffy resolution
105391e43daSPeter Zijlstra  */
106391e43daSPeter Zijlstra #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
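
/*
 * Worked example, assuming HZ == 250 (one jiffy == 4,000,000 ns):
 *
 *	NS_TO_JIFFIES(10000000) == 10000000 / 4000000 == 2
 *
 * The integer division truncates, so the remaining 2 ms are dropped.
 */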
107391e43daSPeter Zijlstra 
108cc1f4b1fSLi Zefan /*
109cc1f4b1fSLi Zefan  * Increase resolution of nice-level calculations for 64-bit architectures.
110cc1f4b1fSLi Zefan  * The extra resolution improves shares distribution and load balancing of
111cc1f4b1fSLi Zefan  * low-weight task groups (e.g. nice +19 on an autogroup), deeper task group
112cc1f4b1fSLi Zefan  * hierarchies, especially on larger systems. This is not a user-visible change
113cc1f4b1fSLi Zefan  * and does not change the user-interface for setting shares/weights.
114cc1f4b1fSLi Zefan  *
115cc1f4b1fSLi Zefan  * We increase resolution only if we have enough bits to allow this increased
11697fb7a0aSIngo Molnar  * resolution (i.e. 64-bit). The costs of increasing resolution on 32-bit
11797fb7a0aSIngo Molnar  * are pretty high and the returns do not justify the increased costs.
1182159197dSPeter Zijlstra  *
11997fb7a0aSIngo Molnar  * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
12097fb7a0aSIngo Molnar  * increase coverage and consistency always enable it on 64-bit platforms.
121cc1f4b1fSLi Zefan  */
1222159197dSPeter Zijlstra #ifdef CONFIG_64BIT
123172895e6SYuyang Du # define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
1246ecdd749SYuyang Du # define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
1256ecdd749SYuyang Du # define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
126cc1f4b1fSLi Zefan #else
127172895e6SYuyang Du # define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
128cc1f4b1fSLi Zefan # define scale_load(w)		(w)
129cc1f4b1fSLi Zefan # define scale_load_down(w)	(w)
130cc1f4b1fSLi Zefan #endif
131cc1f4b1fSLi Zefan 
1326ecdd749SYuyang Du /*
133172895e6SYuyang Du  * Task weight (visible to users) and its load (invisible to users) have
134172895e6SYuyang Du  * independent resolution, but they should be well calibrated. We use
135172895e6SYuyang Du  * scale_load() and scale_load_down() to convert between them. The
136172895e6SYuyang Du  * following must be true:
137172895e6SYuyang Du  *
138172895e6SYuyang Du  *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
139172895e6SYuyang Du  *
1406ecdd749SYuyang Du  */
141172895e6SYuyang Du #define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
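
/*
 * Worked example on 64-bit, where SCHED_FIXEDPOINT_SHIFT is 10:
 *
 *	nice-0 weight (user visible)  = 1024              = 1 << 10
 *	scale_load(1024)              = 1024 << 10        = 1 << 20 == NICE_0_LOAD
 *	scale_load_down(NICE_0_LOAD)  = (1 << 20) >> 10   = 1024
 *
 * On 32-bit both macros are the identity and NICE_0_LOAD == 1 << 10.
 */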
142391e43daSPeter Zijlstra 
143391e43daSPeter Zijlstra /*
144332ac17eSDario Faggioli  * Single value that decides SCHED_DEADLINE internal math precision.
145332ac17eSDario Faggioli  * 10 -> just above 1us
146332ac17eSDario Faggioli  * 9  -> just above 0.5us
147332ac17eSDario Faggioli  */
14897fb7a0aSIngo Molnar #define DL_SCALE		10
149332ac17eSDario Faggioli 
150332ac17eSDario Faggioli /*
15197fb7a0aSIngo Molnar  * Single value that denotes runtime == period, i.e. unlimited time.
152391e43daSPeter Zijlstra  */
153391e43daSPeter Zijlstra #define RUNTIME_INF		((u64)~0ULL)
154391e43daSPeter Zijlstra 
15520f9cd2aSHenrik Austad static inline int idle_policy(int policy)
15620f9cd2aSHenrik Austad {
15720f9cd2aSHenrik Austad 	return policy == SCHED_IDLE;
15820f9cd2aSHenrik Austad }
159d50dde5aSDario Faggioli static inline int fair_policy(int policy)
160d50dde5aSDario Faggioli {
161d50dde5aSDario Faggioli 	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
162d50dde5aSDario Faggioli }
163d50dde5aSDario Faggioli 
164391e43daSPeter Zijlstra static inline int rt_policy(int policy)
165391e43daSPeter Zijlstra {
166d50dde5aSDario Faggioli 	return policy == SCHED_FIFO || policy == SCHED_RR;
167391e43daSPeter Zijlstra }
168391e43daSPeter Zijlstra 
169aab03e05SDario Faggioli static inline int dl_policy(int policy)
170aab03e05SDario Faggioli {
171aab03e05SDario Faggioli 	return policy == SCHED_DEADLINE;
172aab03e05SDario Faggioli }
17320f9cd2aSHenrik Austad static inline bool valid_policy(int policy)
17420f9cd2aSHenrik Austad {
17520f9cd2aSHenrik Austad 	return idle_policy(policy) || fair_policy(policy) ||
17620f9cd2aSHenrik Austad 		rt_policy(policy) || dl_policy(policy);
17720f9cd2aSHenrik Austad }
178aab03e05SDario Faggioli 
179391e43daSPeter Zijlstra static inline int task_has_rt_policy(struct task_struct *p)
180391e43daSPeter Zijlstra {
181391e43daSPeter Zijlstra 	return rt_policy(p->policy);
182391e43daSPeter Zijlstra }
183391e43daSPeter Zijlstra 
184aab03e05SDario Faggioli static inline int task_has_dl_policy(struct task_struct *p)
185aab03e05SDario Faggioli {
186aab03e05SDario Faggioli 	return dl_policy(p->policy);
187aab03e05SDario Faggioli }
188aab03e05SDario Faggioli 
18907881166SJuri Lelli #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
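
/*
 * Capacities are fixed point with SCHED_CAPACITY_SHIFT == 10, so a
 * capacity of 512 means "half of one full CPU". Worked example:
 *
 *	cap_scale(800, 512) == (800 * 512) >> 10 == 400
 */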
19007881166SJuri Lelli 
1912d3d891dSDario Faggioli /*
192794a56ebSJuri Lelli  * !! For sched_setattr_nocheck() (kernel) only !!
193794a56ebSJuri Lelli  *
194794a56ebSJuri Lelli  * This is actually gross. :(
195794a56ebSJuri Lelli  *
196794a56ebSJuri Lelli  * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
197794a56ebSJuri Lelli  * tasks, but still be able to sleep. We need this on platforms that cannot
198794a56ebSJuri Lelli  * atomically change clock frequency. Remove once fast switching will be
199794a56ebSJuri Lelli  * atomically change clock frequency. Remove once fast switching is
200794a56ebSJuri Lelli  *
201794a56ebSJuri Lelli  * SUGOV stands for SchedUtil GOVernor.
202794a56ebSJuri Lelli  */
203794a56ebSJuri Lelli #define SCHED_FLAG_SUGOV	0x10000000
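
/*
 * Illustrative setup, modeled on the schedutil governor's kthread
 * creation (a sketch; sugov_example_setattr() is not a kernel symbol):
 * the worker becomes a special, zero-bandwidth SCHED_DEADLINE task
 * that bypasses deadline admission control.
 */
static inline int sugov_example_setattr(struct task_struct *thread)
{
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
	};

	return sched_setattr_nocheck(thread, &attr);
}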
204794a56ebSJuri Lelli 
205794a56ebSJuri Lelli static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
206794a56ebSJuri Lelli {
207794a56ebSJuri Lelli #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
208794a56ebSJuri Lelli 	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
209794a56ebSJuri Lelli #else
210794a56ebSJuri Lelli 	return false;
211794a56ebSJuri Lelli #endif
212794a56ebSJuri Lelli }
213794a56ebSJuri Lelli 
214794a56ebSJuri Lelli /*
2152d3d891dSDario Faggioli  * Tells if entity @a should preempt entity @b.
2162d3d891dSDario Faggioli  */
217332ac17eSDario Faggioli static inline bool
218332ac17eSDario Faggioli dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
2192d3d891dSDario Faggioli {
220794a56ebSJuri Lelli 	return dl_entity_is_special(a) ||
221794a56ebSJuri Lelli 	       dl_time_before(a->deadline, b->deadline);
2222d3d891dSDario Faggioli }
2232d3d891dSDario Faggioli 
224391e43daSPeter Zijlstra /*
225391e43daSPeter Zijlstra  * This is the priority-queue data structure of the RT scheduling class:
226391e43daSPeter Zijlstra  */
227391e43daSPeter Zijlstra struct rt_prio_array {
228391e43daSPeter Zijlstra 	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
229391e43daSPeter Zijlstra 	struct list_head queue[MAX_RT_PRIO];
230391e43daSPeter Zijlstra };
231391e43daSPeter Zijlstra 
232391e43daSPeter Zijlstra struct rt_bandwidth {
233391e43daSPeter Zijlstra 	/* nests inside the rq lock: */
234391e43daSPeter Zijlstra 	raw_spinlock_t		rt_runtime_lock;
235391e43daSPeter Zijlstra 	ktime_t			rt_period;
236391e43daSPeter Zijlstra 	u64			rt_runtime;
237391e43daSPeter Zijlstra 	struct hrtimer		rt_period_timer;
2384cfafd30SPeter Zijlstra 	unsigned int		rt_period_active;
239391e43daSPeter Zijlstra };
240a5e7be3bSJuri Lelli 
241a5e7be3bSJuri Lelli void __dl_clear_params(struct task_struct *p);
242a5e7be3bSJuri Lelli 
243332ac17eSDario Faggioli /*
244332ac17eSDario Faggioli  * To keep the bandwidth of -deadline tasks and groups under control
245332ac17eSDario Faggioli  * we need some place where:
246332ac17eSDario Faggioli  *  - store the maximum -deadline bandwidth of the system (the group);
247332ac17eSDario Faggioli  *  - cache the fraction of that bandwidth that is currently allocated.
248332ac17eSDario Faggioli  *
249332ac17eSDario Faggioli  * This is all done in the data structure below. It is similar to the
250332ac17eSDario Faggioli  * one used for RT-throttling (rt_bandwidth), with the main difference
251332ac17eSDario Faggioli  * that, since here we are only interested in admission control, we
252332ac17eSDario Faggioli  * do not decrease any runtime while the group "executes", nor do we
253332ac17eSDario Faggioli  * need a timer to replenish it.
254332ac17eSDario Faggioli  *
255332ac17eSDario Faggioli  * With respect to SMP, the bandwidth is given on a per-CPU basis,
256332ac17eSDario Faggioli  * meaning that:
257332ac17eSDario Faggioli  *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
258332ac17eSDario Faggioli  *  - dl_total_bw array contains, in the i-th element, the currently
259332ac17eSDario Faggioli  *    allocated bandwidth on the i-th CPU.
260332ac17eSDario Faggioli  * Moreover, groups consume bandwidth on each CPU, while tasks only
261332ac17eSDario Faggioli  * consume bandwidth on the CPU they're running on.
262332ac17eSDario Faggioli  * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
263332ac17eSDario Faggioli  * that will be shown the next time the proc or cgroup controls are
264332ac17eSDario Faggioli  * read. It can in turn be changed by writing to its own
265332ac17eSDario Faggioli  * control.
266332ac17eSDario Faggioli  */
267332ac17eSDario Faggioli struct dl_bandwidth {
268332ac17eSDario Faggioli 	raw_spinlock_t		dl_runtime_lock;
269332ac17eSDario Faggioli 	u64			dl_runtime;
270332ac17eSDario Faggioli 	u64			dl_period;
271332ac17eSDario Faggioli };
272332ac17eSDario Faggioli 
273332ac17eSDario Faggioli static inline int dl_bandwidth_enabled(void)
274332ac17eSDario Faggioli {
2751724813dSPeter Zijlstra 	return sysctl_sched_rt_runtime >= 0;
276332ac17eSDario Faggioli }
277332ac17eSDario Faggioli 
278332ac17eSDario Faggioli struct dl_bw {
279332ac17eSDario Faggioli 	raw_spinlock_t		lock;
28097fb7a0aSIngo Molnar 	u64			bw;
28197fb7a0aSIngo Molnar 	u64			total_bw;
282332ac17eSDario Faggioli };
283332ac17eSDario Faggioli 
284daec5798SLuca Abeni static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
285daec5798SLuca Abeni 
2867f51412aSJuri Lelli static inline
2878c0944ceSPeter Zijlstra void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
2887f51412aSJuri Lelli {
2897f51412aSJuri Lelli 	dl_b->total_bw -= tsk_bw;
290daec5798SLuca Abeni 	__dl_update(dl_b, (s32)tsk_bw / cpus);
2917f51412aSJuri Lelli }
2927f51412aSJuri Lelli 
2937f51412aSJuri Lelli static inline
294daec5798SLuca Abeni void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
2957f51412aSJuri Lelli {
2967f51412aSJuri Lelli 	dl_b->total_bw += tsk_bw;
297daec5798SLuca Abeni 	__dl_update(dl_b, -((s32)tsk_bw / cpus));
2987f51412aSJuri Lelli }
2997f51412aSJuri Lelli 
3007f51412aSJuri Lelli static inline
3017f51412aSJuri Lelli bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
3027f51412aSJuri Lelli {
3037f51412aSJuri Lelli 	return dl_b->bw != -1 &&
3047f51412aSJuri Lelli 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
3057f51412aSJuri Lelli }
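
/*
 * Hypothetical admission sketch (dl_admit() is illustrative, not a
 * kernel symbol): a task holding no bandwidth yet (old_bw == 0) that
 * asks for @new_bw on a @cpus-wide domain is admitted only while the
 * per-CPU limit dl_b->bw, summed over all CPUs, is not exceeded:
 */
static inline bool dl_admit(struct dl_bw *dl_b, int cpus, u64 new_bw)
{
	return !__dl_overflow(dl_b, cpus, 0, new_bw);
}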
3067f51412aSJuri Lelli 
30797fb7a0aSIngo Molnar extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
308f2cb1360SIngo Molnar extern void init_dl_bw(struct dl_bw *dl_b);
30906a76fe0SNicolas Pitre extern int  sched_dl_global_validate(void);
31006a76fe0SNicolas Pitre extern void sched_dl_do_global(void);
31197fb7a0aSIngo Molnar extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
31206a76fe0SNicolas Pitre extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
31306a76fe0SNicolas Pitre extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
31406a76fe0SNicolas Pitre extern bool __checkparam_dl(const struct sched_attr *attr);
31506a76fe0SNicolas Pitre extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
31697fb7a0aSIngo Molnar extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
31797fb7a0aSIngo Molnar extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
31806a76fe0SNicolas Pitre extern bool dl_cpu_busy(unsigned int cpu);
319391e43daSPeter Zijlstra 
320391e43daSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
321391e43daSPeter Zijlstra 
322391e43daSPeter Zijlstra #include <linux/cgroup.h>
323391e43daSPeter Zijlstra 
324391e43daSPeter Zijlstra struct cfs_rq;
325391e43daSPeter Zijlstra struct rt_rq;
326391e43daSPeter Zijlstra 
32735cf4e50SMike Galbraith extern struct list_head task_groups;
328391e43daSPeter Zijlstra 
329391e43daSPeter Zijlstra struct cfs_bandwidth {
330391e43daSPeter Zijlstra #ifdef CONFIG_CFS_BANDWIDTH
331391e43daSPeter Zijlstra 	raw_spinlock_t		lock;
332391e43daSPeter Zijlstra 	ktime_t			period;
33397fb7a0aSIngo Molnar 	u64			quota;
33497fb7a0aSIngo Molnar 	u64			runtime;
3359c58c79aSZhihui Zhang 	s64			hierarchical_quota;
336391e43daSPeter Zijlstra 	u64			runtime_expires;
337391e43daSPeter Zijlstra 
33897fb7a0aSIngo Molnar 	int			idle;
33997fb7a0aSIngo Molnar 	int			period_active;
34097fb7a0aSIngo Molnar 	struct hrtimer		period_timer;
34197fb7a0aSIngo Molnar 	struct hrtimer		slack_timer;
342391e43daSPeter Zijlstra 	struct list_head	throttled_cfs_rq;
343391e43daSPeter Zijlstra 
34497fb7a0aSIngo Molnar 	/* Statistics: */
34597fb7a0aSIngo Molnar 	int			nr_periods;
34697fb7a0aSIngo Molnar 	int			nr_throttled;
347391e43daSPeter Zijlstra 	u64			throttled_time;
348391e43daSPeter Zijlstra #endif
349391e43daSPeter Zijlstra };
350391e43daSPeter Zijlstra 
35197fb7a0aSIngo Molnar /* Task group related information */
352391e43daSPeter Zijlstra struct task_group {
353391e43daSPeter Zijlstra 	struct cgroup_subsys_state css;
354391e43daSPeter Zijlstra 
355391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
35697fb7a0aSIngo Molnar 	/* schedulable entities of this group on each CPU */
357391e43daSPeter Zijlstra 	struct sched_entity	**se;
35897fb7a0aSIngo Molnar 	/* runqueue "owned" by this group on each CPU */
359391e43daSPeter Zijlstra 	struct cfs_rq		**cfs_rq;
360391e43daSPeter Zijlstra 	unsigned long		shares;
361391e43daSPeter Zijlstra 
362fa6bddebSAlex Shi #ifdef	CONFIG_SMP
363b0367629SWaiman Long 	/*
364b0367629SWaiman Long 	 * load_avg can be heavily contended at clock tick time, so put
365b0367629SWaiman Long 	 * it in its own cacheline separated from the fields above which
366b0367629SWaiman Long 	 * will also be accessed at each tick.
367b0367629SWaiman Long 	 */
368b0367629SWaiman Long 	atomic_long_t		load_avg ____cacheline_aligned;
369391e43daSPeter Zijlstra #endif
370fa6bddebSAlex Shi #endif
371391e43daSPeter Zijlstra 
372391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
373391e43daSPeter Zijlstra 	struct sched_rt_entity	**rt_se;
374391e43daSPeter Zijlstra 	struct rt_rq		**rt_rq;
375391e43daSPeter Zijlstra 
376391e43daSPeter Zijlstra 	struct rt_bandwidth	rt_bandwidth;
377391e43daSPeter Zijlstra #endif
378391e43daSPeter Zijlstra 
379391e43daSPeter Zijlstra 	struct rcu_head		rcu;
380391e43daSPeter Zijlstra 	struct list_head	list;
381391e43daSPeter Zijlstra 
382391e43daSPeter Zijlstra 	struct task_group	*parent;
383391e43daSPeter Zijlstra 	struct list_head	siblings;
384391e43daSPeter Zijlstra 	struct list_head	children;
385391e43daSPeter Zijlstra 
386391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_AUTOGROUP
387391e43daSPeter Zijlstra 	struct autogroup	*autogroup;
388391e43daSPeter Zijlstra #endif
389391e43daSPeter Zijlstra 
390391e43daSPeter Zijlstra 	struct cfs_bandwidth	cfs_bandwidth;
391391e43daSPeter Zijlstra };
392391e43daSPeter Zijlstra 
393391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
394391e43daSPeter Zijlstra #define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
395391e43daSPeter Zijlstra 
396391e43daSPeter Zijlstra /*
397391e43daSPeter Zijlstra  * A weight of 0 or 1 can cause arithmetic problems.
398391e43daSPeter Zijlstra  * The weight of a cfs_rq is the sum of the weights of the entities
399391e43daSPeter Zijlstra  * queued on it, so the weight of an entity should not be too large,
400391e43daSPeter Zijlstra  * and neither should the shares value of a task group.
401391e43daSPeter Zijlstra  * (The default weight is 1024 - so there's no practical
402391e43daSPeter Zijlstra  *  limitation from this.)
403391e43daSPeter Zijlstra  */
404391e43daSPeter Zijlstra #define MIN_SHARES		(1UL <<  1)
405391e43daSPeter Zijlstra #define MAX_SHARES		(1UL << 18)
406391e43daSPeter Zijlstra #endif
407391e43daSPeter Zijlstra 
408391e43daSPeter Zijlstra typedef int (*tg_visitor)(struct task_group *, void *);
409391e43daSPeter Zijlstra 
410391e43daSPeter Zijlstra extern int walk_tg_tree_from(struct task_group *from,
411391e43daSPeter Zijlstra 			     tg_visitor down, tg_visitor up, void *data);
412391e43daSPeter Zijlstra 
413391e43daSPeter Zijlstra /*
414391e43daSPeter Zijlstra  * Iterate the full tree, calling @down when first entering a node and @up when
415391e43daSPeter Zijlstra  * leaving it for the final time.
416391e43daSPeter Zijlstra  *
417391e43daSPeter Zijlstra  * Caller must hold the RCU read lock or a sufficient equivalent.
418391e43daSPeter Zijlstra  */
419391e43daSPeter Zijlstra static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
420391e43daSPeter Zijlstra {
421391e43daSPeter Zijlstra 	return walk_tg_tree_from(&root_task_group, down, up, data);
422391e43daSPeter Zijlstra }
423391e43daSPeter Zijlstra 
424391e43daSPeter Zijlstra extern int tg_nop(struct task_group *tg, void *data);
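
/*
 * Illustrative walk (tg_count_one() and count_task_groups() are
 * hypothetical, not kernel symbols): count the groups below the root.
 * A non-zero return from a visitor aborts the walk; tg_nop() is the
 * conventional "do nothing" callback for the direction not needed.
 */
static inline int tg_count_one(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;
}

static inline int count_task_groups(void)
{
	int count = 0;

	rcu_read_lock();
	walk_tg_tree(tg_count_one, tg_nop, &count);
	rcu_read_unlock();

	return count;
}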
425391e43daSPeter Zijlstra 
426391e43daSPeter Zijlstra extern void free_fair_sched_group(struct task_group *tg);
427391e43daSPeter Zijlstra extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
4288663e24dSPeter Zijlstra extern void online_fair_sched_group(struct task_group *tg);
4296fe1f348SPeter Zijlstra extern void unregister_fair_sched_group(struct task_group *tg);
430391e43daSPeter Zijlstra extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
431391e43daSPeter Zijlstra 			struct sched_entity *se, int cpu,
432391e43daSPeter Zijlstra 			struct sched_entity *parent);
433391e43daSPeter Zijlstra extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
434391e43daSPeter Zijlstra 
435391e43daSPeter Zijlstra extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
43677a4d1a1SPeter Zijlstra extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
437391e43daSPeter Zijlstra extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
438391e43daSPeter Zijlstra 
439391e43daSPeter Zijlstra extern void free_rt_sched_group(struct task_group *tg);
440391e43daSPeter Zijlstra extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
441391e43daSPeter Zijlstra extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
442391e43daSPeter Zijlstra 		struct sched_rt_entity *rt_se, int cpu,
443391e43daSPeter Zijlstra 		struct sched_rt_entity *parent);
4448887cd99SNicolas Pitre extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
4458887cd99SNicolas Pitre extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
4468887cd99SNicolas Pitre extern long sched_group_rt_runtime(struct task_group *tg);
4478887cd99SNicolas Pitre extern long sched_group_rt_period(struct task_group *tg);
4488887cd99SNicolas Pitre extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
449391e43daSPeter Zijlstra 
45025cc7da7SLi Zefan extern struct task_group *sched_create_group(struct task_group *parent);
45125cc7da7SLi Zefan extern void sched_online_group(struct task_group *tg,
45225cc7da7SLi Zefan 			       struct task_group *parent);
45325cc7da7SLi Zefan extern void sched_destroy_group(struct task_group *tg);
45425cc7da7SLi Zefan extern void sched_offline_group(struct task_group *tg);
45525cc7da7SLi Zefan 
45625cc7da7SLi Zefan extern void sched_move_task(struct task_struct *tsk);
45725cc7da7SLi Zefan 
45825cc7da7SLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED
45925cc7da7SLi Zefan extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
460ad936d86SByungchul Park 
461ad936d86SByungchul Park #ifdef CONFIG_SMP
462ad936d86SByungchul Park extern void set_task_rq_fair(struct sched_entity *se,
463ad936d86SByungchul Park 			     struct cfs_rq *prev, struct cfs_rq *next);
464ad936d86SByungchul Park #else /* !CONFIG_SMP */
465ad936d86SByungchul Park static inline void set_task_rq_fair(struct sched_entity *se,
466ad936d86SByungchul Park 			     struct cfs_rq *prev, struct cfs_rq *next) { }
467ad936d86SByungchul Park #endif /* CONFIG_SMP */
468ad936d86SByungchul Park #endif /* CONFIG_FAIR_GROUP_SCHED */
46925cc7da7SLi Zefan 
470391e43daSPeter Zijlstra #else /* CONFIG_CGROUP_SCHED */
471391e43daSPeter Zijlstra 
472391e43daSPeter Zijlstra struct cfs_bandwidth { };
473391e43daSPeter Zijlstra 
474391e43daSPeter Zijlstra #endif	/* CONFIG_CGROUP_SCHED */
475391e43daSPeter Zijlstra 
476391e43daSPeter Zijlstra /* CFS-related fields in a runqueue */
477391e43daSPeter Zijlstra struct cfs_rq {
478391e43daSPeter Zijlstra 	struct load_weight	load;
4791ea6c46aSPeter Zijlstra 	unsigned long		runnable_weight;
48097fb7a0aSIngo Molnar 	unsigned int		nr_running;
48197fb7a0aSIngo Molnar 	unsigned int		h_nr_running;
482391e43daSPeter Zijlstra 
483391e43daSPeter Zijlstra 	u64			exec_clock;
484391e43daSPeter Zijlstra 	u64			min_vruntime;
485391e43daSPeter Zijlstra #ifndef CONFIG_64BIT
486391e43daSPeter Zijlstra 	u64			min_vruntime_copy;
487391e43daSPeter Zijlstra #endif
488391e43daSPeter Zijlstra 
489bfb06889SDavidlohr Bueso 	struct rb_root_cached	tasks_timeline;
490391e43daSPeter Zijlstra 
491391e43daSPeter Zijlstra 	/*
492391e43daSPeter Zijlstra 	 * 'curr' points to the currently running entity on this cfs_rq.
493391e43daSPeter Zijlstra 	 * It is set to NULL otherwise (i.e. when none is currently running).
494391e43daSPeter Zijlstra 	 */
49597fb7a0aSIngo Molnar 	struct sched_entity	*curr;
49697fb7a0aSIngo Molnar 	struct sched_entity	*next;
49797fb7a0aSIngo Molnar 	struct sched_entity	*last;
49897fb7a0aSIngo Molnar 	struct sched_entity	*skip;
499391e43daSPeter Zijlstra 
500391e43daSPeter Zijlstra #ifdef	CONFIG_SCHED_DEBUG
501391e43daSPeter Zijlstra 	unsigned int		nr_spread_over;
502391e43daSPeter Zijlstra #endif
503391e43daSPeter Zijlstra 
5042dac754eSPaul Turner #ifdef CONFIG_SMP
5052dac754eSPaul Turner 	/*
5069d89c257SYuyang Du 	 * CFS load tracking
5072dac754eSPaul Turner 	 */
5089d89c257SYuyang Du 	struct sched_avg	avg;
5092a2f5d4eSPeter Zijlstra #ifndef CONFIG_64BIT
5102a2f5d4eSPeter Zijlstra 	u64			load_last_update_time_copy;
5112a2f5d4eSPeter Zijlstra #endif
5122a2f5d4eSPeter Zijlstra 	struct {
5132a2f5d4eSPeter Zijlstra 		raw_spinlock_t	lock ____cacheline_aligned;
5142a2f5d4eSPeter Zijlstra 		int		nr;
5152a2f5d4eSPeter Zijlstra 		unsigned long	load_avg;
5162a2f5d4eSPeter Zijlstra 		unsigned long	util_avg;
5170e2d2aaaSPeter Zijlstra 		unsigned long	runnable_sum;
5182a2f5d4eSPeter Zijlstra 	} removed;
519141965c7SAlex Shi 
520c566e8e9SPaul Turner #ifdef CONFIG_FAIR_GROUP_SCHED
5210e2d2aaaSPeter Zijlstra 	unsigned long		tg_load_avg_contrib;
5220e2d2aaaSPeter Zijlstra 	long			propagate;
5230e2d2aaaSPeter Zijlstra 	long			prop_runnable_sum;
5240e2d2aaaSPeter Zijlstra 
52582958366SPaul Turner 	/*
52682958366SPaul Turner 	 *   h_load = weight * f(tg)
52782958366SPaul Turner 	 *
52882958366SPaul Turner 	 * Where f(tg) is the recursive weight fraction assigned to
52982958366SPaul Turner 	 * this group.
53082958366SPaul Turner 	 */
53182958366SPaul Turner 	unsigned long		h_load;
53268520796SVladimir Davydov 	u64			last_h_load_update;
53368520796SVladimir Davydov 	struct sched_entity	*h_load_next;
53468520796SVladimir Davydov #endif /* CONFIG_FAIR_GROUP_SCHED */
53582958366SPaul Turner #endif /* CONFIG_SMP */
53682958366SPaul Turner 
537391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
53897fb7a0aSIngo Molnar 	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */
539391e43daSPeter Zijlstra 
540391e43daSPeter Zijlstra 	/*
541391e43daSPeter Zijlstra 	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
542391e43daSPeter Zijlstra  * a hierarchy). Non-leaf cfs_rqs hold other, higher schedulable entities
543391e43daSPeter Zijlstra 	 * (like users, containers etc.)
544391e43daSPeter Zijlstra 	 *
54597fb7a0aSIngo Molnar 	 * leaf_cfs_rq_list ties together the list of leaf cfs_rqs on a CPU.
54697fb7a0aSIngo Molnar 	 * This list is used during load balance.
547391e43daSPeter Zijlstra 	 */
548391e43daSPeter Zijlstra 	int			on_list;
549391e43daSPeter Zijlstra 	struct list_head	leaf_cfs_rq_list;
550391e43daSPeter Zijlstra 	struct task_group	*tg;	/* group that "owns" this runqueue */
551391e43daSPeter Zijlstra 
552391e43daSPeter Zijlstra #ifdef CONFIG_CFS_BANDWIDTH
553391e43daSPeter Zijlstra 	int			runtime_enabled;
554391e43daSPeter Zijlstra 	u64			runtime_expires;
555391e43daSPeter Zijlstra 	s64			runtime_remaining;
556391e43daSPeter Zijlstra 
55797fb7a0aSIngo Molnar 	u64			throttled_clock;
55897fb7a0aSIngo Molnar 	u64			throttled_clock_task;
559f1b17280SPaul Turner 	u64			throttled_clock_task_time;
56097fb7a0aSIngo Molnar 	int			throttled;
56197fb7a0aSIngo Molnar 	int			throttle_count;
562391e43daSPeter Zijlstra 	struct list_head	throttled_list;
563391e43daSPeter Zijlstra #endif /* CONFIG_CFS_BANDWIDTH */
564391e43daSPeter Zijlstra #endif /* CONFIG_FAIR_GROUP_SCHED */
565391e43daSPeter Zijlstra };
566391e43daSPeter Zijlstra 
567391e43daSPeter Zijlstra static inline int rt_bandwidth_enabled(void)
568391e43daSPeter Zijlstra {
569391e43daSPeter Zijlstra 	return sysctl_sched_rt_runtime >= 0;
570391e43daSPeter Zijlstra }
571391e43daSPeter Zijlstra 
572b6366f04SSteven Rostedt /* RT IPI pull logic requires IRQ_WORK */
5734bdced5cSSteven Rostedt (Red Hat) #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
574b6366f04SSteven Rostedt # define HAVE_RT_PUSH_IPI
575b6366f04SSteven Rostedt #endif
576b6366f04SSteven Rostedt 
577391e43daSPeter Zijlstra /* Real-Time classes' related field in a runqueue: */
578391e43daSPeter Zijlstra struct rt_rq {
579391e43daSPeter Zijlstra 	struct rt_prio_array	active;
580c82513e5SPeter Zijlstra 	unsigned int		rt_nr_running;
58101d36d0aSFrederic Weisbecker 	unsigned int		rr_nr_running;
582391e43daSPeter Zijlstra #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
583391e43daSPeter Zijlstra 	struct {
584391e43daSPeter Zijlstra 		int		curr; /* highest queued rt task prio */
585391e43daSPeter Zijlstra #ifdef CONFIG_SMP
586391e43daSPeter Zijlstra 		int		next; /* next highest */
587391e43daSPeter Zijlstra #endif
588391e43daSPeter Zijlstra 	} highest_prio;
589391e43daSPeter Zijlstra #endif
590391e43daSPeter Zijlstra #ifdef CONFIG_SMP
591391e43daSPeter Zijlstra 	unsigned long		rt_nr_migratory;
592391e43daSPeter Zijlstra 	unsigned long		rt_nr_total;
593391e43daSPeter Zijlstra 	int			overloaded;
594391e43daSPeter Zijlstra 	struct plist_head	pushable_tasks;
595b6366f04SSteven Rostedt #endif /* CONFIG_SMP */
596f4ebcbc0SKirill Tkhai 	int			rt_queued;
597f4ebcbc0SKirill Tkhai 
598391e43daSPeter Zijlstra 	int			rt_throttled;
599391e43daSPeter Zijlstra 	u64			rt_time;
600391e43daSPeter Zijlstra 	u64			rt_runtime;
601391e43daSPeter Zijlstra 	/* Nests inside the rq lock: */
602391e43daSPeter Zijlstra 	raw_spinlock_t		rt_runtime_lock;
603391e43daSPeter Zijlstra 
604391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
605391e43daSPeter Zijlstra 	unsigned long		rt_nr_boosted;
606391e43daSPeter Zijlstra 
607391e43daSPeter Zijlstra 	struct rq		*rq;
608391e43daSPeter Zijlstra 	struct task_group	*tg;
609391e43daSPeter Zijlstra #endif
610391e43daSPeter Zijlstra };
611391e43daSPeter Zijlstra 
612aab03e05SDario Faggioli /* Deadline class' related fields in a runqueue */
613aab03e05SDario Faggioli struct dl_rq {
614aab03e05SDario Faggioli 	/* runqueue is an rbtree, ordered by deadline */
6152161573eSDavidlohr Bueso 	struct rb_root_cached	root;
616aab03e05SDario Faggioli 
617aab03e05SDario Faggioli 	unsigned long		dl_nr_running;
6181baca4ceSJuri Lelli 
6191baca4ceSJuri Lelli #ifdef CONFIG_SMP
6201baca4ceSJuri Lelli 	/*
6211baca4ceSJuri Lelli 	 * Deadline values of the currently executing and the
6221baca4ceSJuri Lelli 	 * earliest ready task on this rq. Caching these facilitates
6231baca4ceSJuri Lelli 	 * the decision whether or not a ready but not running task
6241baca4ceSJuri Lelli 	 * should migrate somewhere else.
6251baca4ceSJuri Lelli 	 */
6261baca4ceSJuri Lelli 	struct {
6271baca4ceSJuri Lelli 		u64		curr;
6281baca4ceSJuri Lelli 		u64		next;
6291baca4ceSJuri Lelli 	} earliest_dl;
6301baca4ceSJuri Lelli 
6311baca4ceSJuri Lelli 	unsigned long		dl_nr_migratory;
6321baca4ceSJuri Lelli 	int			overloaded;
6331baca4ceSJuri Lelli 
6341baca4ceSJuri Lelli 	/*
6351baca4ceSJuri Lelli 	 * Tasks on this rq that can be pushed away. They are kept in
6361baca4ceSJuri Lelli 	 * an rb-tree, ordered by tasks' deadlines, with caching
6371baca4ceSJuri Lelli 	 * of the leftmost (earliest deadline) element.
6381baca4ceSJuri Lelli 	 */
6392161573eSDavidlohr Bueso 	struct rb_root_cached	pushable_dl_tasks_root;
640332ac17eSDario Faggioli #else
641332ac17eSDario Faggioli 	struct dl_bw		dl_bw;
6421baca4ceSJuri Lelli #endif
643e36d8677SLuca Abeni 	/*
644e36d8677SLuca Abeni 	 * "Active utilization" for this runqueue: increased when a
645e36d8677SLuca Abeni 	 * task wakes up (becomes TASK_RUNNING) and decreased when a
646e36d8677SLuca Abeni 	 * task blocks.
647e36d8677SLuca Abeni 	 */
648e36d8677SLuca Abeni 	u64			running_bw;
6494da3abceSLuca Abeni 
6504da3abceSLuca Abeni 	/*
6518fd27231SLuca Abeni 	 * Utilization of the tasks "assigned" to this runqueue (including
6528fd27231SLuca Abeni 	 * the tasks that are in runqueue and the tasks that executed on this
6538fd27231SLuca Abeni 	 * CPU and blocked). Increased when a task moves to this runqueue, and
6548fd27231SLuca Abeni 	 * decreased when the task moves away (migrates, changes scheduling
6558fd27231SLuca Abeni 	 * policy, or terminates).
6568fd27231SLuca Abeni 	 * This is needed to compute the "inactive utilization" for the
6578fd27231SLuca Abeni 	 * runqueue (inactive utilization = this_bw - running_bw).
6588fd27231SLuca Abeni 	 */
6598fd27231SLuca Abeni 	u64			this_bw;
660daec5798SLuca Abeni 	u64			extra_bw;
6618fd27231SLuca Abeni 
6628fd27231SLuca Abeni 	/*
6634da3abceSLuca Abeni 	 * Inverse of the fraction of CPU utilization that can be reclaimed
6644da3abceSLuca Abeni 	 * by the GRUB algorithm.
6654da3abceSLuca Abeni 	 */
6664da3abceSLuca Abeni 	u64			bw_ratio;
667aab03e05SDario Faggioli };
668aab03e05SDario Faggioli 
669391e43daSPeter Zijlstra #ifdef CONFIG_SMP
670391e43daSPeter Zijlstra 
671afe06efdSTim Chen static inline bool sched_asym_prefer(int a, int b)
672afe06efdSTim Chen {
673afe06efdSTim Chen 	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
674afe06efdSTim Chen }
675afe06efdSTim Chen 
676391e43daSPeter Zijlstra /*
677391e43daSPeter Zijlstra  * We add the notion of a root-domain which will be used to define per-domain
678391e43daSPeter Zijlstra  * variables. Each exclusive cpuset essentially defines an island domain by
67997fb7a0aSIngo Molnar  * fully partitioning the member CPUs from any other cpuset. Whenever a new
680391e43daSPeter Zijlstra  * exclusive cpuset is created, we also create and attach a new root-domain
681391e43daSPeter Zijlstra  * object.
682391e43daSPeter Zijlstra  *
683391e43daSPeter Zijlstra  */
684391e43daSPeter Zijlstra struct root_domain {
685391e43daSPeter Zijlstra 	atomic_t		refcount;
686391e43daSPeter Zijlstra 	atomic_t		rto_count;
687391e43daSPeter Zijlstra 	struct rcu_head		rcu;
688391e43daSPeter Zijlstra 	cpumask_var_t		span;
689391e43daSPeter Zijlstra 	cpumask_var_t		online;
690391e43daSPeter Zijlstra 
6914486edd1STim Chen 	/* Indicate more than one runnable task for any CPU */
6924486edd1STim Chen 	bool			overload;
6934486edd1STim Chen 
694391e43daSPeter Zijlstra 	/*
6951baca4ceSJuri Lelli 	 * The bit corresponding to a CPU gets set here if such CPU has more
6961baca4ceSJuri Lelli 	 * than one runnable -deadline task (as it is below for RT tasks).
6971baca4ceSJuri Lelli 	 */
6981baca4ceSJuri Lelli 	cpumask_var_t		dlo_mask;
6991baca4ceSJuri Lelli 	atomic_t		dlo_count;
700332ac17eSDario Faggioli 	struct dl_bw		dl_bw;
7016bfd6d72SJuri Lelli 	struct cpudl		cpudl;
7021baca4ceSJuri Lelli 
7034bdced5cSSteven Rostedt (Red Hat) #ifdef HAVE_RT_PUSH_IPI
7044bdced5cSSteven Rostedt (Red Hat) 	/*
7054bdced5cSSteven Rostedt (Red Hat) 	 * For IPI pull requests, loop across the rto_mask.
7064bdced5cSSteven Rostedt (Red Hat) 	 */
7074bdced5cSSteven Rostedt (Red Hat) 	struct irq_work		rto_push_work;
7084bdced5cSSteven Rostedt (Red Hat) 	raw_spinlock_t		rto_lock;
7094bdced5cSSteven Rostedt (Red Hat) 	/* These are only updated and read within rto_lock */
7104bdced5cSSteven Rostedt (Red Hat) 	int			rto_loop;
7114bdced5cSSteven Rostedt (Red Hat) 	int			rto_cpu;
7124bdced5cSSteven Rostedt (Red Hat) 	/* These atomics are updated outside of a lock */
7134bdced5cSSteven Rostedt (Red Hat) 	atomic_t		rto_loop_next;
7144bdced5cSSteven Rostedt (Red Hat) 	atomic_t		rto_loop_start;
7154bdced5cSSteven Rostedt (Red Hat) #endif
7161baca4ceSJuri Lelli 	/*
717391e43daSPeter Zijlstra 	 * The "RT overload" flag: it gets set if a CPU has more than
718391e43daSPeter Zijlstra 	 * one runnable RT task.
719391e43daSPeter Zijlstra 	 */
720391e43daSPeter Zijlstra 	cpumask_var_t		rto_mask;
721391e43daSPeter Zijlstra 	struct cpupri		cpupri;
722cd92bfd3SDietmar Eggemann 
723cd92bfd3SDietmar Eggemann 	unsigned long		max_cpu_capacity;
724391e43daSPeter Zijlstra };
725391e43daSPeter Zijlstra 
726391e43daSPeter Zijlstra extern struct root_domain def_root_domain;
727f2cb1360SIngo Molnar extern struct mutex sched_domains_mutex;
728f2cb1360SIngo Molnar 
729f2cb1360SIngo Molnar extern void init_defrootdomain(void);
7308d5dc512SPeter Zijlstra extern int sched_init_domains(const struct cpumask *cpu_map);
731f2cb1360SIngo Molnar extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
732364f5665SSteven Rostedt (VMware) extern void sched_get_rd(struct root_domain *rd);
733364f5665SSteven Rostedt (VMware) extern void sched_put_rd(struct root_domain *rd);
734391e43daSPeter Zijlstra 
7354bdced5cSSteven Rostedt (Red Hat) #ifdef HAVE_RT_PUSH_IPI
7364bdced5cSSteven Rostedt (Red Hat) extern void rto_push_irq_work_func(struct irq_work *work);
7374bdced5cSSteven Rostedt (Red Hat) #endif
738391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
739391e43daSPeter Zijlstra 
740391e43daSPeter Zijlstra /*
741391e43daSPeter Zijlstra  * This is the main, per-CPU runqueue data structure.
742391e43daSPeter Zijlstra  *
743391e43daSPeter Zijlstra  * Locking rule: code that needs to lock multiple runqueues (such as
744391e43daSPeter Zijlstra  * the load balancing or thread migration code) must acquire the
745391e43daSPeter Zijlstra  * locks in ascending &runqueue order.
746391e43daSPeter Zijlstra  */
747391e43daSPeter Zijlstra struct rq {
748391e43daSPeter Zijlstra 	/* runqueue lock: */
749391e43daSPeter Zijlstra 	raw_spinlock_t		lock;
750391e43daSPeter Zijlstra 
751391e43daSPeter Zijlstra 	/*
752391e43daSPeter Zijlstra 	 * nr_running and cpu_load should be in the same cacheline because
753391e43daSPeter Zijlstra 	 * remote CPUs use both these fields when doing load calculation.
754391e43daSPeter Zijlstra 	 */
755c82513e5SPeter Zijlstra 	unsigned int		nr_running;
7560ec8aa00SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
7570ec8aa00SPeter Zijlstra 	unsigned int		nr_numa_running;
7580ec8aa00SPeter Zijlstra 	unsigned int		nr_preferred_running;
7590ec8aa00SPeter Zijlstra #endif
760391e43daSPeter Zijlstra 	#define CPU_LOAD_IDX_MAX 5
761391e43daSPeter Zijlstra 	unsigned long		cpu_load[CPU_LOAD_IDX_MAX];
7623451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
7639fd81dd5SFrederic Weisbecker #ifdef CONFIG_SMP
7649fd81dd5SFrederic Weisbecker 	unsigned long		last_load_update_tick;
765e022e0d3SPeter Zijlstra 	unsigned long		last_blocked_load_update_tick;
766f643ea22SVincent Guittot 	unsigned int		has_blocked_load;
7679fd81dd5SFrederic Weisbecker #endif /* CONFIG_SMP */
76800357f5eSPeter Zijlstra 	unsigned int		nohz_tick_stopped;
769a22e47a4SPeter Zijlstra 	atomic_t nohz_flags;
7709fd81dd5SFrederic Weisbecker #endif /* CONFIG_NO_HZ_COMMON */
771dcdedb24SFrederic Weisbecker 
77297fb7a0aSIngo Molnar 	/* capture load from *all* tasks on this CPU: */
773391e43daSPeter Zijlstra 	struct load_weight	load;
774391e43daSPeter Zijlstra 	unsigned long		nr_load_updates;
775391e43daSPeter Zijlstra 	u64			nr_switches;
776391e43daSPeter Zijlstra 
777391e43daSPeter Zijlstra 	struct cfs_rq		cfs;
778391e43daSPeter Zijlstra 	struct rt_rq		rt;
779aab03e05SDario Faggioli 	struct dl_rq		dl;
780391e43daSPeter Zijlstra 
781391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
78297fb7a0aSIngo Molnar 	/* list of leaf cfs_rq on this CPU: */
783391e43daSPeter Zijlstra 	struct list_head	leaf_cfs_rq_list;
7849c2791f9SVincent Guittot 	struct list_head	*tmp_alone_branch;
785a35b6466SPeter Zijlstra #endif /* CONFIG_FAIR_GROUP_SCHED */
786a35b6466SPeter Zijlstra 
787391e43daSPeter Zijlstra 	/*
788391e43daSPeter Zijlstra 	 * This is part of a global counter where only the total sum
789391e43daSPeter Zijlstra 	 * over all CPUs matters. A task can increase this counter on
790391e43daSPeter Zijlstra 	 * one CPU and if it got migrated afterwards it may decrease
791391e43daSPeter Zijlstra 	 * it on another CPU. Always updated under the runqueue lock:
792391e43daSPeter Zijlstra 	 */
793391e43daSPeter Zijlstra 	unsigned long		nr_uninterruptible;
794391e43daSPeter Zijlstra 
79597fb7a0aSIngo Molnar 	struct task_struct	*curr;
79697fb7a0aSIngo Molnar 	struct task_struct	*idle;
79797fb7a0aSIngo Molnar 	struct task_struct	*stop;
798391e43daSPeter Zijlstra 	unsigned long		next_balance;
799391e43daSPeter Zijlstra 	struct mm_struct	*prev_mm;
800391e43daSPeter Zijlstra 
801cb42c9a3SMatt Fleming 	unsigned int		clock_update_flags;
802391e43daSPeter Zijlstra 	u64			clock;
803391e43daSPeter Zijlstra 	u64			clock_task;
804391e43daSPeter Zijlstra 
805391e43daSPeter Zijlstra 	atomic_t		nr_iowait;
806391e43daSPeter Zijlstra 
807391e43daSPeter Zijlstra #ifdef CONFIG_SMP
808391e43daSPeter Zijlstra 	struct root_domain	*rd;
809391e43daSPeter Zijlstra 	struct sched_domain	*sd;
810391e43daSPeter Zijlstra 
811ced549faSNicolas Pitre 	unsigned long		cpu_capacity;
812ca6d75e6SVincent Guittot 	unsigned long		cpu_capacity_orig;
813391e43daSPeter Zijlstra 
814e3fca9e7SPeter Zijlstra 	struct callback_head	*balance_callback;
815e3fca9e7SPeter Zijlstra 
816391e43daSPeter Zijlstra 	unsigned char		idle_balance;
81797fb7a0aSIngo Molnar 
818391e43daSPeter Zijlstra 	/* For active balancing */
819391e43daSPeter Zijlstra 	int			active_balance;
820391e43daSPeter Zijlstra 	int			push_cpu;
821391e43daSPeter Zijlstra 	struct cpu_stop_work	active_balance_work;
82297fb7a0aSIngo Molnar 
82397fb7a0aSIngo Molnar 	/* CPU of this runqueue: */
824391e43daSPeter Zijlstra 	int			cpu;
825391e43daSPeter Zijlstra 	int			online;
826391e43daSPeter Zijlstra 
827367456c7SPeter Zijlstra 	struct list_head cfs_tasks;
828367456c7SPeter Zijlstra 
829391e43daSPeter Zijlstra 	u64			rt_avg;
830391e43daSPeter Zijlstra 	u64			age_stamp;
831391e43daSPeter Zijlstra 	u64			idle_stamp;
832391e43daSPeter Zijlstra 	u64			avg_idle;
8339bd721c5SJason Low 
8349bd721c5SJason Low 	/* This is used to determine avg_idle's max value */
8359bd721c5SJason Low 	u64			max_idle_balance_cost;
836391e43daSPeter Zijlstra #endif
837391e43daSPeter Zijlstra 
838391e43daSPeter Zijlstra #ifdef CONFIG_IRQ_TIME_ACCOUNTING
839391e43daSPeter Zijlstra 	u64			prev_irq_time;
840391e43daSPeter Zijlstra #endif
841391e43daSPeter Zijlstra #ifdef CONFIG_PARAVIRT
842391e43daSPeter Zijlstra 	u64			prev_steal_time;
843391e43daSPeter Zijlstra #endif
844391e43daSPeter Zijlstra #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
845391e43daSPeter Zijlstra 	u64			prev_steal_time_rq;
846391e43daSPeter Zijlstra #endif
847391e43daSPeter Zijlstra 
848391e43daSPeter Zijlstra 	/* calc_load related fields */
849391e43daSPeter Zijlstra 	unsigned long		calc_load_update;
850391e43daSPeter Zijlstra 	long			calc_load_active;
851391e43daSPeter Zijlstra 
852391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK
853391e43daSPeter Zijlstra #ifdef CONFIG_SMP
854391e43daSPeter Zijlstra 	int			hrtick_csd_pending;
855966a9671SYing Huang 	call_single_data_t	hrtick_csd;
856391e43daSPeter Zijlstra #endif
857391e43daSPeter Zijlstra 	struct hrtimer		hrtick_timer;
858391e43daSPeter Zijlstra #endif
859391e43daSPeter Zijlstra 
860391e43daSPeter Zijlstra #ifdef CONFIG_SCHEDSTATS
861391e43daSPeter Zijlstra 	/* latency stats */
862391e43daSPeter Zijlstra 	struct sched_info	rq_sched_info;
863391e43daSPeter Zijlstra 	unsigned long long	rq_cpu_time;
864391e43daSPeter Zijlstra 	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
865391e43daSPeter Zijlstra 
866391e43daSPeter Zijlstra 	/* sys_sched_yield() stats */
867391e43daSPeter Zijlstra 	unsigned int		yld_count;
868391e43daSPeter Zijlstra 
869391e43daSPeter Zijlstra 	/* schedule() stats */
870391e43daSPeter Zijlstra 	unsigned int		sched_count;
871391e43daSPeter Zijlstra 	unsigned int		sched_goidle;
872391e43daSPeter Zijlstra 
873391e43daSPeter Zijlstra 	/* try_to_wake_up() stats */
874391e43daSPeter Zijlstra 	unsigned int		ttwu_count;
875391e43daSPeter Zijlstra 	unsigned int		ttwu_local;
876391e43daSPeter Zijlstra #endif
877391e43daSPeter Zijlstra 
878391e43daSPeter Zijlstra #ifdef CONFIG_SMP
879391e43daSPeter Zijlstra 	struct llist_head	wake_list;
880391e43daSPeter Zijlstra #endif
881442bf3aaSDaniel Lezcano 
882442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE
883442bf3aaSDaniel Lezcano 	/* Must be inspected within an RCU read-side critical section */
884442bf3aaSDaniel Lezcano 	struct cpuidle_state	*idle_state;
885442bf3aaSDaniel Lezcano #endif
886391e43daSPeter Zijlstra };
887391e43daSPeter Zijlstra 
888391e43daSPeter Zijlstra static inline int cpu_of(struct rq *rq)
889391e43daSPeter Zijlstra {
890391e43daSPeter Zijlstra #ifdef CONFIG_SMP
891391e43daSPeter Zijlstra 	return rq->cpu;
892391e43daSPeter Zijlstra #else
893391e43daSPeter Zijlstra 	return 0;
894391e43daSPeter Zijlstra #endif
895391e43daSPeter Zijlstra }
896391e43daSPeter Zijlstra 
8971b568f0aSPeter Zijlstra 
8981b568f0aSPeter Zijlstra #ifdef CONFIG_SCHED_SMT
8991b568f0aSPeter Zijlstra 
9001b568f0aSPeter Zijlstra extern struct static_key_false sched_smt_present;
9011b568f0aSPeter Zijlstra 
9021b568f0aSPeter Zijlstra extern void __update_idle_core(struct rq *rq);
9031b568f0aSPeter Zijlstra 
9041b568f0aSPeter Zijlstra static inline void update_idle_core(struct rq *rq)
9051b568f0aSPeter Zijlstra {
9061b568f0aSPeter Zijlstra 	if (static_branch_unlikely(&sched_smt_present))
9071b568f0aSPeter Zijlstra 		__update_idle_core(rq);
9081b568f0aSPeter Zijlstra }
9091b568f0aSPeter Zijlstra 
9101b568f0aSPeter Zijlstra #else
9111b568f0aSPeter Zijlstra static inline void update_idle_core(struct rq *rq) { }
9121b568f0aSPeter Zijlstra #endif
9131b568f0aSPeter Zijlstra 
9148b06c55bSPranith Kumar DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
915391e43daSPeter Zijlstra 
916518cd623SPeter Zijlstra #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
9174a32fea9SChristoph Lameter #define this_rq()		this_cpu_ptr(&runqueues)
918518cd623SPeter Zijlstra #define task_rq(p)		cpu_rq(task_cpu(p))
919518cd623SPeter Zijlstra #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
9204a32fea9SChristoph Lameter #define raw_rq()		raw_cpu_ptr(&runqueues)
921518cd623SPeter Zijlstra 
922cebde6d6SPeter Zijlstra static inline u64 __rq_clock_broken(struct rq *rq)
923cebde6d6SPeter Zijlstra {
924316c1608SJason Low 	return READ_ONCE(rq->clock);
925cebde6d6SPeter Zijlstra }
926cebde6d6SPeter Zijlstra 
927cb42c9a3SMatt Fleming /*
928cb42c9a3SMatt Fleming  * rq::clock_update_flags bits
929cb42c9a3SMatt Fleming  *
930cb42c9a3SMatt Fleming  * %RQCF_REQ_SKIP - will request skipping of clock update on the next
931cb42c9a3SMatt Fleming  *  call to __schedule(). This is an optimisation to avoid
932cb42c9a3SMatt Fleming  *  neighbouring rq clock updates.
933cb42c9a3SMatt Fleming  *
934cb42c9a3SMatt Fleming  * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
935cb42c9a3SMatt Fleming  *  in effect and calls to update_rq_clock() are being ignored.
936cb42c9a3SMatt Fleming  *
937cb42c9a3SMatt Fleming  * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
938cb42c9a3SMatt Fleming  *  made to update_rq_clock() since the last time rq::lock was pinned.
939cb42c9a3SMatt Fleming  *
940cb42c9a3SMatt Fleming  * If inside of __schedule(), clock_update_flags will have been
941cb42c9a3SMatt Fleming  * shifted left (a left shift is a cheap operation for the fast path
942cb42c9a3SMatt Fleming  * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
943cb42c9a3SMatt Fleming  *
944cb42c9a3SMatt Fleming  *	if (rq->clock_update_flags >= RQCF_UPDATED)
945cb42c9a3SMatt Fleming  *
946cb42c9a3SMatt Fleming  * to check if %RQCF_UPDATED is set. It'll never be shifted more than
947cb42c9a3SMatt Fleming  * one position though, because the next rq_unpin_lock() will shift it
948cb42c9a3SMatt Fleming  * back.
949cb42c9a3SMatt Fleming  */
950cb42c9a3SMatt Fleming #define RQCF_REQ_SKIP		0x01
951cb42c9a3SMatt Fleming #define RQCF_ACT_SKIP		0x02
952cb42c9a3SMatt Fleming #define RQCF_UPDATED		0x04
953cb42c9a3SMatt Fleming 
954cb42c9a3SMatt Fleming static inline void assert_clock_updated(struct rq *rq)
955cb42c9a3SMatt Fleming {
956cb42c9a3SMatt Fleming 	/*
957cb42c9a3SMatt Fleming 	 * The only reason for not seeing a clock update since the
958cb42c9a3SMatt Fleming 	 * last rq_pin_lock() is if we're currently skipping updates.
959cb42c9a3SMatt Fleming 	 */
960cb42c9a3SMatt Fleming 	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
961cb42c9a3SMatt Fleming }
962cb42c9a3SMatt Fleming 
96378becc27SFrederic Weisbecker static inline u64 rq_clock(struct rq *rq)
96478becc27SFrederic Weisbecker {
965cebde6d6SPeter Zijlstra 	lockdep_assert_held(&rq->lock);
966cb42c9a3SMatt Fleming 	assert_clock_updated(rq);
967cb42c9a3SMatt Fleming 
96878becc27SFrederic Weisbecker 	return rq->clock;
96978becc27SFrederic Weisbecker }
97078becc27SFrederic Weisbecker 
97178becc27SFrederic Weisbecker static inline u64 rq_clock_task(struct rq *rq)
97278becc27SFrederic Weisbecker {
973cebde6d6SPeter Zijlstra 	lockdep_assert_held(&rq->lock);
974cb42c9a3SMatt Fleming 	assert_clock_updated(rq);
975cb42c9a3SMatt Fleming 
97678becc27SFrederic Weisbecker 	return rq->clock_task;
97778becc27SFrederic Weisbecker }
97878becc27SFrederic Weisbecker 
979adcc8da8SDavidlohr Bueso static inline void rq_clock_skip_update(struct rq *rq)
9809edfbfedSPeter Zijlstra {
9819edfbfedSPeter Zijlstra 	lockdep_assert_held(&rq->lock);
982cb42c9a3SMatt Fleming 	rq->clock_update_flags |= RQCF_REQ_SKIP;
983adcc8da8SDavidlohr Bueso }
984adcc8da8SDavidlohr Bueso 
985adcc8da8SDavidlohr Bueso /*
986adcc8da8SDavidlohr Bueso  * See RT task throttling, which is the only time a skip
987adcc8da8SDavidlohr Bueso  * request is cancelled.
988adcc8da8SDavidlohr Bueso  */
989adcc8da8SDavidlohr Bueso static inline void rq_clock_cancel_skipupdate(struct rq *rq)
990adcc8da8SDavidlohr Bueso {
991adcc8da8SDavidlohr Bueso 	lockdep_assert_held(&rq->lock);
992cb42c9a3SMatt Fleming 	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
9939edfbfedSPeter Zijlstra }
9949edfbfedSPeter Zijlstra 
995d8ac8971SMatt Fleming struct rq_flags {
996d8ac8971SMatt Fleming 	unsigned long flags;
997d8ac8971SMatt Fleming 	struct pin_cookie cookie;
998cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG
999cb42c9a3SMatt Fleming 	/*
1000cb42c9a3SMatt Fleming 	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
1001cb42c9a3SMatt Fleming 	 * current pin context is stashed here in case it needs to be
1002cb42c9a3SMatt Fleming 	 * restored in rq_repin_lock().
1003cb42c9a3SMatt Fleming 	 */
1004cb42c9a3SMatt Fleming 	unsigned int clock_update_flags;
1005cb42c9a3SMatt Fleming #endif
1006d8ac8971SMatt Fleming };
1007d8ac8971SMatt Fleming 
1008d8ac8971SMatt Fleming static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1009d8ac8971SMatt Fleming {
1010d8ac8971SMatt Fleming 	rf->cookie = lockdep_pin_lock(&rq->lock);
1011cb42c9a3SMatt Fleming 
1012cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG
1013cb42c9a3SMatt Fleming 	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1014cb42c9a3SMatt Fleming 	rf->clock_update_flags = 0;
1015cb42c9a3SMatt Fleming #endif
1016d8ac8971SMatt Fleming }
1017d8ac8971SMatt Fleming 
1018d8ac8971SMatt Fleming static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1019d8ac8971SMatt Fleming {
1020cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG
1021cb42c9a3SMatt Fleming 	if (rq->clock_update_flags > RQCF_ACT_SKIP)
1022cb42c9a3SMatt Fleming 		rf->clock_update_flags = RQCF_UPDATED;
1023cb42c9a3SMatt Fleming #endif
1024cb42c9a3SMatt Fleming 
1025d8ac8971SMatt Fleming 	lockdep_unpin_lock(&rq->lock, rf->cookie);
1026d8ac8971SMatt Fleming }
1027d8ac8971SMatt Fleming 
1028d8ac8971SMatt Fleming static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1029d8ac8971SMatt Fleming {
1030d8ac8971SMatt Fleming 	lockdep_repin_lock(&rq->lock, rf->cookie);
1031cb42c9a3SMatt Fleming 
1032cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG
1033cb42c9a3SMatt Fleming 	/*
1034cb42c9a3SMatt Fleming 	 * Restore the value we stashed in @rf for this pin context.
1035cb42c9a3SMatt Fleming 	 */
1036cb42c9a3SMatt Fleming 	rq->clock_update_flags |= rf->clock_update_flags;
1037cb42c9a3SMatt Fleming #endif
1038d8ac8971SMatt Fleming }
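
/*
 * Illustrative pattern (example_pinned_section() is hypothetical):
 * pinning brackets a region in which rq->lock must be held
 * continuously; lockdep complains if the lock is dropped while
 * pinned.
 */
static inline void example_pinned_section(struct rq *rq)
{
	struct rq_flags rf;

	raw_spin_lock_irqsave(&rq->lock, rf.flags);
	rq_pin_lock(rq, &rf);

	/* ... work that relies on rq->lock never being released ... */

	rq_unpin_lock(rq, &rf);
	raw_spin_unlock_irqrestore(&rq->lock, rf.flags);
}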
1039d8ac8971SMatt Fleming 
10409942f79bSRik van Riel #ifdef CONFIG_NUMA
1041e3fe70b1SRik van Riel enum numa_topology_type {
1042e3fe70b1SRik van Riel 	NUMA_DIRECT,
1043e3fe70b1SRik van Riel 	NUMA_GLUELESS_MESH,
1044e3fe70b1SRik van Riel 	NUMA_BACKPLANE,
1045e3fe70b1SRik van Riel };
1046e3fe70b1SRik van Riel extern enum numa_topology_type sched_numa_topology_type;
10479942f79bSRik van Riel extern int sched_max_numa_distance;
10489942f79bSRik van Riel extern bool find_numa_distance(int distance);
10499942f79bSRik van Riel #endif
10509942f79bSRik van Riel 
1051f2cb1360SIngo Molnar #ifdef CONFIG_NUMA
1052f2cb1360SIngo Molnar extern void sched_init_numa(void);
1053f2cb1360SIngo Molnar extern void sched_domains_numa_masks_set(unsigned int cpu);
1054f2cb1360SIngo Molnar extern void sched_domains_numa_masks_clear(unsigned int cpu);
1055f2cb1360SIngo Molnar #else
1056f2cb1360SIngo Molnar static inline void sched_init_numa(void) { }
1057f2cb1360SIngo Molnar static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1058f2cb1360SIngo Molnar static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1059f2cb1360SIngo Molnar #endif
1060f2cb1360SIngo Molnar 
1061f809ca9aSMel Gorman #ifdef CONFIG_NUMA_BALANCING
106244dba3d5SIulia Manda /* The regions in numa_faults array from task_struct */
106344dba3d5SIulia Manda enum numa_faults_stats {
106444dba3d5SIulia Manda 	NUMA_MEM = 0,
106544dba3d5SIulia Manda 	NUMA_CPU,
106644dba3d5SIulia Manda 	NUMA_MEMBUF,
106744dba3d5SIulia Manda 	NUMA_CPUBUF
106844dba3d5SIulia Manda };
10690ec8aa00SPeter Zijlstra extern void sched_setnuma(struct task_struct *p, int node);
1070e6628d5bSMel Gorman extern int migrate_task_to(struct task_struct *p, int cpu);
1071ac66f547SPeter Zijlstra extern int migrate_swap(struct task_struct *, struct task_struct *);
107213784475SMel Gorman extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
107313784475SMel Gorman #else
107413784475SMel Gorman static inline void
107513784475SMel Gorman init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
107613784475SMel Gorman {
107713784475SMel Gorman }
1078f809ca9aSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
1079f809ca9aSMel Gorman 
1080518cd623SPeter Zijlstra #ifdef CONFIG_SMP
1081518cd623SPeter Zijlstra 
1082e3fca9e7SPeter Zijlstra static inline void
1083e3fca9e7SPeter Zijlstra queue_balance_callback(struct rq *rq,
1084e3fca9e7SPeter Zijlstra 		       struct callback_head *head,
1085e3fca9e7SPeter Zijlstra 		       void (*func)(struct rq *rq))
1086e3fca9e7SPeter Zijlstra {
1087e3fca9e7SPeter Zijlstra 	lockdep_assert_held(&rq->lock);
1088e3fca9e7SPeter Zijlstra 
1089e3fca9e7SPeter Zijlstra 	if (unlikely(head->next))
1090e3fca9e7SPeter Zijlstra 		return;
1091e3fca9e7SPeter Zijlstra 
1092e3fca9e7SPeter Zijlstra 	head->func = (void (*)(struct callback_head *))func;
1093e3fca9e7SPeter Zijlstra 	head->next = rq->balance_callback;
1094e3fca9e7SPeter Zijlstra 	rq->balance_callback = head;
1095e3fca9e7SPeter Zijlstra }
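/*
 * Editor's sketch of how a sched class typically arms a balance callback
 * to run once rq->lock is released, loosely modeled on the RT class's
 * push-task machinery. The per-CPU head and the push_tasks() worker are
 * illustrative names, not symbols defined in this tree.
 */
#if 0
static DEFINE_PER_CPU(struct callback_head, example_push_head);

static void example_queue_push(struct rq *rq)
{
	/* A second call is a no-op while the callback is still queued. */
	queue_balance_callback(rq, &per_cpu(example_push_head, rq->cpu),
			       push_tasks);
}
#endif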
1096e3fca9e7SPeter Zijlstra 
1097e3baac47SPeter Zijlstra extern void sched_ttwu_pending(void);
1098e3baac47SPeter Zijlstra 
1099391e43daSPeter Zijlstra #define rcu_dereference_check_sched_domain(p) \
1100391e43daSPeter Zijlstra 	rcu_dereference_check((p), \
1101391e43daSPeter Zijlstra 			      lockdep_is_held(&sched_domains_mutex))
1102391e43daSPeter Zijlstra 
1103391e43daSPeter Zijlstra /*
1104391e43daSPeter Zijlstra  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1105391e43daSPeter Zijlstra  * See detach_destroy_domains() and synchronize_sched() for details.
1106391e43daSPeter Zijlstra  *
1107391e43daSPeter Zijlstra  * The domain tree of any CPU may only be accessed from within
1108391e43daSPeter Zijlstra  * preempt-disabled sections.
1109391e43daSPeter Zijlstra  */
1110391e43daSPeter Zijlstra #define for_each_domain(cpu, __sd) \
1111518cd623SPeter Zijlstra 	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1112518cd623SPeter Zijlstra 			__sd; __sd = __sd->parent)
1113391e43daSPeter Zijlstra 
111477e81365SSuresh Siddha #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
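/*
 * Editor's sketch of a domain walk honouring the locking rule above: the
 * traversal must run with preemption disabled (RCU-sched read side), e.g.:
 */
#if 0
static void example_walk_domains(int cpu)
{
	struct sched_domain *sd;

	rcu_read_lock_sched();
	for_each_domain(cpu, sd)
		pr_info("span_weight=%u\n", sd->span_weight);
	rcu_read_unlock_sched();
}
#endif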
111577e81365SSuresh Siddha 
1116518cd623SPeter Zijlstra /**
1117518cd623SPeter Zijlstra  * highest_flag_domain - Return highest sched_domain containing flag.
111897fb7a0aSIngo Molnar  * @cpu:	The CPU whose highest level of sched domain is to
1119518cd623SPeter Zijlstra  *		be returned.
1120518cd623SPeter Zijlstra  * @flag:	The flag to check for the highest sched_domain
112197fb7a0aSIngo Molnar  *		for the given CPU.
1122518cd623SPeter Zijlstra  *
112397fb7a0aSIngo Molnar  * Returns the highest sched_domain of a CPU which contains the given flag.
1124518cd623SPeter Zijlstra  */
1125518cd623SPeter Zijlstra static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1126518cd623SPeter Zijlstra {
1127518cd623SPeter Zijlstra 	struct sched_domain *sd, *hsd = NULL;
1128518cd623SPeter Zijlstra 
1129518cd623SPeter Zijlstra 	for_each_domain(cpu, sd) {
1130518cd623SPeter Zijlstra 		if (!(sd->flags & flag))
1131518cd623SPeter Zijlstra 			break;
1132518cd623SPeter Zijlstra 		hsd = sd;
1133518cd623SPeter Zijlstra 	}
1134518cd623SPeter Zijlstra 
1135518cd623SPeter Zijlstra 	return hsd;
1136518cd623SPeter Zijlstra }
1137518cd623SPeter Zijlstra 
1138fb13c7eeSMel Gorman static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1139fb13c7eeSMel Gorman {
1140fb13c7eeSMel Gorman 	struct sched_domain *sd;
1141fb13c7eeSMel Gorman 
1142fb13c7eeSMel Gorman 	for_each_domain(cpu, sd) {
1143fb13c7eeSMel Gorman 		if (sd->flags & flag)
1144fb13c7eeSMel Gorman 			break;
1145fb13c7eeSMel Gorman 	}
1146fb13c7eeSMel Gorman 
1147fb13c7eeSMel Gorman 	return sd;
1148fb13c7eeSMel Gorman }
1149fb13c7eeSMel Gorman 
1150518cd623SPeter Zijlstra DECLARE_PER_CPU(struct sched_domain *, sd_llc);
11517d9ffa89SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_size);
1152518cd623SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_id);
11530e369d75SPeter Zijlstra DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
1154fb13c7eeSMel Gorman DECLARE_PER_CPU(struct sched_domain *, sd_numa);
115537dc6b50SPreeti U Murthy DECLARE_PER_CPU(struct sched_domain *, sd_asym);
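/*
 * Editor's note: the per-CPU pointers above are cached views of the
 * domain tree, refreshed whenever the domains are rebuilt (see
 * update_top_cache_domain() in topology.c). A simplified sketch of how
 * they are derived from the flag-domain helpers:
 */
#if 0
	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);	/* -> sd_llc */
	sd = lowest_flag_domain(cpu, SD_NUMA);			/* -> sd_numa */
	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);		/* -> sd_asym */
#endif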
1156518cd623SPeter Zijlstra 
115763b2ca30SNicolas Pitre struct sched_group_capacity {
11585e6521eaSLi Zefan 	atomic_t		ref;
11595e6521eaSLi Zefan 	/*
1160172895e6SYuyang Du 	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
116163b2ca30SNicolas Pitre 	 * for a single CPU.
11625e6521eaSLi Zefan 	 */
1163bf475ce0SMorten Rasmussen 	unsigned long		capacity;
1164bf475ce0SMorten Rasmussen 	unsigned long		min_capacity;		/* Min per-CPU capacity in group */
11655e6521eaSLi Zefan 	unsigned long		next_update;
116663b2ca30SNicolas Pitre 	int			imbalance;		/* XXX unrelated to capacity but shared group state */
11675e6521eaSLi Zefan 
1168005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
1169005f874dSPeter Zijlstra 	int			id;
1170005f874dSPeter Zijlstra #endif
1171005f874dSPeter Zijlstra 
117297fb7a0aSIngo Molnar 	unsigned long		cpumask[0];		/* Balance mask */
11735e6521eaSLi Zefan };
11745e6521eaSLi Zefan 
11755e6521eaSLi Zefan struct sched_group {
11765e6521eaSLi Zefan 	struct sched_group	*next;			/* Must be a circular list */
11775e6521eaSLi Zefan 	atomic_t		ref;
11785e6521eaSLi Zefan 
11795e6521eaSLi Zefan 	unsigned int		group_weight;
118063b2ca30SNicolas Pitre 	struct sched_group_capacity *sgc;
118197fb7a0aSIngo Molnar 	int			asym_prefer_cpu;	/* CPU of highest priority in group */
11825e6521eaSLi Zefan 
11835e6521eaSLi Zefan 	/*
11845e6521eaSLi Zefan 	 * The CPUs this group covers.
11855e6521eaSLi Zefan 	 *
11865e6521eaSLi Zefan 	 * NOTE: this field is variable length. (Allocated dynamically
11875e6521eaSLi Zefan 	 * by attaching extra space to the end of the structure,
11885e6521eaSLi Zefan 	 * depending on how many CPUs the kernel has booted up with)
11895e6521eaSLi Zefan 	 */
11905e6521eaSLi Zefan 	unsigned long		cpumask[0];
11915e6521eaSLi Zefan };
11925e6521eaSLi Zefan 
1193ae4df9d6SPeter Zijlstra static inline struct cpumask *sched_group_span(struct sched_group *sg)
11945e6521eaSLi Zefan {
11955e6521eaSLi Zefan 	return to_cpumask(sg->cpumask);
11965e6521eaSLi Zefan }
11975e6521eaSLi Zefan 
11985e6521eaSLi Zefan /*
1199e5c14b1fSPeter Zijlstra  * See build_balance_mask().
12005e6521eaSLi Zefan  */
1201e5c14b1fSPeter Zijlstra static inline struct cpumask *group_balance_mask(struct sched_group *sg)
12025e6521eaSLi Zefan {
120363b2ca30SNicolas Pitre 	return to_cpumask(sg->sgc->cpumask);
12045e6521eaSLi Zefan }
12055e6521eaSLi Zefan 
12065e6521eaSLi Zefan /**
120797fb7a0aSIngo Molnar  * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
120897fb7a0aSIngo Molnar  * @group: The group whose first CPU is to be returned.
12095e6521eaSLi Zefan  */
12105e6521eaSLi Zefan static inline unsigned int group_first_cpu(struct sched_group *group)
12115e6521eaSLi Zefan {
1212ae4df9d6SPeter Zijlstra 	return cpumask_first(sched_group_span(group));
12135e6521eaSLi Zefan }
12145e6521eaSLi Zefan 
1215c1174876SPeter Zijlstra extern int group_balance_cpu(struct sched_group *sg);
1216c1174876SPeter Zijlstra 
12173866e845SSteven Rostedt (Red Hat) #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
12183866e845SSteven Rostedt (Red Hat) void register_sched_domain_sysctl(void);
1219bbdacdfeSPeter Zijlstra void dirty_sched_domain_sysctl(int cpu);
12203866e845SSteven Rostedt (Red Hat) void unregister_sched_domain_sysctl(void);
12213866e845SSteven Rostedt (Red Hat) #else
12223866e845SSteven Rostedt (Red Hat) static inline void register_sched_domain_sysctl(void)
12233866e845SSteven Rostedt (Red Hat) {
12243866e845SSteven Rostedt (Red Hat) }
1225bbdacdfeSPeter Zijlstra static inline void dirty_sched_domain_sysctl(int cpu)
1226bbdacdfeSPeter Zijlstra {
1227bbdacdfeSPeter Zijlstra }
12283866e845SSteven Rostedt (Red Hat) static inline void unregister_sched_domain_sysctl(void)
12293866e845SSteven Rostedt (Red Hat) {
12303866e845SSteven Rostedt (Red Hat) }
12313866e845SSteven Rostedt (Red Hat) #endif
12323866e845SSteven Rostedt (Red Hat) 
1233e3baac47SPeter Zijlstra #else
1234e3baac47SPeter Zijlstra 
1235e3baac47SPeter Zijlstra static inline void sched_ttwu_pending(void) { }
1236e3baac47SPeter Zijlstra 
1237518cd623SPeter Zijlstra #endif /* CONFIG_SMP */
1238391e43daSPeter Zijlstra 
1239391e43daSPeter Zijlstra #include "stats.h"
12401051408fSIngo Molnar #include "autogroup.h"
1241391e43daSPeter Zijlstra 
1242391e43daSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
1243391e43daSPeter Zijlstra 
1244391e43daSPeter Zijlstra /*
1245391e43daSPeter Zijlstra  * Return the group to which this task belongs.
1246391e43daSPeter Zijlstra  *
12478af01f56STejun Heo  * We cannot use task_css() and friends because the cgroup subsystem
12488af01f56STejun Heo  * changes that value before the cgroup_subsys::attach() method is called;
12498af01f56STejun Heo  * therefore we cannot pin it and might observe the wrong value.
12508323f26cSPeter Zijlstra  *
12518323f26cSPeter Zijlstra  * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
12528323f26cSPeter Zijlstra  * core changes this before calling sched_move_task().
12538323f26cSPeter Zijlstra  *
12548323f26cSPeter Zijlstra  * Instead we use a 'copy' which is updated from sched_move_task() while
12558323f26cSPeter Zijlstra  * holding both task_struct::pi_lock and rq::lock.
1256391e43daSPeter Zijlstra  */
1257391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p)
1258391e43daSPeter Zijlstra {
12598323f26cSPeter Zijlstra 	return p->sched_task_group;
1260391e43daSPeter Zijlstra }
1261391e43daSPeter Zijlstra 
1262391e43daSPeter Zijlstra /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
1263391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1264391e43daSPeter Zijlstra {
1265391e43daSPeter Zijlstra #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1266391e43daSPeter Zijlstra 	struct task_group *tg = task_group(p);
1267391e43daSPeter Zijlstra #endif
1268391e43daSPeter Zijlstra 
1269391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
1270ad936d86SByungchul Park 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1271391e43daSPeter Zijlstra 	p->se.cfs_rq = tg->cfs_rq[cpu];
1272391e43daSPeter Zijlstra 	p->se.parent = tg->se[cpu];
1273391e43daSPeter Zijlstra #endif
1274391e43daSPeter Zijlstra 
1275391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
1276391e43daSPeter Zijlstra 	p->rt.rt_rq  = tg->rt_rq[cpu];
1277391e43daSPeter Zijlstra 	p->rt.parent = tg->rt_se[cpu];
1278391e43daSPeter Zijlstra #endif
1279391e43daSPeter Zijlstra }
1280391e43daSPeter Zijlstra 
1281391e43daSPeter Zijlstra #else /* CONFIG_CGROUP_SCHED */
1282391e43daSPeter Zijlstra 
1283391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1284391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p)
1285391e43daSPeter Zijlstra {
1286391e43daSPeter Zijlstra 	return NULL;
1287391e43daSPeter Zijlstra }
1288391e43daSPeter Zijlstra 
1289391e43daSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */
1290391e43daSPeter Zijlstra 
1291391e43daSPeter Zijlstra static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1292391e43daSPeter Zijlstra {
1293391e43daSPeter Zijlstra 	set_task_rq(p, cpu);
1294391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1295391e43daSPeter Zijlstra 	/*
1296391e43daSPeter Zijlstra 	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1297391e43daSPeter Zijlstra 	 * successfully executed on another CPU. We must ensure that updates of
1298391e43daSPeter Zijlstra 	 * per-task data have been completed by this moment.
1299391e43daSPeter Zijlstra 	 */
1300391e43daSPeter Zijlstra 	smp_wmb();
1301c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
1302c65eacbeSAndy Lutomirski 	p->cpu = cpu;
1303c65eacbeSAndy Lutomirski #else
1304391e43daSPeter Zijlstra 	task_thread_info(p)->cpu = cpu;
1305c65eacbeSAndy Lutomirski #endif
1306ac66f547SPeter Zijlstra 	p->wake_cpu = cpu;
1307391e43daSPeter Zijlstra #endif
1308391e43daSPeter Zijlstra }
1309391e43daSPeter Zijlstra 
1310391e43daSPeter Zijlstra /*
1311391e43daSPeter Zijlstra  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1312391e43daSPeter Zijlstra  */
1313391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
1314c5905afbSIngo Molnar # include <linux/static_key.h>
1315391e43daSPeter Zijlstra # define const_debug __read_mostly
1316391e43daSPeter Zijlstra #else
1317391e43daSPeter Zijlstra # define const_debug const
1318391e43daSPeter Zijlstra #endif
1319391e43daSPeter Zijlstra 
1320391e43daSPeter Zijlstra #define SCHED_FEAT(name, enabled)	\
1321391e43daSPeter Zijlstra 	__SCHED_FEAT_##name ,
1322391e43daSPeter Zijlstra 
1323391e43daSPeter Zijlstra enum {
1324391e43daSPeter Zijlstra #include "features.h"
1325f8b6d1ccSPeter Zijlstra 	__SCHED_FEAT_NR,
1326391e43daSPeter Zijlstra };
1327391e43daSPeter Zijlstra 
1328391e43daSPeter Zijlstra #undef SCHED_FEAT
1329391e43daSPeter Zijlstra 
1330f8b6d1ccSPeter Zijlstra #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
1331765cc3a4SPatrick Bellasi 
1332765cc3a4SPatrick Bellasi /*
1333765cc3a4SPatrick Bellasi  * To support run-time toggling of sched features, all the translation units
1334765cc3a4SPatrick Bellasi  * (but core.c) reference the sysctl_sched_features defined in core.c.
1335765cc3a4SPatrick Bellasi  */
1336765cc3a4SPatrick Bellasi extern const_debug unsigned int sysctl_sched_features;
1337765cc3a4SPatrick Bellasi 
1338f8b6d1ccSPeter Zijlstra #define SCHED_FEAT(name, enabled)					\
1339c5905afbSIngo Molnar static __always_inline bool static_branch_##name(struct static_key *key) \
1340f8b6d1ccSPeter Zijlstra {									\
13416e76ea8aSJason Baron 	return static_key_##enabled(key);				\
1342f8b6d1ccSPeter Zijlstra }
1343f8b6d1ccSPeter Zijlstra 
1344f8b6d1ccSPeter Zijlstra #include "features.h"
1345f8b6d1ccSPeter Zijlstra #undef SCHED_FEAT
1346f8b6d1ccSPeter Zijlstra 
1347c5905afbSIngo Molnar extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1348f8b6d1ccSPeter Zijlstra #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1349765cc3a4SPatrick Bellasi 
1350f8b6d1ccSPeter Zijlstra #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
1351765cc3a4SPatrick Bellasi 
1352765cc3a4SPatrick Bellasi /*
1353765cc3a4SPatrick Bellasi  * Each translation unit has its own copy of sysctl_sched_features to allow
1354765cc3a4SPatrick Bellasi  * constants propagation at compile time and compiler optimization based on
1355765cc3a4SPatrick Bellasi  * features default.
1356765cc3a4SPatrick Bellasi  */
1357765cc3a4SPatrick Bellasi #define SCHED_FEAT(name, enabled)	\
1358765cc3a4SPatrick Bellasi 	(1UL << __SCHED_FEAT_##name) * enabled |
1359765cc3a4SPatrick Bellasi static const_debug __maybe_unused unsigned int sysctl_sched_features =
1360765cc3a4SPatrick Bellasi #include "features.h"
1361765cc3a4SPatrick Bellasi 	0;
1362765cc3a4SPatrick Bellasi #undef SCHED_FEAT
1363765cc3a4SPatrick Bellasi 
1364391e43daSPeter Zijlstra #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1365765cc3a4SPatrick Bellasi 
1366f8b6d1ccSPeter Zijlstra #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
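/*
 * Editor's illustration of the expansion above for a hypothetical
 * features.h entry:
 *
 *	SCHED_FEAT(EXAMPLE, true)
 *
 * produces __SCHED_FEAT_EXAMPLE in the enum, contributes
 * (1UL << __SCHED_FEAT_EXAMPLE) * 1 to the default sysctl_sched_features
 * mask (or a jump label in the SCHED_DEBUG && HAVE_JUMP_LABEL build), and
 * is tested at run time as:
 *
 *	if (sched_feat(EXAMPLE))
 *		...
 */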
1367391e43daSPeter Zijlstra 
13682a595721SSrikar Dronamraju extern struct static_key_false sched_numa_balancing;
1369cb251765SMel Gorman extern struct static_key_false sched_schedstats;
1370cbee9f88SPeter Zijlstra 
1371391e43daSPeter Zijlstra static inline u64 global_rt_period(void)
1372391e43daSPeter Zijlstra {
1373391e43daSPeter Zijlstra 	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1374391e43daSPeter Zijlstra }
1375391e43daSPeter Zijlstra 
1376391e43daSPeter Zijlstra static inline u64 global_rt_runtime(void)
1377391e43daSPeter Zijlstra {
1378391e43daSPeter Zijlstra 	if (sysctl_sched_rt_runtime < 0)
1379391e43daSPeter Zijlstra 		return RUNTIME_INF;
1380391e43daSPeter Zijlstra 
1381391e43daSPeter Zijlstra 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1382391e43daSPeter Zijlstra }
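/*
 * Editor's worked example with the default sysctls
 * (sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000):
 * global_rt_period() is 1e9 ns and global_rt_runtime() is 950e6 ns,
 * i.e. RT tasks may consume at most 95% of each one-second period.
 */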
1383391e43daSPeter Zijlstra 
1384391e43daSPeter Zijlstra static inline int task_current(struct rq *rq, struct task_struct *p)
1385391e43daSPeter Zijlstra {
1386391e43daSPeter Zijlstra 	return rq->curr == p;
1387391e43daSPeter Zijlstra }
1388391e43daSPeter Zijlstra 
1389391e43daSPeter Zijlstra static inline int task_running(struct rq *rq, struct task_struct *p)
1390391e43daSPeter Zijlstra {
1391391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1392391e43daSPeter Zijlstra 	return p->on_cpu;
1393391e43daSPeter Zijlstra #else
1394391e43daSPeter Zijlstra 	return task_current(rq, p);
1395391e43daSPeter Zijlstra #endif
1396391e43daSPeter Zijlstra }
1397391e43daSPeter Zijlstra 
1398da0c1e65SKirill Tkhai static inline int task_on_rq_queued(struct task_struct *p)
1399da0c1e65SKirill Tkhai {
1400da0c1e65SKirill Tkhai 	return p->on_rq == TASK_ON_RQ_QUEUED;
1401da0c1e65SKirill Tkhai }
1402391e43daSPeter Zijlstra 
1403cca26e80SKirill Tkhai static inline int task_on_rq_migrating(struct task_struct *p)
1404cca26e80SKirill Tkhai {
1405cca26e80SKirill Tkhai 	return p->on_rq == TASK_ON_RQ_MIGRATING;
1406cca26e80SKirill Tkhai }
1407cca26e80SKirill Tkhai 
1408b13095f0SLi Zefan /*
1409b13095f0SLi Zefan  * wake flags
1410b13095f0SLi Zefan  */
141197fb7a0aSIngo Molnar #define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
141297fb7a0aSIngo Molnar #define WF_FORK			0x02		/* Child wakeup after fork */
141297fb7a0aSIngo Molnar #define WF_MIGRATED		0x04		/* Internal use, task got migrated */
1414b13095f0SLi Zefan 
1415391e43daSPeter Zijlstra /*
1416391e43daSPeter Zijlstra  * To aid in avoiding the subversion of "niceness" due to uneven distribution
1417391e43daSPeter Zijlstra  * of tasks with abnormal "nice" values across CPUs, the contribution that
1418391e43daSPeter Zijlstra  * each task makes to its run queue's load is weighted according to its
1419391e43daSPeter Zijlstra  * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1420391e43daSPeter Zijlstra  * scaled version of the new time slice allocation that they receive on time
1421391e43daSPeter Zijlstra  * slice expiry etc.
1422391e43daSPeter Zijlstra  */
1423391e43daSPeter Zijlstra 
1424391e43daSPeter Zijlstra #define WEIGHT_IDLEPRIO		3
1425391e43daSPeter Zijlstra #define WMULT_IDLEPRIO		1431655765
1426391e43daSPeter Zijlstra 
1427ed82b8a1SAndi Kleen extern const int		sched_prio_to_weight[40];
1428ed82b8a1SAndi Kleen extern const u32		sched_prio_to_wmult[40];
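/*
 * Editor's note: both tables are indexed by (nice + 20), so nice 0 maps
 * to sched_prio_to_weight[20] == 1024. Adjacent nice levels differ by a
 * factor of ~1.25, giving each step roughly a 10% change in relative CPU
 * share; sched_prio_to_wmult[] caches 2^32 / weight so the load-weight
 * math can replace a division with a multiply and shift.
 */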
1429391e43daSPeter Zijlstra 
1430ff77e468SPeter Zijlstra /*
1431ff77e468SPeter Zijlstra  * {de,en}queue flags:
1432ff77e468SPeter Zijlstra  *
1433ff77e468SPeter Zijlstra  * DEQUEUE_SLEEP  - task is no longer runnable
1434ff77e468SPeter Zijlstra  * ENQUEUE_WAKEUP - task just became runnable
1435ff77e468SPeter Zijlstra  *
1436ff77e468SPeter Zijlstra  * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1437ff77e468SPeter Zijlstra  *                are in a known state which allows modification. Such pairs
1438ff77e468SPeter Zijlstra  *                should preserve as much state as possible.
1439ff77e468SPeter Zijlstra  *
1440ff77e468SPeter Zijlstra  * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1441ff77e468SPeter Zijlstra  *        in the runqueue.
1442ff77e468SPeter Zijlstra  *
1443ff77e468SPeter Zijlstra  * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
1444ff77e468SPeter Zijlstra  * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
144559efa0baSPeter Zijlstra  * ENQUEUE_MIGRATED  - the task was migrated during wakeup
1446ff77e468SPeter Zijlstra  *
1447ff77e468SPeter Zijlstra  */
1448ff77e468SPeter Zijlstra 
1449ff77e468SPeter Zijlstra #define DEQUEUE_SLEEP		0x01
145097fb7a0aSIngo Molnar #define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
145197fb7a0aSIngo Molnar #define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
145297fb7a0aSIngo Molnar #define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */
1453ff77e468SPeter Zijlstra 
14541de64443SPeter Zijlstra #define ENQUEUE_WAKEUP		0x01
1455ff77e468SPeter Zijlstra #define ENQUEUE_RESTORE		0x02
1456ff77e468SPeter Zijlstra #define ENQUEUE_MOVE		0x04
14570a67d1eeSPeter Zijlstra #define ENQUEUE_NOCLOCK		0x08
1458ff77e468SPeter Zijlstra 
14590a67d1eeSPeter Zijlstra #define ENQUEUE_HEAD		0x10
14600a67d1eeSPeter Zijlstra #define ENQUEUE_REPLENISH	0x20
1461c82ba9faSLi Zefan #ifdef CONFIG_SMP
14620a67d1eeSPeter Zijlstra #define ENQUEUE_MIGRATED	0x40
1463c82ba9faSLi Zefan #else
146459efa0baSPeter Zijlstra #define ENQUEUE_MIGRATED	0x00
1465c82ba9faSLi Zefan #endif
1466c82ba9faSLi Zefan 
146737e117c0SPeter Zijlstra #define RETRY_TASK		((void *)-1UL)
146837e117c0SPeter Zijlstra 
1469c82ba9faSLi Zefan struct sched_class {
1470c82ba9faSLi Zefan 	const struct sched_class *next;
1471c82ba9faSLi Zefan 
1472c82ba9faSLi Zefan 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1473c82ba9faSLi Zefan 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1474c82ba9faSLi Zefan 	void (*yield_task)   (struct rq *rq);
1475c82ba9faSLi Zefan 	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1476c82ba9faSLi Zefan 
1477c82ba9faSLi Zefan 	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1478c82ba9faSLi Zefan 
1479606dba2eSPeter Zijlstra 	/*
1480606dba2eSPeter Zijlstra 	 * Whichever pick_next_task() implementation returns the next task
1481606dba2eSPeter Zijlstra 	 * is responsible for calling put_prev_task() on the @prev task, or
1482606dba2eSPeter Zijlstra 	 * doing something equivalent.
148337e117c0SPeter Zijlstra 	 *
148437e117c0SPeter Zijlstra 	 * May return RETRY_TASK when it finds a higher prio class has runnable
148537e117c0SPeter Zijlstra 	 * tasks.
1486606dba2eSPeter Zijlstra 	 */
1487606dba2eSPeter Zijlstra 	struct task_struct * (*pick_next_task)(struct rq *rq,
1488e7904a28SPeter Zijlstra 					       struct task_struct *prev,
1489d8ac8971SMatt Fleming 					       struct rq_flags *rf);
1490c82ba9faSLi Zefan 	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1491c82ba9faSLi Zefan 
1492c82ba9faSLi Zefan #ifdef CONFIG_SMP
1493ac66f547SPeter Zijlstra 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
14945a4fd036Sxiaofeng.yan 	void (*migrate_task_rq)(struct task_struct *p);
1495c82ba9faSLi Zefan 
1496c82ba9faSLi Zefan 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1497c82ba9faSLi Zefan 
1498c82ba9faSLi Zefan 	void (*set_cpus_allowed)(struct task_struct *p,
1499c82ba9faSLi Zefan 				 const struct cpumask *newmask);
1500c82ba9faSLi Zefan 
1501c82ba9faSLi Zefan 	void (*rq_online)(struct rq *rq);
1502c82ba9faSLi Zefan 	void (*rq_offline)(struct rq *rq);
1503c82ba9faSLi Zefan #endif
1504c82ba9faSLi Zefan 
1505c82ba9faSLi Zefan 	void (*set_curr_task)(struct rq *rq);
1506c82ba9faSLi Zefan 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1507c82ba9faSLi Zefan 	void (*task_fork)(struct task_struct *p);
1508e6c390f2SDario Faggioli 	void (*task_dead)(struct task_struct *p);
1509c82ba9faSLi Zefan 
151067dfa1b7SKirill Tkhai 	/*
151167dfa1b7SKirill Tkhai 	 * The switched_from() call is allowed to drop rq->lock, therefore we
151267dfa1b7SKirill Tkhai 	 * cannot assume the switched_from/switched_to pair is serialized by
151367dfa1b7SKirill Tkhai 	 * rq->lock. They are however serialized by p->pi_lock.
151467dfa1b7SKirill Tkhai 	 */
1515c82ba9faSLi Zefan 	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1516c82ba9faSLi Zefan 	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
1517c82ba9faSLi Zefan 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1518c82ba9faSLi Zefan 			      int oldprio);
1519c82ba9faSLi Zefan 
1520c82ba9faSLi Zefan 	unsigned int (*get_rr_interval)(struct rq *rq,
1521c82ba9faSLi Zefan 					struct task_struct *task);
1522c82ba9faSLi Zefan 
15236e998916SStanislaw Gruszka 	void (*update_curr)(struct rq *rq);
15246e998916SStanislaw Gruszka 
1525ea86cb4bSVincent Guittot #define TASK_SET_GROUP		0
1526ea86cb4bSVincent Guittot #define TASK_MOVE_GROUP		1
1527ea86cb4bSVincent Guittot 
1528c82ba9faSLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED
1529ea86cb4bSVincent Guittot 	void (*task_change_group)(struct task_struct *p, int type);
1530c82ba9faSLi Zefan #endif
1531c82ba9faSLi Zefan };
1532391e43daSPeter Zijlstra 
15333f1d2a31SPeter Zijlstra static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
15343f1d2a31SPeter Zijlstra {
15353f1d2a31SPeter Zijlstra 	prev->sched_class->put_prev_task(rq, prev);
15363f1d2a31SPeter Zijlstra }
15373f1d2a31SPeter Zijlstra 
1538b2bf6c31SPeter Zijlstra static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
1539b2bf6c31SPeter Zijlstra {
1540b2bf6c31SPeter Zijlstra 	curr->sched_class->set_curr_task(rq);
1541b2bf6c31SPeter Zijlstra }
1542b2bf6c31SPeter Zijlstra 
1543f5832c19SNicolas Pitre #ifdef CONFIG_SMP
1544391e43daSPeter Zijlstra #define sched_class_highest (&stop_sched_class)
1545f5832c19SNicolas Pitre #else
1546f5832c19SNicolas Pitre #define sched_class_highest (&dl_sched_class)
1547f5832c19SNicolas Pitre #endif
1548391e43daSPeter Zijlstra #define for_each_class(class) \
1549391e43daSPeter Zijlstra    for (class = sched_class_highest; class; class = class->next)
1550391e43daSPeter Zijlstra 
1551391e43daSPeter Zijlstra extern const struct sched_class stop_sched_class;
1552aab03e05SDario Faggioli extern const struct sched_class dl_sched_class;
1553391e43daSPeter Zijlstra extern const struct sched_class rt_sched_class;
1554391e43daSPeter Zijlstra extern const struct sched_class fair_sched_class;
1555391e43daSPeter Zijlstra extern const struct sched_class idle_sched_class;
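/*
 * Editor's sketch of the canonical class walk used when picking the next
 * task (cf. pick_next_task() in core.c): classes are tried in priority
 * order and RETRY_TASK restarts the walk from the top.
 */
#if 0
again:
	for_each_class(class) {
		p = class->pick_next_task(rq, prev, rf);
		if (p) {
			if (unlikely(p == RETRY_TASK))
				goto again;	/* a higher class became runnable */
			return p;
		}
	}
#endif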
1556391e43daSPeter Zijlstra 
1557391e43daSPeter Zijlstra 
1558391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1559391e43daSPeter Zijlstra 
156063b2ca30SNicolas Pitre extern void update_group_capacity(struct sched_domain *sd, int cpu);
1561b719203bSLi Zefan 
15627caff66fSDaniel Lezcano extern void trigger_load_balance(struct rq *rq);
1563391e43daSPeter Zijlstra 
1564c5b28038SPeter Zijlstra extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1565c5b28038SPeter Zijlstra 
1566391e43daSPeter Zijlstra #endif
1567391e43daSPeter Zijlstra 
1568442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE
1569442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq,
1570442bf3aaSDaniel Lezcano 				  struct cpuidle_state *idle_state)
1571442bf3aaSDaniel Lezcano {
1572442bf3aaSDaniel Lezcano 	rq->idle_state = idle_state;
1573442bf3aaSDaniel Lezcano }
1574442bf3aaSDaniel Lezcano 
1575442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1576442bf3aaSDaniel Lezcano {
15779148a3a1SPeter Zijlstra 	SCHED_WARN_ON(!rcu_read_lock_held());
157897fb7a0aSIngo Molnar 
1579442bf3aaSDaniel Lezcano 	return rq->idle_state;
1580442bf3aaSDaniel Lezcano }
1581442bf3aaSDaniel Lezcano #else
1582442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq,
1583442bf3aaSDaniel Lezcano 				  struct cpuidle_state *idle_state)
1584442bf3aaSDaniel Lezcano {
1585442bf3aaSDaniel Lezcano }
1586442bf3aaSDaniel Lezcano 
1587442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1588442bf3aaSDaniel Lezcano {
1589442bf3aaSDaniel Lezcano 	return NULL;
1590442bf3aaSDaniel Lezcano }
1591442bf3aaSDaniel Lezcano #endif
1592442bf3aaSDaniel Lezcano 
15938663effbSSteven Rostedt (VMware) extern void schedule_idle(void);
15948663effbSSteven Rostedt (VMware) 
1595391e43daSPeter Zijlstra extern void sysrq_sched_debug_show(void);
1596391e43daSPeter Zijlstra extern void sched_init_granularity(void);
1597391e43daSPeter Zijlstra extern void update_max_interval(void);
15981baca4ceSJuri Lelli 
15991baca4ceSJuri Lelli extern void init_sched_dl_class(void);
1600391e43daSPeter Zijlstra extern void init_sched_rt_class(void);
1601391e43daSPeter Zijlstra extern void init_sched_fair_class(void);
1602391e43daSPeter Zijlstra 
16039059393eSVincent Guittot extern void reweight_task(struct task_struct *p, int prio);
16049059393eSVincent Guittot 
16058875125eSKirill Tkhai extern void resched_curr(struct rq *rq);
1606391e43daSPeter Zijlstra extern void resched_cpu(int cpu);
1607391e43daSPeter Zijlstra 
1608391e43daSPeter Zijlstra extern struct rt_bandwidth def_rt_bandwidth;
1609391e43daSPeter Zijlstra extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1610391e43daSPeter Zijlstra 
1611332ac17eSDario Faggioli extern struct dl_bandwidth def_dl_bandwidth;
1612332ac17eSDario Faggioli extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1613aab03e05SDario Faggioli extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1614209a0cbdSLuca Abeni extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
16154da3abceSLuca Abeni extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
1616aab03e05SDario Faggioli 
1617c52f14d3SLuca Abeni #define BW_SHIFT		20
1618c52f14d3SLuca Abeni #define BW_UNIT			(1 << BW_SHIFT)
16194da3abceSLuca Abeni #define RATIO_SHIFT		8
1620332ac17eSDario Faggioli unsigned long to_ratio(u64 period, u64 runtime);
1621332ac17eSDario Faggioli 
1622540247fbSYuyang Du extern void init_entity_runnable_average(struct sched_entity *se);
16232b8c41daSYuyang Du extern void post_init_entity_util_avg(struct sched_entity *se);
1624a75cdaa9SAlex Shi 
162576d92ac3SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
162676d92ac3SFrederic Weisbecker extern bool sched_can_stop_tick(struct rq *rq);
1627d84b3131SFrederic Weisbecker extern int __init sched_tick_offload_init(void);
162876d92ac3SFrederic Weisbecker 
162976d92ac3SFrederic Weisbecker /*
163076d92ac3SFrederic Weisbecker  * Tick may be needed by tasks in the runqueue depending on their policy and
163176d92ac3SFrederic Weisbecker  * requirements. If the tick is needed, send the target CPU an IPI to kick
163276d92ac3SFrederic Weisbecker  * it out of nohz mode if necessary.
163376d92ac3SFrederic Weisbecker  */
163476d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq)
163576d92ac3SFrederic Weisbecker {
163676d92ac3SFrederic Weisbecker 	int cpu;
163776d92ac3SFrederic Weisbecker 
163876d92ac3SFrederic Weisbecker 	if (!tick_nohz_full_enabled())
163976d92ac3SFrederic Weisbecker 		return;
164076d92ac3SFrederic Weisbecker 
164176d92ac3SFrederic Weisbecker 	cpu = cpu_of(rq);
164276d92ac3SFrederic Weisbecker 
164376d92ac3SFrederic Weisbecker 	if (!tick_nohz_full_cpu(cpu))
164476d92ac3SFrederic Weisbecker 		return;
164576d92ac3SFrederic Weisbecker 
164676d92ac3SFrederic Weisbecker 	if (sched_can_stop_tick(rq))
164776d92ac3SFrederic Weisbecker 		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
164876d92ac3SFrederic Weisbecker 	else
164976d92ac3SFrederic Weisbecker 		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
165076d92ac3SFrederic Weisbecker }
165176d92ac3SFrederic Weisbecker #else
1652d84b3131SFrederic Weisbecker static inline int sched_tick_offload_init(void) { return 0; }
165376d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) { }
165476d92ac3SFrederic Weisbecker #endif
165576d92ac3SFrederic Weisbecker 
165672465447SKirill Tkhai static inline void add_nr_running(struct rq *rq, unsigned count)
1657391e43daSPeter Zijlstra {
165872465447SKirill Tkhai 	unsigned prev_nr = rq->nr_running;
165972465447SKirill Tkhai 
166072465447SKirill Tkhai 	rq->nr_running = prev_nr + count;
16619f3660c2SFrederic Weisbecker 
166272465447SKirill Tkhai 	if (prev_nr < 2 && rq->nr_running >= 2) {
16634486edd1STim Chen #ifdef CONFIG_SMP
16644486edd1STim Chen 		if (!rq->rd->overload)
16654486edd1STim Chen 			rq->rd->overload = true;
16664486edd1STim Chen #endif
166776d92ac3SFrederic Weisbecker 	}
16684486edd1STim Chen 
166976d92ac3SFrederic Weisbecker 	sched_update_tick_dependency(rq);
16704486edd1STim Chen }
1671391e43daSPeter Zijlstra 
167272465447SKirill Tkhai static inline void sub_nr_running(struct rq *rq, unsigned count)
1673391e43daSPeter Zijlstra {
167472465447SKirill Tkhai 	rq->nr_running -= count;
167576d92ac3SFrederic Weisbecker 	/* Check if we still need preemption */
167676d92ac3SFrederic Weisbecker 	sched_update_tick_dependency(rq);
1677391e43daSPeter Zijlstra }
1678391e43daSPeter Zijlstra 
1679391e43daSPeter Zijlstra extern void update_rq_clock(struct rq *rq);
1680391e43daSPeter Zijlstra 
1681391e43daSPeter Zijlstra extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1682391e43daSPeter Zijlstra extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1683391e43daSPeter Zijlstra 
1684391e43daSPeter Zijlstra extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1685391e43daSPeter Zijlstra 
1686391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_time_avg;
1687391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_nr_migrate;
1688391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_migration_cost;
1689391e43daSPeter Zijlstra 
1690391e43daSPeter Zijlstra static inline u64 sched_avg_period(void)
1691391e43daSPeter Zijlstra {
1692391e43daSPeter Zijlstra 	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1693391e43daSPeter Zijlstra }
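/*
 * Editor's note: with the default sysctl_sched_time_avg of 1000 (ms),
 * this evaluates to 500e6 ns, i.e. rt_avg is decayed on a half-second
 * period.
 */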
1694391e43daSPeter Zijlstra 
1695391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK
1696391e43daSPeter Zijlstra 
1697391e43daSPeter Zijlstra /*
1698391e43daSPeter Zijlstra  * Use hrtick when:
1699391e43daSPeter Zijlstra  *  - enabled by features
1700391e43daSPeter Zijlstra  *  - hrtimer is actually high res
1701391e43daSPeter Zijlstra  */
1702391e43daSPeter Zijlstra static inline int hrtick_enabled(struct rq *rq)
1703391e43daSPeter Zijlstra {
1704391e43daSPeter Zijlstra 	if (!sched_feat(HRTICK))
1705391e43daSPeter Zijlstra 		return 0;
1706391e43daSPeter Zijlstra 	if (!cpu_active(cpu_of(rq)))
1707391e43daSPeter Zijlstra 		return 0;
1708391e43daSPeter Zijlstra 	return hrtimer_is_hres_active(&rq->hrtick_timer);
1709391e43daSPeter Zijlstra }
1710391e43daSPeter Zijlstra 
1711391e43daSPeter Zijlstra void hrtick_start(struct rq *rq, u64 delay);
1712391e43daSPeter Zijlstra 
1713b39e66eaSMike Galbraith #else
1714b39e66eaSMike Galbraith 
1715b39e66eaSMike Galbraith static inline int hrtick_enabled(struct rq *rq)
1716b39e66eaSMike Galbraith {
1717b39e66eaSMike Galbraith 	return 0;
1718b39e66eaSMike Galbraith }
1719b39e66eaSMike Galbraith 
1720391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_HRTICK */
1721391e43daSPeter Zijlstra 
1722dfbca41fSPeter Zijlstra #ifndef arch_scale_freq_capacity
1723dfbca41fSPeter Zijlstra static __always_inline
17247673c8a4SJuri Lelli unsigned long arch_scale_freq_capacity(int cpu)
1725dfbca41fSPeter Zijlstra {
1726dfbca41fSPeter Zijlstra 	return SCHED_CAPACITY_SCALE;
1727dfbca41fSPeter Zijlstra }
1728dfbca41fSPeter Zijlstra #endif
1729b5b4860dSVincent Guittot 
17307e1a9208SJuri Lelli #ifdef CONFIG_SMP
17317e1a9208SJuri Lelli extern void sched_avg_update(struct rq *rq);
17327e1a9208SJuri Lelli 
17338cd5601cSMorten Rasmussen #ifndef arch_scale_cpu_capacity
17348cd5601cSMorten Rasmussen static __always_inline
17358cd5601cSMorten Rasmussen unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
17368cd5601cSMorten Rasmussen {
1737e3279a2eSDietmar Eggemann 	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
17388cd5601cSMorten Rasmussen 		return sd->smt_gain / sd->span_weight;
17398cd5601cSMorten Rasmussen 
17408cd5601cSMorten Rasmussen 	return SCHED_CAPACITY_SCALE;
17418cd5601cSMorten Rasmussen }
17428cd5601cSMorten Rasmussen #endif
17438cd5601cSMorten Rasmussen 
1744391e43daSPeter Zijlstra static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1745391e43daSPeter Zijlstra {
17467673c8a4SJuri Lelli 	rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
1747391e43daSPeter Zijlstra 	sched_avg_update(rq);
1748391e43daSPeter Zijlstra }
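/*
 * Editor's note: rt_delta is scaled by the current frequency capacity so
 * that rt_avg stays frequency-invariant; e.g. RT time accrued while the
 * CPU runs at half its maximum frequency contributes half weight.
 * sched_avg_update() then decays the accumulated value once per
 * sched_avg_period().
 */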
1749391e43daSPeter Zijlstra #else
17507e1a9208SJuri Lelli #ifndef arch_scale_cpu_capacity
17517e1a9208SJuri Lelli static __always_inline
17527e1a9208SJuri Lelli unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
17537e1a9208SJuri Lelli {
17547e1a9208SJuri Lelli 	return SCHED_CAPACITY_SCALE;
17557e1a9208SJuri Lelli }
17567e1a9208SJuri Lelli #endif
1757391e43daSPeter Zijlstra static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1758391e43daSPeter Zijlstra static inline void sched_avg_update(struct rq *rq) { }
1759391e43daSPeter Zijlstra #endif
1760391e43daSPeter Zijlstra 
1761eb580751SPeter Zijlstra struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
17623e71a462SPeter Zijlstra 	__acquires(rq->lock);
17638a8c69c3SPeter Zijlstra 
1764eb580751SPeter Zijlstra struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
17653960c8c0SPeter Zijlstra 	__acquires(p->pi_lock)
17663e71a462SPeter Zijlstra 	__acquires(rq->lock);
17673960c8c0SPeter Zijlstra 
1768eb580751SPeter Zijlstra static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
17693960c8c0SPeter Zijlstra 	__releases(rq->lock)
17703960c8c0SPeter Zijlstra {
1771d8ac8971SMatt Fleming 	rq_unpin_lock(rq, rf);
17723960c8c0SPeter Zijlstra 	raw_spin_unlock(&rq->lock);
17733960c8c0SPeter Zijlstra }
17743960c8c0SPeter Zijlstra 
17753960c8c0SPeter Zijlstra static inline void
1776eb580751SPeter Zijlstra task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
17773960c8c0SPeter Zijlstra 	__releases(rq->lock)
17783960c8c0SPeter Zijlstra 	__releases(p->pi_lock)
17793960c8c0SPeter Zijlstra {
1780d8ac8971SMatt Fleming 	rq_unpin_lock(rq, rf);
17813960c8c0SPeter Zijlstra 	raw_spin_unlock(&rq->lock);
1782eb580751SPeter Zijlstra 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
17833960c8c0SPeter Zijlstra }
17843960c8c0SPeter Zijlstra 
17858a8c69c3SPeter Zijlstra static inline void
17868a8c69c3SPeter Zijlstra rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
17878a8c69c3SPeter Zijlstra 	__acquires(rq->lock)
17888a8c69c3SPeter Zijlstra {
17898a8c69c3SPeter Zijlstra 	raw_spin_lock_irqsave(&rq->lock, rf->flags);
17908a8c69c3SPeter Zijlstra 	rq_pin_lock(rq, rf);
17918a8c69c3SPeter Zijlstra }
17928a8c69c3SPeter Zijlstra 
17938a8c69c3SPeter Zijlstra static inline void
17948a8c69c3SPeter Zijlstra rq_lock_irq(struct rq *rq, struct rq_flags *rf)
17958a8c69c3SPeter Zijlstra 	__acquires(rq->lock)
17968a8c69c3SPeter Zijlstra {
17978a8c69c3SPeter Zijlstra 	raw_spin_lock_irq(&rq->lock);
17988a8c69c3SPeter Zijlstra 	rq_pin_lock(rq, rf);
17998a8c69c3SPeter Zijlstra }
18008a8c69c3SPeter Zijlstra 
18018a8c69c3SPeter Zijlstra static inline void
18028a8c69c3SPeter Zijlstra rq_lock(struct rq *rq, struct rq_flags *rf)
18038a8c69c3SPeter Zijlstra 	__acquires(rq->lock)
18048a8c69c3SPeter Zijlstra {
18058a8c69c3SPeter Zijlstra 	raw_spin_lock(&rq->lock);
18068a8c69c3SPeter Zijlstra 	rq_pin_lock(rq, rf);
18078a8c69c3SPeter Zijlstra }
18088a8c69c3SPeter Zijlstra 
18098a8c69c3SPeter Zijlstra static inline void
18108a8c69c3SPeter Zijlstra rq_relock(struct rq *rq, struct rq_flags *rf)
18118a8c69c3SPeter Zijlstra 	__acquires(rq->lock)
18128a8c69c3SPeter Zijlstra {
18138a8c69c3SPeter Zijlstra 	raw_spin_lock(&rq->lock);
18148a8c69c3SPeter Zijlstra 	rq_repin_lock(rq, rf);
18158a8c69c3SPeter Zijlstra }
18168a8c69c3SPeter Zijlstra 
18178a8c69c3SPeter Zijlstra static inline void
18188a8c69c3SPeter Zijlstra rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
18198a8c69c3SPeter Zijlstra 	__releases(rq->lock)
18208a8c69c3SPeter Zijlstra {
18218a8c69c3SPeter Zijlstra 	rq_unpin_lock(rq, rf);
18228a8c69c3SPeter Zijlstra 	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
18238a8c69c3SPeter Zijlstra }
18248a8c69c3SPeter Zijlstra 
18258a8c69c3SPeter Zijlstra static inline void
18268a8c69c3SPeter Zijlstra rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
18278a8c69c3SPeter Zijlstra 	__releases(rq->lock)
18288a8c69c3SPeter Zijlstra {
18298a8c69c3SPeter Zijlstra 	rq_unpin_lock(rq, rf);
18308a8c69c3SPeter Zijlstra 	raw_spin_unlock_irq(&rq->lock);
18318a8c69c3SPeter Zijlstra }
18328a8c69c3SPeter Zijlstra 
18338a8c69c3SPeter Zijlstra static inline void
18348a8c69c3SPeter Zijlstra rq_unlock(struct rq *rq, struct rq_flags *rf)
18358a8c69c3SPeter Zijlstra 	__releases(rq->lock)
18368a8c69c3SPeter Zijlstra {
18378a8c69c3SPeter Zijlstra 	rq_unpin_lock(rq, rf);
18388a8c69c3SPeter Zijlstra 	raw_spin_unlock(&rq->lock);
18398a8c69c3SPeter Zijlstra }
18408a8c69c3SPeter Zijlstra 
1841391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1842391e43daSPeter Zijlstra #ifdef CONFIG_PREEMPT
1843391e43daSPeter Zijlstra 
1844391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1845391e43daSPeter Zijlstra 
1846391e43daSPeter Zijlstra /*
1847391e43daSPeter Zijlstra  * fair double_lock_balance: Safely acquires both rq->locks in a fair
1848391e43daSPeter Zijlstra  * way at the expense of forcing extra atomic operations in all
1849391e43daSPeter Zijlstra  * invocations.  This assures that the double_lock is acquired using the
1850391e43daSPeter Zijlstra  * same underlying policy as the spinlock_t on this architecture, which
1851391e43daSPeter Zijlstra  * reduces latency compared to the unfair variant below.  However, it
1852391e43daSPeter Zijlstra  * also adds more overhead and therefore may reduce throughput.
1853391e43daSPeter Zijlstra  */
1854391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1855391e43daSPeter Zijlstra 	__releases(this_rq->lock)
1856391e43daSPeter Zijlstra 	__acquires(busiest->lock)
1857391e43daSPeter Zijlstra 	__acquires(this_rq->lock)
1858391e43daSPeter Zijlstra {
1859391e43daSPeter Zijlstra 	raw_spin_unlock(&this_rq->lock);
1860391e43daSPeter Zijlstra 	double_rq_lock(this_rq, busiest);
1861391e43daSPeter Zijlstra 
1862391e43daSPeter Zijlstra 	return 1;
1863391e43daSPeter Zijlstra }
1864391e43daSPeter Zijlstra 
1865391e43daSPeter Zijlstra #else
1866391e43daSPeter Zijlstra /*
1867391e43daSPeter Zijlstra  * Unfair double_lock_balance: Optimizes throughput at the expense of
1868391e43daSPeter Zijlstra  * latency by eliminating extra atomic operations when the locks are
186997fb7a0aSIngo Molnar  * already in proper order on entry.  This favors lower CPU-ids and will
187097fb7a0aSIngo Molnar  * grant the double lock to lower CPUs over higher ids under contention,
1871391e43daSPeter Zijlstra  * regardless of entry order into the function.
1872391e43daSPeter Zijlstra  */
1873391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1874391e43daSPeter Zijlstra 	__releases(this_rq->lock)
1875391e43daSPeter Zijlstra 	__acquires(busiest->lock)
1876391e43daSPeter Zijlstra 	__acquires(this_rq->lock)
1877391e43daSPeter Zijlstra {
1878391e43daSPeter Zijlstra 	int ret = 0;
1879391e43daSPeter Zijlstra 
1880391e43daSPeter Zijlstra 	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1881391e43daSPeter Zijlstra 		if (busiest < this_rq) {
1882391e43daSPeter Zijlstra 			raw_spin_unlock(&this_rq->lock);
1883391e43daSPeter Zijlstra 			raw_spin_lock(&busiest->lock);
1884391e43daSPeter Zijlstra 			raw_spin_lock_nested(&this_rq->lock,
1885391e43daSPeter Zijlstra 					      SINGLE_DEPTH_NESTING);
1886391e43daSPeter Zijlstra 			ret = 1;
1887391e43daSPeter Zijlstra 		} else
1888391e43daSPeter Zijlstra 			raw_spin_lock_nested(&busiest->lock,
1889391e43daSPeter Zijlstra 					      SINGLE_DEPTH_NESTING);
1890391e43daSPeter Zijlstra 	}
1891391e43daSPeter Zijlstra 	return ret;
1892391e43daSPeter Zijlstra }
1893391e43daSPeter Zijlstra 
1894391e43daSPeter Zijlstra #endif /* CONFIG_PREEMPT */
1895391e43daSPeter Zijlstra 
1896391e43daSPeter Zijlstra /*
1897391e43daSPeter Zijlstra  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1898391e43daSPeter Zijlstra  */
1899391e43daSPeter Zijlstra static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1900391e43daSPeter Zijlstra {
1901391e43daSPeter Zijlstra 	if (unlikely(!irqs_disabled())) {
190297fb7a0aSIngo Molnar 		/* printk() doesn't work well under rq->lock */
1903391e43daSPeter Zijlstra 		raw_spin_unlock(&this_rq->lock);
1904391e43daSPeter Zijlstra 		BUG_ON(1);
1905391e43daSPeter Zijlstra 	}
1906391e43daSPeter Zijlstra 
1907391e43daSPeter Zijlstra 	return _double_lock_balance(this_rq, busiest);
1908391e43daSPeter Zijlstra }
1909391e43daSPeter Zijlstra 
1910391e43daSPeter Zijlstra static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1911391e43daSPeter Zijlstra 	__releases(busiest->lock)
1912391e43daSPeter Zijlstra {
1913391e43daSPeter Zijlstra 	raw_spin_unlock(&busiest->lock);
1914391e43daSPeter Zijlstra 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1915391e43daSPeter Zijlstra }
1916391e43daSPeter Zijlstra 
191774602315SPeter Zijlstra static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
191874602315SPeter Zijlstra {
191974602315SPeter Zijlstra 	if (l1 > l2)
192074602315SPeter Zijlstra 		swap(l1, l2);
192174602315SPeter Zijlstra 
192274602315SPeter Zijlstra 	spin_lock(l1);
192374602315SPeter Zijlstra 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
192474602315SPeter Zijlstra }
192574602315SPeter Zijlstra 
192660e69eedSMike Galbraith static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
192760e69eedSMike Galbraith {
192860e69eedSMike Galbraith 	if (l1 > l2)
192960e69eedSMike Galbraith 		swap(l1, l2);
193060e69eedSMike Galbraith 
193160e69eedSMike Galbraith 	spin_lock_irq(l1);
193260e69eedSMike Galbraith 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
193360e69eedSMike Galbraith }
193460e69eedSMike Galbraith 
193574602315SPeter Zijlstra static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
193674602315SPeter Zijlstra {
193774602315SPeter Zijlstra 	if (l1 > l2)
193874602315SPeter Zijlstra 		swap(l1, l2);
193974602315SPeter Zijlstra 
194074602315SPeter Zijlstra 	raw_spin_lock(l1);
194174602315SPeter Zijlstra 	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
194274602315SPeter Zijlstra }
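/*
 * Editor's note: all three helpers impose a single global acquisition
 * order (ascending lock address), so concurrent callers passing the same
 * pair in opposite order, double_lock(a, b) vs double_lock(b, a), both
 * take min(a, b) first and cannot ABBA-deadlock.
 */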
194374602315SPeter Zijlstra 
1944391e43daSPeter Zijlstra /*
1945391e43daSPeter Zijlstra  * double_rq_lock - safely lock two runqueues
1946391e43daSPeter Zijlstra  *
1947391e43daSPeter Zijlstra  * Note this does not disable interrupts like task_rq_lock,
1948391e43daSPeter Zijlstra  * you need to do so manually before calling.
1949391e43daSPeter Zijlstra  */
1950391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1951391e43daSPeter Zijlstra 	__acquires(rq1->lock)
1952391e43daSPeter Zijlstra 	__acquires(rq2->lock)
1953391e43daSPeter Zijlstra {
1954391e43daSPeter Zijlstra 	BUG_ON(!irqs_disabled());
1955391e43daSPeter Zijlstra 	if (rq1 == rq2) {
1956391e43daSPeter Zijlstra 		raw_spin_lock(&rq1->lock);
1957391e43daSPeter Zijlstra 		__acquire(rq2->lock);	/* Fake it out ;) */
1958391e43daSPeter Zijlstra 	} else {
1959391e43daSPeter Zijlstra 		if (rq1 < rq2) {
1960391e43daSPeter Zijlstra 			raw_spin_lock(&rq1->lock);
1961391e43daSPeter Zijlstra 			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1962391e43daSPeter Zijlstra 		} else {
1963391e43daSPeter Zijlstra 			raw_spin_lock(&rq2->lock);
1964391e43daSPeter Zijlstra 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1965391e43daSPeter Zijlstra 		}
1966391e43daSPeter Zijlstra 	}
1967391e43daSPeter Zijlstra }
1968391e43daSPeter Zijlstra 
1969391e43daSPeter Zijlstra /*
1970391e43daSPeter Zijlstra  * double_rq_unlock - safely unlock two runqueues
1971391e43daSPeter Zijlstra  *
1972391e43daSPeter Zijlstra  * Note this does not restore interrupts like task_rq_unlock,
1973391e43daSPeter Zijlstra  * you need to do so manually after calling.
1974391e43daSPeter Zijlstra  */
1975391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1976391e43daSPeter Zijlstra 	__releases(rq1->lock)
1977391e43daSPeter Zijlstra 	__releases(rq2->lock)
1978391e43daSPeter Zijlstra {
1979391e43daSPeter Zijlstra 	raw_spin_unlock(&rq1->lock);
1980391e43daSPeter Zijlstra 	if (rq1 != rq2)
1981391e43daSPeter Zijlstra 		raw_spin_unlock(&rq2->lock);
1982391e43daSPeter Zijlstra 	else
1983391e43daSPeter Zijlstra 		__release(rq2->lock);
1984391e43daSPeter Zijlstra }
1985391e43daSPeter Zijlstra 
1986f2cb1360SIngo Molnar extern void set_rq_online (struct rq *rq);
1987f2cb1360SIngo Molnar extern void set_rq_offline(struct rq *rq);
1988f2cb1360SIngo Molnar extern bool sched_smp_initialized;
1989f2cb1360SIngo Molnar 
1990391e43daSPeter Zijlstra #else /* CONFIG_SMP */
1991391e43daSPeter Zijlstra 
1992391e43daSPeter Zijlstra /*
1993391e43daSPeter Zijlstra  * double_rq_lock - safely lock two runqueues
1994391e43daSPeter Zijlstra  *
1995391e43daSPeter Zijlstra  * Note this does not disable interrupts like task_rq_lock,
1996391e43daSPeter Zijlstra  * you need to do so manually before calling.
1997391e43daSPeter Zijlstra  */
1998391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1999391e43daSPeter Zijlstra 	__acquires(rq1->lock)
2000391e43daSPeter Zijlstra 	__acquires(rq2->lock)
2001391e43daSPeter Zijlstra {
2002391e43daSPeter Zijlstra 	BUG_ON(!irqs_disabled());
2003391e43daSPeter Zijlstra 	BUG_ON(rq1 != rq2);
2004391e43daSPeter Zijlstra 	raw_spin_lock(&rq1->lock);
2005391e43daSPeter Zijlstra 	__acquire(rq2->lock);	/* Fake it out ;) */
2006391e43daSPeter Zijlstra }
2007391e43daSPeter Zijlstra 
2008391e43daSPeter Zijlstra /*
2009391e43daSPeter Zijlstra  * double_rq_unlock - safely unlock two runqueues
2010391e43daSPeter Zijlstra  *
2011391e43daSPeter Zijlstra  * Note this does not restore interrupts like task_rq_unlock,
2012391e43daSPeter Zijlstra  * you need to do so manually after calling.
2013391e43daSPeter Zijlstra  */
2014391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2015391e43daSPeter Zijlstra 	__releases(rq1->lock)
2016391e43daSPeter Zijlstra 	__releases(rq2->lock)
2017391e43daSPeter Zijlstra {
2018391e43daSPeter Zijlstra 	BUG_ON(rq1 != rq2);
2019391e43daSPeter Zijlstra 	raw_spin_unlock(&rq1->lock);
2020391e43daSPeter Zijlstra 	__release(rq2->lock);
2021391e43daSPeter Zijlstra }
2022391e43daSPeter Zijlstra 
2023391e43daSPeter Zijlstra #endif
2024391e43daSPeter Zijlstra 
2025391e43daSPeter Zijlstra extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2026391e43daSPeter Zijlstra extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
20276b55c965SSrikar Dronamraju 
20286b55c965SSrikar Dronamraju #ifdef	CONFIG_SCHED_DEBUG
20299469eb01SPeter Zijlstra extern bool sched_debug_enabled;
20309469eb01SPeter Zijlstra 
2031391e43daSPeter Zijlstra extern void print_cfs_stats(struct seq_file *m, int cpu);
2032391e43daSPeter Zijlstra extern void print_rt_stats(struct seq_file *m, int cpu);
2033acb32132SWanpeng Li extern void print_dl_stats(struct seq_file *m, int cpu);
20346b55c965SSrikar Dronamraju extern void
20356b55c965SSrikar Dronamraju print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2036397f2378SSrikar Dronamraju #ifdef CONFIG_NUMA_BALANCING
2037397f2378SSrikar Dronamraju extern void
2038397f2378SSrikar Dronamraju show_numa_stats(struct task_struct *p, struct seq_file *m);
2039397f2378SSrikar Dronamraju extern void
2040397f2378SSrikar Dronamraju print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2041397f2378SSrikar Dronamraju 	unsigned long tpf, unsigned long gsf, unsigned long gpf);
2042397f2378SSrikar Dronamraju #endif /* CONFIG_NUMA_BALANCING */
2043397f2378SSrikar Dronamraju #endif /* CONFIG_SCHED_DEBUG */
2044391e43daSPeter Zijlstra 
2045391e43daSPeter Zijlstra extern void init_cfs_rq(struct cfs_rq *cfs_rq);
204607c54f7aSAbel Vesa extern void init_rt_rq(struct rt_rq *rt_rq);
204707c54f7aSAbel Vesa extern void init_dl_rq(struct dl_rq *dl_rq);
2048391e43daSPeter Zijlstra 
20491ee14e6cSBen Segall extern void cfs_bandwidth_usage_inc(void);
20501ee14e6cSBen Segall extern void cfs_bandwidth_usage_dec(void);
20511c792db7SSuresh Siddha 
20523451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
205300357f5eSPeter Zijlstra #define NOHZ_BALANCE_KICK_BIT	0
205400357f5eSPeter Zijlstra #define NOHZ_STATS_KICK_BIT	1
2055a22e47a4SPeter Zijlstra 
2056a22e47a4SPeter Zijlstra #define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
2057b7031a02SPeter Zijlstra #define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
2058b7031a02SPeter Zijlstra 
2059b7031a02SPeter Zijlstra #define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
20601c792db7SSuresh Siddha 
20611c792db7SSuresh Siddha #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
206220a5c8ccSThomas Gleixner 
206300357f5eSPeter Zijlstra extern void nohz_balance_exit_idle(struct rq *rq);
206420a5c8ccSThomas Gleixner #else
206500357f5eSPeter Zijlstra static inline void nohz_balance_exit_idle(struct rq *rq) { }
20661c792db7SSuresh Siddha #endif
206773fbec60SFrederic Weisbecker 
2068daec5798SLuca Abeni 
2069daec5798SLuca Abeni #ifdef CONFIG_SMP
2070daec5798SLuca Abeni static inline
2071daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw)
2072daec5798SLuca Abeni {
2073daec5798SLuca Abeni 	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
2074daec5798SLuca Abeni 	int i;
2075daec5798SLuca Abeni 
2076daec5798SLuca Abeni 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2077daec5798SLuca Abeni 			 "sched RCU must be held");
2078daec5798SLuca Abeni 	for_each_cpu_and(i, rd->span, cpu_active_mask) {
2079daec5798SLuca Abeni 		struct rq *rq = cpu_rq(i);
2080daec5798SLuca Abeni 
2081daec5798SLuca Abeni 		rq->dl.extra_bw += bw;
2082daec5798SLuca Abeni 	}
2083daec5798SLuca Abeni }
2084daec5798SLuca Abeni #else
2085daec5798SLuca Abeni static inline
2086daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw)
2087daec5798SLuca Abeni {
2088daec5798SLuca Abeni 	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2089daec5798SLuca Abeni 
2090daec5798SLuca Abeni 	dl->extra_bw += bw;
2091daec5798SLuca Abeni }
2092daec5798SLuca Abeni #endif
2093daec5798SLuca Abeni 
2094daec5798SLuca Abeni 
209573fbec60SFrederic Weisbecker #ifdef CONFIG_IRQ_TIME_ACCOUNTING
209619d23dbfSFrederic Weisbecker struct irqtime {
209725e2d8c1SFrederic Weisbecker 	u64			total;
2098a499a5a1SFrederic Weisbecker 	u64			tick_delta;
209919d23dbfSFrederic Weisbecker 	u64			irq_start_time;
210019d23dbfSFrederic Weisbecker 	struct u64_stats_sync	sync;
210119d23dbfSFrederic Weisbecker };
210273fbec60SFrederic Weisbecker 
210319d23dbfSFrederic Weisbecker DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
210473fbec60SFrederic Weisbecker 
210525e2d8c1SFrederic Weisbecker /*
210625e2d8c1SFrederic Weisbecker  * Returns the irqtime minus the softirq time computed by ksoftirqd.
210725e2d8c1SFrederic Weisbecker  * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
210825e2d8c1SFrederic Weisbecker  * subtracted from it and would never move forward.
210925e2d8c1SFrederic Weisbecker  */
211073fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu)
211173fbec60SFrederic Weisbecker {
211219d23dbfSFrederic Weisbecker 	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
211319d23dbfSFrederic Weisbecker 	unsigned int seq;
211419d23dbfSFrederic Weisbecker 	u64 total;
211573fbec60SFrederic Weisbecker 
211673fbec60SFrederic Weisbecker 	do {
211719d23dbfSFrederic Weisbecker 		seq = __u64_stats_fetch_begin(&irqtime->sync);
211825e2d8c1SFrederic Weisbecker 		total = irqtime->total;
211919d23dbfSFrederic Weisbecker 	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
212073fbec60SFrederic Weisbecker 
212119d23dbfSFrederic Weisbecker 	return total;
212273fbec60SFrederic Weisbecker }
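/*
 * Editor's note: the loop above is the standard u64_stats_sync read side:
 * if a writer updated irqtime->total between the two fetches, the sequence
 * numbers differ and the read retries. On 64-bit kernels the seqcount
 * compiles away and this reduces to a plain load.
 */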
212373fbec60SFrederic Weisbecker #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2124adaf9fcdSRafael J. Wysocki 
2125adaf9fcdSRafael J. Wysocki #ifdef CONFIG_CPU_FREQ
2126adaf9fcdSRafael J. Wysocki DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
2127adaf9fcdSRafael J. Wysocki 
2128adaf9fcdSRafael J. Wysocki /**
2129adaf9fcdSRafael J. Wysocki  * cpufreq_update_util - Take a note about CPU utilization changes.
213012bde33dSRafael J. Wysocki  * @rq: Runqueue to carry out the update for.
213158919e83SRafael J. Wysocki  * @flags: Update reason flags.
2132adaf9fcdSRafael J. Wysocki  *
213358919e83SRafael J. Wysocki  * This function is called by the scheduler on the CPU whose utilization is
213458919e83SRafael J. Wysocki  * being updated.
2135adaf9fcdSRafael J. Wysocki  *
2136adaf9fcdSRafael J. Wysocki  * It can only be called from RCU-sched read-side critical sections.
2137adaf9fcdSRafael J. Wysocki  *
2138adaf9fcdSRafael J. Wysocki  * The way cpufreq is currently arranged requires it to evaluate the CPU
2139adaf9fcdSRafael J. Wysocki  * performance state (frequency/voltage) on a regular basis to prevent it from
2140adaf9fcdSRafael J. Wysocki  * being stuck in a completely inadequate performance level for too long.
2141e0367b12SJuri Lelli  * That is not guaranteed to happen if the updates are only triggered from CFS
2142e0367b12SJuri Lelli  * and DL, though, because they may not be coming in if only RT tasks are
2143e0367b12SJuri Lelli  * active all the time.
2144adaf9fcdSRafael J. Wysocki  *
2145e0367b12SJuri Lelli  * As a workaround for that issue, this function is called periodically by the
2146e0367b12SJuri Lelli  * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
2147adaf9fcdSRafael J. Wysocki  * but that really is a band-aid.  Going forward it should be replaced with
2148e0367b12SJuri Lelli  * solutions targeted more specifically at RT tasks.
2149adaf9fcdSRafael J. Wysocki  */
215012bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2151adaf9fcdSRafael J. Wysocki {
215258919e83SRafael J. Wysocki 	struct update_util_data *data;
215358919e83SRafael J. Wysocki 
2154674e7541SViresh Kumar 	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2155674e7541SViresh Kumar 						  cpu_of(rq)));
215658919e83SRafael J. Wysocki 	if (data)
215712bde33dSRafael J. Wysocki 		data->func(data, rq_clock(rq), flags);
215812bde33dSRafael J. Wysocki }
2159adaf9fcdSRafael J. Wysocki #else
216012bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2161adaf9fcdSRafael J. Wysocki #endif /* CONFIG_CPU_FREQ */
2162be53f58fSLinus Torvalds 
21639bdcb44eSRafael J. Wysocki #ifdef arch_scale_freq_capacity
21649bdcb44eSRafael J. Wysocki # ifndef arch_scale_freq_invariant
216597fb7a0aSIngo Molnar #  define arch_scale_freq_invariant()	true
21669bdcb44eSRafael J. Wysocki # endif
216797fb7a0aSIngo Molnar #else
216897fb7a0aSIngo Molnar # define arch_scale_freq_invariant()	false
21699bdcb44eSRafael J. Wysocki #endif
2170d4edd662SJuri Lelli 
2171794a56ebSJuri Lelli #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
2172d4edd662SJuri Lelli static inline unsigned long cpu_util_dl(struct rq *rq)
2173d4edd662SJuri Lelli {
2174d4edd662SJuri Lelli 	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2175d4edd662SJuri Lelli }
2176d4edd662SJuri Lelli 
2177d4edd662SJuri Lelli static inline unsigned long cpu_util_cfs(struct rq *rq)
2178d4edd662SJuri Lelli {
2179a07630b8SPatrick Bellasi 	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2180a07630b8SPatrick Bellasi 
2181a07630b8SPatrick Bellasi 	if (sched_feat(UTIL_EST)) {
2182a07630b8SPatrick Bellasi 		util = max_t(unsigned long, util,
2183a07630b8SPatrick Bellasi 			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
2184a07630b8SPatrick Bellasi 	}
2185a07630b8SPatrick Bellasi 
2186a07630b8SPatrick Bellasi 	return util;
2187d4edd662SJuri Lelli }
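/*
 * Editor's worked example for the UTIL_EST clamp above: if a task has
 * just woken and rq->cfs.avg.util_avg has decayed to, say, 100 while
 * util_est.enqueued still holds 400, cpu_util_cfs() reports 400, so
 * schedutil does not request a too-low frequency across the wakeup.
 */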
2188794a56ebSJuri Lelli #endif
2189