/* kernel/sched/sched.h (openbmc/linux, revision 18bf2805) */

#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>

#include "cpupri.h"

extern __read_mostly int scheduler_running;

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
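/*
 * Worked example (editor's note, assuming the usual MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140 from <linux/sched.h>):
 *
 *	NICE_TO_PRIO(-20) == 100,  NICE_TO_PRIO(0) == 120,  NICE_TO_PRIO(19) == 139
 *	PRIO_TO_NICE(120) == 0
 *
 * i.e. the 40 nice levels map onto static priorities 100..139, directly
 * above the RT priority range 0..99.
 */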

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
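/*
 * Example (editor's note, same assumption of MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140): a nice-0 task has static_prio 120, so TASK_USER_PRIO()
 * yields 20, and MAX_USER_PRIO evaluates to 140 - 100 == 40.
 */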

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
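/*
 * Example (editor's note): with HZ == 1000 the divisor NSEC_PER_SEC / HZ is
 * 1,000,000, so NS_TO_JIFFIES(5000000) == 5; other HZ configs just change
 * the divisor.
 */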

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};
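/*
 * Rough usage sketch (editor's note): the RT class keeps one FIFO list per
 * priority level plus a bitmap of non-empty levels, so enqueue, dequeue and
 * "find the highest runnable priority" are all O(1), e.g.:
 *
 *	list_add_tail(&rt_se->run_list, array->queue + prio);
 *	__set_bit(prio, array->bitmap);
 *	...
 *	idx = sched_find_first_bit(array->bitmap);  (highest queued prio)
 *
 * See kernel/sched/rt.c for the real enqueue/dequeue paths.
 */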

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};
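/*
 * Editor's note: rt_runtime is the budget available per rt_period, and
 * rt_period_timer replenishes it. With the usual defaults (1s period,
 * 950ms runtime) RT tasks may consume at most 95% of each second; a
 * runtime of RUNTIME_INF disables throttling entirely.
 */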

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};
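/*
 * Semantics sketch (editor's note): quota is the group's budget per period,
 * runtime is what remains of it in the current period, and throttled cfs_rqs
 * sit on throttled_cfs_rq until period_timer refills the pool (slack_timer
 * returns unused slices early). E.g. period = 100ms and quota = 50ms caps
 * the whole group at 50ms of CPU time per 100ms window.
 */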

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities queued
 * on it, so the weight of a single entity should not be too large, and
 * neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif
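/*
 * Editor's note: MIN_SHARES and MAX_SHARES bound a group's shares value to
 * roughly [2, 262144] (1UL << 1 up to 1UL << 18) in unscaled units; the
 * default weight of 1024 sits comfortably inside that range.
 */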

/* Default task group.
 *	Every task in the system belongs to this group at bootup.
 */
extern struct task_group root_task_group;

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
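/*
 * Minimal visitor sketch (editor's note, hypothetical helper): @down is
 * called pre-order and @up post-order, and a non-zero return aborts the
 * walk. The caller must hold rcu_read_lock() (or equivalent), e.g.:
 *
 *	static int tg_count_one(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count_one, tg_nop, &n);
 *	rcu_read_unlock();
 */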

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_timestamp;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: places that want to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must acquire the locks in
 * ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#ifdef CONFIG_SMP
	unsigned long h_load_throttle;
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

	struct sched_avg avg;
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))
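/*
 * Usage note (editor's note): this_rq() and raw_rq() dereference per-cpu
 * data, so the caller needs preemption disabled (or must not care which CPU
 * it lands on); cpu_rq(cpu) works for any cpu, and task_rq(p) is only stable
 * while p's pi_lock and/or the rq lock is held.
 */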

#ifdef CONFIG_SMP

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);
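/*
 * Editor's note: sd_llc/sd_llc_id cache, per cpu, the highest domain that
 * still shares the last-level cache; core.c derives it with something along
 * the lines of highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES).
 */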

extern int group_balance_cpu(struct sched_group *sg);

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_subsys_state() and friends because the cgroup
 * subsystem changes that value before the cgroup_subsys::attach() method
 * is called, therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg; the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
static __always_inline bool static_branch__true(struct static_key *key)
{
	return static_key_true(key); /* Not out of line branch. */
}

static __always_inline bool static_branch__false(struct static_key *key)
{
	return static_key_false(key); /* Out of line branch. */
}

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_branch__##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
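/*
 * Example (editor's note): with the default sysctl values
 * (sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000) these return
 * 1s and 0.95s in nanoseconds, i.e. RT tasks may use 95% of every second;
 * setting the runtime sysctl to -1 yields RUNTIME_INF and removes the limit.
 */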



static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}


#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
	raw_spin_unlock(&rq->lock);
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
	local_irq_enable();
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */


static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
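/*
 * Sanity check of the ~1.25x-per-nice-step rule against the table (editor's
 * note): nice 0 is 1024, nice 1 is 820 (~1024 / 1.25) and nice -1 is 1277
 * (~1024 * 1.25).
 */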

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
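/*
 * Editor's note: each entry is ~2^32 / prio_to_weight[i]; e.g. for nice 0,
 * 4294967296 / 1024 == 4194304, matching the table. Dividing by a weight can
 * then be done as a multiply by the inverse plus a >> 32 shift.
 */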

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};


#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;


#ifdef CONFIG_SMP

extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

#else	/* CONFIG_SMP */

static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void update_group_power(struct sched_domain *sd, int cpu);
extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_idle_cpu_load(struct rq *this_rq);

#ifdef CONFIG_CGROUP_CPUACCT
#include <linux/cgroup.h>
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct kernel_cpustat __percpu *cpustat;
};

extern struct cgroup_subsys cpuacct_subsys;
extern struct cpuacct root_cpuacct;

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	if (!ca || !ca->css.cgroup->parent)
		return NULL;
	return cgroup_ca(ca->css.cgroup->parent);
}

extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif

#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
	if (unlikely(steal > NSEC_PER_SEC))
		return div_u64(steal, TICK_NSEC);

	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
#endif

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;
}

static inline void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}
1064391e43daSPeter Zijlstra 
1065391e43daSPeter Zijlstra static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1066391e43daSPeter Zijlstra 	__releases(busiest->lock)
1067391e43daSPeter Zijlstra {
1068391e43daSPeter Zijlstra 	raw_spin_unlock(&busiest->lock);
1069391e43daSPeter Zijlstra 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1070391e43daSPeter Zijlstra }
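
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * pull-side caller of double_lock_balance()/double_unlock_balance().
 * It assumes this_rq->lock is already held with interrupts disabled.
 * A nonzero return from double_lock_balance() means this_rq->lock was
 * dropped and re-acquired, so any state derived from this_rq must be
 * revalidated before use.
 */
static inline void example_double_lock_usage(struct rq *this_rq,
					     struct rq *busiest)
{
	if (double_lock_balance(this_rq, busiest)) {
		/* this_rq->lock was released; re-check cached state here */
	}

	/* ... move work from busiest to this_rq with both locks held ... */

	double_unlock_balance(this_rq, busiest);
}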
1071391e43daSPeter Zijlstra 
1072391e43daSPeter Zijlstra /*
1073391e43daSPeter Zijlstra  * double_rq_lock - safely lock two runqueues
1074391e43daSPeter Zijlstra  *
1075391e43daSPeter Zijlstra  * Note this does not disable interrupts like task_rq_lock;
1076391e43daSPeter Zijlstra  * you need to do so manually before calling.
1077391e43daSPeter Zijlstra  */
1078391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1079391e43daSPeter Zijlstra 	__acquires(rq1->lock)
1080391e43daSPeter Zijlstra 	__acquires(rq2->lock)
1081391e43daSPeter Zijlstra {
1082391e43daSPeter Zijlstra 	BUG_ON(!irqs_disabled());
1083391e43daSPeter Zijlstra 	if (rq1 == rq2) {
1084391e43daSPeter Zijlstra 		raw_spin_lock(&rq1->lock);
1085391e43daSPeter Zijlstra 		__acquire(rq2->lock);	/* Fake it out ;) */
1086391e43daSPeter Zijlstra 	} else {
1087391e43daSPeter Zijlstra 		if (rq1 < rq2) {
1088391e43daSPeter Zijlstra 			raw_spin_lock(&rq1->lock);
1089391e43daSPeter Zijlstra 			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1090391e43daSPeter Zijlstra 		} else {
1091391e43daSPeter Zijlstra 			raw_spin_lock(&rq2->lock);
1092391e43daSPeter Zijlstra 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1093391e43daSPeter Zijlstra 		}
1094391e43daSPeter Zijlstra 	}
1095391e43daSPeter Zijlstra }
1096391e43daSPeter Zijlstra 
1097391e43daSPeter Zijlstra /*
1098391e43daSPeter Zijlstra  * double_rq_unlock - safely unlock two runqueues
1099391e43daSPeter Zijlstra  *
1100391e43daSPeter Zijlstra  * Note this does not restore interrupts like task_rq_unlock;
1101391e43daSPeter Zijlstra  * you need to do so manually after calling.
1102391e43daSPeter Zijlstra  */
1103391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1104391e43daSPeter Zijlstra 	__releases(rq1->lock)
1105391e43daSPeter Zijlstra 	__releases(rq2->lock)
1106391e43daSPeter Zijlstra {
1107391e43daSPeter Zijlstra 	raw_spin_unlock(&rq1->lock);
1108391e43daSPeter Zijlstra 	if (rq1 != rq2)
1109391e43daSPeter Zijlstra 		raw_spin_unlock(&rq2->lock);
1110391e43daSPeter Zijlstra 	else
1111391e43daSPeter Zijlstra 		__release(rq2->lock);
1112391e43daSPeter Zijlstra }
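
/*
 * Illustrative sketch, not part of the original header: taking both
 * runqueue locks for an operation that must see rq1 and rq2 atomically.
 * The address-ordered locking in double_rq_lock() above is what avoids
 * an ABBA deadlock when two CPUs lock the same pair concurrently; the
 * interrupt handling is the caller's job, as the comments note.
 */
static inline void example_double_rq_usage(struct rq *rq1, struct rq *rq2)
{
	unsigned long flags;

	local_irq_save(flags);
	double_rq_lock(rq1, rq2);
	/* ... operate on rq1 and rq2 with both locks held ... */
	double_rq_unlock(rq1, rq2);
	local_irq_restore(flags);
}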
1113391e43daSPeter Zijlstra 
1114391e43daSPeter Zijlstra #else /* CONFIG_SMP */
1115391e43daSPeter Zijlstra 
1116391e43daSPeter Zijlstra /*
1117391e43daSPeter Zijlstra  * double_rq_lock - safely lock two runqueues
1118391e43daSPeter Zijlstra  *
1119391e43daSPeter Zijlstra  * Note this does not disable interrupts like task_rq_lock;
1120391e43daSPeter Zijlstra  * you need to do so manually before calling.
1121391e43daSPeter Zijlstra  */
1122391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1123391e43daSPeter Zijlstra 	__acquires(rq1->lock)
1124391e43daSPeter Zijlstra 	__acquires(rq2->lock)
1125391e43daSPeter Zijlstra {
1126391e43daSPeter Zijlstra 	BUG_ON(!irqs_disabled());
1127391e43daSPeter Zijlstra 	BUG_ON(rq1 != rq2);
1128391e43daSPeter Zijlstra 	raw_spin_lock(&rq1->lock);
1129391e43daSPeter Zijlstra 	__acquire(rq2->lock);	/* Fake it out ;) */
1130391e43daSPeter Zijlstra }
1131391e43daSPeter Zijlstra 
1132391e43daSPeter Zijlstra /*
1133391e43daSPeter Zijlstra  * double_rq_unlock - safely unlock two runqueues
1134391e43daSPeter Zijlstra  *
1135391e43daSPeter Zijlstra  * Note this does not restore interrupts like task_rq_unlock;
1136391e43daSPeter Zijlstra  * you need to do so manually after calling.
1137391e43daSPeter Zijlstra  */
1138391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1139391e43daSPeter Zijlstra 	__releases(rq1->lock)
1140391e43daSPeter Zijlstra 	__releases(rq2->lock)
1141391e43daSPeter Zijlstra {
1142391e43daSPeter Zijlstra 	BUG_ON(rq1 != rq2);
1143391e43daSPeter Zijlstra 	raw_spin_unlock(&rq1->lock);
1144391e43daSPeter Zijlstra 	__release(rq2->lock);
1145391e43daSPeter Zijlstra }
1146391e43daSPeter Zijlstra 
1147391e43daSPeter Zijlstra #endif
1148391e43daSPeter Zijlstra 
1149391e43daSPeter Zijlstra extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1150391e43daSPeter Zijlstra extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1151391e43daSPeter Zijlstra extern void print_cfs_stats(struct seq_file *m, int cpu);
1152391e43daSPeter Zijlstra extern void print_rt_stats(struct seq_file *m, int cpu);
1153391e43daSPeter Zijlstra 
1154391e43daSPeter Zijlstra extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1155391e43daSPeter Zijlstra extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
1156391e43daSPeter Zijlstra 
1157391e43daSPeter Zijlstra extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
11581c792db7SSuresh Siddha 
11591c792db7SSuresh Siddha #ifdef CONFIG_NO_HZ
11601c792db7SSuresh Siddha enum rq_nohz_flag_bits {
11611c792db7SSuresh Siddha 	NOHZ_TICK_STOPPED,
11621c792db7SSuresh Siddha 	NOHZ_BALANCE_KICK,
116369e1e811SSuresh Siddha 	NOHZ_IDLE,
11641c792db7SSuresh Siddha };
11651c792db7SSuresh Siddha 
11661c792db7SSuresh Siddha #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
11671c792db7SSuresh Siddha #endif
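
/*
 * Illustrative sketch, not part of the original header: how the
 * nohz_flags() accessor above is meant to be used, e.g. marking a CPU
 * for a nohz idle-balance kick and testing for it later.  The flag and
 * accessor come from the enum and macro above; these helper names are
 * hypothetical.
 */
#ifdef CONFIG_NO_HZ
static inline void example_set_balance_kick(int cpu)
{
	set_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
}

static inline int example_test_balance_kick(int cpu)
{
	return test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
}
#endif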
116873fbec60SFrederic Weisbecker 
116973fbec60SFrederic Weisbecker #ifdef CONFIG_IRQ_TIME_ACCOUNTING
117073fbec60SFrederic Weisbecker 
117173fbec60SFrederic Weisbecker DECLARE_PER_CPU(u64, cpu_hardirq_time);
117273fbec60SFrederic Weisbecker DECLARE_PER_CPU(u64, cpu_softirq_time);
117373fbec60SFrederic Weisbecker 
117473fbec60SFrederic Weisbecker #ifndef CONFIG_64BIT
117573fbec60SFrederic Weisbecker DECLARE_PER_CPU(seqcount_t, irq_time_seq);
117673fbec60SFrederic Weisbecker 
117773fbec60SFrederic Weisbecker static inline void irq_time_write_begin(void)
117873fbec60SFrederic Weisbecker {
117973fbec60SFrederic Weisbecker 	__this_cpu_inc(irq_time_seq.sequence);
118073fbec60SFrederic Weisbecker 	smp_wmb();
118173fbec60SFrederic Weisbecker }
118273fbec60SFrederic Weisbecker 
118373fbec60SFrederic Weisbecker static inline void irq_time_write_end(void)
118473fbec60SFrederic Weisbecker {
118573fbec60SFrederic Weisbecker 	smp_wmb();
118673fbec60SFrederic Weisbecker 	__this_cpu_inc(irq_time_seq.sequence);
118773fbec60SFrederic Weisbecker }
118873fbec60SFrederic Weisbecker 
118973fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu)
119073fbec60SFrederic Weisbecker {
119173fbec60SFrederic Weisbecker 	u64 irq_time;
119273fbec60SFrederic Weisbecker 	unsigned seq;
119373fbec60SFrederic Weisbecker 
119473fbec60SFrederic Weisbecker 	do {
119573fbec60SFrederic Weisbecker 		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
119673fbec60SFrederic Weisbecker 		irq_time = per_cpu(cpu_softirq_time, cpu) +
119773fbec60SFrederic Weisbecker 			   per_cpu(cpu_hardirq_time, cpu);
119873fbec60SFrederic Weisbecker 	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
119973fbec60SFrederic Weisbecker 
120073fbec60SFrederic Weisbecker 	return irq_time;
120173fbec60SFrederic Weisbecker }
120273fbec60SFrederic Weisbecker #else /* CONFIG_64BIT */
120373fbec60SFrederic Weisbecker static inline void irq_time_write_begin(void)
120473fbec60SFrederic Weisbecker {
120573fbec60SFrederic Weisbecker }
120673fbec60SFrederic Weisbecker 
120773fbec60SFrederic Weisbecker static inline void irq_time_write_end(void)
120873fbec60SFrederic Weisbecker {
120973fbec60SFrederic Weisbecker }
121073fbec60SFrederic Weisbecker 
121173fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu)
121273fbec60SFrederic Weisbecker {
121373fbec60SFrederic Weisbecker 	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
121473fbec60SFrederic Weisbecker }
121573fbec60SFrederic Weisbecker #endif /* CONFIG_64BIT */
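
/*
 * Illustrative sketch, not part of the original header: how a local-CPU
 * writer would publish an irq time update so that irq_time_read() sees
 * a consistent sum.  On 32-bit the seqcount write section pairs with the
 * read retry loop above; on 64-bit both write helpers compile away.
 * The helper name is hypothetical.
 */
static inline void example_add_hardirq_time(u64 delta_ns)
{
	irq_time_write_begin();
	__this_cpu_add(cpu_hardirq_time, delta_ns);
	irq_time_write_end();
}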
121673fbec60SFrederic Weisbecker #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
121773fbec60SFrederic Weisbecker 
1218