xref: /openbmc/linux/kernel/sched/rt.c (revision 77a4d1a1b9a122ca1fa3507bd30aec1520d7a8a4)
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>
#include <linux/irq_work.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

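/*
 * Per-period replenishment timer: while RT bandwidth control is active,
 * this hrtimer fires every rt_period, tops the runqueues back up with
 * runtime and lifts any throttling (see do_sched_rt_period_timer()).
 */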
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

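/*
 * Note: init_rt_bandwidth() above is called from sched_init(), roughly as
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(), global_rt_runtime());
 *
 * which by default corresponds to sysctl_sched_rt_period_us = 1000000 and
 * sysctl_sched_rt_runtime_us = 950000, i.e. RT tasks may consume at most
 * 950ms out of every 1s period.
 */
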
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_SMP
static void push_irq_work_func(struct irq_work *work);
#endif

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);
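	/*
	 * The always-set delimiter bit guarantees that a search over the
	 * bitmap terminates: sched_find_first_bit() returns MAX_RT_PRIO
	 * when no priority level has queued entities.
	 */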

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);

#ifdef HAVE_RT_PUSH_IPI
	rt_rq->push_flags = 0;
	rt_rq->push_cpu = nr_cpu_ids;
	raw_spin_lock_init(&rt_rq->push_lock);
	init_irq_work(&rt_rq->push_work, push_irq_work_func);
#endif
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

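/*
 * A sched_rt_entity either represents a single task (my_q == NULL) or a
 * whole task group, in which case my_q points at the group's own rt_rq
 * on this CPU.
 */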
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static int pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

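/*
 * An rq counts as RT-overloaded when it has more than one RT task queued
 * and at least one of them may migrate: the surplus tasks are candidates
 * to be pushed to (or pulled by) other CPUs.
 */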
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static inline void set_post_schedule(struct rq *rq)
{
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
}

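/*
 * pushable_tasks is a priority-sorted plist: plist_add() keeps it ordered
 * by p->prio, so plist_first_entry() below always yields the
 * highest-priority task that could be pushed to another CPU.
 */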
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_rt_task(struct rq *this_rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

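/*
 * Walk every rt_rq on @rq, one per task group: next_task_group() above
 * advances through the global task_groups list (skipping autogroups) and
 * the loop terminates once it returns NULL.
 */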
#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se)
		dequeue_top_rt_rq(rt_rq);
	else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
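		/*
		 * e.g. on a 4-CPU root domain (weight == 4), a neighbour
		 * with 20ms of spare runtime donates 5ms per pass, capped
		 * below so our runtime never exceeds rt_period.
		 */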
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (!sched_feat(RT_RUNTIME_SHARE))
		return more;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

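/*
 * Per-period replenishment, run from sched_rt_period_timer() once per
 * elapsed rt_period: decay rt_time by overrun * runtime, unthrottle and
 * re-enqueue any rt_rq that fits its budget again, and return 1 ("idle",
 * stop the timer) only when no runqueue in the span still needs it.
 */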
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled.  If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway.  Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_skip_update(rq, false);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

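/*
 * The top-level rt_rq contributes its rt_nr_running to rq->nr_running
 * only while it is queued (rt_queued); these helpers flip that
 * contribution on and off, e.g. when the whole class gets throttled or
 * unthrottled.
 */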
974f4ebcbc0SKirill Tkhai static void
975f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(struct rt_rq *rt_rq)
976f4ebcbc0SKirill Tkhai {
977f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
978f4ebcbc0SKirill Tkhai 
979f4ebcbc0SKirill Tkhai 	BUG_ON(&rq->rt != rt_rq);
980f4ebcbc0SKirill Tkhai 
981f4ebcbc0SKirill Tkhai 	if (!rt_rq->rt_queued)
982f4ebcbc0SKirill Tkhai 		return;
983f4ebcbc0SKirill Tkhai 
984f4ebcbc0SKirill Tkhai 	BUG_ON(!rq->nr_running);
985f4ebcbc0SKirill Tkhai 
98672465447SKirill Tkhai 	sub_nr_running(rq, rt_rq->rt_nr_running);
987f4ebcbc0SKirill Tkhai 	rt_rq->rt_queued = 0;
988f4ebcbc0SKirill Tkhai }
989f4ebcbc0SKirill Tkhai 
990f4ebcbc0SKirill Tkhai static void
991f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(struct rt_rq *rt_rq)
992f4ebcbc0SKirill Tkhai {
993f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
994f4ebcbc0SKirill Tkhai 
995f4ebcbc0SKirill Tkhai 	BUG_ON(&rq->rt != rt_rq);
996f4ebcbc0SKirill Tkhai 
997f4ebcbc0SKirill Tkhai 	if (rt_rq->rt_queued)
998f4ebcbc0SKirill Tkhai 		return;
999f4ebcbc0SKirill Tkhai 	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1000f4ebcbc0SKirill Tkhai 		return;
1001f4ebcbc0SKirill Tkhai 
100272465447SKirill Tkhai 	add_nr_running(rq, rt_rq->rt_nr_running);
1003f4ebcbc0SKirill Tkhai 	rt_rq->rt_queued = 1;
1004f4ebcbc0SKirill Tkhai }
1005f4ebcbc0SKirill Tkhai 
1006391e43daSPeter Zijlstra #if defined CONFIG_SMP
1007391e43daSPeter Zijlstra 
1008391e43daSPeter Zijlstra static void
1009391e43daSPeter Zijlstra inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1010391e43daSPeter Zijlstra {
1011391e43daSPeter Zijlstra 	struct rq *rq = rq_of_rt_rq(rt_rq);
1012391e43daSPeter Zijlstra 
1013757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED
1014757dfcaaSKirill Tkhai 	/*
1015757dfcaaSKirill Tkhai 	 * Change rq's cpupri only if rt_rq is the top queue.
1016757dfcaaSKirill Tkhai 	 */
1017757dfcaaSKirill Tkhai 	if (&rq->rt != rt_rq)
1018757dfcaaSKirill Tkhai 		return;
1019757dfcaaSKirill Tkhai #endif
1020391e43daSPeter Zijlstra 	if (rq->online && prio < prev_prio)
1021391e43daSPeter Zijlstra 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1022391e43daSPeter Zijlstra }
1023391e43daSPeter Zijlstra 
1024391e43daSPeter Zijlstra static void
1025391e43daSPeter Zijlstra dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1026391e43daSPeter Zijlstra {
1027391e43daSPeter Zijlstra 	struct rq *rq = rq_of_rt_rq(rt_rq);
1028391e43daSPeter Zijlstra 
1029757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED
1030757dfcaaSKirill Tkhai 	/*
1031757dfcaaSKirill Tkhai 	 * Change rq's cpupri only if rt_rq is the top queue.
1032757dfcaaSKirill Tkhai 	 */
1033757dfcaaSKirill Tkhai 	if (&rq->rt != rt_rq)
1034757dfcaaSKirill Tkhai 		return;
1035757dfcaaSKirill Tkhai #endif
1036391e43daSPeter Zijlstra 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1037391e43daSPeter Zijlstra 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1038391e43daSPeter Zijlstra }
1039391e43daSPeter Zijlstra 
1040391e43daSPeter Zijlstra #else /* CONFIG_SMP */
1041391e43daSPeter Zijlstra 
1042391e43daSPeter Zijlstra static inline
1043391e43daSPeter Zijlstra void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1044391e43daSPeter Zijlstra static inline
1045391e43daSPeter Zijlstra void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1046391e43daSPeter Zijlstra 
1047391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1048391e43daSPeter Zijlstra 
1049391e43daSPeter Zijlstra #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1050391e43daSPeter Zijlstra static void
1051391e43daSPeter Zijlstra inc_rt_prio(struct rt_rq *rt_rq, int prio)
1052391e43daSPeter Zijlstra {
1053391e43daSPeter Zijlstra 	int prev_prio = rt_rq->highest_prio.curr;
1054391e43daSPeter Zijlstra 
1055391e43daSPeter Zijlstra 	if (prio < prev_prio)
1056391e43daSPeter Zijlstra 		rt_rq->highest_prio.curr = prio;
1057391e43daSPeter Zijlstra 
1058391e43daSPeter Zijlstra 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1059391e43daSPeter Zijlstra }
1060391e43daSPeter Zijlstra 
1061391e43daSPeter Zijlstra static void
1062391e43daSPeter Zijlstra dec_rt_prio(struct rt_rq *rt_rq, int prio)
1063391e43daSPeter Zijlstra {
1064391e43daSPeter Zijlstra 	int prev_prio = rt_rq->highest_prio.curr;
1065391e43daSPeter Zijlstra 
1066391e43daSPeter Zijlstra 	if (rt_rq->rt_nr_running) {
1067391e43daSPeter Zijlstra 
1068391e43daSPeter Zijlstra 		WARN_ON(prio < prev_prio);
1069391e43daSPeter Zijlstra 
1070391e43daSPeter Zijlstra 		/*
1071391e43daSPeter Zijlstra 		 * This may have been our highest task, and therefore
1072391e43daSPeter Zijlstra 		 * we may have some recomputation to do
1073391e43daSPeter Zijlstra 		 */
1074391e43daSPeter Zijlstra 		if (prio == prev_prio) {
1075391e43daSPeter Zijlstra 			struct rt_prio_array *array = &rt_rq->active;
1076391e43daSPeter Zijlstra 
1077391e43daSPeter Zijlstra 			rt_rq->highest_prio.curr =
1078391e43daSPeter Zijlstra 				sched_find_first_bit(array->bitmap);
1079391e43daSPeter Zijlstra 		}
1080391e43daSPeter Zijlstra 
1081391e43daSPeter Zijlstra 	} else
1082391e43daSPeter Zijlstra 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1083391e43daSPeter Zijlstra 
1084391e43daSPeter Zijlstra 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1085391e43daSPeter Zijlstra }
1086391e43daSPeter Zijlstra 
1087391e43daSPeter Zijlstra #else
1088391e43daSPeter Zijlstra 
1089391e43daSPeter Zijlstra static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1090391e43daSPeter Zijlstra static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1091391e43daSPeter Zijlstra 
1092391e43daSPeter Zijlstra #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1093391e43daSPeter Zijlstra 
1094391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
1095391e43daSPeter Zijlstra 
1096391e43daSPeter Zijlstra static void
1097391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1098391e43daSPeter Zijlstra {
1099391e43daSPeter Zijlstra 	if (rt_se_boosted(rt_se))
1100391e43daSPeter Zijlstra 		rt_rq->rt_nr_boosted++;
1101391e43daSPeter Zijlstra 
1102391e43daSPeter Zijlstra 	if (rt_rq->tg)
1103391e43daSPeter Zijlstra 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1104391e43daSPeter Zijlstra }
1105391e43daSPeter Zijlstra 
1106391e43daSPeter Zijlstra static void
1107391e43daSPeter Zijlstra dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1108391e43daSPeter Zijlstra {
1109391e43daSPeter Zijlstra 	if (rt_se_boosted(rt_se))
1110391e43daSPeter Zijlstra 		rt_rq->rt_nr_boosted--;
1111391e43daSPeter Zijlstra 
1112391e43daSPeter Zijlstra 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1113391e43daSPeter Zijlstra }
1114391e43daSPeter Zijlstra 
1115391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */
1116391e43daSPeter Zijlstra 
1117391e43daSPeter Zijlstra static void
1118391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1119391e43daSPeter Zijlstra {
1120391e43daSPeter Zijlstra 	start_rt_bandwidth(&def_rt_bandwidth);
1121391e43daSPeter Zijlstra }
1122391e43daSPeter Zijlstra 
1123391e43daSPeter Zijlstra static inline
1124391e43daSPeter Zijlstra void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1125391e43daSPeter Zijlstra 
1126391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */
1127391e43daSPeter Zijlstra 
1128391e43daSPeter Zijlstra static inline
112922abdef3SKirill Tkhai unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
113022abdef3SKirill Tkhai {
113122abdef3SKirill Tkhai 	struct rt_rq *group_rq = group_rt_rq(rt_se);
113222abdef3SKirill Tkhai 
113322abdef3SKirill Tkhai 	if (group_rq)
113422abdef3SKirill Tkhai 		return group_rq->rt_nr_running;
113522abdef3SKirill Tkhai 	else
113622abdef3SKirill Tkhai 		return 1;
113722abdef3SKirill Tkhai }
113822abdef3SKirill Tkhai 
113922abdef3SKirill Tkhai static inline
1140391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1141391e43daSPeter Zijlstra {
1142391e43daSPeter Zijlstra 	int prio = rt_se_prio(rt_se);
1143391e43daSPeter Zijlstra 
1144391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(prio));
114522abdef3SKirill Tkhai 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1146391e43daSPeter Zijlstra 
1147391e43daSPeter Zijlstra 	inc_rt_prio(rt_rq, prio);
1148391e43daSPeter Zijlstra 	inc_rt_migration(rt_se, rt_rq);
1149391e43daSPeter Zijlstra 	inc_rt_group(rt_se, rt_rq);
1150391e43daSPeter Zijlstra }
1151391e43daSPeter Zijlstra 
1152391e43daSPeter Zijlstra static inline
1153391e43daSPeter Zijlstra void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1154391e43daSPeter Zijlstra {
1155391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1156391e43daSPeter Zijlstra 	WARN_ON(!rt_rq->rt_nr_running);
115722abdef3SKirill Tkhai 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1158391e43daSPeter Zijlstra 
1159391e43daSPeter Zijlstra 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1160391e43daSPeter Zijlstra 	dec_rt_migration(rt_se, rt_rq);
1161391e43daSPeter Zijlstra 	dec_rt_group(rt_se, rt_rq);
1162391e43daSPeter Zijlstra }
1163391e43daSPeter Zijlstra 
1164391e43daSPeter Zijlstra static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1165391e43daSPeter Zijlstra {
1166391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1167391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1168391e43daSPeter Zijlstra 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1169391e43daSPeter Zijlstra 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1170391e43daSPeter Zijlstra 
1171391e43daSPeter Zijlstra 	/*
1172391e43daSPeter Zijlstra 	 * Don't enqueue the group if it's throttled, or when empty.
1173391e43daSPeter Zijlstra 	 * The latter is a consequence of the former when a child group
1174391e43daSPeter Zijlstra 	 * gets throttled and the current group doesn't have any other
1175391e43daSPeter Zijlstra 	 * active members.
1176391e43daSPeter Zijlstra 	 */
1177391e43daSPeter Zijlstra 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1178391e43daSPeter Zijlstra 		return;
1179391e43daSPeter Zijlstra 
1180391e43daSPeter Zijlstra 	if (head)
1181391e43daSPeter Zijlstra 		list_add(&rt_se->run_list, queue);
1182391e43daSPeter Zijlstra 	else
1183391e43daSPeter Zijlstra 		list_add_tail(&rt_se->run_list, queue);
1184391e43daSPeter Zijlstra 	__set_bit(rt_se_prio(rt_se), array->bitmap);
1185391e43daSPeter Zijlstra 
1186391e43daSPeter Zijlstra 	inc_rt_tasks(rt_se, rt_rq);
1187391e43daSPeter Zijlstra }
1188391e43daSPeter Zijlstra 
1189391e43daSPeter Zijlstra static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1190391e43daSPeter Zijlstra {
1191391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1192391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1193391e43daSPeter Zijlstra 
1194391e43daSPeter Zijlstra 	list_del_init(&rt_se->run_list);
1195391e43daSPeter Zijlstra 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1196391e43daSPeter Zijlstra 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1197391e43daSPeter Zijlstra 
1198391e43daSPeter Zijlstra 	dec_rt_tasks(rt_se, rt_rq);
1199391e43daSPeter Zijlstra }
1200391e43daSPeter Zijlstra 
1201391e43daSPeter Zijlstra /*
1202391e43daSPeter Zijlstra  * Because the prio of an upper entry depends on the lower
1203391e43daSPeter Zijlstra  * entries, we must remove entries top-down.
1204391e43daSPeter Zijlstra  */
1205391e43daSPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1206391e43daSPeter Zijlstra {
1207391e43daSPeter Zijlstra 	struct sched_rt_entity *back = NULL;
1208391e43daSPeter Zijlstra 
1209391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1210391e43daSPeter Zijlstra 		rt_se->back = back;
1211391e43daSPeter Zijlstra 		back = rt_se;
1212391e43daSPeter Zijlstra 	}
1213391e43daSPeter Zijlstra 
1214f4ebcbc0SKirill Tkhai 	dequeue_top_rt_rq(rt_rq_of_se(back));
1215f4ebcbc0SKirill Tkhai 
1216391e43daSPeter Zijlstra 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1217391e43daSPeter Zijlstra 		if (on_rt_rq(rt_se))
1218391e43daSPeter Zijlstra 			__dequeue_rt_entity(rt_se);
1219391e43daSPeter Zijlstra 	}
1220391e43daSPeter Zijlstra }
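
/*
 * Worked example (hypothetical hierarchy, not from the original
 * source): for a task inside group B, itself inside group A, the
 * entity chain walked above is task_se -> B_se -> A_se. The first
 * loop records back-pointers (B_se->back == task_se, A_se->back ==
 * B_se) and leaves back == A_se, so the second loop dequeues A_se,
 * then B_se, then task_se -- top-down, as the comment above requires.
 */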
1221391e43daSPeter Zijlstra 
1222391e43daSPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1223391e43daSPeter Zijlstra {
1224f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1225f4ebcbc0SKirill Tkhai 
1226391e43daSPeter Zijlstra 	dequeue_rt_stack(rt_se);
1227391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se)
1228391e43daSPeter Zijlstra 		__enqueue_rt_entity(rt_se, head);
1229f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1230391e43daSPeter Zijlstra }
1231391e43daSPeter Zijlstra 
1232391e43daSPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1233391e43daSPeter Zijlstra {
1234f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1235f4ebcbc0SKirill Tkhai 
1236391e43daSPeter Zijlstra 	dequeue_rt_stack(rt_se);
1237391e43daSPeter Zijlstra 
1238391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1239391e43daSPeter Zijlstra 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1240391e43daSPeter Zijlstra 
1241391e43daSPeter Zijlstra 		if (rt_rq && rt_rq->rt_nr_running)
1242391e43daSPeter Zijlstra 			__enqueue_rt_entity(rt_se, false);
1243391e43daSPeter Zijlstra 	}
1244f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1245391e43daSPeter Zijlstra }
1246391e43daSPeter Zijlstra 
1247391e43daSPeter Zijlstra /*
1248391e43daSPeter Zijlstra  * Adding/removing a task to/from a priority array:
1249391e43daSPeter Zijlstra  */
1250391e43daSPeter Zijlstra static void
1251391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1252391e43daSPeter Zijlstra {
1253391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1254391e43daSPeter Zijlstra 
1255391e43daSPeter Zijlstra 	if (flags & ENQUEUE_WAKEUP)
1256391e43daSPeter Zijlstra 		rt_se->timeout = 0;
1257391e43daSPeter Zijlstra 
1258391e43daSPeter Zijlstra 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1259391e43daSPeter Zijlstra 
126029baa747SPeter Zijlstra 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1261391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1262391e43daSPeter Zijlstra }
1263391e43daSPeter Zijlstra 
1264391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1265391e43daSPeter Zijlstra {
1266391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1267391e43daSPeter Zijlstra 
1268391e43daSPeter Zijlstra 	update_curr_rt(rq);
1269391e43daSPeter Zijlstra 	dequeue_rt_entity(rt_se);
1270391e43daSPeter Zijlstra 
1271391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
1272391e43daSPeter Zijlstra }
1273391e43daSPeter Zijlstra 
1274391e43daSPeter Zijlstra /*
1275391e43daSPeter Zijlstra  * Put task to the head or the end of the run list without the overhead of
1276391e43daSPeter Zijlstra  * dequeue followed by enqueue.
1277391e43daSPeter Zijlstra  */
1278391e43daSPeter Zijlstra static void
1279391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1280391e43daSPeter Zijlstra {
1281391e43daSPeter Zijlstra 	if (on_rt_rq(rt_se)) {
1282391e43daSPeter Zijlstra 		struct rt_prio_array *array = &rt_rq->active;
1283391e43daSPeter Zijlstra 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1284391e43daSPeter Zijlstra 
1285391e43daSPeter Zijlstra 		if (head)
1286391e43daSPeter Zijlstra 			list_move(&rt_se->run_list, queue);
1287391e43daSPeter Zijlstra 		else
1288391e43daSPeter Zijlstra 			list_move_tail(&rt_se->run_list, queue);
1289391e43daSPeter Zijlstra 	}
1290391e43daSPeter Zijlstra }
1291391e43daSPeter Zijlstra 
1292391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1293391e43daSPeter Zijlstra {
1294391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1295391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
1296391e43daSPeter Zijlstra 
1297391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1298391e43daSPeter Zijlstra 		rt_rq = rt_rq_of_se(rt_se);
1299391e43daSPeter Zijlstra 		requeue_rt_entity(rt_rq, rt_se, head);
1300391e43daSPeter Zijlstra 	}
1301391e43daSPeter Zijlstra }
1302391e43daSPeter Zijlstra 
1303391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq)
1304391e43daSPeter Zijlstra {
1305391e43daSPeter Zijlstra 	requeue_task_rt(rq, rq->curr, 0);
1306391e43daSPeter Zijlstra }
1307391e43daSPeter Zijlstra 
1308391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1309391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task);
1310391e43daSPeter Zijlstra 
1311391e43daSPeter Zijlstra static int
1312ac66f547SPeter Zijlstra select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1313391e43daSPeter Zijlstra {
1314391e43daSPeter Zijlstra 	struct task_struct *curr;
1315391e43daSPeter Zijlstra 	struct rq *rq;
1316391e43daSPeter Zijlstra 
1317391e43daSPeter Zijlstra 	/* For anything but wake-ups, just return the task_cpu */
1318391e43daSPeter Zijlstra 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1319391e43daSPeter Zijlstra 		goto out;
1320391e43daSPeter Zijlstra 
1321391e43daSPeter Zijlstra 	rq = cpu_rq(cpu);
1322391e43daSPeter Zijlstra 
1323391e43daSPeter Zijlstra 	rcu_read_lock();
1324391e43daSPeter Zijlstra 	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1325391e43daSPeter Zijlstra 
1326391e43daSPeter Zijlstra 	/*
1327391e43daSPeter Zijlstra 	 * If the current task on @p's runqueue is an RT task, then
1328391e43daSPeter Zijlstra 	 * try to see if we can wake this RT task up on another
1329391e43daSPeter Zijlstra 	 * runqueue. Otherwise simply start this RT task
1330391e43daSPeter Zijlstra 	 * on its current runqueue.
1331391e43daSPeter Zijlstra 	 *
1332391e43daSPeter Zijlstra 	 * We want to avoid overloading runqueues. If the woken
1333391e43daSPeter Zijlstra 	 * task is of higher priority, then it will stay on this CPU
1334391e43daSPeter Zijlstra 	 * and the lower prio task should be moved to another CPU.
1335391e43daSPeter Zijlstra 	 * Even though this will probably make the lower prio task
1336391e43daSPeter Zijlstra 	 * lose its cache, we do not want to bounce a higher prio task
1337391e43daSPeter Zijlstra 	 * around just because it gave up its CPU, perhaps for a
1338391e43daSPeter Zijlstra 	 * lock?
1339391e43daSPeter Zijlstra 	 *
1340391e43daSPeter Zijlstra 	 * For equal prio tasks, we just let the scheduler sort it out.
1341391e43daSPeter Zijlstra 	 *
1342391e43daSPeter Zijlstra 	 * Otherwise, just let it ride on the affined RQ and the
1343391e43daSPeter Zijlstra 	 * post-schedule router will push the preempted task away
1344391e43daSPeter Zijlstra 	 *
1345391e43daSPeter Zijlstra 	 * This test is optimistic, if we get it wrong the load-balancer
1346391e43daSPeter Zijlstra 	 * will have to sort it out.
1347391e43daSPeter Zijlstra 	 */
1348391e43daSPeter Zijlstra 	if (curr && unlikely(rt_task(curr)) &&
134929baa747SPeter Zijlstra 	    (curr->nr_cpus_allowed < 2 ||
13506bfa687cSShawn Bohrer 	     curr->prio <= p->prio)) {
1351391e43daSPeter Zijlstra 		int target = find_lowest_rq(p);
1352391e43daSPeter Zijlstra 
135380e3d87bSTim Chen 		/*
135480e3d87bSTim Chen 		 * Don't bother moving it if the destination CPU is
135580e3d87bSTim Chen 		 * not running a lower priority task.
135680e3d87bSTim Chen 		 */
135780e3d87bSTim Chen 		if (target != -1 &&
135880e3d87bSTim Chen 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1359391e43daSPeter Zijlstra 			cpu = target;
1360391e43daSPeter Zijlstra 	}
1361391e43daSPeter Zijlstra 	rcu_read_unlock();
1362391e43daSPeter Zijlstra 
1363391e43daSPeter Zijlstra out:
1364391e43daSPeter Zijlstra 	return cpu;
1365391e43daSPeter Zijlstra }
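
/*
 * Illustrative example (assumed priorities, not from the original
 * source): a prio-20 RT task wakes on a CPU whose current task is a
 * pinned prio-10 RT task. curr is RT and curr->prio <= p->prio, so
 * find_lowest_rq() is consulted; a CPU running only non-RT work has
 * highest_prio.curr == MAX_RT_PRIO, so 20 < MAX_RT_PRIO holds and the
 * wakeup is redirected there instead of queueing behind curr.
 */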
1366391e43daSPeter Zijlstra 
1367391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1368391e43daSPeter Zijlstra {
1369308a623aSWanpeng Li 	/*
1370308a623aSWanpeng Li 	 * Current can't be migrated, useless to reschedule,
1371308a623aSWanpeng Li 	 * let's hope p can move out.
1372308a623aSWanpeng Li 	 */
1373308a623aSWanpeng Li 	if (rq->curr->nr_cpus_allowed == 1 ||
1374308a623aSWanpeng Li 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1375391e43daSPeter Zijlstra 		return;
1376391e43daSPeter Zijlstra 
1377308a623aSWanpeng Li 	/*
1378308a623aSWanpeng Li 	 * p is migratable, so let's not schedule it and
1379308a623aSWanpeng Li 	 * see if it is pushed or pulled somewhere else.
1380308a623aSWanpeng Li 	 */
138129baa747SPeter Zijlstra 	if (p->nr_cpus_allowed != 1
1382391e43daSPeter Zijlstra 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1383391e43daSPeter Zijlstra 		return;
1384391e43daSPeter Zijlstra 
1385391e43daSPeter Zijlstra 	/*
1386391e43daSPeter Zijlstra 	 * There appear to be other CPUs that can accept
1387391e43daSPeter Zijlstra 	 * current and none to run 'p', so let's reschedule
1388391e43daSPeter Zijlstra 	 * to try and push current away:
1389391e43daSPeter Zijlstra 	 */
1390391e43daSPeter Zijlstra 	requeue_task_rt(rq, p, 1);
13918875125eSKirill Tkhai 	resched_curr(rq);
1392391e43daSPeter Zijlstra }
1393391e43daSPeter Zijlstra 
1394391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1395391e43daSPeter Zijlstra 
1396391e43daSPeter Zijlstra /*
1397391e43daSPeter Zijlstra  * Preempt the current task with a newly woken task if needed:
1398391e43daSPeter Zijlstra  */
1399391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1400391e43daSPeter Zijlstra {
1401391e43daSPeter Zijlstra 	if (p->prio < rq->curr->prio) {
14028875125eSKirill Tkhai 		resched_curr(rq);
1403391e43daSPeter Zijlstra 		return;
1404391e43daSPeter Zijlstra 	}
1405391e43daSPeter Zijlstra 
1406391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1407391e43daSPeter Zijlstra 	/*
1408391e43daSPeter Zijlstra 	 * If:
1409391e43daSPeter Zijlstra 	 *
1410391e43daSPeter Zijlstra 	 * - the newly woken task is of equal priority to the current task
1411391e43daSPeter Zijlstra 	 * - the newly woken task is non-migratable while current is migratable
1412391e43daSPeter Zijlstra 	 * - current will be preempted on the next reschedule
1413391e43daSPeter Zijlstra 	 *
1414391e43daSPeter Zijlstra 	 * we should check to see if current can readily move to a different
1415391e43daSPeter Zijlstra 	 * cpu.  If so, we will reschedule to allow the push logic to try
1416391e43daSPeter Zijlstra 	 * to move current somewhere else, making room for our non-migratable
1417391e43daSPeter Zijlstra 	 * task.
1418391e43daSPeter Zijlstra 	 */
1419391e43daSPeter Zijlstra 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1420391e43daSPeter Zijlstra 		check_preempt_equal_prio(rq, p);
1421391e43daSPeter Zijlstra #endif
1422391e43daSPeter Zijlstra }
1423391e43daSPeter Zijlstra 
1424391e43daSPeter Zijlstra static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1425391e43daSPeter Zijlstra 						   struct rt_rq *rt_rq)
1426391e43daSPeter Zijlstra {
1427391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1428391e43daSPeter Zijlstra 	struct sched_rt_entity *next = NULL;
1429391e43daSPeter Zijlstra 	struct list_head *queue;
1430391e43daSPeter Zijlstra 	int idx;
1431391e43daSPeter Zijlstra 
1432391e43daSPeter Zijlstra 	idx = sched_find_first_bit(array->bitmap);
1433391e43daSPeter Zijlstra 	BUG_ON(idx >= MAX_RT_PRIO);
1434391e43daSPeter Zijlstra 
1435391e43daSPeter Zijlstra 	queue = array->queue + idx;
1436391e43daSPeter Zijlstra 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1437391e43daSPeter Zijlstra 
1438391e43daSPeter Zijlstra 	return next;
1439391e43daSPeter Zijlstra }
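
/*
 * Illustrative note (not from the original source): the pick is O(1).
 * sched_find_first_bit() returns the lowest set bit index, which is
 * the highest queued RT priority; e.g. with bits 5 and 63 set, idx is
 * 5 and the head of that per-priority FIFO list is chosen.
 */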
1440391e43daSPeter Zijlstra 
1441391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq)
1442391e43daSPeter Zijlstra {
1443391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
1444391e43daSPeter Zijlstra 	struct task_struct *p;
1445606dba2eSPeter Zijlstra 	struct rt_rq *rt_rq  = &rq->rt;
1446391e43daSPeter Zijlstra 
1447391e43daSPeter Zijlstra 	do {
1448391e43daSPeter Zijlstra 		rt_se = pick_next_rt_entity(rq, rt_rq);
1449391e43daSPeter Zijlstra 		BUG_ON(!rt_se);
1450391e43daSPeter Zijlstra 		rt_rq = group_rt_rq(rt_se);
1451391e43daSPeter Zijlstra 	} while (rt_rq);
1452391e43daSPeter Zijlstra 
1453391e43daSPeter Zijlstra 	p = rt_task_of(rt_se);
145478becc27SFrederic Weisbecker 	p->se.exec_start = rq_clock_task(rq);
1455391e43daSPeter Zijlstra 
1456391e43daSPeter Zijlstra 	return p;
1457391e43daSPeter Zijlstra }
1458391e43daSPeter Zijlstra 
1459606dba2eSPeter Zijlstra static struct task_struct *
1460606dba2eSPeter Zijlstra pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1461391e43daSPeter Zijlstra {
1462606dba2eSPeter Zijlstra 	struct task_struct *p;
1463606dba2eSPeter Zijlstra 	struct rt_rq *rt_rq = &rq->rt;
1464606dba2eSPeter Zijlstra 
146537e117c0SPeter Zijlstra 	if (need_pull_rt_task(rq, prev)) {
146638033c37SPeter Zijlstra 		pull_rt_task(rq);
146737e117c0SPeter Zijlstra 		/*
146837e117c0SPeter Zijlstra 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1469a1d9a323SKirill Tkhai 		 * means a dl or stop task can slip in, in which case we need
1470a1d9a323SKirill Tkhai 		 * to re-start task selection.
147137e117c0SPeter Zijlstra 		 */
1472da0c1e65SKirill Tkhai 		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1473a1d9a323SKirill Tkhai 			     rq->dl.dl_nr_running))
147437e117c0SPeter Zijlstra 			return RETRY_TASK;
147537e117c0SPeter Zijlstra 	}
147638033c37SPeter Zijlstra 
1477734ff2a7SKirill Tkhai 	/*
1478734ff2a7SKirill Tkhai 	 * We may dequeue prev's rt_rq in put_prev_task().
1479734ff2a7SKirill Tkhai 	 * So, we update the time before the rt_nr_running check.
1480734ff2a7SKirill Tkhai 	 */
1481734ff2a7SKirill Tkhai 	if (prev->sched_class == &rt_sched_class)
1482734ff2a7SKirill Tkhai 		update_curr_rt(rq);
1483734ff2a7SKirill Tkhai 
1484f4ebcbc0SKirill Tkhai 	if (!rt_rq->rt_queued)
1485606dba2eSPeter Zijlstra 		return NULL;
1486606dba2eSPeter Zijlstra 
14873f1d2a31SPeter Zijlstra 	put_prev_task(rq, prev);
1488606dba2eSPeter Zijlstra 
1489606dba2eSPeter Zijlstra 	p = _pick_next_task_rt(rq);
1490391e43daSPeter Zijlstra 
1491391e43daSPeter Zijlstra 	/* The running task is never eligible for pushing */
1492391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
1493391e43daSPeter Zijlstra 
1494dc877341SPeter Zijlstra 	set_post_schedule(rq);
1495391e43daSPeter Zijlstra 
1496391e43daSPeter Zijlstra 	return p;
1497391e43daSPeter Zijlstra }
1498391e43daSPeter Zijlstra 
1499391e43daSPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1500391e43daSPeter Zijlstra {
1501391e43daSPeter Zijlstra 	update_curr_rt(rq);
1502391e43daSPeter Zijlstra 
1503391e43daSPeter Zijlstra 	/*
1504391e43daSPeter Zijlstra 	 * The previous task needs to be made eligible for pushing
1505391e43daSPeter Zijlstra 	 * if it is still active
1506391e43daSPeter Zijlstra 	 */
150729baa747SPeter Zijlstra 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1508391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1509391e43daSPeter Zijlstra }
1510391e43daSPeter Zijlstra 
1511391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1512391e43daSPeter Zijlstra 
1513391e43daSPeter Zijlstra /* Only try algorithms three times */
1514391e43daSPeter Zijlstra #define RT_MAX_TRIES 3
1515391e43daSPeter Zijlstra 
1516391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1517391e43daSPeter Zijlstra {
1518391e43daSPeter Zijlstra 	if (!task_running(rq, p) &&
151960334cafSKirill Tkhai 	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1520391e43daSPeter Zijlstra 		return 1;
1521391e43daSPeter Zijlstra 	return 0;
1522391e43daSPeter Zijlstra }
1523391e43daSPeter Zijlstra 
1524e23ee747SKirill Tkhai /*
1525e23ee747SKirill Tkhai  * Return the highest-priority task on the rq's pushable list that can
1526e23ee747SKirill Tkhai  * be executed on the given cpu, or NULL if there is none
1527e23ee747SKirill Tkhai  */
1528e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1529391e43daSPeter Zijlstra {
1530e23ee747SKirill Tkhai 	struct plist_head *head = &rq->rt.pushable_tasks;
1531391e43daSPeter Zijlstra 	struct task_struct *p;
1532391e43daSPeter Zijlstra 
1533e23ee747SKirill Tkhai 	if (!has_pushable_tasks(rq))
1534e23ee747SKirill Tkhai 		return NULL;
1535391e43daSPeter Zijlstra 
1536e23ee747SKirill Tkhai 	plist_for_each_entry(p, head, pushable_tasks) {
1537e23ee747SKirill Tkhai 		if (pick_rt_task(rq, p, cpu))
1538e23ee747SKirill Tkhai 			return p;
1539391e43daSPeter Zijlstra 	}
1540391e43daSPeter Zijlstra 
1541e23ee747SKirill Tkhai 	return NULL;
1542391e43daSPeter Zijlstra }
1543391e43daSPeter Zijlstra 
1544391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1545391e43daSPeter Zijlstra 
1546391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task)
1547391e43daSPeter Zijlstra {
1548391e43daSPeter Zijlstra 	struct sched_domain *sd;
15494ba29684SChristoph Lameter 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1550391e43daSPeter Zijlstra 	int this_cpu = smp_processor_id();
1551391e43daSPeter Zijlstra 	int cpu      = task_cpu(task);
1552391e43daSPeter Zijlstra 
1553391e43daSPeter Zijlstra 	/* Make sure the mask is initialized first */
1554391e43daSPeter Zijlstra 	if (unlikely(!lowest_mask))
1555391e43daSPeter Zijlstra 		return -1;
1556391e43daSPeter Zijlstra 
155729baa747SPeter Zijlstra 	if (task->nr_cpus_allowed == 1)
1558391e43daSPeter Zijlstra 		return -1; /* No other targets possible */
1559391e43daSPeter Zijlstra 
1560391e43daSPeter Zijlstra 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1561391e43daSPeter Zijlstra 		return -1; /* No targets found */
1562391e43daSPeter Zijlstra 
1563391e43daSPeter Zijlstra 	/*
1564391e43daSPeter Zijlstra 	 * At this point we have built a mask of cpus representing the
1565391e43daSPeter Zijlstra 	 * lowest priority tasks in the system.  Now we want to elect
1566391e43daSPeter Zijlstra 	 * the best one based on our affinity and topology.
1567391e43daSPeter Zijlstra 	 *
1568391e43daSPeter Zijlstra 	 * We prioritize the last cpu that the task executed on since
1569391e43daSPeter Zijlstra 	 * it is most likely cache-hot in that location.
1570391e43daSPeter Zijlstra 	 */
1571391e43daSPeter Zijlstra 	if (cpumask_test_cpu(cpu, lowest_mask))
1572391e43daSPeter Zijlstra 		return cpu;
1573391e43daSPeter Zijlstra 
1574391e43daSPeter Zijlstra 	/*
1575391e43daSPeter Zijlstra 	 * Otherwise, we consult the sched_domains span maps to figure
1576391e43daSPeter Zijlstra 	 * out which cpu is logically closest to our hot cache data.
1577391e43daSPeter Zijlstra 	 */
1578391e43daSPeter Zijlstra 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1579391e43daSPeter Zijlstra 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1580391e43daSPeter Zijlstra 
1581391e43daSPeter Zijlstra 	rcu_read_lock();
1582391e43daSPeter Zijlstra 	for_each_domain(cpu, sd) {
1583391e43daSPeter Zijlstra 		if (sd->flags & SD_WAKE_AFFINE) {
1584391e43daSPeter Zijlstra 			int best_cpu;
1585391e43daSPeter Zijlstra 
1586391e43daSPeter Zijlstra 			/*
1587391e43daSPeter Zijlstra 			 * "this_cpu" is cheaper to preempt than a
1588391e43daSPeter Zijlstra 			 * remote processor.
1589391e43daSPeter Zijlstra 			 */
1590391e43daSPeter Zijlstra 			if (this_cpu != -1 &&
1591391e43daSPeter Zijlstra 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1592391e43daSPeter Zijlstra 				rcu_read_unlock();
1593391e43daSPeter Zijlstra 				return this_cpu;
1594391e43daSPeter Zijlstra 			}
1595391e43daSPeter Zijlstra 
1596391e43daSPeter Zijlstra 			best_cpu = cpumask_first_and(lowest_mask,
1597391e43daSPeter Zijlstra 						     sched_domain_span(sd));
1598391e43daSPeter Zijlstra 			if (best_cpu < nr_cpu_ids) {
1599391e43daSPeter Zijlstra 				rcu_read_unlock();
1600391e43daSPeter Zijlstra 				return best_cpu;
1601391e43daSPeter Zijlstra 			}
1602391e43daSPeter Zijlstra 		}
1603391e43daSPeter Zijlstra 	}
1604391e43daSPeter Zijlstra 	rcu_read_unlock();
1605391e43daSPeter Zijlstra 
1606391e43daSPeter Zijlstra 	/*
1607391e43daSPeter Zijlstra 	 * And finally, if there were no matches within the domains
1608391e43daSPeter Zijlstra 	 * just give the caller *something* to work with from the compatible
1609391e43daSPeter Zijlstra 	 * locations.
1610391e43daSPeter Zijlstra 	 */
1611391e43daSPeter Zijlstra 	if (this_cpu != -1)
1612391e43daSPeter Zijlstra 		return this_cpu;
1613391e43daSPeter Zijlstra 
1614391e43daSPeter Zijlstra 	cpu = cpumask_any(lowest_mask);
1615391e43daSPeter Zijlstra 	if (cpu < nr_cpu_ids)
1616391e43daSPeter Zijlstra 		return cpu;
1617391e43daSPeter Zijlstra 	return -1;
1618391e43daSPeter Zijlstra }
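
/*
 * Illustrative walk-through (assumed CPUs, not from the original
 * source): lowest_mask = {1, 4}, the task last ran on CPU2 and the
 * caller runs on CPU4. CPU2 is not in the mask, so the cache-hot
 * shortcut fails; CPU4 is, so the first of CPU2's SD_WAKE_AFFINE
 * domains spanning CPU4 yields 4. Failing that, the first mask CPU
 * inside such a domain (here CPU1) is taken, then this_cpu, and
 * finally cpumask_any(lowest_mask) as the last resort.
 */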
1619391e43daSPeter Zijlstra 
1620391e43daSPeter Zijlstra /* Will lock the rq it finds */
1621391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1622391e43daSPeter Zijlstra {
1623391e43daSPeter Zijlstra 	struct rq *lowest_rq = NULL;
1624391e43daSPeter Zijlstra 	int tries;
1625391e43daSPeter Zijlstra 	int cpu;
1626391e43daSPeter Zijlstra 
1627391e43daSPeter Zijlstra 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1628391e43daSPeter Zijlstra 		cpu = find_lowest_rq(task);
1629391e43daSPeter Zijlstra 
1630391e43daSPeter Zijlstra 		if ((cpu == -1) || (cpu == rq->cpu))
1631391e43daSPeter Zijlstra 			break;
1632391e43daSPeter Zijlstra 
1633391e43daSPeter Zijlstra 		lowest_rq = cpu_rq(cpu);
1634391e43daSPeter Zijlstra 
163580e3d87bSTim Chen 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
163680e3d87bSTim Chen 			/*
163780e3d87bSTim Chen 			 * Target rq has tasks of equal or higher priority,
163880e3d87bSTim Chen 			 * retrying does not release any lock and is unlikely
163980e3d87bSTim Chen 			 * to yield a different result.
164080e3d87bSTim Chen 			 */
164180e3d87bSTim Chen 			lowest_rq = NULL;
164280e3d87bSTim Chen 			break;
164380e3d87bSTim Chen 		}
164480e3d87bSTim Chen 
1645391e43daSPeter Zijlstra 		/* if the prio of this runqueue changed, try again */
1646391e43daSPeter Zijlstra 		if (double_lock_balance(rq, lowest_rq)) {
1647391e43daSPeter Zijlstra 			/*
1648391e43daSPeter Zijlstra 			 * We had to unlock the run queue. In
1649391e43daSPeter Zijlstra 			 * the meantime, the task could have
1650391e43daSPeter Zijlstra 			 * migrated already or had its affinity changed.
1651391e43daSPeter Zijlstra 			 * Also make sure that it wasn't scheduled on its rq.
1652391e43daSPeter Zijlstra 			 */
1653391e43daSPeter Zijlstra 			if (unlikely(task_rq(task) != rq ||
1654391e43daSPeter Zijlstra 				     !cpumask_test_cpu(lowest_rq->cpu,
1655391e43daSPeter Zijlstra 						       tsk_cpus_allowed(task)) ||
1656391e43daSPeter Zijlstra 				     task_running(rq, task) ||
1657da0c1e65SKirill Tkhai 				     !task_on_rq_queued(task))) {
1658391e43daSPeter Zijlstra 
16597f1b4393SPeter Zijlstra 				double_unlock_balance(rq, lowest_rq);
1660391e43daSPeter Zijlstra 				lowest_rq = NULL;
1661391e43daSPeter Zijlstra 				break;
1662391e43daSPeter Zijlstra 			}
1663391e43daSPeter Zijlstra 		}
1664391e43daSPeter Zijlstra 
1665391e43daSPeter Zijlstra 		/* If this rq is still suitable use it. */
1666391e43daSPeter Zijlstra 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1667391e43daSPeter Zijlstra 			break;
1668391e43daSPeter Zijlstra 
1669391e43daSPeter Zijlstra 		/* try again */
1670391e43daSPeter Zijlstra 		double_unlock_balance(rq, lowest_rq);
1671391e43daSPeter Zijlstra 		lowest_rq = NULL;
1672391e43daSPeter Zijlstra 	}
1673391e43daSPeter Zijlstra 
1674391e43daSPeter Zijlstra 	return lowest_rq;
1675391e43daSPeter Zijlstra }
1676391e43daSPeter Zijlstra 
1677391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq)
1678391e43daSPeter Zijlstra {
1679391e43daSPeter Zijlstra 	struct task_struct *p;
1680391e43daSPeter Zijlstra 
1681391e43daSPeter Zijlstra 	if (!has_pushable_tasks(rq))
1682391e43daSPeter Zijlstra 		return NULL;
1683391e43daSPeter Zijlstra 
1684391e43daSPeter Zijlstra 	p = plist_first_entry(&rq->rt.pushable_tasks,
1685391e43daSPeter Zijlstra 			      struct task_struct, pushable_tasks);
1686391e43daSPeter Zijlstra 
1687391e43daSPeter Zijlstra 	BUG_ON(rq->cpu != task_cpu(p));
1688391e43daSPeter Zijlstra 	BUG_ON(task_current(rq, p));
168929baa747SPeter Zijlstra 	BUG_ON(p->nr_cpus_allowed <= 1);
1690391e43daSPeter Zijlstra 
1691da0c1e65SKirill Tkhai 	BUG_ON(!task_on_rq_queued(p));
1692391e43daSPeter Zijlstra 	BUG_ON(!rt_task(p));
1693391e43daSPeter Zijlstra 
1694391e43daSPeter Zijlstra 	return p;
1695391e43daSPeter Zijlstra }
1696391e43daSPeter Zijlstra 
1697391e43daSPeter Zijlstra /*
1698391e43daSPeter Zijlstra  * If the current CPU has more than one RT task, see if the non-running
1699391e43daSPeter Zijlstra  * task can migrate over to a CPU that is running a task
1700391e43daSPeter Zijlstra  * of lesser priority.
1701391e43daSPeter Zijlstra  */
1702391e43daSPeter Zijlstra static int push_rt_task(struct rq *rq)
1703391e43daSPeter Zijlstra {
1704391e43daSPeter Zijlstra 	struct task_struct *next_task;
1705391e43daSPeter Zijlstra 	struct rq *lowest_rq;
1706391e43daSPeter Zijlstra 	int ret = 0;
1707391e43daSPeter Zijlstra 
1708391e43daSPeter Zijlstra 	if (!rq->rt.overloaded)
1709391e43daSPeter Zijlstra 		return 0;
1710391e43daSPeter Zijlstra 
1711391e43daSPeter Zijlstra 	next_task = pick_next_pushable_task(rq);
1712391e43daSPeter Zijlstra 	if (!next_task)
1713391e43daSPeter Zijlstra 		return 0;
1714391e43daSPeter Zijlstra 
1715391e43daSPeter Zijlstra retry:
1716391e43daSPeter Zijlstra 	if (unlikely(next_task == rq->curr)) {
1717391e43daSPeter Zijlstra 		WARN_ON(1);
1718391e43daSPeter Zijlstra 		return 0;
1719391e43daSPeter Zijlstra 	}
1720391e43daSPeter Zijlstra 
1721391e43daSPeter Zijlstra 	/*
1722391e43daSPeter Zijlstra 	 * It's possible that next_task slipped in with a
1723391e43daSPeter Zijlstra 	 * higher priority than current. If that's the case
1724391e43daSPeter Zijlstra 	 * just reschedule current.
1725391e43daSPeter Zijlstra 	 */
1726391e43daSPeter Zijlstra 	if (unlikely(next_task->prio < rq->curr->prio)) {
17278875125eSKirill Tkhai 		resched_curr(rq);
1728391e43daSPeter Zijlstra 		return 0;
1729391e43daSPeter Zijlstra 	}
1730391e43daSPeter Zijlstra 
1731391e43daSPeter Zijlstra 	/* We might release rq lock */
1732391e43daSPeter Zijlstra 	get_task_struct(next_task);
1733391e43daSPeter Zijlstra 
1734391e43daSPeter Zijlstra 	/* find_lock_lowest_rq locks the rq if found */
1735391e43daSPeter Zijlstra 	lowest_rq = find_lock_lowest_rq(next_task, rq);
1736391e43daSPeter Zijlstra 	if (!lowest_rq) {
1737391e43daSPeter Zijlstra 		struct task_struct *task;
1738391e43daSPeter Zijlstra 		/*
1739391e43daSPeter Zijlstra 		 * find_lock_lowest_rq releases rq->lock
1740391e43daSPeter Zijlstra 		 * so it is possible that next_task has migrated.
1741391e43daSPeter Zijlstra 		 *
1742391e43daSPeter Zijlstra 		 * We need to make sure that the task is still on the same
1743391e43daSPeter Zijlstra 		 * run-queue and is also still the next task eligible for
1744391e43daSPeter Zijlstra 		 * pushing.
1745391e43daSPeter Zijlstra 		 */
1746391e43daSPeter Zijlstra 		task = pick_next_pushable_task(rq);
1747391e43daSPeter Zijlstra 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1748391e43daSPeter Zijlstra 			/*
1749391e43daSPeter Zijlstra 			 * The task hasn't migrated, and is still the next
1750391e43daSPeter Zijlstra 			 * eligible task, but we failed to find a run-queue
1751391e43daSPeter Zijlstra 			 * to push it to.  Do not retry in this case, since
1752391e43daSPeter Zijlstra 			 * other cpus will pull from us when ready.
1753391e43daSPeter Zijlstra 			 */
1754391e43daSPeter Zijlstra 			goto out;
1755391e43daSPeter Zijlstra 		}
1756391e43daSPeter Zijlstra 
1757391e43daSPeter Zijlstra 		if (!task)
1758391e43daSPeter Zijlstra 			/* No more tasks, just exit */
1759391e43daSPeter Zijlstra 			goto out;
1760391e43daSPeter Zijlstra 
1761391e43daSPeter Zijlstra 		/*
1762391e43daSPeter Zijlstra 		 * Something has shifted, try again.
1763391e43daSPeter Zijlstra 		 */
1764391e43daSPeter Zijlstra 		put_task_struct(next_task);
1765391e43daSPeter Zijlstra 		next_task = task;
1766391e43daSPeter Zijlstra 		goto retry;
1767391e43daSPeter Zijlstra 	}
1768391e43daSPeter Zijlstra 
1769391e43daSPeter Zijlstra 	deactivate_task(rq, next_task, 0);
1770391e43daSPeter Zijlstra 	set_task_cpu(next_task, lowest_rq->cpu);
1771391e43daSPeter Zijlstra 	activate_task(lowest_rq, next_task, 0);
1772391e43daSPeter Zijlstra 	ret = 1;
1773391e43daSPeter Zijlstra 
17748875125eSKirill Tkhai 	resched_curr(lowest_rq);
1775391e43daSPeter Zijlstra 
1776391e43daSPeter Zijlstra 	double_unlock_balance(rq, lowest_rq);
1777391e43daSPeter Zijlstra 
1778391e43daSPeter Zijlstra out:
1779391e43daSPeter Zijlstra 	put_task_struct(next_task);
1780391e43daSPeter Zijlstra 
1781391e43daSPeter Zijlstra 	return ret;
1782391e43daSPeter Zijlstra }
1783391e43daSPeter Zijlstra 
1784391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq)
1785391e43daSPeter Zijlstra {
1786391e43daSPeter Zijlstra 	/* push_rt_task will return true if it moved an RT */
1787391e43daSPeter Zijlstra 	while (push_rt_task(rq))
1788391e43daSPeter Zijlstra 		;
1789391e43daSPeter Zijlstra }
1790391e43daSPeter Zijlstra 
1791b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
1792b6366f04SSteven Rostedt /*
1793b6366f04SSteven Rostedt  * The search for the next cpu always starts at rq->cpu and ends
1794b6366f04SSteven Rostedt  * when we reach rq->cpu again. It will never return rq->cpu.
1795b6366f04SSteven Rostedt  * This returns the next cpu to check, or nr_cpu_ids if the loop
1796b6366f04SSteven Rostedt  * is complete.
1797b6366f04SSteven Rostedt  *
1798b6366f04SSteven Rostedt  * rq->rt.push_cpu holds the last cpu returned by this function,
1799b6366f04SSteven Rostedt  * or if this is the first instance, it must hold rq->cpu.
1800b6366f04SSteven Rostedt  */
1801b6366f04SSteven Rostedt static int rto_next_cpu(struct rq *rq)
1802b6366f04SSteven Rostedt {
1803b6366f04SSteven Rostedt 	int prev_cpu = rq->rt.push_cpu;
1804b6366f04SSteven Rostedt 	int cpu;
1805b6366f04SSteven Rostedt 
1806b6366f04SSteven Rostedt 	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1807b6366f04SSteven Rostedt 
1808b6366f04SSteven Rostedt 	/*
1809b6366f04SSteven Rostedt 	 * If the previous cpu is less than the rq's CPU, then it already
1810b6366f04SSteven Rostedt 	 * passed the end of the mask, and has started from the beginning.
1811b6366f04SSteven Rostedt 	 * We end if the next CPU is greater than or equal to rq's CPU.
1812b6366f04SSteven Rostedt 	 */
1813b6366f04SSteven Rostedt 	if (prev_cpu < rq->cpu) {
1814b6366f04SSteven Rostedt 		if (cpu >= rq->cpu)
1815b6366f04SSteven Rostedt 			return nr_cpu_ids;
1816b6366f04SSteven Rostedt 
1817b6366f04SSteven Rostedt 	} else if (cpu >= nr_cpu_ids) {
1818b6366f04SSteven Rostedt 		/*
1819b6366f04SSteven Rostedt 		 * We passed the end of the mask, start at the beginning.
1820b6366f04SSteven Rostedt 		 * If the result is greater than or equal to the rq's CPU, then
1821b6366f04SSteven Rostedt 		 * the loop is finished.
1822b6366f04SSteven Rostedt 		 */
1823b6366f04SSteven Rostedt 		cpu = cpumask_first(rq->rd->rto_mask);
1824b6366f04SSteven Rostedt 		if (cpu >= rq->cpu)
1825b6366f04SSteven Rostedt 			return nr_cpu_ids;
1826b6366f04SSteven Rostedt 	}
1827b6366f04SSteven Rostedt 	rq->rt.push_cpu = cpu;
1828b6366f04SSteven Rostedt 
1829b6366f04SSteven Rostedt 	/* Return cpu to let the caller know if the loop is finished or not */
1830b6366f04SSteven Rostedt 	return cpu;
1831b6366f04SSteven Rostedt }
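
/*
 * Worked example (assumed topology, not from the original source):
 * for rq->cpu == 2 and rto_mask == {0, 1, 3, 5}, successive calls
 * return 3, 5 (scanning forward from 2), wrap around to 0, then 1,
 * and finally nr_cpu_ids once the next candidate (3) would be >=
 * rq->cpu again -- every other rto CPU is visited exactly once.
 */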
1832b6366f04SSteven Rostedt 
1833b6366f04SSteven Rostedt static int find_next_push_cpu(struct rq *rq)
1834b6366f04SSteven Rostedt {
1835b6366f04SSteven Rostedt 	struct rq *next_rq;
1836b6366f04SSteven Rostedt 	int cpu;
1837b6366f04SSteven Rostedt 
1838b6366f04SSteven Rostedt 	while (1) {
1839b6366f04SSteven Rostedt 		cpu = rto_next_cpu(rq);
1840b6366f04SSteven Rostedt 		if (cpu >= nr_cpu_ids)
1841b6366f04SSteven Rostedt 			break;
1842b6366f04SSteven Rostedt 		next_rq = cpu_rq(cpu);
1843b6366f04SSteven Rostedt 
1844b6366f04SSteven Rostedt 		/* Make sure the next rq can push to this rq */
1845b6366f04SSteven Rostedt 		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1846b6366f04SSteven Rostedt 			break;
1847b6366f04SSteven Rostedt 	}
1848b6366f04SSteven Rostedt 
1849b6366f04SSteven Rostedt 	return cpu;
1850b6366f04SSteven Rostedt }
1851b6366f04SSteven Rostedt 
1852b6366f04SSteven Rostedt #define RT_PUSH_IPI_EXECUTING		1
1853b6366f04SSteven Rostedt #define RT_PUSH_IPI_RESTART		2
1854b6366f04SSteven Rostedt 
1855b6366f04SSteven Rostedt static void tell_cpu_to_push(struct rq *rq)
1856b6366f04SSteven Rostedt {
1857b6366f04SSteven Rostedt 	int cpu;
1858b6366f04SSteven Rostedt 
1859b6366f04SSteven Rostedt 	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1860b6366f04SSteven Rostedt 		raw_spin_lock(&rq->rt.push_lock);
1861b6366f04SSteven Rostedt 		/* Make sure it's still executing */
1862b6366f04SSteven Rostedt 		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1863b6366f04SSteven Rostedt 			/*
1864b6366f04SSteven Rostedt 			 * Tell the IPI to restart the loop as things have
1865b6366f04SSteven Rostedt 			 * changed since it started.
1866b6366f04SSteven Rostedt 			 */
1867b6366f04SSteven Rostedt 			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1868b6366f04SSteven Rostedt 			raw_spin_unlock(&rq->rt.push_lock);
1869b6366f04SSteven Rostedt 			return;
1870b6366f04SSteven Rostedt 		}
1871b6366f04SSteven Rostedt 		raw_spin_unlock(&rq->rt.push_lock);
1872b6366f04SSteven Rostedt 	}
1873b6366f04SSteven Rostedt 
1874b6366f04SSteven Rostedt 	/* When here, there's no IPI going around */
1875b6366f04SSteven Rostedt 
1876b6366f04SSteven Rostedt 	rq->rt.push_cpu = rq->cpu;
1877b6366f04SSteven Rostedt 	cpu = find_next_push_cpu(rq);
1878b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
1879b6366f04SSteven Rostedt 		return;
1880b6366f04SSteven Rostedt 
1881b6366f04SSteven Rostedt 	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1882b6366f04SSteven Rostedt 
1883b6366f04SSteven Rostedt 	irq_work_queue_on(&rq->rt.push_work, cpu);
1884b6366f04SSteven Rostedt }
1885b6366f04SSteven Rostedt 
1886b6366f04SSteven Rostedt /* Called from hardirq context */
1887b6366f04SSteven Rostedt static void try_to_push_tasks(void *arg)
1888b6366f04SSteven Rostedt {
1889b6366f04SSteven Rostedt 	struct rt_rq *rt_rq = arg;
1890b6366f04SSteven Rostedt 	struct rq *rq, *src_rq;
1891b6366f04SSteven Rostedt 	int this_cpu;
1892b6366f04SSteven Rostedt 	int cpu;
1893b6366f04SSteven Rostedt 
1894b6366f04SSteven Rostedt 	this_cpu = rt_rq->push_cpu;
1895b6366f04SSteven Rostedt 
1896b6366f04SSteven Rostedt 	/* Paranoid check */
1897b6366f04SSteven Rostedt 	BUG_ON(this_cpu != smp_processor_id());
1898b6366f04SSteven Rostedt 
1899b6366f04SSteven Rostedt 	rq = cpu_rq(this_cpu);
1900b6366f04SSteven Rostedt 	src_rq = rq_of_rt_rq(rt_rq);
1901b6366f04SSteven Rostedt 
1902b6366f04SSteven Rostedt again:
1903b6366f04SSteven Rostedt 	if (has_pushable_tasks(rq)) {
1904b6366f04SSteven Rostedt 		raw_spin_lock(&rq->lock);
1905b6366f04SSteven Rostedt 		push_rt_task(rq);
1906b6366f04SSteven Rostedt 		raw_spin_unlock(&rq->lock);
1907b6366f04SSteven Rostedt 	}
1908b6366f04SSteven Rostedt 
1909b6366f04SSteven Rostedt 	/* Pass the IPI to the next rt overloaded queue */
1910b6366f04SSteven Rostedt 	raw_spin_lock(&rt_rq->push_lock);
1911b6366f04SSteven Rostedt 	/*
1912b6366f04SSteven Rostedt 	 * If the source queue changed since the IPI went out,
1913b6366f04SSteven Rostedt 	 * we need to restart the search from that CPU again.
1914b6366f04SSteven Rostedt 	 */
1915b6366f04SSteven Rostedt 	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1916b6366f04SSteven Rostedt 		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1917b6366f04SSteven Rostedt 		rt_rq->push_cpu = src_rq->cpu;
1918b6366f04SSteven Rostedt 	}
1919b6366f04SSteven Rostedt 
1920b6366f04SSteven Rostedt 	cpu = find_next_push_cpu(src_rq);
1921b6366f04SSteven Rostedt 
1922b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
1923b6366f04SSteven Rostedt 		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
1924b6366f04SSteven Rostedt 	raw_spin_unlock(&rt_rq->push_lock);
1925b6366f04SSteven Rostedt 
1926b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
1927b6366f04SSteven Rostedt 		return;
1928b6366f04SSteven Rostedt 
1929b6366f04SSteven Rostedt 	/*
1930b6366f04SSteven Rostedt 	 * It is possible that a restart caused this CPU to be
1931b6366f04SSteven Rostedt 	 * chosen again. Don't bother with an IPI, just see if we
1932b6366f04SSteven Rostedt 	 * have more to push.
1933b6366f04SSteven Rostedt 	 */
1934b6366f04SSteven Rostedt 	if (unlikely(cpu == rq->cpu))
1935b6366f04SSteven Rostedt 		goto again;
1936b6366f04SSteven Rostedt 
1937b6366f04SSteven Rostedt 	/* Try the next RT overloaded CPU */
1938b6366f04SSteven Rostedt 	irq_work_queue_on(&rt_rq->push_work, cpu);
1939b6366f04SSteven Rostedt }
1940b6366f04SSteven Rostedt 
1941b6366f04SSteven Rostedt static void push_irq_work_func(struct irq_work *work)
1942b6366f04SSteven Rostedt {
1943b6366f04SSteven Rostedt 	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
1944b6366f04SSteven Rostedt 
1945b6366f04SSteven Rostedt 	try_to_push_tasks(rt_rq);
1946b6366f04SSteven Rostedt }
1947b6366f04SSteven Rostedt #endif /* HAVE_RT_PUSH_IPI */
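
/*
 * Summary of the IPI protocol above (descriptive only, not from the
 * original source): tell_cpu_to_push() sets RT_PUSH_IPI_EXECUTING and
 * queues push_work on the first candidate CPU; each handler pushes
 * what it can and forwards the irq_work to the CPU returned by
 * find_next_push_cpu(). If the source queue changes meanwhile,
 * RT_PUSH_IPI_RESTART makes the in-flight handler restart the scan
 * from the source CPU rather than racing a second IPI chain.
 */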
1948b6366f04SSteven Rostedt 
1949391e43daSPeter Zijlstra static int pull_rt_task(struct rq *this_rq)
1950391e43daSPeter Zijlstra {
1951391e43daSPeter Zijlstra 	int this_cpu = this_rq->cpu, ret = 0, cpu;
1952391e43daSPeter Zijlstra 	struct task_struct *p;
1953391e43daSPeter Zijlstra 	struct rq *src_rq;
1954391e43daSPeter Zijlstra 
1955391e43daSPeter Zijlstra 	if (likely(!rt_overloaded(this_rq)))
1956391e43daSPeter Zijlstra 		return 0;
1957391e43daSPeter Zijlstra 
19587c3f2ab7SPeter Zijlstra 	/*
19597c3f2ab7SPeter Zijlstra 	 * Match the barrier from rt_set_overloaded; this guarantees that if we
19607c3f2ab7SPeter Zijlstra 	 * see overloaded we must also see the rto_mask bit.
19617c3f2ab7SPeter Zijlstra 	 */
19627c3f2ab7SPeter Zijlstra 	smp_rmb();
19637c3f2ab7SPeter Zijlstra 
1964b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
1965b6366f04SSteven Rostedt 	if (sched_feat(RT_PUSH_IPI)) {
1966b6366f04SSteven Rostedt 		tell_cpu_to_push(this_rq);
1967b6366f04SSteven Rostedt 		return 0;
1968b6366f04SSteven Rostedt 	}
1969b6366f04SSteven Rostedt #endif
1970b6366f04SSteven Rostedt 
1971391e43daSPeter Zijlstra 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
1972391e43daSPeter Zijlstra 		if (this_cpu == cpu)
1973391e43daSPeter Zijlstra 			continue;
1974391e43daSPeter Zijlstra 
1975391e43daSPeter Zijlstra 		src_rq = cpu_rq(cpu);
1976391e43daSPeter Zijlstra 
1977391e43daSPeter Zijlstra 		/*
1978391e43daSPeter Zijlstra 		 * Don't bother taking the src_rq->lock if the next highest
1979391e43daSPeter Zijlstra 		 * task is known to be lower-priority than our current task.
1980391e43daSPeter Zijlstra 		 * This may look racy, but if this value is about to go
1981391e43daSPeter Zijlstra 		 * logically higher, the src_rq will push this task away.
1982391e43daSPeter Zijlstra 		 * And if it's going logically lower, we do not care.
1983391e43daSPeter Zijlstra 		 */
1984391e43daSPeter Zijlstra 		if (src_rq->rt.highest_prio.next >=
1985391e43daSPeter Zijlstra 		    this_rq->rt.highest_prio.curr)
1986391e43daSPeter Zijlstra 			continue;
1987391e43daSPeter Zijlstra 
1988391e43daSPeter Zijlstra 		/*
1989391e43daSPeter Zijlstra 		 * We can potentially drop this_rq's lock in
1990391e43daSPeter Zijlstra 		 * double_lock_balance, and another CPU could
1991391e43daSPeter Zijlstra 		 * alter this_rq
1992391e43daSPeter Zijlstra 		 */
1993391e43daSPeter Zijlstra 		double_lock_balance(this_rq, src_rq);
1994391e43daSPeter Zijlstra 
1995391e43daSPeter Zijlstra 		/*
1996e23ee747SKirill Tkhai 		 * We can pull only a task, which is pushable
1997e23ee747SKirill Tkhai 		 * on its rq, and no others.
1998391e43daSPeter Zijlstra 		 */
1999e23ee747SKirill Tkhai 		p = pick_highest_pushable_task(src_rq, this_cpu);
2000391e43daSPeter Zijlstra 
2001391e43daSPeter Zijlstra 		/*
2002391e43daSPeter Zijlstra 		 * Do we have an RT task that preempts
2003391e43daSPeter Zijlstra 		 * the to-be-scheduled task?
2004391e43daSPeter Zijlstra 		 */
2005391e43daSPeter Zijlstra 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2006391e43daSPeter Zijlstra 			WARN_ON(p == src_rq->curr);
2007da0c1e65SKirill Tkhai 			WARN_ON(!task_on_rq_queued(p));
2008391e43daSPeter Zijlstra 
2009391e43daSPeter Zijlstra 			/*
2010391e43daSPeter Zijlstra 			 * There's a chance that p is higher in priority
2011391e43daSPeter Zijlstra 			 * than what's currently running on its cpu.
2012391e43daSPeter Zijlstra 			 * This is just that p is waking up and hasn't
2013391e43daSPeter Zijlstra 			 * had a chance to schedule. We only pull
2014391e43daSPeter Zijlstra 			 * p if it is lower in priority than the
2015391e43daSPeter Zijlstra 			 * current task on the run queue
2016391e43daSPeter Zijlstra 			 */
2017391e43daSPeter Zijlstra 			if (p->prio < src_rq->curr->prio)
2018391e43daSPeter Zijlstra 				goto skip;
2019391e43daSPeter Zijlstra 
2020391e43daSPeter Zijlstra 			ret = 1;
2021391e43daSPeter Zijlstra 
2022391e43daSPeter Zijlstra 			deactivate_task(src_rq, p, 0);
2023391e43daSPeter Zijlstra 			set_task_cpu(p, this_cpu);
2024391e43daSPeter Zijlstra 			activate_task(this_rq, p, 0);
2025391e43daSPeter Zijlstra 			/*
2026391e43daSPeter Zijlstra 			 * We continue with the search, just in
2027391e43daSPeter Zijlstra 			 * case there's an even higher prio task
2028391e43daSPeter Zijlstra 			 * in another runqueue. (low likelihood
2029391e43daSPeter Zijlstra 			 * but possible)
2030391e43daSPeter Zijlstra 			 */
2031391e43daSPeter Zijlstra 		}
2032391e43daSPeter Zijlstra skip:
2033391e43daSPeter Zijlstra 		double_unlock_balance(this_rq, src_rq);
2034391e43daSPeter Zijlstra 	}
2035391e43daSPeter Zijlstra 
2036391e43daSPeter Zijlstra 	return ret;
2037391e43daSPeter Zijlstra }
2038391e43daSPeter Zijlstra 
2039391e43daSPeter Zijlstra static void post_schedule_rt(struct rq *rq)
2040391e43daSPeter Zijlstra {
2041391e43daSPeter Zijlstra 	push_rt_tasks(rq);
2042391e43daSPeter Zijlstra }
2043391e43daSPeter Zijlstra 
2044391e43daSPeter Zijlstra /*
2045391e43daSPeter Zijlstra  * If we are not running and we are not going to reschedule soon, we should
2046391e43daSPeter Zijlstra  * try to push tasks away now
2047391e43daSPeter Zijlstra  */
2048391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p)
2049391e43daSPeter Zijlstra {
2050391e43daSPeter Zijlstra 	if (!task_running(rq, p) &&
2051391e43daSPeter Zijlstra 	    !test_tsk_need_resched(rq->curr) &&
2052391e43daSPeter Zijlstra 	    has_pushable_tasks(rq) &&
205329baa747SPeter Zijlstra 	    p->nr_cpus_allowed > 1 &&
20541baca4ceSJuri Lelli 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
205529baa747SPeter Zijlstra 	    (rq->curr->nr_cpus_allowed < 2 ||
2056391e43daSPeter Zijlstra 	     rq->curr->prio <= p->prio))
2057391e43daSPeter Zijlstra 		push_rt_tasks(rq);
2058391e43daSPeter Zijlstra }
2059391e43daSPeter Zijlstra 
2060391e43daSPeter Zijlstra static void set_cpus_allowed_rt(struct task_struct *p,
2061391e43daSPeter Zijlstra 				const struct cpumask *new_mask)
2062391e43daSPeter Zijlstra {
20638d3d5adaSKirill Tkhai 	struct rq *rq;
20648d3d5adaSKirill Tkhai 	int weight;
2065391e43daSPeter Zijlstra 
2066391e43daSPeter Zijlstra 	BUG_ON(!rt_task(p));
2067391e43daSPeter Zijlstra 
2068da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p))
20698d3d5adaSKirill Tkhai 		return;
2070391e43daSPeter Zijlstra 
20718d3d5adaSKirill Tkhai 	weight = cpumask_weight(new_mask);
20728d3d5adaSKirill Tkhai 
2073391e43daSPeter Zijlstra 	/*
20748d3d5adaSKirill Tkhai 	 * Only update if the process changes whether or not it
20758d3d5adaSKirill Tkhai 	 * can migrate.
2076391e43daSPeter Zijlstra 	 */
207729baa747SPeter Zijlstra 	if ((p->nr_cpus_allowed > 1) == (weight > 1))
20788d3d5adaSKirill Tkhai 		return;
20798d3d5adaSKirill Tkhai 
20808d3d5adaSKirill Tkhai 	rq = task_rq(p);
20818d3d5adaSKirill Tkhai 
20828d3d5adaSKirill Tkhai 	/*
20838d3d5adaSKirill Tkhai 	 * The process used to be able to migrate OR it can now migrate
20848d3d5adaSKirill Tkhai 	 */
20858d3d5adaSKirill Tkhai 	if (weight <= 1) {
20868d3d5adaSKirill Tkhai 		if (!task_current(rq, p))
2087391e43daSPeter Zijlstra 			dequeue_pushable_task(rq, p);
2088391e43daSPeter Zijlstra 		BUG_ON(!rq->rt.rt_nr_migratory);
2089391e43daSPeter Zijlstra 		rq->rt.rt_nr_migratory--;
20908d3d5adaSKirill Tkhai 	} else {
20918d3d5adaSKirill Tkhai 		if (!task_current(rq, p))
20928d3d5adaSKirill Tkhai 			enqueue_pushable_task(rq, p);
20938d3d5adaSKirill Tkhai 		rq->rt.rt_nr_migratory++;
2094391e43daSPeter Zijlstra 	}
2095391e43daSPeter Zijlstra 
2096391e43daSPeter Zijlstra 	update_rt_migration(&rq->rt);
2097391e43daSPeter Zijlstra }
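
/*
 * Illustrative example (assumed masks, not from the original source):
 * pinning a queued task from a 4-CPU mask down to a single CPU flips
 * (nr_cpus_allowed > 1) from true to false, so rt_nr_migratory is
 * decremented and the task leaves the pushable list; widening the
 * mask again does the reverse. A 4-CPU to 2-CPU change keeps both
 * sides of the comparison true and is ignored above.
 */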
2098391e43daSPeter Zijlstra 
2099391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2100391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq)
2101391e43daSPeter Zijlstra {
2102391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2103391e43daSPeter Zijlstra 		rt_set_overload(rq);
2104391e43daSPeter Zijlstra 
2105391e43daSPeter Zijlstra 	__enable_runtime(rq);
2106391e43daSPeter Zijlstra 
2107391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2108391e43daSPeter Zijlstra }
2109391e43daSPeter Zijlstra 
2110391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2111391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq)
2112391e43daSPeter Zijlstra {
2113391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2114391e43daSPeter Zijlstra 		rt_clear_overload(rq);
2115391e43daSPeter Zijlstra 
2116391e43daSPeter Zijlstra 	__disable_runtime(rq);
2117391e43daSPeter Zijlstra 
2118391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2119391e43daSPeter Zijlstra }
2120391e43daSPeter Zijlstra 
2121391e43daSPeter Zijlstra /*
2122391e43daSPeter Zijlstra  * When switching from the RT queue, we bring ourselves to a position
2123391e43daSPeter Zijlstra  * that we might want to pull RT tasks from other runqueues.
2124391e43daSPeter Zijlstra  */
2125391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p)
2126391e43daSPeter Zijlstra {
2127391e43daSPeter Zijlstra 	/*
2128391e43daSPeter Zijlstra 	 * If there are other RT tasks then we will reschedule
2129391e43daSPeter Zijlstra 	 * and the scheduling of the other RT tasks will handle
2130391e43daSPeter Zijlstra 	 * the balancing. But if we are the last RT task
2131391e43daSPeter Zijlstra 	 * we may need to handle the pulling of RT tasks
2132391e43daSPeter Zijlstra 	 * now.
2133391e43daSPeter Zijlstra 	 */
2134da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
21351158ddb5SKirill Tkhai 		return;
21361158ddb5SKirill Tkhai 
21371158ddb5SKirill Tkhai 	if (pull_rt_task(rq))
21388875125eSKirill Tkhai 		resched_curr(rq);
2139391e43daSPeter Zijlstra }
2140391e43daSPeter Zijlstra 
214111c785b7SLi Zefan void __init init_sched_rt_class(void)
2142391e43daSPeter Zijlstra {
2143391e43daSPeter Zijlstra 	unsigned int i;
2144391e43daSPeter Zijlstra 
2145391e43daSPeter Zijlstra 	for_each_possible_cpu(i) {
2146391e43daSPeter Zijlstra 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2147391e43daSPeter Zijlstra 					GFP_KERNEL, cpu_to_node(i));
2148391e43daSPeter Zijlstra 	}
2149391e43daSPeter Zijlstra }
2150391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2151391e43daSPeter Zijlstra 
2152391e43daSPeter Zijlstra /*
2153391e43daSPeter Zijlstra  * When switching a task to RT, we may overload the runqueue
2154391e43daSPeter Zijlstra  * with RT tasks. In this case we try to push them off to
2155391e43daSPeter Zijlstra  * other runqueues.
2156391e43daSPeter Zijlstra  */
2157391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p)
2158391e43daSPeter Zijlstra {
2159391e43daSPeter Zijlstra 	int check_resched = 1;
2160391e43daSPeter Zijlstra 
2161391e43daSPeter Zijlstra 	/*
2162391e43daSPeter Zijlstra 	 * If we are already running, then there's nothing
2163391e43daSPeter Zijlstra 	 * that needs to be done. But if we are not running
2164391e43daSPeter Zijlstra 	 * we may need to preempt the current running task.
2165391e43daSPeter Zijlstra 	 * If that current running task is also an RT task
2166391e43daSPeter Zijlstra 	 * then see if we can move to another run queue.
2167391e43daSPeter Zijlstra 	 */
2168da0c1e65SKirill Tkhai 	if (task_on_rq_queued(p) && rq->curr != p) {
2169391e43daSPeter Zijlstra #ifdef CONFIG_SMP
217010447917SKirill V Tkhai 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
2171391e43daSPeter Zijlstra 		    /* Don't resched if we changed runqueues */
217210447917SKirill V Tkhai 		    push_rt_task(rq) && rq != task_rq(p))
2173391e43daSPeter Zijlstra 			check_resched = 0;
2174391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2175391e43daSPeter Zijlstra 		if (check_resched && p->prio < rq->curr->prio)
21768875125eSKirill Tkhai 			resched_curr(rq);
2177391e43daSPeter Zijlstra 	}
2178391e43daSPeter Zijlstra }
2179391e43daSPeter Zijlstra 
2180391e43daSPeter Zijlstra /*
2181391e43daSPeter Zijlstra  * Priority of the task has changed. This may cause
2182391e43daSPeter Zijlstra  * us to initiate a push or pull.
2183391e43daSPeter Zijlstra  */
2184391e43daSPeter Zijlstra static void
2185391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2186391e43daSPeter Zijlstra {
2187da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p))
2188391e43daSPeter Zijlstra 		return;
2189391e43daSPeter Zijlstra 
2190391e43daSPeter Zijlstra 	if (rq->curr == p) {
2191391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2192391e43daSPeter Zijlstra 		/*
2193391e43daSPeter Zijlstra 		 * If our priority decreases while running, we
2194391e43daSPeter Zijlstra 		 * may need to pull tasks to this runqueue.
2195391e43daSPeter Zijlstra 		 */
2196391e43daSPeter Zijlstra 		if (oldprio < p->prio)
2197391e43daSPeter Zijlstra 			pull_rt_task(rq);
2198391e43daSPeter Zijlstra 		/*
2199391e43daSPeter Zijlstra 		 * If there's a higher priority task waiting to run
2200391e43daSPeter Zijlstra 		 * then reschedule. Note, the above pull_rt_task
2201391e43daSPeter Zijlstra 		 * can release the rq lock and p could migrate.
2202391e43daSPeter Zijlstra 		 * Only reschedule if p is still on the same runqueue.
2203391e43daSPeter Zijlstra 		 */
2204391e43daSPeter Zijlstra 		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
22058875125eSKirill Tkhai 			resched_curr(rq);
2206391e43daSPeter Zijlstra #else
2207391e43daSPeter Zijlstra 		/* For UP simply resched on drop of prio */
2208391e43daSPeter Zijlstra 		if (oldprio < p->prio)
22098875125eSKirill Tkhai 			resched_curr(rq);
2210391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2211391e43daSPeter Zijlstra 	} else {
2212391e43daSPeter Zijlstra 		/*
2213391e43daSPeter Zijlstra 		 * This task is not running, but if its priority is
2214391e43daSPeter Zijlstra 		 * higher than that of the current running task,
2215391e43daSPeter Zijlstra 		 * then reschedule.
2216391e43daSPeter Zijlstra 		 */
2217391e43daSPeter Zijlstra 		if (p->prio < rq->curr->prio)
22188875125eSKirill Tkhai 			resched_curr(rq);
2219391e43daSPeter Zijlstra 	}
2220391e43daSPeter Zijlstra }
2221391e43daSPeter Zijlstra 
2222391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p)
2223391e43daSPeter Zijlstra {
2224391e43daSPeter Zijlstra 	unsigned long soft, hard;
2225391e43daSPeter Zijlstra 
2226391e43daSPeter Zijlstra 	/* max may change after cur was read; this will be fixed next tick */
2227391e43daSPeter Zijlstra 	soft = task_rlimit(p, RLIMIT_RTTIME);
2228391e43daSPeter Zijlstra 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2229391e43daSPeter Zijlstra 
2230391e43daSPeter Zijlstra 	if (soft != RLIM_INFINITY) {
2231391e43daSPeter Zijlstra 		unsigned long next;
2232391e43daSPeter Zijlstra 
223357d2aa00SYing Xue 		if (p->rt.watchdog_stamp != jiffies) {
2234391e43daSPeter Zijlstra 			p->rt.timeout++;
223557d2aa00SYing Xue 			p->rt.watchdog_stamp = jiffies;
223657d2aa00SYing Xue 		}
223757d2aa00SYing Xue 
2238391e43daSPeter Zijlstra 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2239391e43daSPeter Zijlstra 		if (p->rt.timeout > next)
2240391e43daSPeter Zijlstra 			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2241391e43daSPeter Zijlstra 	}
2242391e43daSPeter Zijlstra }
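
/*
 * Worked example (assumed limits, not from the original source): with
 * an RLIMIT_RTTIME soft limit of 950000us, a hard limit of 1000000us
 * and HZ == 1000, next = DIV_ROUND_UP(950000, 1000000/1000) = 950
 * ticks. Once p->rt.timeout exceeds that, sched_exp is armed so the
 * POSIX CPU timer path can act on the exceeded limit.
 */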
2243391e43daSPeter Zijlstra 
2244391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2245391e43daSPeter Zijlstra {
2246454c7999SColin Cross 	struct sched_rt_entity *rt_se = &p->rt;
2247454c7999SColin Cross 
2248391e43daSPeter Zijlstra 	update_curr_rt(rq);
2249391e43daSPeter Zijlstra 
2250391e43daSPeter Zijlstra 	watchdog(rq, p);
2251391e43daSPeter Zijlstra 
2252391e43daSPeter Zijlstra 	/*
2253391e43daSPeter Zijlstra 	 * RR tasks need a special form of timeslice management.
2254391e43daSPeter Zijlstra 	 * FIFO tasks have no timeslices.
2255391e43daSPeter Zijlstra 	 */
2256391e43daSPeter Zijlstra 	if (p->policy != SCHED_RR)
2257391e43daSPeter Zijlstra 		return;
2258391e43daSPeter Zijlstra 
2259391e43daSPeter Zijlstra 	if (--p->rt.time_slice)
2260391e43daSPeter Zijlstra 		return;
2261391e43daSPeter Zijlstra 
2262ce0dbbbbSClark Williams 	p->rt.time_slice = sched_rr_timeslice;
2263391e43daSPeter Zijlstra 
2264391e43daSPeter Zijlstra 	/*
2265e9aa39bbSLi Bin 	 * Requeue to the end of the queue if we (and all of our ancestors)
2266e9aa39bbSLi Bin 	 * are not the only element on the queue
2267391e43daSPeter Zijlstra 	 */
2268454c7999SColin Cross 	for_each_sched_rt_entity(rt_se) {
2269454c7999SColin Cross 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2270391e43daSPeter Zijlstra 			requeue_task_rt(rq, p, 0);
22718aa6f0ebSKirill Tkhai 			resched_curr(rq);
2272454c7999SColin Cross 			return;
2273454c7999SColin Cross 		}
2274391e43daSPeter Zijlstra 	}
2275391e43daSPeter Zijlstra }
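
The refill value, sched_rr_timeslice, defaults to RR_TIMESLICE and is runtime-tunable through the sched_rr_timeslice_ms sysctl; since the decrement above happens once per tick, the slice is consumed in HZ-sized steps. A small sketch that reads the tunable (path as exposed by procfs on mainline kernels):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_rr_timeslice_ms", "r");
	int ms;

	if (f && fscanf(f, "%d", &ms) == 1)
		printf("SCHED_RR timeslice: %d ms\n", ms);
	if (f)
		fclose(f);
	return 0;
}
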
2276391e43daSPeter Zijlstra 
2277391e43daSPeter Zijlstra static void set_curr_task_rt(struct rq *rq)
2278391e43daSPeter Zijlstra {
2279391e43daSPeter Zijlstra 	struct task_struct *p = rq->curr;
2280391e43daSPeter Zijlstra 
228178becc27SFrederic Weisbecker 	p->se.exec_start = rq_clock_task(rq);
2282391e43daSPeter Zijlstra 
2283391e43daSPeter Zijlstra 	/* The running task is never eligible for pushing */
2284391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
2285391e43daSPeter Zijlstra }
2286391e43daSPeter Zijlstra 
2287391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2288391e43daSPeter Zijlstra {
2289391e43daSPeter Zijlstra 	/*
2290391e43daSPeter Zijlstra 	 * Time slice is 0 for SCHED_FIFO tasks
2291391e43daSPeter Zijlstra 	 */
2292391e43daSPeter Zijlstra 	if (task->policy == SCHED_RR)
2293ce0dbbbbSClark Williams 		return sched_rr_timeslice;
2294391e43daSPeter Zijlstra 	else
2295391e43daSPeter Zijlstra 		return 0;
2296391e43daSPeter Zijlstra }
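
get_rr_interval_rt() backs the sched_rr_get_interval(2) system call, which converts the tick count to a timespec for userspace. A minimal caller, assuming the POSIX prototype in <sched.h>:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 means the calling thread; SCHED_FIFO tasks report 0 */
	if (sched_rr_get_interval(0, &ts) == 0)
		printf("timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
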
2297391e43daSPeter Zijlstra 
2298391e43daSPeter Zijlstra const struct sched_class rt_sched_class = {
2299391e43daSPeter Zijlstra 	.next			= &fair_sched_class,
2300391e43daSPeter Zijlstra 	.enqueue_task		= enqueue_task_rt,
2301391e43daSPeter Zijlstra 	.dequeue_task		= dequeue_task_rt,
2302391e43daSPeter Zijlstra 	.yield_task		= yield_task_rt,
2303391e43daSPeter Zijlstra 
2304391e43daSPeter Zijlstra 	.check_preempt_curr	= check_preempt_curr_rt,
2305391e43daSPeter Zijlstra 
2306391e43daSPeter Zijlstra 	.pick_next_task		= pick_next_task_rt,
2307391e43daSPeter Zijlstra 	.put_prev_task		= put_prev_task_rt,
2308391e43daSPeter Zijlstra 
2309391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2310391e43daSPeter Zijlstra 	.select_task_rq		= select_task_rq_rt,
2311391e43daSPeter Zijlstra 
2312391e43daSPeter Zijlstra 	.set_cpus_allowed       = set_cpus_allowed_rt,
2313391e43daSPeter Zijlstra 	.rq_online              = rq_online_rt,
2314391e43daSPeter Zijlstra 	.rq_offline             = rq_offline_rt,
2315391e43daSPeter Zijlstra 	.post_schedule		= post_schedule_rt,
2316391e43daSPeter Zijlstra 	.task_woken		= task_woken_rt,
2317391e43daSPeter Zijlstra 	.switched_from		= switched_from_rt,
2318391e43daSPeter Zijlstra #endif
2319391e43daSPeter Zijlstra 
2320391e43daSPeter Zijlstra 	.set_curr_task          = set_curr_task_rt,
2321391e43daSPeter Zijlstra 	.task_tick		= task_tick_rt,
2322391e43daSPeter Zijlstra 
2323391e43daSPeter Zijlstra 	.get_rr_interval	= get_rr_interval_rt,
2324391e43daSPeter Zijlstra 
2325391e43daSPeter Zijlstra 	.prio_changed		= prio_changed_rt,
2326391e43daSPeter Zijlstra 	.switched_to		= switched_to_rt,
23276e998916SStanislaw Gruszka 
23286e998916SStanislaw Gruszka 	.update_curr		= update_curr_rt,
2329391e43daSPeter Zijlstra };
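
The .next pointer chains rt_sched_class above fair_sched_class, so the core scheduler's highest-first class walk always offers the CPU to runnable RT tasks before any CFS task. A hedged userspace demonstration of that ordering (hypothetical CPU number and priority; with default RT throttling, CFS still gets roughly 5% of each second, so the counter is small but usually nonzero):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <time.h>

static volatile unsigned long cfs_ticks;

static void pin_to_cpu0(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* hypothetical: assumes CPU 0 is online */
	pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}

static void *cfs_spinner(void *arg)
{
	(void)arg;
	pin_to_cpu0();
	for (;;)
		cfs_ticks++;	/* mostly starved while the RT thread spins */
	return NULL;
}

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };
	pthread_t t;
	time_t end;

	pthread_create(&t, NULL, cfs_spinner, NULL);
	pin_to_cpu0();
	if (sched_setscheduler(0, SCHED_FIFO, &sp))
		perror("sched_setscheduler");	/* needs CAP_SYS_NICE */

	end = time(NULL) + 2;
	while (time(NULL) < end)
		;	/* SCHED_FIFO busy loop on the same CPU */

	printf("cfs_ticks while RT was spinning: %lu\n", cfs_ticks);
	return 0;
}
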
2330391e43daSPeter Zijlstra 
2331391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
2332391e43daSPeter Zijlstra extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2333391e43daSPeter Zijlstra 
2334391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu)
2335391e43daSPeter Zijlstra {
2336391e43daSPeter Zijlstra 	rt_rq_iter_t iter;
2337391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
2338391e43daSPeter Zijlstra 
2339391e43daSPeter Zijlstra 	rcu_read_lock();
2340391e43daSPeter Zijlstra 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2341391e43daSPeter Zijlstra 		print_rt_rq(m, cpu, rt_rq);
2342391e43daSPeter Zijlstra 	rcu_read_unlock();
2343391e43daSPeter Zijlstra }
2344391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */
2345
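
When CONFIG_SCHED_DEBUG is set, the print_rt_rq() output produced here appears in /proc/sched_debug as per-CPU rt_rq sections (fields such as rt_nr_running, rt_throttled, rt_time and rt_runtime). A small sketch that filters those lines:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sched_debug", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "rt_"))	/* rt_rq headers and .rt_* fields */
			fputs(line, stdout);
	fclose(f);
	return 0;
}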