xref: /openbmc/linux/kernel/sched/rt.c (revision b6366f048e0caff28af5335b7af2031266e1b06b)
1391e43daSPeter Zijlstra /*
2391e43daSPeter Zijlstra  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3391e43daSPeter Zijlstra  * policies)
4391e43daSPeter Zijlstra  */
5391e43daSPeter Zijlstra 
6391e43daSPeter Zijlstra #include "sched.h"
7391e43daSPeter Zijlstra 
8391e43daSPeter Zijlstra #include <linux/slab.h>
9*b6366f04SSteven Rostedt #include <linux/irq_work.h>
10391e43daSPeter Zijlstra 
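/*
 * Default SCHED_RR timeslice: RR_TIMESLICE is 100 ms worth of jiffies,
 * and the value can be tuned at run time through the
 * kernel.sched_rr_timeslice_ms sysctl.
 */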
11ce0dbbbbSClark Williams int sched_rr_timeslice = RR_TIMESLICE;
12ce0dbbbbSClark Williams 
13391e43daSPeter Zijlstra static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
14391e43daSPeter Zijlstra 
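/*
 * System-wide RT bandwidth pool; sched_rt_bandwidth() below falls back to
 * it when group scheduling is not configured.  It is initialized from the
 * sched_rt_period_us / sched_rt_runtime_us sysctls, which default to a
 * 1 s period with 950 ms of runtime (RT tasks may use at most 95% of each
 * period).
 */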
15391e43daSPeter Zijlstra struct rt_bandwidth def_rt_bandwidth;
16391e43daSPeter Zijlstra 
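/*
 * Per rt_bandwidth replenishment timer.  hrtimer_forward() advances the
 * expiry by whole rt_period intervals and returns how many periods have
 * elapsed, so a delayed callback still replenishes every missed period
 * via do_sched_rt_period_timer().  The timer stops (HRTIMER_NORESTART)
 * once every rt_rq it services reports itself idle.
 */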
17391e43daSPeter Zijlstra static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
18391e43daSPeter Zijlstra {
19391e43daSPeter Zijlstra 	struct rt_bandwidth *rt_b =
20391e43daSPeter Zijlstra 		container_of(timer, struct rt_bandwidth, rt_period_timer);
21391e43daSPeter Zijlstra 	ktime_t now;
22391e43daSPeter Zijlstra 	int overrun;
23391e43daSPeter Zijlstra 	int idle = 0;
24391e43daSPeter Zijlstra 
25391e43daSPeter Zijlstra 	for (;;) {
26391e43daSPeter Zijlstra 		now = hrtimer_cb_get_time(timer);
27391e43daSPeter Zijlstra 		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
28391e43daSPeter Zijlstra 
29391e43daSPeter Zijlstra 		if (!overrun)
30391e43daSPeter Zijlstra 			break;
31391e43daSPeter Zijlstra 
32391e43daSPeter Zijlstra 		idle = do_sched_rt_period_timer(rt_b, overrun);
33391e43daSPeter Zijlstra 	}
34391e43daSPeter Zijlstra 
35391e43daSPeter Zijlstra 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
36391e43daSPeter Zijlstra }
37391e43daSPeter Zijlstra 
38391e43daSPeter Zijlstra void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
39391e43daSPeter Zijlstra {
40391e43daSPeter Zijlstra 	rt_b->rt_period = ns_to_ktime(period);
41391e43daSPeter Zijlstra 	rt_b->rt_runtime = runtime;
42391e43daSPeter Zijlstra 
43391e43daSPeter Zijlstra 	raw_spin_lock_init(&rt_b->rt_runtime_lock);
44391e43daSPeter Zijlstra 
45391e43daSPeter Zijlstra 	hrtimer_init(&rt_b->rt_period_timer,
46391e43daSPeter Zijlstra 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
47391e43daSPeter Zijlstra 	rt_b->rt_period_timer.function = sched_rt_period_timer;
48391e43daSPeter Zijlstra }
49391e43daSPeter Zijlstra 
50391e43daSPeter Zijlstra static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
51391e43daSPeter Zijlstra {
52391e43daSPeter Zijlstra 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
53391e43daSPeter Zijlstra 		return;
54391e43daSPeter Zijlstra 
55391e43daSPeter Zijlstra 	if (hrtimer_active(&rt_b->rt_period_timer))
56391e43daSPeter Zijlstra 		return;
57391e43daSPeter Zijlstra 
58391e43daSPeter Zijlstra 	raw_spin_lock(&rt_b->rt_runtime_lock);
59391e43daSPeter Zijlstra 	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
60391e43daSPeter Zijlstra 	raw_spin_unlock(&rt_b->rt_runtime_lock);
61391e43daSPeter Zijlstra }
62391e43daSPeter Zijlstra 
63*b6366f04SSteven Rostedt #ifdef CONFIG_SMP
64*b6366f04SSteven Rostedt static void push_irq_work_func(struct irq_work *work);
65*b6366f04SSteven Rostedt #endif
66*b6366f04SSteven Rostedt 
67391e43daSPeter Zijlstra void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
68391e43daSPeter Zijlstra {
69391e43daSPeter Zijlstra 	struct rt_prio_array *array;
70391e43daSPeter Zijlstra 	int i;
71391e43daSPeter Zijlstra 
72391e43daSPeter Zijlstra 	array = &rt_rq->active;
73391e43daSPeter Zijlstra 	for (i = 0; i < MAX_RT_PRIO; i++) {
74391e43daSPeter Zijlstra 		INIT_LIST_HEAD(array->queue + i);
75391e43daSPeter Zijlstra 		__clear_bit(i, array->bitmap);
76391e43daSPeter Zijlstra 	}
77391e43daSPeter Zijlstra 	/* delimiter for bitsearch: */
78391e43daSPeter Zijlstra 	__set_bit(MAX_RT_PRIO, array->bitmap);
79391e43daSPeter Zijlstra 
80391e43daSPeter Zijlstra #if defined CONFIG_SMP
81391e43daSPeter Zijlstra 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
82391e43daSPeter Zijlstra 	rt_rq->highest_prio.next = MAX_RT_PRIO;
83391e43daSPeter Zijlstra 	rt_rq->rt_nr_migratory = 0;
84391e43daSPeter Zijlstra 	rt_rq->overloaded = 0;
85391e43daSPeter Zijlstra 	plist_head_init(&rt_rq->pushable_tasks);
86*b6366f04SSteven Rostedt 
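	/*
	 * A rough sketch of the IPI based push path: instead of pulling
	 * tasks under remote rq locks when this CPU lowers its priority,
	 * an IPI is sent to an overloaded CPU asking it to push.  push_cpu
	 * remembers how far around the root domain the IPI has travelled,
	 * push_flags coordinates restarts while a pass is in flight, and
	 * push_work is the irq_work that runs push_irq_work_func() on the
	 * target CPU.
	 */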
87*b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
88*b6366f04SSteven Rostedt 	rt_rq->push_flags = 0;
89*b6366f04SSteven Rostedt 	rt_rq->push_cpu = nr_cpu_ids;
90*b6366f04SSteven Rostedt 	raw_spin_lock_init(&rt_rq->push_lock);
91*b6366f04SSteven Rostedt 	init_irq_work(&rt_rq->push_work, push_irq_work_func);
92391e43daSPeter Zijlstra #endif
93*b6366f04SSteven Rostedt #endif /* CONFIG_SMP */
94f4ebcbc0SKirill Tkhai 	/* We start in dequeued state, because no RT tasks are queued */
95f4ebcbc0SKirill Tkhai 	rt_rq->rt_queued = 0;
96391e43daSPeter Zijlstra 
97391e43daSPeter Zijlstra 	rt_rq->rt_time = 0;
98391e43daSPeter Zijlstra 	rt_rq->rt_throttled = 0;
99391e43daSPeter Zijlstra 	rt_rq->rt_runtime = 0;
100391e43daSPeter Zijlstra 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
101391e43daSPeter Zijlstra }
102391e43daSPeter Zijlstra 
103391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
104391e43daSPeter Zijlstra static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
105391e43daSPeter Zijlstra {
106391e43daSPeter Zijlstra 	hrtimer_cancel(&rt_b->rt_period_timer);
107391e43daSPeter Zijlstra }
108391e43daSPeter Zijlstra 
109391e43daSPeter Zijlstra #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
110391e43daSPeter Zijlstra 
111391e43daSPeter Zijlstra static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
112391e43daSPeter Zijlstra {
113391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
114391e43daSPeter Zijlstra 	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
115391e43daSPeter Zijlstra #endif
116391e43daSPeter Zijlstra 	return container_of(rt_se, struct task_struct, rt);
117391e43daSPeter Zijlstra }
118391e43daSPeter Zijlstra 
119391e43daSPeter Zijlstra static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
120391e43daSPeter Zijlstra {
121391e43daSPeter Zijlstra 	return rt_rq->rq;
122391e43daSPeter Zijlstra }
123391e43daSPeter Zijlstra 
124391e43daSPeter Zijlstra static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
125391e43daSPeter Zijlstra {
126391e43daSPeter Zijlstra 	return rt_se->rt_rq;
127391e43daSPeter Zijlstra }
128391e43daSPeter Zijlstra 
129653d07a6SKirill Tkhai static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
130653d07a6SKirill Tkhai {
131653d07a6SKirill Tkhai 	struct rt_rq *rt_rq = rt_se->rt_rq;
132653d07a6SKirill Tkhai 
133653d07a6SKirill Tkhai 	return rt_rq->rq;
134653d07a6SKirill Tkhai }
135653d07a6SKirill Tkhai 
136391e43daSPeter Zijlstra void free_rt_sched_group(struct task_group *tg)
137391e43daSPeter Zijlstra {
138391e43daSPeter Zijlstra 	int i;
139391e43daSPeter Zijlstra 
140391e43daSPeter Zijlstra 	if (tg->rt_se)
141391e43daSPeter Zijlstra 		destroy_rt_bandwidth(&tg->rt_bandwidth);
142391e43daSPeter Zijlstra 
143391e43daSPeter Zijlstra 	for_each_possible_cpu(i) {
144391e43daSPeter Zijlstra 		if (tg->rt_rq)
145391e43daSPeter Zijlstra 			kfree(tg->rt_rq[i]);
146391e43daSPeter Zijlstra 		if (tg->rt_se)
147391e43daSPeter Zijlstra 			kfree(tg->rt_se[i]);
148391e43daSPeter Zijlstra 	}
149391e43daSPeter Zijlstra 
150391e43daSPeter Zijlstra 	kfree(tg->rt_rq);
151391e43daSPeter Zijlstra 	kfree(tg->rt_se);
152391e43daSPeter Zijlstra }
153391e43daSPeter Zijlstra 
154391e43daSPeter Zijlstra void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
155391e43daSPeter Zijlstra 		struct sched_rt_entity *rt_se, int cpu,
156391e43daSPeter Zijlstra 		struct sched_rt_entity *parent)
157391e43daSPeter Zijlstra {
158391e43daSPeter Zijlstra 	struct rq *rq = cpu_rq(cpu);
159391e43daSPeter Zijlstra 
160391e43daSPeter Zijlstra 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
161391e43daSPeter Zijlstra 	rt_rq->rt_nr_boosted = 0;
162391e43daSPeter Zijlstra 	rt_rq->rq = rq;
163391e43daSPeter Zijlstra 	rt_rq->tg = tg;
164391e43daSPeter Zijlstra 
165391e43daSPeter Zijlstra 	tg->rt_rq[cpu] = rt_rq;
166391e43daSPeter Zijlstra 	tg->rt_se[cpu] = rt_se;
167391e43daSPeter Zijlstra 
168391e43daSPeter Zijlstra 	if (!rt_se)
169391e43daSPeter Zijlstra 		return;
170391e43daSPeter Zijlstra 
171391e43daSPeter Zijlstra 	if (!parent)
172391e43daSPeter Zijlstra 		rt_se->rt_rq = &rq->rt;
173391e43daSPeter Zijlstra 	else
174391e43daSPeter Zijlstra 		rt_se->rt_rq = parent->my_q;
175391e43daSPeter Zijlstra 
176391e43daSPeter Zijlstra 	rt_se->my_q = rt_rq;
177391e43daSPeter Zijlstra 	rt_se->parent = parent;
178391e43daSPeter Zijlstra 	INIT_LIST_HEAD(&rt_se->run_list);
179391e43daSPeter Zijlstra }
180391e43daSPeter Zijlstra 
181391e43daSPeter Zijlstra int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
182391e43daSPeter Zijlstra {
183391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
184391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
185391e43daSPeter Zijlstra 	int i;
186391e43daSPeter Zijlstra 
187391e43daSPeter Zijlstra 	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
188391e43daSPeter Zijlstra 	if (!tg->rt_rq)
189391e43daSPeter Zijlstra 		goto err;
190391e43daSPeter Zijlstra 	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
191391e43daSPeter Zijlstra 	if (!tg->rt_se)
192391e43daSPeter Zijlstra 		goto err;
193391e43daSPeter Zijlstra 
194391e43daSPeter Zijlstra 	init_rt_bandwidth(&tg->rt_bandwidth,
195391e43daSPeter Zijlstra 			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
196391e43daSPeter Zijlstra 
197391e43daSPeter Zijlstra 	for_each_possible_cpu(i) {
198391e43daSPeter Zijlstra 		rt_rq = kzalloc_node(sizeof(struct rt_rq),
199391e43daSPeter Zijlstra 				     GFP_KERNEL, cpu_to_node(i));
200391e43daSPeter Zijlstra 		if (!rt_rq)
201391e43daSPeter Zijlstra 			goto err;
202391e43daSPeter Zijlstra 
203391e43daSPeter Zijlstra 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
204391e43daSPeter Zijlstra 				     GFP_KERNEL, cpu_to_node(i));
205391e43daSPeter Zijlstra 		if (!rt_se)
206391e43daSPeter Zijlstra 			goto err_free_rq;
207391e43daSPeter Zijlstra 
208391e43daSPeter Zijlstra 		init_rt_rq(rt_rq, cpu_rq(i));
209391e43daSPeter Zijlstra 		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
210391e43daSPeter Zijlstra 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
211391e43daSPeter Zijlstra 	}
212391e43daSPeter Zijlstra 
213391e43daSPeter Zijlstra 	return 1;
214391e43daSPeter Zijlstra 
215391e43daSPeter Zijlstra err_free_rq:
216391e43daSPeter Zijlstra 	kfree(rt_rq);
217391e43daSPeter Zijlstra err:
218391e43daSPeter Zijlstra 	return 0;
219391e43daSPeter Zijlstra }
220391e43daSPeter Zijlstra 
221391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */
222391e43daSPeter Zijlstra 
223391e43daSPeter Zijlstra #define rt_entity_is_task(rt_se) (1)
224391e43daSPeter Zijlstra 
225391e43daSPeter Zijlstra static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
226391e43daSPeter Zijlstra {
227391e43daSPeter Zijlstra 	return container_of(rt_se, struct task_struct, rt);
228391e43daSPeter Zijlstra }
229391e43daSPeter Zijlstra 
230391e43daSPeter Zijlstra static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
231391e43daSPeter Zijlstra {
232391e43daSPeter Zijlstra 	return container_of(rt_rq, struct rq, rt);
233391e43daSPeter Zijlstra }
234391e43daSPeter Zijlstra 
235653d07a6SKirill Tkhai static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
236391e43daSPeter Zijlstra {
237391e43daSPeter Zijlstra 	struct task_struct *p = rt_task_of(rt_se);
238653d07a6SKirill Tkhai 
239653d07a6SKirill Tkhai 	return task_rq(p);
240653d07a6SKirill Tkhai }
241653d07a6SKirill Tkhai 
242653d07a6SKirill Tkhai static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
243653d07a6SKirill Tkhai {
244653d07a6SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
245391e43daSPeter Zijlstra 
246391e43daSPeter Zijlstra 	return &rq->rt;
247391e43daSPeter Zijlstra }
248391e43daSPeter Zijlstra 
249391e43daSPeter Zijlstra void free_rt_sched_group(struct task_group *tg) { }
250391e43daSPeter Zijlstra 
251391e43daSPeter Zijlstra int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
252391e43daSPeter Zijlstra {
253391e43daSPeter Zijlstra 	return 1;
254391e43daSPeter Zijlstra }
255391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */
256391e43daSPeter Zijlstra 
257391e43daSPeter Zijlstra #ifdef CONFIG_SMP
258391e43daSPeter Zijlstra 
25938033c37SPeter Zijlstra static int pull_rt_task(struct rq *this_rq);
26038033c37SPeter Zijlstra 
261dc877341SPeter Zijlstra static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
262dc877341SPeter Zijlstra {
263dc877341SPeter Zijlstra 	/* Try to pull RT tasks here if we lower this rq's prio */
264dc877341SPeter Zijlstra 	return rq->rt.highest_prio.curr > prev->prio;
265dc877341SPeter Zijlstra }
266dc877341SPeter Zijlstra 
267391e43daSPeter Zijlstra static inline int rt_overloaded(struct rq *rq)
268391e43daSPeter Zijlstra {
269391e43daSPeter Zijlstra 	return atomic_read(&rq->rd->rto_count);
270391e43daSPeter Zijlstra }
271391e43daSPeter Zijlstra 
272391e43daSPeter Zijlstra static inline void rt_set_overload(struct rq *rq)
273391e43daSPeter Zijlstra {
274391e43daSPeter Zijlstra 	if (!rq->online)
275391e43daSPeter Zijlstra 		return;
276391e43daSPeter Zijlstra 
277391e43daSPeter Zijlstra 	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
278391e43daSPeter Zijlstra 	/*
279391e43daSPeter Zijlstra 	 * Make sure the mask is visible before we set
280391e43daSPeter Zijlstra 	 * the overload count. That is checked to determine
281391e43daSPeter Zijlstra 	 * if we should look at the mask. It would be a shame
282391e43daSPeter Zijlstra 	 * if we looked at the mask, but the mask was not
283391e43daSPeter Zijlstra 	 * updated yet.
2847c3f2ab7SPeter Zijlstra 	 *
2857c3f2ab7SPeter Zijlstra 	 * Matched by the barrier in pull_rt_task().
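	 *
	 * Roughly:
	 *
	 *   rt_set_overload()           pull_rt_task()
	 *   -----------------           --------------
	 *   set cpu in rto_mask         read rto_count
	 *   smp_wmb()                   smp_rmb()
	 *   atomic_inc(rto_count)       scan rto_mask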
286391e43daSPeter Zijlstra 	 */
2877c3f2ab7SPeter Zijlstra 	smp_wmb();
288391e43daSPeter Zijlstra 	atomic_inc(&rq->rd->rto_count);
289391e43daSPeter Zijlstra }
290391e43daSPeter Zijlstra 
291391e43daSPeter Zijlstra static inline void rt_clear_overload(struct rq *rq)
292391e43daSPeter Zijlstra {
293391e43daSPeter Zijlstra 	if (!rq->online)
294391e43daSPeter Zijlstra 		return;
295391e43daSPeter Zijlstra 
296391e43daSPeter Zijlstra 	/* the order here really doesn't matter */
297391e43daSPeter Zijlstra 	atomic_dec(&rq->rd->rto_count);
298391e43daSPeter Zijlstra 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
299391e43daSPeter Zijlstra }
300391e43daSPeter Zijlstra 
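/*
 * An rq counts as RT "overloaded" when it has more than one runnable RT
 * task and at least one of them may run on another CPU.  rto_mask and
 * rto_count advertise that state in the root domain so that other CPUs
 * know a pull might find work here.
 */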
301391e43daSPeter Zijlstra static void update_rt_migration(struct rt_rq *rt_rq)
302391e43daSPeter Zijlstra {
303391e43daSPeter Zijlstra 	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
304391e43daSPeter Zijlstra 		if (!rt_rq->overloaded) {
305391e43daSPeter Zijlstra 			rt_set_overload(rq_of_rt_rq(rt_rq));
306391e43daSPeter Zijlstra 			rt_rq->overloaded = 1;
307391e43daSPeter Zijlstra 		}
308391e43daSPeter Zijlstra 	} else if (rt_rq->overloaded) {
309391e43daSPeter Zijlstra 		rt_clear_overload(rq_of_rt_rq(rt_rq));
310391e43daSPeter Zijlstra 		rt_rq->overloaded = 0;
311391e43daSPeter Zijlstra 	}
312391e43daSPeter Zijlstra }
313391e43daSPeter Zijlstra 
314391e43daSPeter Zijlstra static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
315391e43daSPeter Zijlstra {
31629baa747SPeter Zijlstra 	struct task_struct *p;
31729baa747SPeter Zijlstra 
318391e43daSPeter Zijlstra 	if (!rt_entity_is_task(rt_se))
319391e43daSPeter Zijlstra 		return;
320391e43daSPeter Zijlstra 
32129baa747SPeter Zijlstra 	p = rt_task_of(rt_se);
322391e43daSPeter Zijlstra 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
323391e43daSPeter Zijlstra 
324391e43daSPeter Zijlstra 	rt_rq->rt_nr_total++;
32529baa747SPeter Zijlstra 	if (p->nr_cpus_allowed > 1)
326391e43daSPeter Zijlstra 		rt_rq->rt_nr_migratory++;
327391e43daSPeter Zijlstra 
328391e43daSPeter Zijlstra 	update_rt_migration(rt_rq);
329391e43daSPeter Zijlstra }
330391e43daSPeter Zijlstra 
331391e43daSPeter Zijlstra static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
332391e43daSPeter Zijlstra {
33329baa747SPeter Zijlstra 	struct task_struct *p;
33429baa747SPeter Zijlstra 
335391e43daSPeter Zijlstra 	if (!rt_entity_is_task(rt_se))
336391e43daSPeter Zijlstra 		return;
337391e43daSPeter Zijlstra 
33829baa747SPeter Zijlstra 	p = rt_task_of(rt_se);
339391e43daSPeter Zijlstra 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
340391e43daSPeter Zijlstra 
341391e43daSPeter Zijlstra 	rt_rq->rt_nr_total--;
34229baa747SPeter Zijlstra 	if (p->nr_cpus_allowed > 1)
343391e43daSPeter Zijlstra 		rt_rq->rt_nr_migratory--;
344391e43daSPeter Zijlstra 
345391e43daSPeter Zijlstra 	update_rt_migration(rt_rq);
346391e43daSPeter Zijlstra }
347391e43daSPeter Zijlstra 
348391e43daSPeter Zijlstra static inline int has_pushable_tasks(struct rq *rq)
349391e43daSPeter Zijlstra {
350391e43daSPeter Zijlstra 	return !plist_head_empty(&rq->rt.pushable_tasks);
351391e43daSPeter Zijlstra }
352391e43daSPeter Zijlstra 
353dc877341SPeter Zijlstra static inline void set_post_schedule(struct rq *rq)
354dc877341SPeter Zijlstra {
355dc877341SPeter Zijlstra 	/*
356dc877341SPeter Zijlstra 	 * We detect this state here so that we can avoid taking the RQ
357dc877341SPeter Zijlstra 	 * lock again later if there is no need to push
358dc877341SPeter Zijlstra 	 */
359dc877341SPeter Zijlstra 	rq->post_schedule = has_pushable_tasks(rq);
360dc877341SPeter Zijlstra }
361dc877341SPeter Zijlstra 
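/*
 * pushable_tasks is a plist ordered by priority, so its head is always the
 * highest-priority task on this rq that is allowed to run elsewhere; the
 * push logic picks its candidates from this list.
 */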
362391e43daSPeter Zijlstra static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
363391e43daSPeter Zijlstra {
364391e43daSPeter Zijlstra 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
365391e43daSPeter Zijlstra 	plist_node_init(&p->pushable_tasks, p->prio);
366391e43daSPeter Zijlstra 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
367391e43daSPeter Zijlstra 
368391e43daSPeter Zijlstra 	/* Update the highest prio pushable task */
369391e43daSPeter Zijlstra 	if (p->prio < rq->rt.highest_prio.next)
370391e43daSPeter Zijlstra 		rq->rt.highest_prio.next = p->prio;
371391e43daSPeter Zijlstra }
372391e43daSPeter Zijlstra 
373391e43daSPeter Zijlstra static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
374391e43daSPeter Zijlstra {
375391e43daSPeter Zijlstra 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
376391e43daSPeter Zijlstra 
377391e43daSPeter Zijlstra 	/* Update the new highest prio pushable task */
378391e43daSPeter Zijlstra 	if (has_pushable_tasks(rq)) {
379391e43daSPeter Zijlstra 		p = plist_first_entry(&rq->rt.pushable_tasks,
380391e43daSPeter Zijlstra 				      struct task_struct, pushable_tasks);
381391e43daSPeter Zijlstra 		rq->rt.highest_prio.next = p->prio;
382391e43daSPeter Zijlstra 	} else
383391e43daSPeter Zijlstra 		rq->rt.highest_prio.next = MAX_RT_PRIO;
384391e43daSPeter Zijlstra }
385391e43daSPeter Zijlstra 
386391e43daSPeter Zijlstra #else
387391e43daSPeter Zijlstra 
388391e43daSPeter Zijlstra static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
389391e43daSPeter Zijlstra {
390391e43daSPeter Zijlstra }
391391e43daSPeter Zijlstra 
392391e43daSPeter Zijlstra static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
393391e43daSPeter Zijlstra {
394391e43daSPeter Zijlstra }
395391e43daSPeter Zijlstra 
396391e43daSPeter Zijlstra static inline
397391e43daSPeter Zijlstra void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
398391e43daSPeter Zijlstra {
399391e43daSPeter Zijlstra }
400391e43daSPeter Zijlstra 
401391e43daSPeter Zijlstra static inline
402391e43daSPeter Zijlstra void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
403391e43daSPeter Zijlstra {
404391e43daSPeter Zijlstra }
405391e43daSPeter Zijlstra 
406dc877341SPeter Zijlstra static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
407dc877341SPeter Zijlstra {
408dc877341SPeter Zijlstra 	return false;
409dc877341SPeter Zijlstra }
410dc877341SPeter Zijlstra 
411dc877341SPeter Zijlstra static inline int pull_rt_task(struct rq *this_rq)
412dc877341SPeter Zijlstra {
413dc877341SPeter Zijlstra 	return 0;
414dc877341SPeter Zijlstra }
415dc877341SPeter Zijlstra 
416dc877341SPeter Zijlstra static inline void set_post_schedule(struct rq *rq)
417dc877341SPeter Zijlstra {
418dc877341SPeter Zijlstra }
419391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
420391e43daSPeter Zijlstra 
421f4ebcbc0SKirill Tkhai static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
422f4ebcbc0SKirill Tkhai static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
423f4ebcbc0SKirill Tkhai 
424391e43daSPeter Zijlstra static inline int on_rt_rq(struct sched_rt_entity *rt_se)
425391e43daSPeter Zijlstra {
426391e43daSPeter Zijlstra 	return !list_empty(&rt_se->run_list);
427391e43daSPeter Zijlstra }
428391e43daSPeter Zijlstra 
429391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
430391e43daSPeter Zijlstra 
431391e43daSPeter Zijlstra static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
432391e43daSPeter Zijlstra {
433391e43daSPeter Zijlstra 	if (!rt_rq->tg)
434391e43daSPeter Zijlstra 		return RUNTIME_INF;
435391e43daSPeter Zijlstra 
436391e43daSPeter Zijlstra 	return rt_rq->rt_runtime;
437391e43daSPeter Zijlstra }
438391e43daSPeter Zijlstra 
439391e43daSPeter Zijlstra static inline u64 sched_rt_period(struct rt_rq *rt_rq)
440391e43daSPeter Zijlstra {
441391e43daSPeter Zijlstra 	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
442391e43daSPeter Zijlstra }
443391e43daSPeter Zijlstra 
444391e43daSPeter Zijlstra typedef struct task_group *rt_rq_iter_t;
445391e43daSPeter Zijlstra 
446391e43daSPeter Zijlstra static inline struct task_group *next_task_group(struct task_group *tg)
447391e43daSPeter Zijlstra {
448391e43daSPeter Zijlstra 	do {
449391e43daSPeter Zijlstra 		tg = list_entry_rcu(tg->list.next,
450391e43daSPeter Zijlstra 			typeof(struct task_group), list);
451391e43daSPeter Zijlstra 	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
452391e43daSPeter Zijlstra 
453391e43daSPeter Zijlstra 	if (&tg->list == &task_groups)
454391e43daSPeter Zijlstra 		tg = NULL;
455391e43daSPeter Zijlstra 
456391e43daSPeter Zijlstra 	return tg;
457391e43daSPeter Zijlstra }
458391e43daSPeter Zijlstra 
459391e43daSPeter Zijlstra #define for_each_rt_rq(rt_rq, iter, rq)					\
460391e43daSPeter Zijlstra 	for (iter = container_of(&task_groups, typeof(*iter), list);	\
461391e43daSPeter Zijlstra 		(iter = next_task_group(iter)) &&			\
462391e43daSPeter Zijlstra 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
463391e43daSPeter Zijlstra 
464391e43daSPeter Zijlstra #define for_each_sched_rt_entity(rt_se) \
465391e43daSPeter Zijlstra 	for (; rt_se; rt_se = rt_se->parent)
466391e43daSPeter Zijlstra 
467391e43daSPeter Zijlstra static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
468391e43daSPeter Zijlstra {
469391e43daSPeter Zijlstra 	return rt_se->my_q;
470391e43daSPeter Zijlstra }
471391e43daSPeter Zijlstra 
472391e43daSPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
473391e43daSPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
474391e43daSPeter Zijlstra 
475391e43daSPeter Zijlstra static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
476391e43daSPeter Zijlstra {
477391e43daSPeter Zijlstra 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
4788875125eSKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
479391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
480391e43daSPeter Zijlstra 
4818875125eSKirill Tkhai 	int cpu = cpu_of(rq);
482391e43daSPeter Zijlstra 
483391e43daSPeter Zijlstra 	rt_se = rt_rq->tg->rt_se[cpu];
484391e43daSPeter Zijlstra 
485391e43daSPeter Zijlstra 	if (rt_rq->rt_nr_running) {
486f4ebcbc0SKirill Tkhai 		if (!rt_se)
487f4ebcbc0SKirill Tkhai 			enqueue_top_rt_rq(rt_rq);
488f4ebcbc0SKirill Tkhai 		else if (!on_rt_rq(rt_se))
489391e43daSPeter Zijlstra 			enqueue_rt_entity(rt_se, false);
490f4ebcbc0SKirill Tkhai 
491391e43daSPeter Zijlstra 		if (rt_rq->highest_prio.curr < curr->prio)
4928875125eSKirill Tkhai 			resched_curr(rq);
493391e43daSPeter Zijlstra 	}
494391e43daSPeter Zijlstra }
495391e43daSPeter Zijlstra 
496391e43daSPeter Zijlstra static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
497391e43daSPeter Zijlstra {
498391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
499391e43daSPeter Zijlstra 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
500391e43daSPeter Zijlstra 
501391e43daSPeter Zijlstra 	rt_se = rt_rq->tg->rt_se[cpu];
502391e43daSPeter Zijlstra 
503f4ebcbc0SKirill Tkhai 	if (!rt_se)
504f4ebcbc0SKirill Tkhai 		dequeue_top_rt_rq(rt_rq);
505f4ebcbc0SKirill Tkhai 	else if (on_rt_rq(rt_se))
506391e43daSPeter Zijlstra 		dequeue_rt_entity(rt_se);
507391e43daSPeter Zijlstra }
508391e43daSPeter Zijlstra 
50946383648SKirill Tkhai static inline int rt_rq_throttled(struct rt_rq *rt_rq)
51046383648SKirill Tkhai {
51146383648SKirill Tkhai 	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
51246383648SKirill Tkhai }
51346383648SKirill Tkhai 
514391e43daSPeter Zijlstra static int rt_se_boosted(struct sched_rt_entity *rt_se)
515391e43daSPeter Zijlstra {
516391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
517391e43daSPeter Zijlstra 	struct task_struct *p;
518391e43daSPeter Zijlstra 
519391e43daSPeter Zijlstra 	if (rt_rq)
520391e43daSPeter Zijlstra 		return !!rt_rq->rt_nr_boosted;
521391e43daSPeter Zijlstra 
522391e43daSPeter Zijlstra 	p = rt_task_of(rt_se);
523391e43daSPeter Zijlstra 	return p->prio != p->normal_prio;
524391e43daSPeter Zijlstra }
525391e43daSPeter Zijlstra 
526391e43daSPeter Zijlstra #ifdef CONFIG_SMP
527391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void)
528391e43daSPeter Zijlstra {
529424c93feSNathan Zimmer 	return this_rq()->rd->span;
530391e43daSPeter Zijlstra }
531391e43daSPeter Zijlstra #else
532391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void)
533391e43daSPeter Zijlstra {
534391e43daSPeter Zijlstra 	return cpu_online_mask;
535391e43daSPeter Zijlstra }
536391e43daSPeter Zijlstra #endif
537391e43daSPeter Zijlstra 
538391e43daSPeter Zijlstra static inline
539391e43daSPeter Zijlstra struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
540391e43daSPeter Zijlstra {
541391e43daSPeter Zijlstra 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
542391e43daSPeter Zijlstra }
543391e43daSPeter Zijlstra 
544391e43daSPeter Zijlstra static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
545391e43daSPeter Zijlstra {
546391e43daSPeter Zijlstra 	return &rt_rq->tg->rt_bandwidth;
547391e43daSPeter Zijlstra }
548391e43daSPeter Zijlstra 
549391e43daSPeter Zijlstra #else /* !CONFIG_RT_GROUP_SCHED */
550391e43daSPeter Zijlstra 
551391e43daSPeter Zijlstra static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
552391e43daSPeter Zijlstra {
553391e43daSPeter Zijlstra 	return rt_rq->rt_runtime;
554391e43daSPeter Zijlstra }
555391e43daSPeter Zijlstra 
556391e43daSPeter Zijlstra static inline u64 sched_rt_period(struct rt_rq *rt_rq)
557391e43daSPeter Zijlstra {
558391e43daSPeter Zijlstra 	return ktime_to_ns(def_rt_bandwidth.rt_period);
559391e43daSPeter Zijlstra }
560391e43daSPeter Zijlstra 
561391e43daSPeter Zijlstra typedef struct rt_rq *rt_rq_iter_t;
562391e43daSPeter Zijlstra 
563391e43daSPeter Zijlstra #define for_each_rt_rq(rt_rq, iter, rq) \
564391e43daSPeter Zijlstra 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
565391e43daSPeter Zijlstra 
566391e43daSPeter Zijlstra #define for_each_sched_rt_entity(rt_se) \
567391e43daSPeter Zijlstra 	for (; rt_se; rt_se = NULL)
568391e43daSPeter Zijlstra 
569391e43daSPeter Zijlstra static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
570391e43daSPeter Zijlstra {
571391e43daSPeter Zijlstra 	return NULL;
572391e43daSPeter Zijlstra }
573391e43daSPeter Zijlstra 
574391e43daSPeter Zijlstra static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
575391e43daSPeter Zijlstra {
576f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
577f4ebcbc0SKirill Tkhai 
578f4ebcbc0SKirill Tkhai 	if (!rt_rq->rt_nr_running)
579f4ebcbc0SKirill Tkhai 		return;
580f4ebcbc0SKirill Tkhai 
581f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(rt_rq);
5828875125eSKirill Tkhai 	resched_curr(rq);
583391e43daSPeter Zijlstra }
584391e43daSPeter Zijlstra 
585391e43daSPeter Zijlstra static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
586391e43daSPeter Zijlstra {
587f4ebcbc0SKirill Tkhai 	dequeue_top_rt_rq(rt_rq);
588391e43daSPeter Zijlstra }
589391e43daSPeter Zijlstra 
59046383648SKirill Tkhai static inline int rt_rq_throttled(struct rt_rq *rt_rq)
59146383648SKirill Tkhai {
59246383648SKirill Tkhai 	return rt_rq->rt_throttled;
59346383648SKirill Tkhai }
59446383648SKirill Tkhai 
595391e43daSPeter Zijlstra static inline const struct cpumask *sched_rt_period_mask(void)
596391e43daSPeter Zijlstra {
597391e43daSPeter Zijlstra 	return cpu_online_mask;
598391e43daSPeter Zijlstra }
599391e43daSPeter Zijlstra 
600391e43daSPeter Zijlstra static inline
601391e43daSPeter Zijlstra struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
602391e43daSPeter Zijlstra {
603391e43daSPeter Zijlstra 	return &cpu_rq(cpu)->rt;
604391e43daSPeter Zijlstra }
605391e43daSPeter Zijlstra 
606391e43daSPeter Zijlstra static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
607391e43daSPeter Zijlstra {
608391e43daSPeter Zijlstra 	return &def_rt_bandwidth;
609391e43daSPeter Zijlstra }
610391e43daSPeter Zijlstra 
611391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */
612391e43daSPeter Zijlstra 
613faa59937SJuri Lelli bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
614faa59937SJuri Lelli {
615faa59937SJuri Lelli 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
616faa59937SJuri Lelli 
617faa59937SJuri Lelli 	return (hrtimer_active(&rt_b->rt_period_timer) ||
618faa59937SJuri Lelli 		rt_rq->rt_time < rt_b->rt_runtime);
619faa59937SJuri Lelli }
620faa59937SJuri Lelli 
621391e43daSPeter Zijlstra #ifdef CONFIG_SMP
622391e43daSPeter Zijlstra /*
623391e43daSPeter Zijlstra  * We ran out of runtime, see if we can borrow some from our neighbours.
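 *
 * Purely as an illustration: on a root domain spanning 4 CPUs, a
 * neighbour whose remaining budget exceeds its usage by 60ms this period
 * gives up 60/4 = 15ms of it, and we stop borrowing as soon as our own
 * rt_runtime reaches a full rt_period.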
624391e43daSPeter Zijlstra  */
625391e43daSPeter Zijlstra static int do_balance_runtime(struct rt_rq *rt_rq)
626391e43daSPeter Zijlstra {
627391e43daSPeter Zijlstra 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
628aa7f6730SShawn Bohrer 	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
629391e43daSPeter Zijlstra 	int i, weight, more = 0;
630391e43daSPeter Zijlstra 	u64 rt_period;
631391e43daSPeter Zijlstra 
632391e43daSPeter Zijlstra 	weight = cpumask_weight(rd->span);
633391e43daSPeter Zijlstra 
634391e43daSPeter Zijlstra 	raw_spin_lock(&rt_b->rt_runtime_lock);
635391e43daSPeter Zijlstra 	rt_period = ktime_to_ns(rt_b->rt_period);
636391e43daSPeter Zijlstra 	for_each_cpu(i, rd->span) {
637391e43daSPeter Zijlstra 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
638391e43daSPeter Zijlstra 		s64 diff;
639391e43daSPeter Zijlstra 
640391e43daSPeter Zijlstra 		if (iter == rt_rq)
641391e43daSPeter Zijlstra 			continue;
642391e43daSPeter Zijlstra 
643391e43daSPeter Zijlstra 		raw_spin_lock(&iter->rt_runtime_lock);
644391e43daSPeter Zijlstra 		/*
645391e43daSPeter Zijlstra 		 * Either all rqs have inf runtime and there's nothing to steal
646391e43daSPeter Zijlstra 		 * or __disable_runtime() below sets a specific rq to inf to
647391e43daSPeter Zijlstra 		 * indicate it has been disabled and disallow stealing.
648391e43daSPeter Zijlstra 		 */
649391e43daSPeter Zijlstra 		if (iter->rt_runtime == RUNTIME_INF)
650391e43daSPeter Zijlstra 			goto next;
651391e43daSPeter Zijlstra 
652391e43daSPeter Zijlstra 		/*
653391e43daSPeter Zijlstra 		 * From runqueues with spare time, take 1/n part of their
654391e43daSPeter Zijlstra 		 * spare time, but no more than our period.
655391e43daSPeter Zijlstra 		 */
656391e43daSPeter Zijlstra 		diff = iter->rt_runtime - iter->rt_time;
657391e43daSPeter Zijlstra 		if (diff > 0) {
658391e43daSPeter Zijlstra 			diff = div_u64((u64)diff, weight);
659391e43daSPeter Zijlstra 			if (rt_rq->rt_runtime + diff > rt_period)
660391e43daSPeter Zijlstra 				diff = rt_period - rt_rq->rt_runtime;
661391e43daSPeter Zijlstra 			iter->rt_runtime -= diff;
662391e43daSPeter Zijlstra 			rt_rq->rt_runtime += diff;
663391e43daSPeter Zijlstra 			more = 1;
664391e43daSPeter Zijlstra 			if (rt_rq->rt_runtime == rt_period) {
665391e43daSPeter Zijlstra 				raw_spin_unlock(&iter->rt_runtime_lock);
666391e43daSPeter Zijlstra 				break;
667391e43daSPeter Zijlstra 			}
668391e43daSPeter Zijlstra 		}
669391e43daSPeter Zijlstra next:
670391e43daSPeter Zijlstra 		raw_spin_unlock(&iter->rt_runtime_lock);
671391e43daSPeter Zijlstra 	}
672391e43daSPeter Zijlstra 	raw_spin_unlock(&rt_b->rt_runtime_lock);
673391e43daSPeter Zijlstra 
674391e43daSPeter Zijlstra 	return more;
675391e43daSPeter Zijlstra }
676391e43daSPeter Zijlstra 
677391e43daSPeter Zijlstra /*
678391e43daSPeter Zijlstra  * Ensure this RQ takes back all the runtime it lent to its neighbours.
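 * In practice this runs when the runqueue goes offline (rq_offline_rt()),
 * with __enable_runtime() below restoring the per-rq budgets when it
 * comes back online.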
679391e43daSPeter Zijlstra  */
680391e43daSPeter Zijlstra static void __disable_runtime(struct rq *rq)
681391e43daSPeter Zijlstra {
682391e43daSPeter Zijlstra 	struct root_domain *rd = rq->rd;
683391e43daSPeter Zijlstra 	rt_rq_iter_t iter;
684391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
685391e43daSPeter Zijlstra 
686391e43daSPeter Zijlstra 	if (unlikely(!scheduler_running))
687391e43daSPeter Zijlstra 		return;
688391e43daSPeter Zijlstra 
689391e43daSPeter Zijlstra 	for_each_rt_rq(rt_rq, iter, rq) {
690391e43daSPeter Zijlstra 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
691391e43daSPeter Zijlstra 		s64 want;
692391e43daSPeter Zijlstra 		int i;
693391e43daSPeter Zijlstra 
694391e43daSPeter Zijlstra 		raw_spin_lock(&rt_b->rt_runtime_lock);
695391e43daSPeter Zijlstra 		raw_spin_lock(&rt_rq->rt_runtime_lock);
696391e43daSPeter Zijlstra 		/*
697391e43daSPeter Zijlstra 		 * Either we're all inf and nobody needs to borrow, or we're
698391e43daSPeter Zijlstra 		 * already disabled and thus have nothing to do, or we have
699391e43daSPeter Zijlstra 		 * exactly the right amount of runtime to take out.
700391e43daSPeter Zijlstra 		 */
701391e43daSPeter Zijlstra 		if (rt_rq->rt_runtime == RUNTIME_INF ||
702391e43daSPeter Zijlstra 				rt_rq->rt_runtime == rt_b->rt_runtime)
703391e43daSPeter Zijlstra 			goto balanced;
704391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
705391e43daSPeter Zijlstra 
706391e43daSPeter Zijlstra 		/*
707391e43daSPeter Zijlstra 		 * Calculate the difference between what we started out with
708391e43daSPeter Zijlstra 		 * and what we currently have; that's the amount of runtime
709391e43daSPeter Zijlstra 		 * we lent and now have to reclaim.
710391e43daSPeter Zijlstra 		 */
711391e43daSPeter Zijlstra 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
712391e43daSPeter Zijlstra 
713391e43daSPeter Zijlstra 		/*
714391e43daSPeter Zijlstra 		 * Greedy reclaim, take back as much as we can.
715391e43daSPeter Zijlstra 		 */
716391e43daSPeter Zijlstra 		for_each_cpu(i, rd->span) {
717391e43daSPeter Zijlstra 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
718391e43daSPeter Zijlstra 			s64 diff;
719391e43daSPeter Zijlstra 
720391e43daSPeter Zijlstra 			/*
721391e43daSPeter Zijlstra 			 * Can't reclaim from ourselves or disabled runqueues.
722391e43daSPeter Zijlstra 			 */
723391e43daSPeter Zijlstra 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
724391e43daSPeter Zijlstra 				continue;
725391e43daSPeter Zijlstra 
726391e43daSPeter Zijlstra 			raw_spin_lock(&iter->rt_runtime_lock);
727391e43daSPeter Zijlstra 			if (want > 0) {
728391e43daSPeter Zijlstra 				diff = min_t(s64, iter->rt_runtime, want);
729391e43daSPeter Zijlstra 				iter->rt_runtime -= diff;
730391e43daSPeter Zijlstra 				want -= diff;
731391e43daSPeter Zijlstra 			} else {
732391e43daSPeter Zijlstra 				iter->rt_runtime -= want;
733391e43daSPeter Zijlstra 				want -= want;
734391e43daSPeter Zijlstra 			}
735391e43daSPeter Zijlstra 			raw_spin_unlock(&iter->rt_runtime_lock);
736391e43daSPeter Zijlstra 
737391e43daSPeter Zijlstra 			if (!want)
738391e43daSPeter Zijlstra 				break;
739391e43daSPeter Zijlstra 		}
740391e43daSPeter Zijlstra 
741391e43daSPeter Zijlstra 		raw_spin_lock(&rt_rq->rt_runtime_lock);
742391e43daSPeter Zijlstra 		/*
743391e43daSPeter Zijlstra 		 * We cannot be left wanting - that would mean some runtime
744391e43daSPeter Zijlstra 		 * leaked out of the system.
745391e43daSPeter Zijlstra 		 */
746391e43daSPeter Zijlstra 		BUG_ON(want);
747391e43daSPeter Zijlstra balanced:
748391e43daSPeter Zijlstra 		/*
749391e43daSPeter Zijlstra 		 * Disable all the borrow logic by pretending we have inf
750391e43daSPeter Zijlstra 		 * runtime - in which case borrowing doesn't make sense.
751391e43daSPeter Zijlstra 		 */
752391e43daSPeter Zijlstra 		rt_rq->rt_runtime = RUNTIME_INF;
753a4c96ae3SPeter Boonstoppel 		rt_rq->rt_throttled = 0;
754391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
755391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_b->rt_runtime_lock);
75699b62567SKirill Tkhai 
75799b62567SKirill Tkhai 		/* Make rt_rq available for pick_next_task() */
75899b62567SKirill Tkhai 		sched_rt_rq_enqueue(rt_rq);
759391e43daSPeter Zijlstra 	}
760391e43daSPeter Zijlstra }
761391e43daSPeter Zijlstra 
762391e43daSPeter Zijlstra static void __enable_runtime(struct rq *rq)
763391e43daSPeter Zijlstra {
764391e43daSPeter Zijlstra 	rt_rq_iter_t iter;
765391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
766391e43daSPeter Zijlstra 
767391e43daSPeter Zijlstra 	if (unlikely(!scheduler_running))
768391e43daSPeter Zijlstra 		return;
769391e43daSPeter Zijlstra 
770391e43daSPeter Zijlstra 	/*
771391e43daSPeter Zijlstra 	 * Reset each runqueue's bandwidth settings
772391e43daSPeter Zijlstra 	 */
773391e43daSPeter Zijlstra 	for_each_rt_rq(rt_rq, iter, rq) {
774391e43daSPeter Zijlstra 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
775391e43daSPeter Zijlstra 
776391e43daSPeter Zijlstra 		raw_spin_lock(&rt_b->rt_runtime_lock);
777391e43daSPeter Zijlstra 		raw_spin_lock(&rt_rq->rt_runtime_lock);
778391e43daSPeter Zijlstra 		rt_rq->rt_runtime = rt_b->rt_runtime;
779391e43daSPeter Zijlstra 		rt_rq->rt_time = 0;
780391e43daSPeter Zijlstra 		rt_rq->rt_throttled = 0;
781391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
782391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_b->rt_runtime_lock);
783391e43daSPeter Zijlstra 	}
784391e43daSPeter Zijlstra }
785391e43daSPeter Zijlstra 
786391e43daSPeter Zijlstra static int balance_runtime(struct rt_rq *rt_rq)
787391e43daSPeter Zijlstra {
788391e43daSPeter Zijlstra 	int more = 0;
789391e43daSPeter Zijlstra 
790391e43daSPeter Zijlstra 	if (!sched_feat(RT_RUNTIME_SHARE))
791391e43daSPeter Zijlstra 		return more;
792391e43daSPeter Zijlstra 
793391e43daSPeter Zijlstra 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
794391e43daSPeter Zijlstra 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
795391e43daSPeter Zijlstra 		more = do_balance_runtime(rt_rq);
796391e43daSPeter Zijlstra 		raw_spin_lock(&rt_rq->rt_runtime_lock);
797391e43daSPeter Zijlstra 	}
798391e43daSPeter Zijlstra 
799391e43daSPeter Zijlstra 	return more;
800391e43daSPeter Zijlstra }
801391e43daSPeter Zijlstra #else /* !CONFIG_SMP */
802391e43daSPeter Zijlstra static inline int balance_runtime(struct rt_rq *rt_rq)
803391e43daSPeter Zijlstra {
804391e43daSPeter Zijlstra 	return 0;
805391e43daSPeter Zijlstra }
806391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
807391e43daSPeter Zijlstra 
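/*
 * Per-period replenishment: for every rt_rq covered by this rt_bandwidth,
 * pay back up to 'overrun' periods worth of runtime from rt_time, lift the
 * throttle once rt_time drops below the budget again and re-enqueue the
 * rt_rq.  Returns 1 ("idle") when there is nothing left to do, which lets
 * the caller stop the period timer.
 */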
808391e43daSPeter Zijlstra static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
809391e43daSPeter Zijlstra {
81042c62a58SPeter Zijlstra 	int i, idle = 1, throttled = 0;
811391e43daSPeter Zijlstra 	const struct cpumask *span;
812391e43daSPeter Zijlstra 
813391e43daSPeter Zijlstra 	span = sched_rt_period_mask();
814e221d028SMike Galbraith #ifdef CONFIG_RT_GROUP_SCHED
815e221d028SMike Galbraith 	/*
816e221d028SMike Galbraith 	 * FIXME: isolated CPUs should really leave the root task group,
817e221d028SMike Galbraith 	 * whether they are isolcpus or were isolated via cpusets, lest
818e221d028SMike Galbraith 	 * the timer run on a CPU which does not service all runqueues,
819e221d028SMike Galbraith 	 * potentially leaving other CPUs indefinitely throttled.  If
820e221d028SMike Galbraith 	 * isolation is really required, the user will turn the throttle
821e221d028SMike Galbraith 	 * off to kill the perturbations it causes anyway.  Meanwhile,
822e221d028SMike Galbraith 	 * this maintains functionality for boot and/or troubleshooting.
823e221d028SMike Galbraith 	 */
824e221d028SMike Galbraith 	if (rt_b == &root_task_group.rt_bandwidth)
825e221d028SMike Galbraith 		span = cpu_online_mask;
826e221d028SMike Galbraith #endif
827391e43daSPeter Zijlstra 	for_each_cpu(i, span) {
828391e43daSPeter Zijlstra 		int enqueue = 0;
829391e43daSPeter Zijlstra 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
830391e43daSPeter Zijlstra 		struct rq *rq = rq_of_rt_rq(rt_rq);
831391e43daSPeter Zijlstra 
832391e43daSPeter Zijlstra 		raw_spin_lock(&rq->lock);
833391e43daSPeter Zijlstra 		if (rt_rq->rt_time) {
834391e43daSPeter Zijlstra 			u64 runtime;
835391e43daSPeter Zijlstra 
836391e43daSPeter Zijlstra 			raw_spin_lock(&rt_rq->rt_runtime_lock);
837391e43daSPeter Zijlstra 			if (rt_rq->rt_throttled)
838391e43daSPeter Zijlstra 				balance_runtime(rt_rq);
839391e43daSPeter Zijlstra 			runtime = rt_rq->rt_runtime;
840391e43daSPeter Zijlstra 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
841391e43daSPeter Zijlstra 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
842391e43daSPeter Zijlstra 				rt_rq->rt_throttled = 0;
843391e43daSPeter Zijlstra 				enqueue = 1;
844391e43daSPeter Zijlstra 
845391e43daSPeter Zijlstra 				/*
8469edfbfedSPeter Zijlstra 				 * When we're idle and a woken (rt) task is
8479edfbfedSPeter Zijlstra 				 * throttled, check_preempt_curr() will set
8489edfbfedSPeter Zijlstra 				 * skip_update and the time between the wakeup
8499edfbfedSPeter Zijlstra 				 * and this unthrottle will get accounted as
8509edfbfedSPeter Zijlstra 				 * 'runtime'.
851391e43daSPeter Zijlstra 				 */
852391e43daSPeter Zijlstra 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
8539edfbfedSPeter Zijlstra 					rq_clock_skip_update(rq, false);
854391e43daSPeter Zijlstra 			}
855391e43daSPeter Zijlstra 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
856391e43daSPeter Zijlstra 				idle = 0;
857391e43daSPeter Zijlstra 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
858391e43daSPeter Zijlstra 		} else if (rt_rq->rt_nr_running) {
859391e43daSPeter Zijlstra 			idle = 0;
860391e43daSPeter Zijlstra 			if (!rt_rq_throttled(rt_rq))
861391e43daSPeter Zijlstra 				enqueue = 1;
862391e43daSPeter Zijlstra 		}
86342c62a58SPeter Zijlstra 		if (rt_rq->rt_throttled)
86442c62a58SPeter Zijlstra 			throttled = 1;
865391e43daSPeter Zijlstra 
866391e43daSPeter Zijlstra 		if (enqueue)
867391e43daSPeter Zijlstra 			sched_rt_rq_enqueue(rt_rq);
868391e43daSPeter Zijlstra 		raw_spin_unlock(&rq->lock);
869391e43daSPeter Zijlstra 	}
870391e43daSPeter Zijlstra 
87142c62a58SPeter Zijlstra 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
87242c62a58SPeter Zijlstra 		return 1;
87342c62a58SPeter Zijlstra 
874391e43daSPeter Zijlstra 	return idle;
875391e43daSPeter Zijlstra }
876391e43daSPeter Zijlstra 
877391e43daSPeter Zijlstra static inline int rt_se_prio(struct sched_rt_entity *rt_se)
878391e43daSPeter Zijlstra {
879391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
880391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
881391e43daSPeter Zijlstra 
882391e43daSPeter Zijlstra 	if (rt_rq)
883391e43daSPeter Zijlstra 		return rt_rq->highest_prio.curr;
884391e43daSPeter Zijlstra #endif
885391e43daSPeter Zijlstra 
886391e43daSPeter Zijlstra 	return rt_task_of(rt_se)->prio;
887391e43daSPeter Zijlstra }
888391e43daSPeter Zijlstra 
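/*
 * Called from update_curr_rt() with rt_rq->rt_runtime_lock held, after
 * the elapsed time has been added to rt_time.  Returns 1 when this rt_rq
 * has run out of budget and ends up throttled and dequeued, in which case
 * the caller reschedules.
 */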
889391e43daSPeter Zijlstra static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
890391e43daSPeter Zijlstra {
891391e43daSPeter Zijlstra 	u64 runtime = sched_rt_runtime(rt_rq);
892391e43daSPeter Zijlstra 
893391e43daSPeter Zijlstra 	if (rt_rq->rt_throttled)
894391e43daSPeter Zijlstra 		return rt_rq_throttled(rt_rq);
895391e43daSPeter Zijlstra 
8965b680fd6SShan Hai 	if (runtime >= sched_rt_period(rt_rq))
897391e43daSPeter Zijlstra 		return 0;
898391e43daSPeter Zijlstra 
899391e43daSPeter Zijlstra 	balance_runtime(rt_rq);
900391e43daSPeter Zijlstra 	runtime = sched_rt_runtime(rt_rq);
901391e43daSPeter Zijlstra 	if (runtime == RUNTIME_INF)
902391e43daSPeter Zijlstra 		return 0;
903391e43daSPeter Zijlstra 
904391e43daSPeter Zijlstra 	if (rt_rq->rt_time > runtime) {
9057abc63b1SPeter Zijlstra 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
9067abc63b1SPeter Zijlstra 
9077abc63b1SPeter Zijlstra 		/*
9087abc63b1SPeter Zijlstra 		 * Don't actually throttle groups that have no runtime assigned
9097abc63b1SPeter Zijlstra 		 * but accrue some time due to boosting.
9107abc63b1SPeter Zijlstra 		 */
9117abc63b1SPeter Zijlstra 		if (likely(rt_b->rt_runtime)) {
912391e43daSPeter Zijlstra 			rt_rq->rt_throttled = 1;
913c224815dSJohn Stultz 			printk_deferred_once("sched: RT throttling activated\n");
9147abc63b1SPeter Zijlstra 		} else {
9157abc63b1SPeter Zijlstra 			/*
9167abc63b1SPeter Zijlstra 			 * In case we did anyway, make it go away;
9177abc63b1SPeter Zijlstra 			 * replenishment is a joke, since it will replenish us
9187abc63b1SPeter Zijlstra 			 * with exactly 0 ns.
9197abc63b1SPeter Zijlstra 			 */
9207abc63b1SPeter Zijlstra 			rt_rq->rt_time = 0;
9217abc63b1SPeter Zijlstra 		}
9227abc63b1SPeter Zijlstra 
923391e43daSPeter Zijlstra 		if (rt_rq_throttled(rt_rq)) {
924391e43daSPeter Zijlstra 			sched_rt_rq_dequeue(rt_rq);
925391e43daSPeter Zijlstra 			return 1;
926391e43daSPeter Zijlstra 		}
927391e43daSPeter Zijlstra 	}
928391e43daSPeter Zijlstra 
929391e43daSPeter Zijlstra 	return 0;
930391e43daSPeter Zijlstra }
931391e43daSPeter Zijlstra 
932391e43daSPeter Zijlstra /*
933391e43daSPeter Zijlstra  * Update the current task's runtime statistics. Skip current tasks that
934391e43daSPeter Zijlstra  * are not in our scheduling class.
935391e43daSPeter Zijlstra  */
936391e43daSPeter Zijlstra static void update_curr_rt(struct rq *rq)
937391e43daSPeter Zijlstra {
938391e43daSPeter Zijlstra 	struct task_struct *curr = rq->curr;
939391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &curr->rt;
940391e43daSPeter Zijlstra 	u64 delta_exec;
941391e43daSPeter Zijlstra 
942391e43daSPeter Zijlstra 	if (curr->sched_class != &rt_sched_class)
943391e43daSPeter Zijlstra 		return;
944391e43daSPeter Zijlstra 
94578becc27SFrederic Weisbecker 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
946fc79e240SKirill Tkhai 	if (unlikely((s64)delta_exec <= 0))
947fc79e240SKirill Tkhai 		return;
948391e43daSPeter Zijlstra 
94942c62a58SPeter Zijlstra 	schedstat_set(curr->se.statistics.exec_max,
95042c62a58SPeter Zijlstra 		      max(curr->se.statistics.exec_max, delta_exec));
951391e43daSPeter Zijlstra 
952391e43daSPeter Zijlstra 	curr->se.sum_exec_runtime += delta_exec;
953391e43daSPeter Zijlstra 	account_group_exec_runtime(curr, delta_exec);
954391e43daSPeter Zijlstra 
95578becc27SFrederic Weisbecker 	curr->se.exec_start = rq_clock_task(rq);
956391e43daSPeter Zijlstra 	cpuacct_charge(curr, delta_exec);
957391e43daSPeter Zijlstra 
958391e43daSPeter Zijlstra 	sched_rt_avg_update(rq, delta_exec);
959391e43daSPeter Zijlstra 
960391e43daSPeter Zijlstra 	if (!rt_bandwidth_enabled())
961391e43daSPeter Zijlstra 		return;
962391e43daSPeter Zijlstra 
963391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
9640b07939cSGiedrius Rekasius 		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
965391e43daSPeter Zijlstra 
966391e43daSPeter Zijlstra 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
967391e43daSPeter Zijlstra 			raw_spin_lock(&rt_rq->rt_runtime_lock);
968391e43daSPeter Zijlstra 			rt_rq->rt_time += delta_exec;
969391e43daSPeter Zijlstra 			if (sched_rt_runtime_exceeded(rt_rq))
9708875125eSKirill Tkhai 				resched_curr(rq);
971391e43daSPeter Zijlstra 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
972391e43daSPeter Zijlstra 		}
973391e43daSPeter Zijlstra 	}
974391e43daSPeter Zijlstra }
975391e43daSPeter Zijlstra 
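/*
 * rt_queued tracks whether the top-level rt_rq's tasks are currently
 * counted in rq->nr_running.  Dequeueing here (e.g. when the rt_rq gets
 * throttled) makes those tasks stop contributing to the rq's load;
 * enqueue_top_rt_rq() adds them back once runtime is available again.
 */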
976f4ebcbc0SKirill Tkhai static void
977f4ebcbc0SKirill Tkhai dequeue_top_rt_rq(struct rt_rq *rt_rq)
978f4ebcbc0SKirill Tkhai {
979f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
980f4ebcbc0SKirill Tkhai 
981f4ebcbc0SKirill Tkhai 	BUG_ON(&rq->rt != rt_rq);
982f4ebcbc0SKirill Tkhai 
983f4ebcbc0SKirill Tkhai 	if (!rt_rq->rt_queued)
984f4ebcbc0SKirill Tkhai 		return;
985f4ebcbc0SKirill Tkhai 
986f4ebcbc0SKirill Tkhai 	BUG_ON(!rq->nr_running);
987f4ebcbc0SKirill Tkhai 
98872465447SKirill Tkhai 	sub_nr_running(rq, rt_rq->rt_nr_running);
989f4ebcbc0SKirill Tkhai 	rt_rq->rt_queued = 0;
990f4ebcbc0SKirill Tkhai }
991f4ebcbc0SKirill Tkhai 
992f4ebcbc0SKirill Tkhai static void
993f4ebcbc0SKirill Tkhai enqueue_top_rt_rq(struct rt_rq *rt_rq)
994f4ebcbc0SKirill Tkhai {
995f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_rq(rt_rq);
996f4ebcbc0SKirill Tkhai 
997f4ebcbc0SKirill Tkhai 	BUG_ON(&rq->rt != rt_rq);
998f4ebcbc0SKirill Tkhai 
999f4ebcbc0SKirill Tkhai 	if (rt_rq->rt_queued)
1000f4ebcbc0SKirill Tkhai 		return;
1001f4ebcbc0SKirill Tkhai 	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1002f4ebcbc0SKirill Tkhai 		return;
1003f4ebcbc0SKirill Tkhai 
100472465447SKirill Tkhai 	add_nr_running(rq, rt_rq->rt_nr_running);
1005f4ebcbc0SKirill Tkhai 	rt_rq->rt_queued = 1;
1006f4ebcbc0SKirill Tkhai }
1007f4ebcbc0SKirill Tkhai 
1008391e43daSPeter Zijlstra #if defined CONFIG_SMP
1009391e43daSPeter Zijlstra 
1010391e43daSPeter Zijlstra static void
1011391e43daSPeter Zijlstra inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1012391e43daSPeter Zijlstra {
1013391e43daSPeter Zijlstra 	struct rq *rq = rq_of_rt_rq(rt_rq);
1014391e43daSPeter Zijlstra 
1015757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED
1016757dfcaaSKirill Tkhai 	/*
1017757dfcaaSKirill Tkhai 	 * Change rq's cpupri only if rt_rq is the top queue.
1018757dfcaaSKirill Tkhai 	 */
1019757dfcaaSKirill Tkhai 	if (&rq->rt != rt_rq)
1020757dfcaaSKirill Tkhai 		return;
1021757dfcaaSKirill Tkhai #endif
1022391e43daSPeter Zijlstra 	if (rq->online && prio < prev_prio)
1023391e43daSPeter Zijlstra 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1024391e43daSPeter Zijlstra }
1025391e43daSPeter Zijlstra 
1026391e43daSPeter Zijlstra static void
1027391e43daSPeter Zijlstra dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1028391e43daSPeter Zijlstra {
1029391e43daSPeter Zijlstra 	struct rq *rq = rq_of_rt_rq(rt_rq);
1030391e43daSPeter Zijlstra 
1031757dfcaaSKirill Tkhai #ifdef CONFIG_RT_GROUP_SCHED
1032757dfcaaSKirill Tkhai 	/*
1033757dfcaaSKirill Tkhai 	 * Change rq's cpupri only if rt_rq is the top queue.
1034757dfcaaSKirill Tkhai 	 */
1035757dfcaaSKirill Tkhai 	if (&rq->rt != rt_rq)
1036757dfcaaSKirill Tkhai 		return;
1037757dfcaaSKirill Tkhai #endif
1038391e43daSPeter Zijlstra 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1039391e43daSPeter Zijlstra 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1040391e43daSPeter Zijlstra }
1041391e43daSPeter Zijlstra 
1042391e43daSPeter Zijlstra #else /* CONFIG_SMP */
1043391e43daSPeter Zijlstra 
1044391e43daSPeter Zijlstra static inline
1045391e43daSPeter Zijlstra void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1046391e43daSPeter Zijlstra static inline
1047391e43daSPeter Zijlstra void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1048391e43daSPeter Zijlstra 
1049391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1050391e43daSPeter Zijlstra 
1051391e43daSPeter Zijlstra #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1052391e43daSPeter Zijlstra static void
1053391e43daSPeter Zijlstra inc_rt_prio(struct rt_rq *rt_rq, int prio)
1054391e43daSPeter Zijlstra {
1055391e43daSPeter Zijlstra 	int prev_prio = rt_rq->highest_prio.curr;
1056391e43daSPeter Zijlstra 
1057391e43daSPeter Zijlstra 	if (prio < prev_prio)
1058391e43daSPeter Zijlstra 		rt_rq->highest_prio.curr = prio;
1059391e43daSPeter Zijlstra 
1060391e43daSPeter Zijlstra 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1061391e43daSPeter Zijlstra }
1062391e43daSPeter Zijlstra 
1063391e43daSPeter Zijlstra static void
1064391e43daSPeter Zijlstra dec_rt_prio(struct rt_rq *rt_rq, int prio)
1065391e43daSPeter Zijlstra {
1066391e43daSPeter Zijlstra 	int prev_prio = rt_rq->highest_prio.curr;
1067391e43daSPeter Zijlstra 
1068391e43daSPeter Zijlstra 	if (rt_rq->rt_nr_running) {
1069391e43daSPeter Zijlstra 
1070391e43daSPeter Zijlstra 		WARN_ON(prio < prev_prio);
1071391e43daSPeter Zijlstra 
1072391e43daSPeter Zijlstra 		/*
1073391e43daSPeter Zijlstra 		 * This may have been our highest task, and therefore
1074391e43daSPeter Zijlstra 		 * we may have some recomputation to do
1075391e43daSPeter Zijlstra 		 */
1076391e43daSPeter Zijlstra 		if (prio == prev_prio) {
1077391e43daSPeter Zijlstra 			struct rt_prio_array *array = &rt_rq->active;
1078391e43daSPeter Zijlstra 
1079391e43daSPeter Zijlstra 			rt_rq->highest_prio.curr =
1080391e43daSPeter Zijlstra 				sched_find_first_bit(array->bitmap);
1081391e43daSPeter Zijlstra 		}
1082391e43daSPeter Zijlstra 
1083391e43daSPeter Zijlstra 	} else
1084391e43daSPeter Zijlstra 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1085391e43daSPeter Zijlstra 
1086391e43daSPeter Zijlstra 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1087391e43daSPeter Zijlstra }
1088391e43daSPeter Zijlstra 
1089391e43daSPeter Zijlstra #else
1090391e43daSPeter Zijlstra 
1091391e43daSPeter Zijlstra static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1092391e43daSPeter Zijlstra static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1093391e43daSPeter Zijlstra 
1094391e43daSPeter Zijlstra #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1095391e43daSPeter Zijlstra 
1096391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
1097391e43daSPeter Zijlstra 
1098391e43daSPeter Zijlstra static void
1099391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1100391e43daSPeter Zijlstra {
1101391e43daSPeter Zijlstra 	if (rt_se_boosted(rt_se))
1102391e43daSPeter Zijlstra 		rt_rq->rt_nr_boosted++;
1103391e43daSPeter Zijlstra 
1104391e43daSPeter Zijlstra 	if (rt_rq->tg)
1105391e43daSPeter Zijlstra 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1106391e43daSPeter Zijlstra }
1107391e43daSPeter Zijlstra 
1108391e43daSPeter Zijlstra static void
1109391e43daSPeter Zijlstra dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1110391e43daSPeter Zijlstra {
1111391e43daSPeter Zijlstra 	if (rt_se_boosted(rt_se))
1112391e43daSPeter Zijlstra 		rt_rq->rt_nr_boosted--;
1113391e43daSPeter Zijlstra 
1114391e43daSPeter Zijlstra 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1115391e43daSPeter Zijlstra }
1116391e43daSPeter Zijlstra 
1117391e43daSPeter Zijlstra #else /* CONFIG_RT_GROUP_SCHED */
1118391e43daSPeter Zijlstra 
1119391e43daSPeter Zijlstra static void
1120391e43daSPeter Zijlstra inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1121391e43daSPeter Zijlstra {
1122391e43daSPeter Zijlstra 	start_rt_bandwidth(&def_rt_bandwidth);
1123391e43daSPeter Zijlstra }
1124391e43daSPeter Zijlstra 
1125391e43daSPeter Zijlstra static inline
1126391e43daSPeter Zijlstra void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1127391e43daSPeter Zijlstra 
1128391e43daSPeter Zijlstra #endif /* CONFIG_RT_GROUP_SCHED */
1129391e43daSPeter Zijlstra 
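/*
 * A group entity stands for every task queued below it, so it contributes
 * its group's rt_nr_running; a plain task entity contributes 1.  This way
 * the top-level rt_nr_running reflects the number of runnable RT tasks on
 * the CPU.
 */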
1130391e43daSPeter Zijlstra static inline
113122abdef3SKirill Tkhai unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
113222abdef3SKirill Tkhai {
113322abdef3SKirill Tkhai 	struct rt_rq *group_rq = group_rt_rq(rt_se);
113422abdef3SKirill Tkhai 
113522abdef3SKirill Tkhai 	if (group_rq)
113622abdef3SKirill Tkhai 		return group_rq->rt_nr_running;
113722abdef3SKirill Tkhai 	else
113822abdef3SKirill Tkhai 		return 1;
113922abdef3SKirill Tkhai }
114022abdef3SKirill Tkhai 
114122abdef3SKirill Tkhai static inline
1142391e43daSPeter Zijlstra void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1143391e43daSPeter Zijlstra {
1144391e43daSPeter Zijlstra 	int prio = rt_se_prio(rt_se);
1145391e43daSPeter Zijlstra 
1146391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(prio));
114722abdef3SKirill Tkhai 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1148391e43daSPeter Zijlstra 
1149391e43daSPeter Zijlstra 	inc_rt_prio(rt_rq, prio);
1150391e43daSPeter Zijlstra 	inc_rt_migration(rt_se, rt_rq);
1151391e43daSPeter Zijlstra 	inc_rt_group(rt_se, rt_rq);
1152391e43daSPeter Zijlstra }
1153391e43daSPeter Zijlstra 
1154391e43daSPeter Zijlstra static inline
1155391e43daSPeter Zijlstra void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1156391e43daSPeter Zijlstra {
1157391e43daSPeter Zijlstra 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1158391e43daSPeter Zijlstra 	WARN_ON(!rt_rq->rt_nr_running);
115922abdef3SKirill Tkhai 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1160391e43daSPeter Zijlstra 
1161391e43daSPeter Zijlstra 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1162391e43daSPeter Zijlstra 	dec_rt_migration(rt_se, rt_rq);
1163391e43daSPeter Zijlstra 	dec_rt_group(rt_se, rt_rq);
1164391e43daSPeter Zijlstra }
1165391e43daSPeter Zijlstra 
1166391e43daSPeter Zijlstra static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1167391e43daSPeter Zijlstra {
1168391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1169391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1170391e43daSPeter Zijlstra 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1171391e43daSPeter Zijlstra 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1172391e43daSPeter Zijlstra 
1173391e43daSPeter Zijlstra 	/*
1174391e43daSPeter Zijlstra 	 * Don't enqueue the group if it's throttled, or when empty.
1175391e43daSPeter Zijlstra 	 * The latter is a consequence of the former when a child group
1176391e43daSPeter Zijlstra 	 * gets throttled and the current group doesn't have any other
1177391e43daSPeter Zijlstra 	 * active members.
1178391e43daSPeter Zijlstra 	 */
1179391e43daSPeter Zijlstra 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1180391e43daSPeter Zijlstra 		return;
1181391e43daSPeter Zijlstra 
1182391e43daSPeter Zijlstra 	if (head)
1183391e43daSPeter Zijlstra 		list_add(&rt_se->run_list, queue);
1184391e43daSPeter Zijlstra 	else
1185391e43daSPeter Zijlstra 		list_add_tail(&rt_se->run_list, queue);
1186391e43daSPeter Zijlstra 	__set_bit(rt_se_prio(rt_se), array->bitmap);
1187391e43daSPeter Zijlstra 
1188391e43daSPeter Zijlstra 	inc_rt_tasks(rt_se, rt_rq);
1189391e43daSPeter Zijlstra }
1190391e43daSPeter Zijlstra 
1191391e43daSPeter Zijlstra static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1192391e43daSPeter Zijlstra {
1193391e43daSPeter Zijlstra 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1194391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1195391e43daSPeter Zijlstra 
1196391e43daSPeter Zijlstra 	list_del_init(&rt_se->run_list);
1197391e43daSPeter Zijlstra 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1198391e43daSPeter Zijlstra 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1199391e43daSPeter Zijlstra 
1200391e43daSPeter Zijlstra 	dec_rt_tasks(rt_se, rt_rq);
1201391e43daSPeter Zijlstra }
1202391e43daSPeter Zijlstra 
1203391e43daSPeter Zijlstra /*
1204391e43daSPeter Zijlstra  * Because the prio of an upper entry depends on the lower
1205391e43daSPeter Zijlstra  * entries, we must remove entries top-down.
1206391e43daSPeter Zijlstra  */
1207391e43daSPeter Zijlstra static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1208391e43daSPeter Zijlstra {
1209391e43daSPeter Zijlstra 	struct sched_rt_entity *back = NULL;
1210391e43daSPeter Zijlstra 
1211391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1212391e43daSPeter Zijlstra 		rt_se->back = back;
1213391e43daSPeter Zijlstra 		back = rt_se;
1214391e43daSPeter Zijlstra 	}
1215391e43daSPeter Zijlstra 
1216f4ebcbc0SKirill Tkhai 	dequeue_top_rt_rq(rt_rq_of_se(back));
1217f4ebcbc0SKirill Tkhai 
1218391e43daSPeter Zijlstra 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1219391e43daSPeter Zijlstra 		if (on_rt_rq(rt_se))
1220391e43daSPeter Zijlstra 			__dequeue_rt_entity(rt_se);
1221391e43daSPeter Zijlstra 	}
1222391e43daSPeter Zijlstra }
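/*
 * Illustrative sketch, not part of the scheduler: the two loops above are a
 * reversal of the entity chain.  The first pass records a "back" pointer on
 * the way up, the second pass walks those pointers from the topmost group
 * entity down to the task and dequeues each level.  The names below
 * (struct entity_sketch, dequeue_stack_sketch) are made up for illustration.
 */
struct entity_sketch {
	struct entity_sketch	*parent;	/* group the entity lives in */
	struct entity_sketch	*back;		/* filled in by the first pass */
	int			queued;
};

static void dequeue_stack_sketch(struct entity_sketch *se)
{
	struct entity_sketch *back = NULL;

	/* Pass 1: walk bottom-up, remembering the entity below each one. */
	for (; se; se = se->parent) {
		se->back = back;
		back = se;
	}

	/* Pass 2: walk top-down and dequeue whatever is still queued. */
	for (se = back; se; se = se->back) {
		if (se->queued)
			se->queued = 0;	/* stands in for __dequeue_rt_entity() */
	}
}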
1223391e43daSPeter Zijlstra 
1224391e43daSPeter Zijlstra static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1225391e43daSPeter Zijlstra {
1226f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1227f4ebcbc0SKirill Tkhai 
1228391e43daSPeter Zijlstra 	dequeue_rt_stack(rt_se);
1229391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se)
1230391e43daSPeter Zijlstra 		__enqueue_rt_entity(rt_se, head);
1231f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1232391e43daSPeter Zijlstra }
1233391e43daSPeter Zijlstra 
1234391e43daSPeter Zijlstra static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1235391e43daSPeter Zijlstra {
1236f4ebcbc0SKirill Tkhai 	struct rq *rq = rq_of_rt_se(rt_se);
1237f4ebcbc0SKirill Tkhai 
1238391e43daSPeter Zijlstra 	dequeue_rt_stack(rt_se);
1239391e43daSPeter Zijlstra 
1240391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1241391e43daSPeter Zijlstra 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1242391e43daSPeter Zijlstra 
1243391e43daSPeter Zijlstra 		if (rt_rq && rt_rq->rt_nr_running)
1244391e43daSPeter Zijlstra 			__enqueue_rt_entity(rt_se, false);
1245391e43daSPeter Zijlstra 	}
1246f4ebcbc0SKirill Tkhai 	enqueue_top_rt_rq(&rq->rt);
1247391e43daSPeter Zijlstra }
1248391e43daSPeter Zijlstra 
1249391e43daSPeter Zijlstra /*
1250391e43daSPeter Zijlstra  * Adding/removing a task to/from a priority array:
1251391e43daSPeter Zijlstra  */
1252391e43daSPeter Zijlstra static void
1253391e43daSPeter Zijlstra enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1254391e43daSPeter Zijlstra {
1255391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1256391e43daSPeter Zijlstra 
1257391e43daSPeter Zijlstra 	if (flags & ENQUEUE_WAKEUP)
1258391e43daSPeter Zijlstra 		rt_se->timeout = 0;
1259391e43daSPeter Zijlstra 
1260391e43daSPeter Zijlstra 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1261391e43daSPeter Zijlstra 
126229baa747SPeter Zijlstra 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1263391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1264391e43daSPeter Zijlstra }
1265391e43daSPeter Zijlstra 
1266391e43daSPeter Zijlstra static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1267391e43daSPeter Zijlstra {
1268391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1269391e43daSPeter Zijlstra 
1270391e43daSPeter Zijlstra 	update_curr_rt(rq);
1271391e43daSPeter Zijlstra 	dequeue_rt_entity(rt_se);
1272391e43daSPeter Zijlstra 
1273391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
1274391e43daSPeter Zijlstra }
1275391e43daSPeter Zijlstra 
1276391e43daSPeter Zijlstra /*
1277391e43daSPeter Zijlstra  * Put task to the head or the end of the run list without the overhead of
1278391e43daSPeter Zijlstra  * dequeue followed by enqueue.
1279391e43daSPeter Zijlstra  */
1280391e43daSPeter Zijlstra static void
1281391e43daSPeter Zijlstra requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1282391e43daSPeter Zijlstra {
1283391e43daSPeter Zijlstra 	if (on_rt_rq(rt_se)) {
1284391e43daSPeter Zijlstra 		struct rt_prio_array *array = &rt_rq->active;
1285391e43daSPeter Zijlstra 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1286391e43daSPeter Zijlstra 
1287391e43daSPeter Zijlstra 		if (head)
1288391e43daSPeter Zijlstra 			list_move(&rt_se->run_list, queue);
1289391e43daSPeter Zijlstra 		else
1290391e43daSPeter Zijlstra 			list_move_tail(&rt_se->run_list, queue);
1291391e43daSPeter Zijlstra 	}
1292391e43daSPeter Zijlstra }
1293391e43daSPeter Zijlstra 
1294391e43daSPeter Zijlstra static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1295391e43daSPeter Zijlstra {
1296391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se = &p->rt;
1297391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
1298391e43daSPeter Zijlstra 
1299391e43daSPeter Zijlstra 	for_each_sched_rt_entity(rt_se) {
1300391e43daSPeter Zijlstra 		rt_rq = rt_rq_of_se(rt_se);
1301391e43daSPeter Zijlstra 		requeue_rt_entity(rt_rq, rt_se, head);
1302391e43daSPeter Zijlstra 	}
1303391e43daSPeter Zijlstra }
1304391e43daSPeter Zijlstra 
1305391e43daSPeter Zijlstra static void yield_task_rt(struct rq *rq)
1306391e43daSPeter Zijlstra {
1307391e43daSPeter Zijlstra 	requeue_task_rt(rq, rq->curr, 0);
1308391e43daSPeter Zijlstra }
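/*
 * Illustrative userspace example, not part of this file: for a SCHED_RR or
 * SCHED_FIFO thread, sched_yield() lands in yield_task_rt() above and moves
 * the caller to the tail of its own priority list, so an equal-priority task
 * gets to run.  The priority value 10 is an arbitrary assumption and error
 * handling is minimal.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_RR, &sp)) {
		perror("sched_setscheduler");	/* usually needs CAP_SYS_NICE */
		return 1;
	}

	/* Give up the CPU to any other runnable task of the same priority. */
	sched_yield();
	printf("running again\n");
	return 0;
}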
1309391e43daSPeter Zijlstra 
1310391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1311391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task);
1312391e43daSPeter Zijlstra 
1313391e43daSPeter Zijlstra static int
1314ac66f547SPeter Zijlstra select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1315391e43daSPeter Zijlstra {
1316391e43daSPeter Zijlstra 	struct task_struct *curr;
1317391e43daSPeter Zijlstra 	struct rq *rq;
1318391e43daSPeter Zijlstra 
1319391e43daSPeter Zijlstra 	/* For anything but wake ups, just return the task_cpu */
1320391e43daSPeter Zijlstra 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1321391e43daSPeter Zijlstra 		goto out;
1322391e43daSPeter Zijlstra 
1323391e43daSPeter Zijlstra 	rq = cpu_rq(cpu);
1324391e43daSPeter Zijlstra 
1325391e43daSPeter Zijlstra 	rcu_read_lock();
1326391e43daSPeter Zijlstra 	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1327391e43daSPeter Zijlstra 
1328391e43daSPeter Zijlstra 	/*
1329391e43daSPeter Zijlstra 	 * If the current task on @p's runqueue is an RT task, then
1330391e43daSPeter Zijlstra 	 * try to see if we can wake this RT task up on another
1331391e43daSPeter Zijlstra 	 * runqueue. Otherwise simply start this RT task
1332391e43daSPeter Zijlstra 	 * on its current runqueue.
1333391e43daSPeter Zijlstra 	 *
1334391e43daSPeter Zijlstra 	 * We want to avoid overloading runqueues. If the woken
1335391e43daSPeter Zijlstra 	 * task is a higher priority, then it will stay on this CPU
1336391e43daSPeter Zijlstra 	 * and the lower prio task should be moved to another CPU.
1337391e43daSPeter Zijlstra 	 * Even though this will probably make the lower prio task
1338391e43daSPeter Zijlstra 	 * lose its cache, we do not want to bounce a higher priority task
1339391e43daSPeter Zijlstra 	 * around just because it gave up its CPU, perhaps for a
1340391e43daSPeter Zijlstra 	 * lock?
1341391e43daSPeter Zijlstra 	 *
1342391e43daSPeter Zijlstra 	 * For equal prio tasks, we just let the scheduler sort it out.
1343391e43daSPeter Zijlstra 	 *
1344391e43daSPeter Zijlstra 	 * Otherwise, just let it ride on the affined RQ and the
1345391e43daSPeter Zijlstra 	 * post-schedule router will push the preempted task away
1346391e43daSPeter Zijlstra 	 *
1347391e43daSPeter Zijlstra 	 * This test is optimistic, if we get it wrong the load-balancer
1348391e43daSPeter Zijlstra 	 * will have to sort it out.
1349391e43daSPeter Zijlstra 	 */
1350391e43daSPeter Zijlstra 	if (curr && unlikely(rt_task(curr)) &&
135129baa747SPeter Zijlstra 	    (curr->nr_cpus_allowed < 2 ||
13526bfa687cSShawn Bohrer 	     curr->prio <= p->prio)) {
1353391e43daSPeter Zijlstra 		int target = find_lowest_rq(p);
1354391e43daSPeter Zijlstra 
135580e3d87bSTim Chen 		/*
135680e3d87bSTim Chen 		 * Don't bother moving it if the destination CPU is
135780e3d87bSTim Chen 		 * not running a lower priority task.
135880e3d87bSTim Chen 		 */
135980e3d87bSTim Chen 		if (target != -1 &&
136080e3d87bSTim Chen 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1361391e43daSPeter Zijlstra 			cpu = target;
1362391e43daSPeter Zijlstra 	}
1363391e43daSPeter Zijlstra 	rcu_read_unlock();
1364391e43daSPeter Zijlstra 
1365391e43daSPeter Zijlstra out:
1366391e43daSPeter Zijlstra 	return cpu;
1367391e43daSPeter Zijlstra }
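/*
 * Illustrative sketch, not part of the scheduler: the wakeup placement above,
 * reduced to plain integers.  "prio" follows the kernel convention that a
 * lower number means a higher priority.  cpu_prio[] stands in for the priority
 * of the task currently running on each CPU, curr_is_rt[] and
 * curr_single_cpu[] describe that task; all of these names are assumptions
 * made for the example.
 */
static int place_rt_wakeup_sketch(int prev_cpu, int p_prio, int lowest_cpu,
				  const int *cpu_prio, const int *curr_is_rt,
				  const int *curr_single_cpu)
{
	/* Leave the task on its previous CPU unless an RT task runs there. */
	if (!curr_is_rt[prev_cpu])
		return prev_cpu;

	/* Only bother if that task cannot move or outranks the waker ... */
	if (curr_single_cpu[prev_cpu] || cpu_prio[prev_cpu] <= p_prio) {
		/* ... and the destination runs strictly lower-priority work. */
		if (lowest_cpu >= 0 && p_prio < cpu_prio[lowest_cpu])
			return lowest_cpu;
	}

	return prev_cpu;
}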
1368391e43daSPeter Zijlstra 
1369391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1370391e43daSPeter Zijlstra {
1371308a623aSWanpeng Li 	/*
1372308a623aSWanpeng Li 	 * Current can't be migrated, useless to reschedule,
1373308a623aSWanpeng Li 	 * let's hope p can move out.
1374308a623aSWanpeng Li 	 */
1375308a623aSWanpeng Li 	if (rq->curr->nr_cpus_allowed == 1 ||
1376308a623aSWanpeng Li 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1377391e43daSPeter Zijlstra 		return;
1378391e43daSPeter Zijlstra 
1379308a623aSWanpeng Li 	/*
1380308a623aSWanpeng Li 	 * p is migratable, so let's not schedule it and
1381308a623aSWanpeng Li 	 * see if it is pushed or pulled somewhere else.
1382308a623aSWanpeng Li 	 */
138329baa747SPeter Zijlstra 	if (p->nr_cpus_allowed != 1
1384391e43daSPeter Zijlstra 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1385391e43daSPeter Zijlstra 		return;
1386391e43daSPeter Zijlstra 
1387391e43daSPeter Zijlstra 	/*
1388391e43daSPeter Zijlstra 	 * There appear to be other CPUs that can accept
1389391e43daSPeter Zijlstra 	 * current and none to run 'p', so let's reschedule
1390391e43daSPeter Zijlstra 	 * to try to push current away:
1391391e43daSPeter Zijlstra 	 */
1392391e43daSPeter Zijlstra 	requeue_task_rt(rq, p, 1);
13938875125eSKirill Tkhai 	resched_curr(rq);
1394391e43daSPeter Zijlstra }
1395391e43daSPeter Zijlstra 
1396391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
1397391e43daSPeter Zijlstra 
1398391e43daSPeter Zijlstra /*
1399391e43daSPeter Zijlstra  * Preempt the current task with a newly woken task if needed:
1400391e43daSPeter Zijlstra  */
1401391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1402391e43daSPeter Zijlstra {
1403391e43daSPeter Zijlstra 	if (p->prio < rq->curr->prio) {
14048875125eSKirill Tkhai 		resched_curr(rq);
1405391e43daSPeter Zijlstra 		return;
1406391e43daSPeter Zijlstra 	}
1407391e43daSPeter Zijlstra 
1408391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1409391e43daSPeter Zijlstra 	/*
1410391e43daSPeter Zijlstra 	 * If:
1411391e43daSPeter Zijlstra 	 *
1412391e43daSPeter Zijlstra 	 * - the newly woken task is of equal priority to the current task
1413391e43daSPeter Zijlstra 	 * - the newly woken task is non-migratable while current is migratable
1414391e43daSPeter Zijlstra 	 * - current will be preempted on the next reschedule
1415391e43daSPeter Zijlstra 	 *
1416391e43daSPeter Zijlstra 	 * we should check to see if current can readily move to a different
1417391e43daSPeter Zijlstra 	 * cpu.  If so, we will reschedule to allow the push logic to try
1418391e43daSPeter Zijlstra 	 * to move current somewhere else, making room for our non-migratable
1419391e43daSPeter Zijlstra 	 * task.
1420391e43daSPeter Zijlstra 	 */
1421391e43daSPeter Zijlstra 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1422391e43daSPeter Zijlstra 		check_preempt_equal_prio(rq, p);
1423391e43daSPeter Zijlstra #endif
1424391e43daSPeter Zijlstra }
1425391e43daSPeter Zijlstra 
1426391e43daSPeter Zijlstra static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1427391e43daSPeter Zijlstra 						   struct rt_rq *rt_rq)
1428391e43daSPeter Zijlstra {
1429391e43daSPeter Zijlstra 	struct rt_prio_array *array = &rt_rq->active;
1430391e43daSPeter Zijlstra 	struct sched_rt_entity *next = NULL;
1431391e43daSPeter Zijlstra 	struct list_head *queue;
1432391e43daSPeter Zijlstra 	int idx;
1433391e43daSPeter Zijlstra 
1434391e43daSPeter Zijlstra 	idx = sched_find_first_bit(array->bitmap);
1435391e43daSPeter Zijlstra 	BUG_ON(idx >= MAX_RT_PRIO);
1436391e43daSPeter Zijlstra 
1437391e43daSPeter Zijlstra 	queue = array->queue + idx;
1438391e43daSPeter Zijlstra 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1439391e43daSPeter Zijlstra 
1440391e43daSPeter Zijlstra 	return next;
1441391e43daSPeter Zijlstra }
1442391e43daSPeter Zijlstra 
1443391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq)
1444391e43daSPeter Zijlstra {
1445391e43daSPeter Zijlstra 	struct sched_rt_entity *rt_se;
1446391e43daSPeter Zijlstra 	struct task_struct *p;
1447606dba2eSPeter Zijlstra 	struct rt_rq *rt_rq  = &rq->rt;
1448391e43daSPeter Zijlstra 
1449391e43daSPeter Zijlstra 	do {
1450391e43daSPeter Zijlstra 		rt_se = pick_next_rt_entity(rq, rt_rq);
1451391e43daSPeter Zijlstra 		BUG_ON(!rt_se);
1452391e43daSPeter Zijlstra 		rt_rq = group_rt_rq(rt_se);
1453391e43daSPeter Zijlstra 	} while (rt_rq);
1454391e43daSPeter Zijlstra 
1455391e43daSPeter Zijlstra 	p = rt_task_of(rt_se);
145678becc27SFrederic Weisbecker 	p->se.exec_start = rq_clock_task(rq);
1457391e43daSPeter Zijlstra 
1458391e43daSPeter Zijlstra 	return p;
1459391e43daSPeter Zijlstra }
1460391e43daSPeter Zijlstra 
1461606dba2eSPeter Zijlstra static struct task_struct *
1462606dba2eSPeter Zijlstra pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1463391e43daSPeter Zijlstra {
1464606dba2eSPeter Zijlstra 	struct task_struct *p;
1465606dba2eSPeter Zijlstra 	struct rt_rq *rt_rq = &rq->rt;
1466606dba2eSPeter Zijlstra 
146737e117c0SPeter Zijlstra 	if (need_pull_rt_task(rq, prev)) {
146838033c37SPeter Zijlstra 		pull_rt_task(rq);
146937e117c0SPeter Zijlstra 		/*
147037e117c0SPeter Zijlstra 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
1471a1d9a323SKirill Tkhai 		 * means a dl or stop task can slip in, in which case we need
1472a1d9a323SKirill Tkhai 		 * to re-start task selection.
147337e117c0SPeter Zijlstra 		 */
1474da0c1e65SKirill Tkhai 		if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1475a1d9a323SKirill Tkhai 			     rq->dl.dl_nr_running))
147637e117c0SPeter Zijlstra 			return RETRY_TASK;
147737e117c0SPeter Zijlstra 	}
147838033c37SPeter Zijlstra 
1479734ff2a7SKirill Tkhai 	/*
1480734ff2a7SKirill Tkhai 	 * We may dequeue prev's rt_rq in put_prev_task().
1481734ff2a7SKirill Tkhai 	 * So, update the time before the rt_nr_running check.
1482734ff2a7SKirill Tkhai 	 */
1483734ff2a7SKirill Tkhai 	if (prev->sched_class == &rt_sched_class)
1484734ff2a7SKirill Tkhai 		update_curr_rt(rq);
1485734ff2a7SKirill Tkhai 
1486f4ebcbc0SKirill Tkhai 	if (!rt_rq->rt_queued)
1487606dba2eSPeter Zijlstra 		return NULL;
1488606dba2eSPeter Zijlstra 
14893f1d2a31SPeter Zijlstra 	put_prev_task(rq, prev);
1490606dba2eSPeter Zijlstra 
1491606dba2eSPeter Zijlstra 	p = _pick_next_task_rt(rq);
1492391e43daSPeter Zijlstra 
1493391e43daSPeter Zijlstra 	/* The running task is never eligible for pushing */
1494391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
1495391e43daSPeter Zijlstra 
1496dc877341SPeter Zijlstra 	set_post_schedule(rq);
1497391e43daSPeter Zijlstra 
1498391e43daSPeter Zijlstra 	return p;
1499391e43daSPeter Zijlstra }
1500391e43daSPeter Zijlstra 
1501391e43daSPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1502391e43daSPeter Zijlstra {
1503391e43daSPeter Zijlstra 	update_curr_rt(rq);
1504391e43daSPeter Zijlstra 
1505391e43daSPeter Zijlstra 	/*
1506391e43daSPeter Zijlstra 	 * The previous task needs to be made eligible for pushing
1507391e43daSPeter Zijlstra 	 * if it is still active
1508391e43daSPeter Zijlstra 	 */
150929baa747SPeter Zijlstra 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1510391e43daSPeter Zijlstra 		enqueue_pushable_task(rq, p);
1511391e43daSPeter Zijlstra }
1512391e43daSPeter Zijlstra 
1513391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1514391e43daSPeter Zijlstra 
1515391e43daSPeter Zijlstra /* Only try algorithms three times */
1516391e43daSPeter Zijlstra #define RT_MAX_TRIES 3
1517391e43daSPeter Zijlstra 
1518391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1519391e43daSPeter Zijlstra {
1520391e43daSPeter Zijlstra 	if (!task_running(rq, p) &&
152160334cafSKirill Tkhai 	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1522391e43daSPeter Zijlstra 		return 1;
1523391e43daSPeter Zijlstra 	return 0;
1524391e43daSPeter Zijlstra }
1525391e43daSPeter Zijlstra 
1526e23ee747SKirill Tkhai /*
1527e23ee747SKirill Tkhai  * Return the highest-priority pushable task on this rq that is suitable to
1528e23ee747SKirill Tkhai  * run on the given CPU, or NULL if there is none.
1529e23ee747SKirill Tkhai  */
1530e23ee747SKirill Tkhai static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1531391e43daSPeter Zijlstra {
1532e23ee747SKirill Tkhai 	struct plist_head *head = &rq->rt.pushable_tasks;
1533391e43daSPeter Zijlstra 	struct task_struct *p;
1534391e43daSPeter Zijlstra 
1535e23ee747SKirill Tkhai 	if (!has_pushable_tasks(rq))
1536e23ee747SKirill Tkhai 		return NULL;
1537391e43daSPeter Zijlstra 
1538e23ee747SKirill Tkhai 	plist_for_each_entry(p, head, pushable_tasks) {
1539e23ee747SKirill Tkhai 		if (pick_rt_task(rq, p, cpu))
1540e23ee747SKirill Tkhai 			return p;
1541391e43daSPeter Zijlstra 	}
1542391e43daSPeter Zijlstra 
1543e23ee747SKirill Tkhai 	return NULL;
1544391e43daSPeter Zijlstra }
1545391e43daSPeter Zijlstra 
1546391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1547391e43daSPeter Zijlstra 
1548391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task)
1549391e43daSPeter Zijlstra {
1550391e43daSPeter Zijlstra 	struct sched_domain *sd;
15514ba29684SChristoph Lameter 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1552391e43daSPeter Zijlstra 	int this_cpu = smp_processor_id();
1553391e43daSPeter Zijlstra 	int cpu      = task_cpu(task);
1554391e43daSPeter Zijlstra 
1555391e43daSPeter Zijlstra 	/* Make sure the mask is initialized first */
1556391e43daSPeter Zijlstra 	if (unlikely(!lowest_mask))
1557391e43daSPeter Zijlstra 		return -1;
1558391e43daSPeter Zijlstra 
155929baa747SPeter Zijlstra 	if (task->nr_cpus_allowed == 1)
1560391e43daSPeter Zijlstra 		return -1; /* No other targets possible */
1561391e43daSPeter Zijlstra 
1562391e43daSPeter Zijlstra 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1563391e43daSPeter Zijlstra 		return -1; /* No targets found */
1564391e43daSPeter Zijlstra 
1565391e43daSPeter Zijlstra 	/*
1566391e43daSPeter Zijlstra 	 * At this point we have built a mask of cpus representing the
1567391e43daSPeter Zijlstra 	 * lowest priority tasks in the system.  Now we want to elect
1568391e43daSPeter Zijlstra 	 * the best one based on our affinity and topology.
1569391e43daSPeter Zijlstra 	 *
1570391e43daSPeter Zijlstra 	 * We prioritize the last cpu that the task executed on since
1571391e43daSPeter Zijlstra 	 * it is most likely cache-hot in that location.
1572391e43daSPeter Zijlstra 	 */
1573391e43daSPeter Zijlstra 	if (cpumask_test_cpu(cpu, lowest_mask))
1574391e43daSPeter Zijlstra 		return cpu;
1575391e43daSPeter Zijlstra 
1576391e43daSPeter Zijlstra 	/*
1577391e43daSPeter Zijlstra 	 * Otherwise, we consult the sched_domains span maps to figure
1578391e43daSPeter Zijlstra 	 * out which cpu is logically closest to our hot cache data.
1579391e43daSPeter Zijlstra 	 */
1580391e43daSPeter Zijlstra 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1581391e43daSPeter Zijlstra 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1582391e43daSPeter Zijlstra 
1583391e43daSPeter Zijlstra 	rcu_read_lock();
1584391e43daSPeter Zijlstra 	for_each_domain(cpu, sd) {
1585391e43daSPeter Zijlstra 		if (sd->flags & SD_WAKE_AFFINE) {
1586391e43daSPeter Zijlstra 			int best_cpu;
1587391e43daSPeter Zijlstra 
1588391e43daSPeter Zijlstra 			/*
1589391e43daSPeter Zijlstra 			 * "this_cpu" is cheaper to preempt than a
1590391e43daSPeter Zijlstra 			 * remote processor.
1591391e43daSPeter Zijlstra 			 */
1592391e43daSPeter Zijlstra 			if (this_cpu != -1 &&
1593391e43daSPeter Zijlstra 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1594391e43daSPeter Zijlstra 				rcu_read_unlock();
1595391e43daSPeter Zijlstra 				return this_cpu;
1596391e43daSPeter Zijlstra 			}
1597391e43daSPeter Zijlstra 
1598391e43daSPeter Zijlstra 			best_cpu = cpumask_first_and(lowest_mask,
1599391e43daSPeter Zijlstra 						     sched_domain_span(sd));
1600391e43daSPeter Zijlstra 			if (best_cpu < nr_cpu_ids) {
1601391e43daSPeter Zijlstra 				rcu_read_unlock();
1602391e43daSPeter Zijlstra 				return best_cpu;
1603391e43daSPeter Zijlstra 			}
1604391e43daSPeter Zijlstra 		}
1605391e43daSPeter Zijlstra 	}
1606391e43daSPeter Zijlstra 	rcu_read_unlock();
1607391e43daSPeter Zijlstra 
1608391e43daSPeter Zijlstra 	/*
1609391e43daSPeter Zijlstra 	 * And finally, if there were no matches within the domains
1610391e43daSPeter Zijlstra 	 * just give the caller *something* to work with from the compatible
1611391e43daSPeter Zijlstra 	 * locations.
1612391e43daSPeter Zijlstra 	 */
1613391e43daSPeter Zijlstra 	if (this_cpu != -1)
1614391e43daSPeter Zijlstra 		return this_cpu;
1615391e43daSPeter Zijlstra 
1616391e43daSPeter Zijlstra 	cpu = cpumask_any(lowest_mask);
1617391e43daSPeter Zijlstra 	if (cpu < nr_cpu_ids)
1618391e43daSPeter Zijlstra 		return cpu;
1619391e43daSPeter Zijlstra 	return -1;
1620391e43daSPeter Zijlstra }
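/*
 * Illustrative sketch, not part of the scheduler: the preference order used
 * above, written against a plain array "in_lowest_mask[]" instead of a
 * cpumask, and with the sched-domain walk reduced to a single "prefer
 * this_cpu" step.  All names are assumptions made for the example.
 */
static int pick_lowest_cpu_sketch(const int *in_lowest_mask, int nr_cpus,
				  int task_cpu, int this_cpu)
{
	int cpu;

	/* 1) Stay where the task last ran: its cache is probably still warm. */
	if (in_lowest_mask[task_cpu])
		return task_cpu;

	/* 2) Preempting locally is cheaper than interrupting a remote CPU. */
	if (this_cpu >= 0 && in_lowest_mask[this_cpu])
		return this_cpu;

	/* 3) Otherwise take any CPU from the mask ... */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (in_lowest_mask[cpu])
			return cpu;
	}

	/* 4) ... or report failure, like find_lowest_rq() does. */
	return -1;
}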
1621391e43daSPeter Zijlstra 
1622391e43daSPeter Zijlstra /* Will lock the rq it finds */
1623391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1624391e43daSPeter Zijlstra {
1625391e43daSPeter Zijlstra 	struct rq *lowest_rq = NULL;
1626391e43daSPeter Zijlstra 	int tries;
1627391e43daSPeter Zijlstra 	int cpu;
1628391e43daSPeter Zijlstra 
1629391e43daSPeter Zijlstra 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1630391e43daSPeter Zijlstra 		cpu = find_lowest_rq(task);
1631391e43daSPeter Zijlstra 
1632391e43daSPeter Zijlstra 		if ((cpu == -1) || (cpu == rq->cpu))
1633391e43daSPeter Zijlstra 			break;
1634391e43daSPeter Zijlstra 
1635391e43daSPeter Zijlstra 		lowest_rq = cpu_rq(cpu);
1636391e43daSPeter Zijlstra 
163780e3d87bSTim Chen 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
163880e3d87bSTim Chen 			/*
163980e3d87bSTim Chen 			 * Target rq has tasks of equal or higher priority,
164080e3d87bSTim Chen 			 * retrying does not release any lock and is unlikely
164180e3d87bSTim Chen 			 * to yield a different result.
164280e3d87bSTim Chen 			 */
164380e3d87bSTim Chen 			lowest_rq = NULL;
164480e3d87bSTim Chen 			break;
164580e3d87bSTim Chen 		}
164680e3d87bSTim Chen 
1647391e43daSPeter Zijlstra 		/* if the prio of this runqueue changed, try again */
1648391e43daSPeter Zijlstra 		if (double_lock_balance(rq, lowest_rq)) {
1649391e43daSPeter Zijlstra 			/*
1650391e43daSPeter Zijlstra 			 * We had to unlock the run queue. In the
1651391e43daSPeter Zijlstra 			 * meantime, the task could have migrated
1652391e43daSPeter Zijlstra 			 * already or had its affinity changed.
1653391e43daSPeter Zijlstra 			 * Also make sure that it wasn't scheduled on its rq.
1654391e43daSPeter Zijlstra 			 */
1655391e43daSPeter Zijlstra 			if (unlikely(task_rq(task) != rq ||
1656391e43daSPeter Zijlstra 				     !cpumask_test_cpu(lowest_rq->cpu,
1657391e43daSPeter Zijlstra 						       tsk_cpus_allowed(task)) ||
1658391e43daSPeter Zijlstra 				     task_running(rq, task) ||
1659da0c1e65SKirill Tkhai 				     !task_on_rq_queued(task))) {
1660391e43daSPeter Zijlstra 
16617f1b4393SPeter Zijlstra 				double_unlock_balance(rq, lowest_rq);
1662391e43daSPeter Zijlstra 				lowest_rq = NULL;
1663391e43daSPeter Zijlstra 				break;
1664391e43daSPeter Zijlstra 			}
1665391e43daSPeter Zijlstra 		}
1666391e43daSPeter Zijlstra 
1667391e43daSPeter Zijlstra 		/* If this rq is still suitable use it. */
1668391e43daSPeter Zijlstra 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1669391e43daSPeter Zijlstra 			break;
1670391e43daSPeter Zijlstra 
1671391e43daSPeter Zijlstra 		/* try again */
1672391e43daSPeter Zijlstra 		double_unlock_balance(rq, lowest_rq);
1673391e43daSPeter Zijlstra 		lowest_rq = NULL;
1674391e43daSPeter Zijlstra 	}
1675391e43daSPeter Zijlstra 
1676391e43daSPeter Zijlstra 	return lowest_rq;
1677391e43daSPeter Zijlstra }
1678391e43daSPeter Zijlstra 
1679391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq)
1680391e43daSPeter Zijlstra {
1681391e43daSPeter Zijlstra 	struct task_struct *p;
1682391e43daSPeter Zijlstra 
1683391e43daSPeter Zijlstra 	if (!has_pushable_tasks(rq))
1684391e43daSPeter Zijlstra 		return NULL;
1685391e43daSPeter Zijlstra 
1686391e43daSPeter Zijlstra 	p = plist_first_entry(&rq->rt.pushable_tasks,
1687391e43daSPeter Zijlstra 			      struct task_struct, pushable_tasks);
1688391e43daSPeter Zijlstra 
1689391e43daSPeter Zijlstra 	BUG_ON(rq->cpu != task_cpu(p));
1690391e43daSPeter Zijlstra 	BUG_ON(task_current(rq, p));
169129baa747SPeter Zijlstra 	BUG_ON(p->nr_cpus_allowed <= 1);
1692391e43daSPeter Zijlstra 
1693da0c1e65SKirill Tkhai 	BUG_ON(!task_on_rq_queued(p));
1694391e43daSPeter Zijlstra 	BUG_ON(!rt_task(p));
1695391e43daSPeter Zijlstra 
1696391e43daSPeter Zijlstra 	return p;
1697391e43daSPeter Zijlstra }
1698391e43daSPeter Zijlstra 
1699391e43daSPeter Zijlstra /*
1700391e43daSPeter Zijlstra  * If the current CPU has more than one RT task, see if the non-running
1701391e43daSPeter Zijlstra  * task can migrate over to a CPU that is running a task of lesser
1702391e43daSPeter Zijlstra  * priority.
1703391e43daSPeter Zijlstra  */
1704391e43daSPeter Zijlstra static int push_rt_task(struct rq *rq)
1705391e43daSPeter Zijlstra {
1706391e43daSPeter Zijlstra 	struct task_struct *next_task;
1707391e43daSPeter Zijlstra 	struct rq *lowest_rq;
1708391e43daSPeter Zijlstra 	int ret = 0;
1709391e43daSPeter Zijlstra 
1710391e43daSPeter Zijlstra 	if (!rq->rt.overloaded)
1711391e43daSPeter Zijlstra 		return 0;
1712391e43daSPeter Zijlstra 
1713391e43daSPeter Zijlstra 	next_task = pick_next_pushable_task(rq);
1714391e43daSPeter Zijlstra 	if (!next_task)
1715391e43daSPeter Zijlstra 		return 0;
1716391e43daSPeter Zijlstra 
1717391e43daSPeter Zijlstra retry:
1718391e43daSPeter Zijlstra 	if (unlikely(next_task == rq->curr)) {
1719391e43daSPeter Zijlstra 		WARN_ON(1);
1720391e43daSPeter Zijlstra 		return 0;
1721391e43daSPeter Zijlstra 	}
1722391e43daSPeter Zijlstra 
1723391e43daSPeter Zijlstra 	/*
1724391e43daSPeter Zijlstra 	 * It's possible that the next_task slipped in with a
1725391e43daSPeter Zijlstra 	 * higher priority than current. If that's the case
1726391e43daSPeter Zijlstra 	 * just reschedule current.
1727391e43daSPeter Zijlstra 	 */
1728391e43daSPeter Zijlstra 	if (unlikely(next_task->prio < rq->curr->prio)) {
17298875125eSKirill Tkhai 		resched_curr(rq);
1730391e43daSPeter Zijlstra 		return 0;
1731391e43daSPeter Zijlstra 	}
1732391e43daSPeter Zijlstra 
1733391e43daSPeter Zijlstra 	/* We might release rq lock */
1734391e43daSPeter Zijlstra 	get_task_struct(next_task);
1735391e43daSPeter Zijlstra 
1736391e43daSPeter Zijlstra 	/* find_lock_lowest_rq locks the rq if found */
1737391e43daSPeter Zijlstra 	lowest_rq = find_lock_lowest_rq(next_task, rq);
1738391e43daSPeter Zijlstra 	if (!lowest_rq) {
1739391e43daSPeter Zijlstra 		struct task_struct *task;
1740391e43daSPeter Zijlstra 		/*
1741391e43daSPeter Zijlstra 		 * find_lock_lowest_rq releases rq->lock
1742391e43daSPeter Zijlstra 		 * so it is possible that next_task has migrated.
1743391e43daSPeter Zijlstra 		 *
1744391e43daSPeter Zijlstra 		 * We need to make sure that the task is still on the same
1745391e43daSPeter Zijlstra 		 * run-queue and is also still the next task eligible for
1746391e43daSPeter Zijlstra 		 * pushing.
1747391e43daSPeter Zijlstra 		 */
1748391e43daSPeter Zijlstra 		task = pick_next_pushable_task(rq);
1749391e43daSPeter Zijlstra 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1750391e43daSPeter Zijlstra 			/*
1751391e43daSPeter Zijlstra 			 * The task hasn't migrated, and is still the next
1752391e43daSPeter Zijlstra 			 * eligible task, but we failed to find a run-queue
1753391e43daSPeter Zijlstra 			 * to push it to.  Do not retry in this case, since
1754391e43daSPeter Zijlstra 			 * other cpus will pull from us when ready.
1755391e43daSPeter Zijlstra 			 */
1756391e43daSPeter Zijlstra 			goto out;
1757391e43daSPeter Zijlstra 		}
1758391e43daSPeter Zijlstra 
1759391e43daSPeter Zijlstra 		if (!task)
1760391e43daSPeter Zijlstra 			/* No more tasks, just exit */
1761391e43daSPeter Zijlstra 			goto out;
1762391e43daSPeter Zijlstra 
1763391e43daSPeter Zijlstra 		/*
1764391e43daSPeter Zijlstra 		 * Something has shifted, try again.
1765391e43daSPeter Zijlstra 		 */
1766391e43daSPeter Zijlstra 		put_task_struct(next_task);
1767391e43daSPeter Zijlstra 		next_task = task;
1768391e43daSPeter Zijlstra 		goto retry;
1769391e43daSPeter Zijlstra 	}
1770391e43daSPeter Zijlstra 
1771391e43daSPeter Zijlstra 	deactivate_task(rq, next_task, 0);
1772391e43daSPeter Zijlstra 	set_task_cpu(next_task, lowest_rq->cpu);
1773391e43daSPeter Zijlstra 	activate_task(lowest_rq, next_task, 0);
1774391e43daSPeter Zijlstra 	ret = 1;
1775391e43daSPeter Zijlstra 
17768875125eSKirill Tkhai 	resched_curr(lowest_rq);
1777391e43daSPeter Zijlstra 
1778391e43daSPeter Zijlstra 	double_unlock_balance(rq, lowest_rq);
1779391e43daSPeter Zijlstra 
1780391e43daSPeter Zijlstra out:
1781391e43daSPeter Zijlstra 	put_task_struct(next_task);
1782391e43daSPeter Zijlstra 
1783391e43daSPeter Zijlstra 	return ret;
1784391e43daSPeter Zijlstra }
1785391e43daSPeter Zijlstra 
1786391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq)
1787391e43daSPeter Zijlstra {
1788391e43daSPeter Zijlstra 	/* push_rt_task will return true if it moved an RT */
1789391e43daSPeter Zijlstra 	while (push_rt_task(rq))
1790391e43daSPeter Zijlstra 		;
1791391e43daSPeter Zijlstra }
1792391e43daSPeter Zijlstra 
1793*b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
1794*b6366f04SSteven Rostedt /*
1795*b6366f04SSteven Rostedt  * The search for the next cpu always starts at rq->cpu and ends
1796*b6366f04SSteven Rostedt  * when we reach rq->cpu again. It will never return rq->cpu.
1797*b6366f04SSteven Rostedt  * This returns the next cpu to check, or nr_cpu_ids if the loop
1798*b6366f04SSteven Rostedt  * is complete.
1799*b6366f04SSteven Rostedt  *
1800*b6366f04SSteven Rostedt  * rq->rt.push_cpu holds the last cpu returned by this function,
1801*b6366f04SSteven Rostedt  * or if this is the first instance, it must hold rq->cpu.
1802*b6366f04SSteven Rostedt  */
1803*b6366f04SSteven Rostedt static int rto_next_cpu(struct rq *rq)
1804*b6366f04SSteven Rostedt {
1805*b6366f04SSteven Rostedt 	int prev_cpu = rq->rt.push_cpu;
1806*b6366f04SSteven Rostedt 	int cpu;
1807*b6366f04SSteven Rostedt 
1808*b6366f04SSteven Rostedt 	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
1809*b6366f04SSteven Rostedt 
1810*b6366f04SSteven Rostedt 	/*
1811*b6366f04SSteven Rostedt 	 * If the previous CPU is less than the rq's CPU, then it already
1812*b6366f04SSteven Rostedt 	 * passed the end of the mask, and has started from the beginning.
1813*b6366f04SSteven Rostedt 	 * We end if the next CPU is greater than or equal to rq's CPU.
1814*b6366f04SSteven Rostedt 	 */
1815*b6366f04SSteven Rostedt 	if (prev_cpu < rq->cpu) {
1816*b6366f04SSteven Rostedt 		if (cpu >= rq->cpu)
1817*b6366f04SSteven Rostedt 			return nr_cpu_ids;
1818*b6366f04SSteven Rostedt 
1819*b6366f04SSteven Rostedt 	} else if (cpu >= nr_cpu_ids) {
1820*b6366f04SSteven Rostedt 		/*
1821*b6366f04SSteven Rostedt 		 * We passed the end of the mask, start at the beginning.
1822*b6366f04SSteven Rostedt 		 * If the result is greater than or equal to the rq's CPU, then
1823*b6366f04SSteven Rostedt 		 * the loop is finished.
1824*b6366f04SSteven Rostedt 		 */
1825*b6366f04SSteven Rostedt 		cpu = cpumask_first(rq->rd->rto_mask);
1826*b6366f04SSteven Rostedt 		if (cpu >= rq->cpu)
1827*b6366f04SSteven Rostedt 			return nr_cpu_ids;
1828*b6366f04SSteven Rostedt 	}
1829*b6366f04SSteven Rostedt 	rq->rt.push_cpu = cpu;
1830*b6366f04SSteven Rostedt 
1831*b6366f04SSteven Rostedt 	/* Return cpu to let the caller know if the loop is finished or not */
1832*b6366f04SSteven Rostedt 	return cpu;
1833*b6366f04SSteven Rostedt }
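/*
 * Illustrative sketch, not part of the scheduler: the wrap-around scan above,
 * over a plain int array instead of rd->rto_mask.  "start" is the rq's own
 * CPU, "cursor" plays the role of rq->rt.push_cpu; returning nr_cpus signals
 * that the scan has gone full circle.  All names are assumptions made for the
 * example.
 */
static int rto_next_cpu_sketch(const int *overloaded, int nr_cpus,
			       int start, int *cursor)
{
	int cpu = *cursor;

	do {
		cpu = (cpu + 1) % nr_cpus;
		if (cpu == start)
			return nr_cpus;	/* back at the origin: loop complete */
	} while (!overloaded[cpu]);

	*cursor = cpu;
	return cpu;
}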
1834*b6366f04SSteven Rostedt 
1835*b6366f04SSteven Rostedt static int find_next_push_cpu(struct rq *rq)
1836*b6366f04SSteven Rostedt {
1837*b6366f04SSteven Rostedt 	struct rq *next_rq;
1838*b6366f04SSteven Rostedt 	int cpu;
1839*b6366f04SSteven Rostedt 
1840*b6366f04SSteven Rostedt 	while (1) {
1841*b6366f04SSteven Rostedt 		cpu = rto_next_cpu(rq);
1842*b6366f04SSteven Rostedt 		if (cpu >= nr_cpu_ids)
1843*b6366f04SSteven Rostedt 			break;
1844*b6366f04SSteven Rostedt 		next_rq = cpu_rq(cpu);
1845*b6366f04SSteven Rostedt 
1846*b6366f04SSteven Rostedt 		/* Make sure the next rq can push to this rq */
1847*b6366f04SSteven Rostedt 		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
1848*b6366f04SSteven Rostedt 			break;
1849*b6366f04SSteven Rostedt 	}
1850*b6366f04SSteven Rostedt 
1851*b6366f04SSteven Rostedt 	return cpu;
1852*b6366f04SSteven Rostedt }
1853*b6366f04SSteven Rostedt 
1854*b6366f04SSteven Rostedt #define RT_PUSH_IPI_EXECUTING		1
1855*b6366f04SSteven Rostedt #define RT_PUSH_IPI_RESTART		2
1856*b6366f04SSteven Rostedt 
1857*b6366f04SSteven Rostedt static void tell_cpu_to_push(struct rq *rq)
1858*b6366f04SSteven Rostedt {
1859*b6366f04SSteven Rostedt 	int cpu;
1860*b6366f04SSteven Rostedt 
1861*b6366f04SSteven Rostedt 	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1862*b6366f04SSteven Rostedt 		raw_spin_lock(&rq->rt.push_lock);
1863*b6366f04SSteven Rostedt 		/* Make sure it's still executing */
1864*b6366f04SSteven Rostedt 		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
1865*b6366f04SSteven Rostedt 			/*
1866*b6366f04SSteven Rostedt 			 * Tell the IPI to restart the loop as things have
1867*b6366f04SSteven Rostedt 			 * changed since it started.
1868*b6366f04SSteven Rostedt 			 */
1869*b6366f04SSteven Rostedt 			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
1870*b6366f04SSteven Rostedt 			raw_spin_unlock(&rq->rt.push_lock);
1871*b6366f04SSteven Rostedt 			return;
1872*b6366f04SSteven Rostedt 		}
1873*b6366f04SSteven Rostedt 		raw_spin_unlock(&rq->rt.push_lock);
1874*b6366f04SSteven Rostedt 	}
1875*b6366f04SSteven Rostedt 
1876*b6366f04SSteven Rostedt 	/* When here, there's no IPI going around */
1877*b6366f04SSteven Rostedt 
1878*b6366f04SSteven Rostedt 	rq->rt.push_cpu = rq->cpu;
1879*b6366f04SSteven Rostedt 	cpu = find_next_push_cpu(rq);
1880*b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
1881*b6366f04SSteven Rostedt 		return;
1882*b6366f04SSteven Rostedt 
1883*b6366f04SSteven Rostedt 	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
1884*b6366f04SSteven Rostedt 
1885*b6366f04SSteven Rostedt 	irq_work_queue_on(&rq->rt.push_work, cpu);
1886*b6366f04SSteven Rostedt }
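/*
 * Illustrative sketch, not part of the scheduler: the push_flags handshake
 * above, simplified to take the lock unconditionally.  A source CPU either
 * starts a new IPI chain or, if one is already in flight, asks it to restart
 * its scan from the source.  The helper name and the start_new_chain output
 * are assumptions made for the example.
 */
static void tell_cpu_to_push_sketch(unsigned int *push_flags,
				    raw_spinlock_t *push_lock,
				    int *start_new_chain)
{
	*start_new_chain = 0;

	raw_spin_lock(push_lock);
	if (*push_flags & RT_PUSH_IPI_EXECUTING) {
		/* A chain is already running: just ask it to rescan from us. */
		*push_flags |= RT_PUSH_IPI_RESTART;
	} else {
		/* No chain in flight: mark one as executing and kick it off. */
		*push_flags = RT_PUSH_IPI_EXECUTING;
		*start_new_chain = 1;
	}
	raw_spin_unlock(push_lock);
}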
1887*b6366f04SSteven Rostedt 
1888*b6366f04SSteven Rostedt /* Called from hardirq context */
1889*b6366f04SSteven Rostedt static void try_to_push_tasks(void *arg)
1890*b6366f04SSteven Rostedt {
1891*b6366f04SSteven Rostedt 	struct rt_rq *rt_rq = arg;
1892*b6366f04SSteven Rostedt 	struct rq *rq, *src_rq;
1893*b6366f04SSteven Rostedt 	int this_cpu;
1894*b6366f04SSteven Rostedt 	int cpu;
1895*b6366f04SSteven Rostedt 
1896*b6366f04SSteven Rostedt 	this_cpu = rt_rq->push_cpu;
1897*b6366f04SSteven Rostedt 
1898*b6366f04SSteven Rostedt 	/* Paranoid check */
1899*b6366f04SSteven Rostedt 	BUG_ON(this_cpu != smp_processor_id());
1900*b6366f04SSteven Rostedt 
1901*b6366f04SSteven Rostedt 	rq = cpu_rq(this_cpu);
1902*b6366f04SSteven Rostedt 	src_rq = rq_of_rt_rq(rt_rq);
1903*b6366f04SSteven Rostedt 
1904*b6366f04SSteven Rostedt again:
1905*b6366f04SSteven Rostedt 	if (has_pushable_tasks(rq)) {
1906*b6366f04SSteven Rostedt 		raw_spin_lock(&rq->lock);
1907*b6366f04SSteven Rostedt 		push_rt_task(rq);
1908*b6366f04SSteven Rostedt 		raw_spin_unlock(&rq->lock);
1909*b6366f04SSteven Rostedt 	}
1910*b6366f04SSteven Rostedt 
1911*b6366f04SSteven Rostedt 	/* Pass the IPI to the next rt overloaded queue */
1912*b6366f04SSteven Rostedt 	raw_spin_lock(&rt_rq->push_lock);
1913*b6366f04SSteven Rostedt 	/*
1914*b6366f04SSteven Rostedt 	 * If the source queue changed since the IPI went out,
1915*b6366f04SSteven Rostedt 	 * we need to restart the search from that CPU again.
1916*b6366f04SSteven Rostedt 	 */
1917*b6366f04SSteven Rostedt 	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
1918*b6366f04SSteven Rostedt 		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
1919*b6366f04SSteven Rostedt 		rt_rq->push_cpu = src_rq->cpu;
1920*b6366f04SSteven Rostedt 	}
1921*b6366f04SSteven Rostedt 
1922*b6366f04SSteven Rostedt 	cpu = find_next_push_cpu(src_rq);
1923*b6366f04SSteven Rostedt 
1924*b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
1925*b6366f04SSteven Rostedt 		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
1926*b6366f04SSteven Rostedt 	raw_spin_unlock(&rt_rq->push_lock);
1927*b6366f04SSteven Rostedt 
1928*b6366f04SSteven Rostedt 	if (cpu >= nr_cpu_ids)
1929*b6366f04SSteven Rostedt 		return;
1930*b6366f04SSteven Rostedt 
1931*b6366f04SSteven Rostedt 	/*
1932*b6366f04SSteven Rostedt 	 * It is possible that a restart caused this CPU to be
1933*b6366f04SSteven Rostedt 	 * chosen again. Don't bother with an IPI, just see if we
1934*b6366f04SSteven Rostedt 	 * have more to push.
1935*b6366f04SSteven Rostedt 	 */
1936*b6366f04SSteven Rostedt 	if (unlikely(cpu == rq->cpu))
1937*b6366f04SSteven Rostedt 		goto again;
1938*b6366f04SSteven Rostedt 
1939*b6366f04SSteven Rostedt 	/* Try the next RT overloaded CPU */
1940*b6366f04SSteven Rostedt 	irq_work_queue_on(&rt_rq->push_work, cpu);
1941*b6366f04SSteven Rostedt }
1942*b6366f04SSteven Rostedt 
1943*b6366f04SSteven Rostedt static void push_irq_work_func(struct irq_work *work)
1944*b6366f04SSteven Rostedt {
1945*b6366f04SSteven Rostedt 	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
1946*b6366f04SSteven Rostedt 
1947*b6366f04SSteven Rostedt 	try_to_push_tasks(rt_rq);
1948*b6366f04SSteven Rostedt }
1949*b6366f04SSteven Rostedt #endif /* HAVE_RT_PUSH_IPI */
1950*b6366f04SSteven Rostedt 
1951391e43daSPeter Zijlstra static int pull_rt_task(struct rq *this_rq)
1952391e43daSPeter Zijlstra {
1953391e43daSPeter Zijlstra 	int this_cpu = this_rq->cpu, ret = 0, cpu;
1954391e43daSPeter Zijlstra 	struct task_struct *p;
1955391e43daSPeter Zijlstra 	struct rq *src_rq;
1956391e43daSPeter Zijlstra 
1957391e43daSPeter Zijlstra 	if (likely(!rt_overloaded(this_rq)))
1958391e43daSPeter Zijlstra 		return 0;
1959391e43daSPeter Zijlstra 
19607c3f2ab7SPeter Zijlstra 	/*
19617c3f2ab7SPeter Zijlstra 	 * Match the barrier from rt_set_overload(); this guarantees that if we
19627c3f2ab7SPeter Zijlstra 	 * see overloaded we must also see the rto_mask bit.
19637c3f2ab7SPeter Zijlstra 	 */
19647c3f2ab7SPeter Zijlstra 	smp_rmb();
19657c3f2ab7SPeter Zijlstra 
1966*b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
1967*b6366f04SSteven Rostedt 	if (sched_feat(RT_PUSH_IPI)) {
1968*b6366f04SSteven Rostedt 		tell_cpu_to_push(this_rq);
1969*b6366f04SSteven Rostedt 		return 0;
1970*b6366f04SSteven Rostedt 	}
1971*b6366f04SSteven Rostedt #endif
1972*b6366f04SSteven Rostedt 
1973391e43daSPeter Zijlstra 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
1974391e43daSPeter Zijlstra 		if (this_cpu == cpu)
1975391e43daSPeter Zijlstra 			continue;
1976391e43daSPeter Zijlstra 
1977391e43daSPeter Zijlstra 		src_rq = cpu_rq(cpu);
1978391e43daSPeter Zijlstra 
1979391e43daSPeter Zijlstra 		/*
1980391e43daSPeter Zijlstra 		 * Don't bother taking the src_rq->lock if the next highest
1981391e43daSPeter Zijlstra 		 * task is known to be lower-priority than our current task.
1982391e43daSPeter Zijlstra 		 * This may look racy, but if this value is about to go
1983391e43daSPeter Zijlstra 		 * logically higher, the src_rq will push this task away.
1984391e43daSPeter Zijlstra 		 * And if it's going logically lower, we do not care
1985391e43daSPeter Zijlstra 		 */
1986391e43daSPeter Zijlstra 		if (src_rq->rt.highest_prio.next >=
1987391e43daSPeter Zijlstra 		    this_rq->rt.highest_prio.curr)
1988391e43daSPeter Zijlstra 			continue;
1989391e43daSPeter Zijlstra 
1990391e43daSPeter Zijlstra 		/*
1991391e43daSPeter Zijlstra 		 * We can potentially drop this_rq's lock in
1992391e43daSPeter Zijlstra 		 * double_lock_balance, and another CPU could
1993391e43daSPeter Zijlstra 		 * alter this_rq
1994391e43daSPeter Zijlstra 		 */
1995391e43daSPeter Zijlstra 		double_lock_balance(this_rq, src_rq);
1996391e43daSPeter Zijlstra 
1997391e43daSPeter Zijlstra 		/*
1998e23ee747SKirill Tkhai 		 * We can pull only a task, which is pushable
1999e23ee747SKirill Tkhai 		 * on its rq, and no others.
2000391e43daSPeter Zijlstra 		 */
2001e23ee747SKirill Tkhai 		p = pick_highest_pushable_task(src_rq, this_cpu);
2002391e43daSPeter Zijlstra 
2003391e43daSPeter Zijlstra 		/*
2004391e43daSPeter Zijlstra 		 * Do we have an RT task that preempts
2005391e43daSPeter Zijlstra 		 * the to-be-scheduled task?
2006391e43daSPeter Zijlstra 		 */
2007391e43daSPeter Zijlstra 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2008391e43daSPeter Zijlstra 			WARN_ON(p == src_rq->curr);
2009da0c1e65SKirill Tkhai 			WARN_ON(!task_on_rq_queued(p));
2010391e43daSPeter Zijlstra 
2011391e43daSPeter Zijlstra 			/*
2012391e43daSPeter Zijlstra 			 * There's a chance that p is higher in priority
2013391e43daSPeter Zijlstra 			 * than what's currently running on its cpu.
2014391e43daSPeter Zijlstra 			 * This is just because p is waking up and hasn't
2015391e43daSPeter Zijlstra 			 * had a chance to schedule yet. We only pull
2016391e43daSPeter Zijlstra 			 * p if it is lower in priority than the
2017391e43daSPeter Zijlstra 			 * current task on the run queue
2018391e43daSPeter Zijlstra 			 */
2019391e43daSPeter Zijlstra 			if (p->prio < src_rq->curr->prio)
2020391e43daSPeter Zijlstra 				goto skip;
2021391e43daSPeter Zijlstra 
2022391e43daSPeter Zijlstra 			ret = 1;
2023391e43daSPeter Zijlstra 
2024391e43daSPeter Zijlstra 			deactivate_task(src_rq, p, 0);
2025391e43daSPeter Zijlstra 			set_task_cpu(p, this_cpu);
2026391e43daSPeter Zijlstra 			activate_task(this_rq, p, 0);
2027391e43daSPeter Zijlstra 			/*
2028391e43daSPeter Zijlstra 			 * We continue with the search, just in
2029391e43daSPeter Zijlstra 			 * case there's an even higher prio task
2030391e43daSPeter Zijlstra 			 * in another runqueue. (low likelihood
2031391e43daSPeter Zijlstra 			 * but possible)
2032391e43daSPeter Zijlstra 			 */
2033391e43daSPeter Zijlstra 		}
2034391e43daSPeter Zijlstra skip:
2035391e43daSPeter Zijlstra 		double_unlock_balance(this_rq, src_rq);
2036391e43daSPeter Zijlstra 	}
2037391e43daSPeter Zijlstra 
2038391e43daSPeter Zijlstra 	return ret;
2039391e43daSPeter Zijlstra }
2040391e43daSPeter Zijlstra 
2041391e43daSPeter Zijlstra static void post_schedule_rt(struct rq *rq)
2042391e43daSPeter Zijlstra {
2043391e43daSPeter Zijlstra 	push_rt_tasks(rq);
2044391e43daSPeter Zijlstra }
2045391e43daSPeter Zijlstra 
2046391e43daSPeter Zijlstra /*
2047391e43daSPeter Zijlstra  * If we are not running and we are not going to reschedule soon, we should
2048391e43daSPeter Zijlstra  * try to push tasks away now
2049391e43daSPeter Zijlstra  */
2050391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p)
2051391e43daSPeter Zijlstra {
2052391e43daSPeter Zijlstra 	if (!task_running(rq, p) &&
2053391e43daSPeter Zijlstra 	    !test_tsk_need_resched(rq->curr) &&
2054391e43daSPeter Zijlstra 	    has_pushable_tasks(rq) &&
205529baa747SPeter Zijlstra 	    p->nr_cpus_allowed > 1 &&
20561baca4ceSJuri Lelli 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
205729baa747SPeter Zijlstra 	    (rq->curr->nr_cpus_allowed < 2 ||
2058391e43daSPeter Zijlstra 	     rq->curr->prio <= p->prio))
2059391e43daSPeter Zijlstra 		push_rt_tasks(rq);
2060391e43daSPeter Zijlstra }
2061391e43daSPeter Zijlstra 
2062391e43daSPeter Zijlstra static void set_cpus_allowed_rt(struct task_struct *p,
2063391e43daSPeter Zijlstra 				const struct cpumask *new_mask)
2064391e43daSPeter Zijlstra {
20658d3d5adaSKirill Tkhai 	struct rq *rq;
20668d3d5adaSKirill Tkhai 	int weight;
2067391e43daSPeter Zijlstra 
2068391e43daSPeter Zijlstra 	BUG_ON(!rt_task(p));
2069391e43daSPeter Zijlstra 
2070da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p))
20718d3d5adaSKirill Tkhai 		return;
2072391e43daSPeter Zijlstra 
20738d3d5adaSKirill Tkhai 	weight = cpumask_weight(new_mask);
20748d3d5adaSKirill Tkhai 
2075391e43daSPeter Zijlstra 	/*
20768d3d5adaSKirill Tkhai 	 * Only update if the process changes whether it can
20778d3d5adaSKirill Tkhai 	 * migrate or not.
2078391e43daSPeter Zijlstra 	 */
207929baa747SPeter Zijlstra 	if ((p->nr_cpus_allowed > 1) == (weight > 1))
20808d3d5adaSKirill Tkhai 		return;
20818d3d5adaSKirill Tkhai 
20828d3d5adaSKirill Tkhai 	rq = task_rq(p);
20838d3d5adaSKirill Tkhai 
20848d3d5adaSKirill Tkhai 	/*
20858d3d5adaSKirill Tkhai 	 * The process used to be able to migrate OR it can now migrate
20868d3d5adaSKirill Tkhai 	 */
20878d3d5adaSKirill Tkhai 	if (weight <= 1) {
20888d3d5adaSKirill Tkhai 		if (!task_current(rq, p))
2089391e43daSPeter Zijlstra 			dequeue_pushable_task(rq, p);
2090391e43daSPeter Zijlstra 		BUG_ON(!rq->rt.rt_nr_migratory);
2091391e43daSPeter Zijlstra 		rq->rt.rt_nr_migratory--;
20928d3d5adaSKirill Tkhai 	} else {
20938d3d5adaSKirill Tkhai 		if (!task_current(rq, p))
20948d3d5adaSKirill Tkhai 			enqueue_pushable_task(rq, p);
20958d3d5adaSKirill Tkhai 		rq->rt.rt_nr_migratory++;
2096391e43daSPeter Zijlstra 	}
2097391e43daSPeter Zijlstra 
2098391e43daSPeter Zijlstra 	update_rt_migration(&rq->rt);
2099391e43daSPeter Zijlstra }
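/*
 * Illustrative userspace example, not part of this file: restricting a task
 * to a single CPU is what drops its nr_cpus_allowed to 1 and makes
 * set_cpus_allowed_rt() above remove it from the migratory count.  CPU 0 is
 * an arbitrary assumption and error handling is minimal.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* allow CPU 0 only: the task can no longer migrate */

	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}
	return 0;
}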
2100391e43daSPeter Zijlstra 
2101391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2102391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq)
2103391e43daSPeter Zijlstra {
2104391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2105391e43daSPeter Zijlstra 		rt_set_overload(rq);
2106391e43daSPeter Zijlstra 
2107391e43daSPeter Zijlstra 	__enable_runtime(rq);
2108391e43daSPeter Zijlstra 
2109391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2110391e43daSPeter Zijlstra }
2111391e43daSPeter Zijlstra 
2112391e43daSPeter Zijlstra /* Assumes rq->lock is held */
2113391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq)
2114391e43daSPeter Zijlstra {
2115391e43daSPeter Zijlstra 	if (rq->rt.overloaded)
2116391e43daSPeter Zijlstra 		rt_clear_overload(rq);
2117391e43daSPeter Zijlstra 
2118391e43daSPeter Zijlstra 	__disable_runtime(rq);
2119391e43daSPeter Zijlstra 
2120391e43daSPeter Zijlstra 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2121391e43daSPeter Zijlstra }
2122391e43daSPeter Zijlstra 
2123391e43daSPeter Zijlstra /*
2124391e43daSPeter Zijlstra  * When switching from the rt queue, we bring ourselves to a position
2125391e43daSPeter Zijlstra  * where we might want to pull RT tasks from other runqueues.
2126391e43daSPeter Zijlstra  */
2127391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p)
2128391e43daSPeter Zijlstra {
2129391e43daSPeter Zijlstra 	/*
2130391e43daSPeter Zijlstra 	 * If there are other RT tasks then we will reschedule
2131391e43daSPeter Zijlstra 	 * and the scheduling of the other RT tasks will handle
2132391e43daSPeter Zijlstra 	 * the balancing. But if we are the last RT task
2133391e43daSPeter Zijlstra 	 * we may need to handle the pulling of RT tasks
2134391e43daSPeter Zijlstra 	 * now.
2135391e43daSPeter Zijlstra 	 */
2136da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
21371158ddb5SKirill Tkhai 		return;
21381158ddb5SKirill Tkhai 
21391158ddb5SKirill Tkhai 	if (pull_rt_task(rq))
21408875125eSKirill Tkhai 		resched_curr(rq);
2141391e43daSPeter Zijlstra }
2142391e43daSPeter Zijlstra 
214311c785b7SLi Zefan void __init init_sched_rt_class(void)
2144391e43daSPeter Zijlstra {
2145391e43daSPeter Zijlstra 	unsigned int i;
2146391e43daSPeter Zijlstra 
2147391e43daSPeter Zijlstra 	for_each_possible_cpu(i) {
2148391e43daSPeter Zijlstra 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2149391e43daSPeter Zijlstra 					GFP_KERNEL, cpu_to_node(i));
2150391e43daSPeter Zijlstra 	}
2151391e43daSPeter Zijlstra }
2152391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2153391e43daSPeter Zijlstra 
2154391e43daSPeter Zijlstra /*
2155391e43daSPeter Zijlstra  * When switching a task to RT, we may overload the runqueue
2156391e43daSPeter Zijlstra  * with RT tasks. In this case we try to push them off to
2157391e43daSPeter Zijlstra  * other runqueues.
2158391e43daSPeter Zijlstra  */
2159391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p)
2160391e43daSPeter Zijlstra {
2161391e43daSPeter Zijlstra 	int check_resched = 1;
2162391e43daSPeter Zijlstra 
2163391e43daSPeter Zijlstra 	/*
2164391e43daSPeter Zijlstra 	 * If we are already running, then there's nothing
2165391e43daSPeter Zijlstra 	 * that needs to be done. But if we are not running
2166391e43daSPeter Zijlstra 	 * we may need to preempt the current running task.
2167391e43daSPeter Zijlstra 	 * If that current running task is also an RT task
2168391e43daSPeter Zijlstra 	 * then see if we can move to another run queue.
2169391e43daSPeter Zijlstra 	 */
2170da0c1e65SKirill Tkhai 	if (task_on_rq_queued(p) && rq->curr != p) {
2171391e43daSPeter Zijlstra #ifdef CONFIG_SMP
217210447917SKirill V Tkhai 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
2173391e43daSPeter Zijlstra 		    /* Don't resched if we changed runqueues */
217410447917SKirill V Tkhai 		    push_rt_task(rq) && rq != task_rq(p))
2175391e43daSPeter Zijlstra 			check_resched = 0;
2176391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2177391e43daSPeter Zijlstra 		if (check_resched && p->prio < rq->curr->prio)
21788875125eSKirill Tkhai 			resched_curr(rq);
2179391e43daSPeter Zijlstra 	}
2180391e43daSPeter Zijlstra }
2181391e43daSPeter Zijlstra 
2182391e43daSPeter Zijlstra /*
2183391e43daSPeter Zijlstra  * Priority of the task has changed. This may cause
2184391e43daSPeter Zijlstra  * us to initiate a push or pull.
2185391e43daSPeter Zijlstra  */
2186391e43daSPeter Zijlstra static void
2187391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2188391e43daSPeter Zijlstra {
2189da0c1e65SKirill Tkhai 	if (!task_on_rq_queued(p))
2190391e43daSPeter Zijlstra 		return;
2191391e43daSPeter Zijlstra 
2192391e43daSPeter Zijlstra 	if (rq->curr == p) {
2193391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2194391e43daSPeter Zijlstra 		/*
2195391e43daSPeter Zijlstra 		 * If our priority decreases while running, we
2196391e43daSPeter Zijlstra 		 * may need to pull tasks to this runqueue.
2197391e43daSPeter Zijlstra 		 */
2198391e43daSPeter Zijlstra 		if (oldprio < p->prio)
2199391e43daSPeter Zijlstra 			pull_rt_task(rq);
2200391e43daSPeter Zijlstra 		/*
2201391e43daSPeter Zijlstra 		 * If there's a higher priority task waiting to run
2202391e43daSPeter Zijlstra 		 * then reschedule. Note, the above pull_rt_task
2203391e43daSPeter Zijlstra 		 * can release the rq lock and p could migrate.
2204391e43daSPeter Zijlstra 		 * Only reschedule if p is still on the same runqueue.
2205391e43daSPeter Zijlstra 		 */
2206391e43daSPeter Zijlstra 		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
22078875125eSKirill Tkhai 			resched_curr(rq);
2208391e43daSPeter Zijlstra #else
2209391e43daSPeter Zijlstra 		/* For UP simply resched on drop of prio */
2210391e43daSPeter Zijlstra 		if (oldprio < p->prio)
22118875125eSKirill Tkhai 			resched_curr(rq);
2212391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
2213391e43daSPeter Zijlstra 	} else {
2214391e43daSPeter Zijlstra 		/*
2215391e43daSPeter Zijlstra 		 * This task is not running, but if it is
2216391e43daSPeter Zijlstra 		 * greater than the current running task
2217391e43daSPeter Zijlstra 		 * then reschedule.
2218391e43daSPeter Zijlstra 		 */
2219391e43daSPeter Zijlstra 		if (p->prio < rq->curr->prio)
22208875125eSKirill Tkhai 			resched_curr(rq);
2221391e43daSPeter Zijlstra 	}
2222391e43daSPeter Zijlstra }
2223391e43daSPeter Zijlstra 
2224391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p)
2225391e43daSPeter Zijlstra {
2226391e43daSPeter Zijlstra 	unsigned long soft, hard;
2227391e43daSPeter Zijlstra 
2228391e43daSPeter Zijlstra 	/* max may change after cur was read; this will be fixed next tick */
2229391e43daSPeter Zijlstra 	soft = task_rlimit(p, RLIMIT_RTTIME);
2230391e43daSPeter Zijlstra 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2231391e43daSPeter Zijlstra 
2232391e43daSPeter Zijlstra 	if (soft != RLIM_INFINITY) {
2233391e43daSPeter Zijlstra 		unsigned long next;
2234391e43daSPeter Zijlstra 
223557d2aa00SYing Xue 		if (p->rt.watchdog_stamp != jiffies) {
2236391e43daSPeter Zijlstra 			p->rt.timeout++;
223757d2aa00SYing Xue 			p->rt.watchdog_stamp = jiffies;
223857d2aa00SYing Xue 		}
223957d2aa00SYing Xue 
2240391e43daSPeter Zijlstra 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2241391e43daSPeter Zijlstra 		if (p->rt.timeout > next)
2242391e43daSPeter Zijlstra 			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2243391e43daSPeter Zijlstra 	}
2244391e43daSPeter Zijlstra }
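/*
 * Illustrative userspace example, not part of this file: the watchdog above
 * enforces RLIMIT_RTTIME, the budget (in microseconds) of CPU time a
 * SCHED_FIFO/SCHED_RR task may burn without making a blocking call.  Crossing
 * the soft limit raises SIGXCPU, crossing the hard limit kills the task.  The
 * limit values below are arbitrary assumptions.
 */
#define _GNU_SOURCE
#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
	/* Soft limit 50 ms, hard limit 100 ms of uninterrupted RT CPU time. */
	struct rlimit rl = { .rlim_cur = 50000, .rlim_max = 100000 };

	if (setrlimit(RLIMIT_RTTIME, &rl)) {
		perror("setrlimit");
		return 1;
	}
	return 0;
}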
2245391e43daSPeter Zijlstra 
2246391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2247391e43daSPeter Zijlstra {
2248454c7999SColin Cross 	struct sched_rt_entity *rt_se = &p->rt;
2249454c7999SColin Cross 
2250391e43daSPeter Zijlstra 	update_curr_rt(rq);
2251391e43daSPeter Zijlstra 
2252391e43daSPeter Zijlstra 	watchdog(rq, p);
2253391e43daSPeter Zijlstra 
2254391e43daSPeter Zijlstra 	/*
2255391e43daSPeter Zijlstra 	 * RR tasks need a special form of timeslice management.
2256391e43daSPeter Zijlstra 	 * FIFO tasks have no timeslices.
2257391e43daSPeter Zijlstra 	 */
2258391e43daSPeter Zijlstra 	if (p->policy != SCHED_RR)
2259391e43daSPeter Zijlstra 		return;
2260391e43daSPeter Zijlstra 
2261391e43daSPeter Zijlstra 	if (--p->rt.time_slice)
2262391e43daSPeter Zijlstra 		return;
2263391e43daSPeter Zijlstra 
2264ce0dbbbbSClark Williams 	p->rt.time_slice = sched_rr_timeslice;
2265391e43daSPeter Zijlstra 
2266391e43daSPeter Zijlstra 	/*
2267e9aa39bbSLi Bin 	 * Requeue to the end of the queue if we (and all of our ancestors) are not
2268e9aa39bbSLi Bin 	 * the only element on the queue
2269391e43daSPeter Zijlstra 	 */
2270454c7999SColin Cross 	for_each_sched_rt_entity(rt_se) {
2271454c7999SColin Cross 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2272391e43daSPeter Zijlstra 			requeue_task_rt(rq, p, 0);
22738aa6f0ebSKirill Tkhai 			resched_curr(rq);
2274454c7999SColin Cross 			return;
2275454c7999SColin Cross 		}
2276391e43daSPeter Zijlstra 	}
2277391e43daSPeter Zijlstra }
2278391e43daSPeter Zijlstra 
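/*
 * Called when the currently running task becomes an RT task (or changes
 * priority within the class), e.g. via sched_setscheduler() or a
 * priority-inheritance boost: restart exec time accounting and take the
 * task off the pushable list, since the running task is never pushed away.
 */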
2279391e43daSPeter Zijlstra static void set_curr_task_rt(struct rq *rq)
2280391e43daSPeter Zijlstra {
2281391e43daSPeter Zijlstra 	struct task_struct *p = rq->curr;
2282391e43daSPeter Zijlstra 
228378becc27SFrederic Weisbecker 	p->se.exec_start = rq_clock_task(rq);
2284391e43daSPeter Zijlstra 
2285391e43daSPeter Zijlstra 	/* The running task is never eligible for pushing */
2286391e43daSPeter Zijlstra 	dequeue_pushable_task(rq, p);
2287391e43daSPeter Zijlstra }
2288391e43daSPeter Zijlstra 
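/*
 * Backs the sched_rr_get_interval() system call: report the round-robin
 * timeslice (in jiffies) for SCHED_RR tasks.  SCHED_FIFO tasks run until
 * they block, yield or are preempted, so their timeslice is reported as 0.
 */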
2289391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2290391e43daSPeter Zijlstra {
2291391e43daSPeter Zijlstra 	/*
2292391e43daSPeter Zijlstra 	 * Time slice is 0 for SCHED_FIFO tasks
2293391e43daSPeter Zijlstra 	 */
2294391e43daSPeter Zijlstra 	if (task->policy == SCHED_RR)
2295ce0dbbbbSClark Williams 		return sched_rr_timeslice;
2296391e43daSPeter Zijlstra 	else
2297391e43daSPeter Zijlstra 		return 0;
2298391e43daSPeter Zijlstra }
2299391e43daSPeter Zijlstra 
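/*
 * The method table the core scheduler uses to drive SCHED_FIFO/SCHED_RR
 * tasks; .next links to the fair (CFS) class, which is consulted when no
 * runnable RT task exists.
 */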
2300391e43daSPeter Zijlstra const struct sched_class rt_sched_class = {
2301391e43daSPeter Zijlstra 	.next			= &fair_sched_class,
2302391e43daSPeter Zijlstra 	.enqueue_task		= enqueue_task_rt,
2303391e43daSPeter Zijlstra 	.dequeue_task		= dequeue_task_rt,
2304391e43daSPeter Zijlstra 	.yield_task		= yield_task_rt,
2305391e43daSPeter Zijlstra 
2306391e43daSPeter Zijlstra 	.check_preempt_curr	= check_preempt_curr_rt,
2307391e43daSPeter Zijlstra 
2308391e43daSPeter Zijlstra 	.pick_next_task		= pick_next_task_rt,
2309391e43daSPeter Zijlstra 	.put_prev_task		= put_prev_task_rt,
2310391e43daSPeter Zijlstra 
2311391e43daSPeter Zijlstra #ifdef CONFIG_SMP
2312391e43daSPeter Zijlstra 	.select_task_rq		= select_task_rq_rt,
2313391e43daSPeter Zijlstra 
2314391e43daSPeter Zijlstra 	.set_cpus_allowed       = set_cpus_allowed_rt,
2315391e43daSPeter Zijlstra 	.rq_online              = rq_online_rt,
2316391e43daSPeter Zijlstra 	.rq_offline             = rq_offline_rt,
2317391e43daSPeter Zijlstra 	.post_schedule		= post_schedule_rt,
2318391e43daSPeter Zijlstra 	.task_woken		= task_woken_rt,
2319391e43daSPeter Zijlstra 	.switched_from		= switched_from_rt,
2320391e43daSPeter Zijlstra #endif
2321391e43daSPeter Zijlstra 
2322391e43daSPeter Zijlstra 	.set_curr_task          = set_curr_task_rt,
2323391e43daSPeter Zijlstra 	.task_tick		= task_tick_rt,
2324391e43daSPeter Zijlstra 
2325391e43daSPeter Zijlstra 	.get_rr_interval	= get_rr_interval_rt,
2326391e43daSPeter Zijlstra 
2327391e43daSPeter Zijlstra 	.prio_changed		= prio_changed_rt,
2328391e43daSPeter Zijlstra 	.switched_to		= switched_to_rt,
23296e998916SStanislaw Gruszka 
23306e998916SStanislaw Gruszka 	.update_curr		= update_curr_rt,
2331391e43daSPeter Zijlstra };
2332391e43daSPeter Zijlstra 
2333391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
2334391e43daSPeter Zijlstra extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2335391e43daSPeter Zijlstra 
2336391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu)
2337391e43daSPeter Zijlstra {
2338391e43daSPeter Zijlstra 	rt_rq_iter_t iter;
2339391e43daSPeter Zijlstra 	struct rt_rq *rt_rq;
2340391e43daSPeter Zijlstra 
2341391e43daSPeter Zijlstra 	rcu_read_lock();
2342391e43daSPeter Zijlstra 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2343391e43daSPeter Zijlstra 		print_rt_rq(m, cpu, rt_rq);
2344391e43daSPeter Zijlstra 	rcu_read_unlock();
2345391e43daSPeter Zijlstra }
2346391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */
2347